Commit 13d7d84e authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (36 commits)
  [POWERPC] Generic BUG for powerpc
  [PPC] Fix compile failure due to introduction of PHY_POLL
  [POWERPC] Only export __mtdcr/__mfdcr if CONFIG_PPC_DCR is set
  [POWERPC] Remove old dcr.S
  [POWERPC] Fix SPU coredump code for max_fdset removal
  [POWERPC] Fix irq routing on some 32-bit PowerMacs
  [POWERPC] ps3: Add vuart support
  [POWERPC] Support ibm,dynamic-reconfiguration-memory nodes
  [POWERPC] don't allow pSeries_probe to succeed without initialising MMU
  [POWERPC] micro optimise pSeries_probe
  [POWERPC] Add SPURR SPR to sysfs
  [POWERPC] Add DSCR SPR to sysfs
  [POWERPC] Fix 440SPe CPU table entry
  [POWERPC] Add support for FP emulation for the e300c2 core
  [POWERPC] of_device_register: propagate device_create_file return code
  [POWERPC] Fix mmap of PCI resource with hack for X
  [POWERPC] iSeries: head_64.o needs to depend on lparmap.s
  [POWERPC] cbe_thermal: Fix initialization of sysfs attribute_group
  [POWERPC] Remove QE header files from lite5200.c
  [POWERPC] of_platform_make_bus_id(): make `magic' int
  ...
parents cbb8fc07 73c9ceab
......@@ -107,6 +107,11 @@ config AUDIT_ARCH
bool
default y
config GENERIC_BUG
bool
default y
depends on BUG
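[Editor's note: selecting GENERIC_BUG hands BUG()/WARN_ON() trap classification to the generic lib/bug.c code; the architecture only emits struct bug_entry records into __bug_table (see the asm-powerpc/bug.h hunk near the end of this diff) and says which trapping addresses may carry one. A minimal sketch of that contract, assuming the generic declarations of this era:]

/* Sketch of the CONFIG_GENERIC_BUG contract (assumed generic
 * declarations; powerpc's side is in the traps.c hunk below). */
struct bug_entry {
	unsigned long	bug_addr;	/* address of the trap instruction */
	const char	*file;		/* only with CONFIG_DEBUG_BUGVERBOSE */
	unsigned short	line;		/* only with CONFIG_DEBUG_BUGVERBOSE */
	unsigned short	flags;		/* BUGFLAG_WARNING marks WARN_ON() */
};

/* Provided by the arch: may this kernel address have a bug_entry? */
int is_valid_bugaddr(unsigned long addr);

/* Provided by lib/bug.c: classify a trap for the exception handler. */
enum bug_trap_type report_bug(unsigned long bug_addr);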
config DEFAULT_UIMAGE
bool
help
......@@ -478,6 +483,7 @@ config PPC_MAPLE
select PPC_UDBG_16550
select PPC_970_NAP
select PPC_NATIVE
select PPC_RTAS
default n
help
This option enables support for the Maple 970FX Evaluation Board.
......@@ -714,7 +720,7 @@ config FORCE_MAX_ZONEORDER
config MATH_EMULATION
bool "Math emulation"
depends on 4xx || 8xx || E200 || E500
depends on 4xx || 8xx || E200 || PPC_83xx || E500
---help---
Some PowerPC chips designed for embedded applications do not have
a floating-point unit and therefore do not implement the
......
......@@ -157,6 +157,7 @@ CONFIG_SPU_BASE=y
CONFIG_PS3_HTAB_SIZE=20
CONFIG_PS3_DYNAMIC_DMA=y
CONFIG_PS3_USE_LPAR_ADDR=y
CONFIG_PS3_VUART=y
#
# Kernel options
......
......@@ -77,6 +77,7 @@ endif
ifeq ($(CONFIG_PPC_ISERIES),y)
extra-y += lparmap.s
$(obj)/head_64.o: $(obj)/lparmap.s
AFLAGS_head_64.o += -I$(obj)
endif
......
......@@ -833,7 +833,7 @@ static struct cpu_spec cpu_specs[] = {
.pvr_mask = 0x7fff0000,
.pvr_value = 0x00840000,
.cpu_name = "e300c2",
.cpu_features = CPU_FTRS_E300,
.cpu_features = CPU_FTRS_E300C2,
.cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
.icache_bsize = 32,
.dcache_bsize = 32,
......@@ -1136,8 +1136,7 @@ static struct cpu_spec cpu_specs[] = {
.pvr_mask = 0xff000fff,
.pvr_value = 0x53000890,
.cpu_name = "440SPe Rev. A",
.cpu_features = CPU_FTR_SPLIT_ID_CACHE |
CPU_FTR_USE_TB,
.cpu_features = CPU_FTRS_44X,
.cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
......
......@@ -437,6 +437,13 @@ Alignment:
/* Floating-point unavailable */
. = 0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
* Certain Freescale cores don't have a FPU and treat fp instructions
* as a FP Unavailable exception. Redirect to illegal/emulation handling.
*/
b ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
EXCEPTION_PROLOG
bne load_up_fpu /* if from user, just load it up */
addi r3,r1,STACK_FRAME_OVERHEAD
......
......@@ -23,6 +23,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include "setup.h"
......@@ -290,23 +291,11 @@ int module_finalize(const Elf_Ehdr *hdr,
struct module *me)
{
const Elf_Shdr *sect;
int err;
me->arch.bug_table = NULL;
me->arch.num_bugs = 0;
/* Find the __bug_table section, if present */
sect = find_section(hdr, sechdrs, "__bug_table");
if (sect != NULL) {
me->arch.bug_table = (void *) sect->sh_addr;
me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
}
/*
* Strictly speaking this should have a spinlock to protect against
* traversals, but since we only traverse on BUG()s, a spinlock
* could potentially lead to deadlock and thus be counter-productive.
*/
list_add(&me->arch.bug_list, &module_bug_list);
err = module_bug_finalize(hdr, sechdrs, me);
if (err) /* never true, currently */
return err;
/* Apply feature fixups */
sect = find_section(hdr, sechdrs, "__ftr_fixup");
......@@ -320,7 +309,7 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod)
{
list_del(&mod->arch.bug_list);
module_bug_cleanup(mod);
}
struct bug_entry *module_find_bug(unsigned long bugaddr)
......
......@@ -20,6 +20,7 @@
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <asm/module.h>
#include <asm/uaccess.h>
#include <asm/firmware.h>
......@@ -439,23 +440,11 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, struct module *me)
{
const Elf_Shdr *sect;
int err;
me->arch.bug_table = NULL;
me->arch.num_bugs = 0;
/* Find the __bug_table section, if present */
sect = find_section(hdr, sechdrs, "__bug_table");
if (sect != NULL) {
me->arch.bug_table = (void *) sect->sh_addr;
me->arch.num_bugs = sect->sh_size / sizeof(struct bug_entry);
}
/*
* Strictly speaking this should have a spinlock to protect against
* traversals, but since we only traverse on BUG()s, a spinlock
* could potentially lead to deadlock and thus be counter-productive.
*/
list_add(&me->arch.bug_list, &module_bug_list);
err = module_bug_finalize(hdr, sechdrs, me);
if (err)
return err;
/* Apply feature fixups */
sect = find_section(hdr, sechdrs, "__ftr_fixup");
......@@ -475,7 +464,7 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod)
{
list_del(&mod->arch.bug_list);
module_bug_cleanup(mod);
}
struct bug_entry *module_find_bug(unsigned long bugaddr)
......
......@@ -109,9 +109,7 @@ int of_device_register(struct of_device *ofdev)
if (rc)
return rc;
device_create_file(&ofdev->dev, &dev_attr_devspec);
return 0;
return device_create_file(&ofdev->dev, &dev_attr_devspec);
}
void of_device_unregister(struct of_device *ofdev)
......
......@@ -169,7 +169,7 @@ static void of_platform_make_bus_id(struct of_device *dev)
char *name = dev->dev.bus_id;
const u32 *reg;
u64 addr;
long magic;
int magic;
/*
* If it's a DCR based device, use 'd' for native DCRs
......
......@@ -736,25 +736,51 @@ scan_OF_pci_childs(struct device_node* node, pci_OF_scan_iterator filter, void*
return NULL;
}
static int
scan_OF_pci_childs_iterator(struct device_node* node, void* data)
static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
unsigned int devfn)
{
const unsigned int *reg;
u8* fdata = (u8*)data;
struct device_node *np = NULL;
const u32 *reg;
unsigned int psize;
reg = get_property(node, "reg", NULL);
if (reg && ((reg[0] >> 8) & 0xff) == fdata[1]
&& ((reg[0] >> 16) & 0xff) == fdata[0])
return 1;
return 0;
while ((np = of_get_next_child(parent, np)) != NULL) {
reg = get_property(np, "reg", &psize);
if (reg == NULL || psize < 4)
continue;
if (((reg[0] >> 8) & 0xff) == devfn)
return np;
}
return NULL;
}
static struct device_node*
scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
{
u8 filter_data[2] = {bus, dev_fn};
struct device_node *parent, *np;
return scan_OF_pci_childs(node, scan_OF_pci_childs_iterator, filter_data);
/* Are we a root bus ? */
if (bus->self == NULL || bus->parent == NULL) {
struct pci_controller *hose = pci_bus_to_hose(bus->number);
if (hose == NULL)
return NULL;
return of_node_get(hose->arch_data);
}
/* not a root bus, we need to get our parent */
parent = scan_OF_for_pci_bus(bus->parent);
if (parent == NULL)
return NULL;
/* now iterate over the children for a match */
np = scan_OF_for_pci_dev(parent, bus->self->devfn);
of_node_put(parent);
/* sanity check */
if (strcmp(np->type, "pci") != 0)
printk(KERN_WARNING "pci: wrong type \"%s\" for bridge %s\n",
np->type, np->full_name);
return np;
}
/*
......@@ -763,43 +789,25 @@ scan_OF_childs_for_device(struct device_node* node, u8 bus, u8 dev_fn)
struct device_node *
pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
{
struct pci_controller *hose;
struct device_node *node;
int busnr;
struct device_node *parent, *np;
if (!have_of)
return NULL;
/* Lookup the hose */
busnr = bus->number;
hose = pci_bus_to_hose(busnr);
if (!hose)
DBG("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
parent = scan_OF_for_pci_bus(bus);
if (parent == NULL)
return NULL;
/* Check it has an OF node associated */
node = (struct device_node *) hose->arch_data;
if (!node)
return NULL;
/* Fixup bus number according to what OF think it is. */
#ifdef CONFIG_PPC_PMAC
/* The G5 need a special case here. Basically, we don't remap all
* busses on it so we don't create the pci-OF-map. However, we do
* remap the AGP bus and so have to deal with it. A future better
* fix has to be done by making the remapping per-host and always
* filling the pci_to_OF map. --BenH
DBG(" parent is %s\n", parent ? parent->full_name : "<NULL>");
np = scan_OF_for_pci_dev(parent, devfn);
of_node_put(parent);
DBG(" result is %s\n", np ? np->full_name : "<NULL>");
/* XXX most callers don't release the returned node
* mostly because ppc64 doesn't increase the refcount,
* we need to fix that.
*/
if (machine_is(powermac) && busnr >= 0xf0)
busnr -= 0xf0;
else
#endif
if (pci_to_OF_bus_map)
busnr = pci_to_OF_bus_map[busnr];
if (busnr == 0xff)
return NULL;
/* Now, lookup childs of the hose */
return scan_OF_childs_for_device(node->child, busnr, devfn);
return np;
}
EXPORT_SYMBOL(pci_busdev_to_OF_node);
......@@ -1544,7 +1552,7 @@ pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
unsigned long *offset,
resource_size_t *offset,
enum pci_mmap_state mmap_state)
{
struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
......@@ -1556,7 +1564,9 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
/* If memory, add on the PCI bridge address offset */
if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
*offset += hose->pci_mem_offset;
#endif
res_bit = IORESOURCE_MEM;
} else {
io_offset = hose->io_base_virt - (void __iomem *)_IO_BASE;
......@@ -1624,9 +1634,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
printk("PCI map for %s:%llx, prot: %lx\n", pci_name(dev),
(unsigned long long)rp->start, prot);
return __pgprot(prot);
}
......@@ -1695,7 +1702,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine)
{
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
struct resource *rp;
int ret;
......@@ -1808,22 +1815,42 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
resource_size_t *start, resource_size_t *end)
{
struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
unsigned long offset = 0;
resource_size_t offset = 0;
if (hose == NULL)
return;
if (rsrc->flags & IORESOURCE_IO)
offset = (void __iomem *)_IO_BASE - hose->io_base_virt
+ hose->io_base_phys;
offset = (unsigned long)hose->io_base_virt - _IO_BASE;
/* We pass a fully fixed up address to userland for MMIO instead of
* a BAR value because X is lame and expects to be able to use that
* to pass to /dev/mem !
*
* That means that we'll have potentially 64 bits values where some
* userland apps only expect 32 (like X itself since it thinks only
* Sparc has 64 bits MMIO) but if we don't do that, we break it on
* 32 bits CHRPs :-(
*
* Hopefully, the sysfs interface is immune to that gunk. Once X
* has been fixed (and the fix spread enough), we can re-enable the
* 2 lines below and pass down a BAR value to userland. In that case
* we'll also have to re-enable the matching code in
* __pci_mmap_make_offset().
*
* BenH.
*/
#if 0
else if (rsrc->flags & IORESOURCE_MEM)
offset = hose->pci_mem_offset;
#endif
*start = rsrc->start + offset;
*end = rsrc->end + offset;
*start = rsrc->start - offset;
*end = rsrc->end - offset;
}
void __init
pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
int flags, char *name)
void __init pci_init_resource(struct resource *res, resource_size_t start,
resource_size_t end, int flags, char *name)
{
res->start = start;
res->end = end;
......
......@@ -682,7 +682,7 @@ int pci_proc_domain(struct pci_bus *bus)
* Returns negative error code on failure, zero on success.
*/
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
unsigned long *offset,
resource_size_t *offset,
enum pci_mmap_state mmap_state)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
......@@ -694,7 +694,9 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
/* If memory, add on the PCI bridge address offset */
if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
*offset += hose->pci_mem_offset;
#endif
res_bit = IORESOURCE_MEM;
} else {
io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
......@@ -762,9 +764,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
else
prot |= _PAGE_GUARDED;
printk(KERN_DEBUG "PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
prot);
return __pgprot(prot);
}
......@@ -832,7 +831,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
struct resource *rp;
int ret;
......@@ -1333,20 +1332,41 @@ EXPORT_SYMBOL(pci_read_irq_line);
void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
u64 *start, u64 *end)
resource_size_t *start, resource_size_t *end)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
unsigned long offset = 0;
resource_size_t offset = 0;
if (hose == NULL)
return;
if (rsrc->flags & IORESOURCE_IO)
offset = pci_io_base - (unsigned long)hose->io_base_virt +
hose->io_base_phys;
offset = (unsigned long)hose->io_base_virt - pci_io_base;
/* We pass a fully fixed up address to userland for MMIO instead of
* a BAR value because X is lame and expects to be able to use that
* to pass to /dev/mem !
*
* That means that we'll have potentially 64 bits values where some
* userland apps only expect 32 (like X itself since it thinks only
* Sparc has 64 bits MMIO) but if we don't do that, we break it on
* 32 bits CHRPs :-(
*
* Hopefully, the sysfs insterface is immune to that gunk. Once X
* has been fixed (and the fix spread enough), we can re-enable the
* 2 lines below and pass down a BAR value to userland. In that case
* we'll also have to re-enable the matching code in
* __pci_mmap_make_offset().
*
* BenH.
*/
#if 0
else if (rsrc->flags & IORESOURCE_MEM)
offset = hose->pci_mem_offset;
#endif
*start = rsrc->start + offset;
*end = rsrc->end + offset;
*start = rsrc->start - offset;
*end = rsrc->end - offset;
}
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
......
......@@ -208,7 +208,7 @@ EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
extern long *intercept_table;
EXPORT_SYMBOL(intercept_table);
#endif /* CONFIG_PPC_STD_MMU_32 */
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
#ifdef CONFIG_PPC_DCR_NATIVE
EXPORT_SYMBOL(__mtdcr);
EXPORT_SYMBOL(__mfdcr);
#endif
......@@ -804,6 +804,56 @@ static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
return of_read_ulong(p, s);
}
#ifdef CONFIG_PPC_PSERIES
/*
* Interpret the ibm,dynamic-memory property in the
* /ibm,dynamic-reconfiguration-memory node.
* This contains a list of memory blocks along with NUMA affinity
* information.
*/
static int __init early_init_dt_scan_drconf_memory(unsigned long node)
{
cell_t *dm, *ls;
unsigned long l, n;
unsigned long base, size, lmb_size, flags;
ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
return 0;
lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);
dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
if (dm == NULL || l < sizeof(cell_t))
return 0;
n = *dm++; /* number of entries */
if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t))
return 0;
for (; n != 0; --n) {
base = dt_mem_next_cell(dt_root_addr_cells, &dm);
flags = dm[3];
/* skip DRC index, pad, assoc. list index, flags */
dm += 4;
/* skip this block if the reserved bit is set in flags (0x80)
or if the block is not assigned to this partition (0x8) */
if ((flags & 0x80) || !(flags & 0x8))
continue;
size = lmb_size;
if (iommu_is_off) {
if (base >= 0x80000000ul)
continue;
if ((base + size) > 0x80000000ul)
size = 0x80000000ul - base;
}
lmb_add(base, size);
}
lmb_dump_all();
return 0;
}
#else
#define early_init_dt_scan_drconf_memory(node) 0
#endif /* CONFIG_PPC_PSERIES */
static int __init early_init_dt_scan_memory(unsigned long node,
const char *uname, int depth, void *data)
......@@ -812,6 +862,11 @@ static int __init early_init_dt_scan_memory(unsigned long node,
cell_t *reg, *endp;
unsigned long l;
/* Look for the ibm,dynamic-reconfiguration-memory node */
if (depth == 1 &&
strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
return early_init_dt_scan_drconf_memory(node);
/* We are scanning "memory" nodes only */
if (type == NULL) {
/*
......
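[Editor's note: each ibm,dynamic-memory entry scanned above is dt_root_addr_cells of base address followed by four fixed cells, of which only the flags word matters to this hunk. A hypothetical C view of one entry, matching what the loop skips and tests (field names are illustrative):]

/* Hypothetical view of one ibm,dynamic-memory entry; the base
 * address spans dt_root_addr_cells cells and precedes these fields. */
struct drconf_cells {
	u32 drc_index;		/* dynamic-reconfiguration connector */
	u32 reserved;		/* padding */
	u32 assoc_index;	/* row into ibm,associativity-lookup-arrays */
	u32 flags;		/* 0x80 reserved, 0x40 assoc. index invalid,
				 * 0x08 assigned to this partition */
};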
......@@ -679,7 +679,7 @@ static unsigned char ibm_architecture_vec[] = {
/* option vector 5: PAPR/OF options */
3 - 2, /* length */
0, /* don't ignore, don't halt */
OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES,
OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY,
};
/* Old method - ELF header with PT_NOTE sections */
......
......@@ -303,6 +303,12 @@ int rtas_token(const char *service)
}
EXPORT_SYMBOL(rtas_token);
int rtas_service_present(const char *service)
{
return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
}
EXPORT_SYMBOL(rtas_service_present);
#ifdef CONFIG_RTAS_ERROR_LOGGING
/*
* Return the firmware-specified size of the error log buffer
......@@ -810,32 +816,6 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/* This version can't take the spinlock, because it never returns */
static struct rtas_args rtas_stop_self_args = {
/* The token is initialized for real in setup_system() */
.token = RTAS_UNKNOWN_SERVICE,
.nargs = 0,
.nret = 1,
.rets = &rtas_stop_self_args.args[0],
};
void rtas_stop_self(void)
{
struct rtas_args *rtas_args = &rtas_stop_self_args;
local_irq_disable();
BUG_ON(rtas_args->token == RTAS_UNKNOWN_SERVICE);
printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
enter_rtas(__pa(rtas_args));
panic("Alas, I survived.\n");
}
#endif
/*
* Call early during boot, before mem init or bootmem, to retrieve the RTAS
* informations from the device-tree and allocate the RMO buffer for userland
......@@ -880,9 +860,6 @@ void __init rtas_initialize(void)
#endif
rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
#ifdef CONFIG_HOTPLUG_CPU
rtas_stop_self_args.token = rtas_token("stop-self");
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_RTAS_ERROR_LOGGING
rtas_last_error_token = rtas_token("rtas-last-error");
#endif
......
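[Editor's note: the new rtas_service_present() is a readability wrapper over rtas_token(); the Maple setup hunk later in this commit shows the intended pattern, sketched here:]

/* Usage sketch (see maple/setup.c below): only hook RTAS-backed
 * methods when firmware actually advertises the service. */
if (rtas_service_present("system-reboot") &&
    rtas_service_present("power-off")) {
	ppc_md.restart = rtas_restart;
	ppc_md.power_off = rtas_power_off;
}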
......@@ -181,6 +181,8 @@ SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
SYSFS_PMCSETUP(purr, SPRN_PURR);
SYSFS_PMCSETUP(spurr, SPRN_SPURR);
SYSFS_PMCSETUP(dscr, SPRN_DSCR);
static SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0);
static SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1);
......@@ -194,6 +196,8 @@ static SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6);
static SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7);
static SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8);
static SYSDEV_ATTR(purr, 0600, show_purr, NULL);
static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL);
static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr);
static void register_cpu_online(unsigned int cpu)
{
......@@ -231,6 +235,12 @@ static void register_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_PURR))
sysdev_create_file(s, &attr_purr);
if (cpu_has_feature(CPU_FTR_SPURR))
sysdev_create_file(s, &attr_spurr);
if (cpu_has_feature(CPU_FTR_DSCR))
sysdev_create_file(s, &attr_dscr);
}
#ifdef CONFIG_HOTPLUG_CPU
......@@ -272,6 +282,12 @@ static void unregister_cpu_online(unsigned int cpu)
if (cpu_has_feature(CPU_FTR_PURR))
sysdev_remove_file(s, &attr_purr);
if (cpu_has_feature(CPU_FTR_SPURR))
sysdev_remove_file(s, &attr_spurr);
if (cpu_has_feature(CPU_FTR_DSCR))
sysdev_remove_file(s, &attr_dscr);
}
#endif /* CONFIG_HOTPLUG_CPU */
......
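[Editor's note: the new spurr and dscr attributes are registered per online CPU, gated on CPU_FTR_SPURR/CPU_FTR_DSCR, and readable only by root (mode 0600). A hedged userland sketch, assuming these sysdev attributes surface under the usual /sys/devices/system/cpu/cpuN/ path and print in hex like the neighbouring SPR attributes:]

#include <stdio.h>

/* Hedged example: read the new per-CPU SPURR attribute. The path and
 * hex format are assumptions based on the sysdev layout of this era. */
int main(void)
{
	unsigned long long spurr;
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/spurr", "r");

	if (!f)
		return 1;	/* no CPU_FTR_SPURR, or not root */
	if (fscanf(f, "%llx", &spurr) == 1)
		printf("spurr: %#llx\n", spurr);
	fclose(f);
	return 0;
}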
......@@ -32,6 +32,7 @@
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <asm/kdebug.h>
#include <asm/pgtable.h>
......@@ -727,54 +728,9 @@ static int emulate_instruction(struct pt_regs *regs)
return -EINVAL;
}
/*
* Look through the list of trap instructions that are used for BUG(),
* BUG_ON() and WARN_ON() and see if we hit one. At this point we know
* that the exception was caused by a trap instruction of some kind.
* Returns 1 if we should continue (i.e. it was a WARN_ON) or 0
* otherwise.
*/
extern struct bug_entry __start___bug_table[], __stop___bug_table[];
#ifndef CONFIG_MODULES
#define module_find_bug(x) NULL
#endif
struct bug_entry *find_bug(unsigned long bugaddr)
int is_valid_bugaddr(unsigned long addr)
{
struct bug_entry *bug;
for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
if (bugaddr == bug->bug_addr)
return bug;
return module_find_bug(bugaddr);
}
static int check_bug_trap(struct pt_regs *regs)
{
struct bug_entry *bug;
unsigned long addr;
if (regs->msr & MSR_PR)
return 0; /* not in kernel */
addr = regs->nip; /* address of trap instruction */
if (addr < PAGE_OFFSET)
return 0;
bug = find_bug(regs->nip);
if (bug == NULL)
return 0;
if (bug->line & BUG_WARNING_TRAP) {
/* this is a WARN_ON rather than BUG/BUG_ON */
printk(KERN_ERR "Badness in %s at %s:%ld\n",
bug->function, bug->file,
bug->line & ~BUG_WARNING_TRAP);
dump_stack();
return 1;
}
printk(KERN_CRIT "kernel BUG in %s at %s:%ld!\n",
bug->function, bug->file, bug->line);
return 0;
return is_kernel_addr(addr);
}
void __kprobes program_check_exception(struct pt_regs *regs)
......@@ -782,6 +738,8 @@ void __kprobes program_check_exception(struct pt_regs *regs)
unsigned int reason = get_reason(regs);
extern int do_mathemu(struct pt_regs *regs);
/* We can now get here via a FP Unavailable exception if the core
* has no FPU, in that case no reason flags will be set */
#ifdef CONFIG_MATH_EMULATION
/* (reason & REASON_ILLEGAL) would be the obvious thing here,
* but there seems to be a hardware bug on the 405GP (RevD)
......@@ -808,7 +766,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
return;
if (debugger_bpt(regs))
return;
if (check_bug_trap(regs)) {
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(regs->nip) == BUG_TRAP_TYPE_WARN) {
regs->nip += 4;
return;
}
......
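[Editor's note: the net effect of the traps.c rewrite is that on a kernel-mode trap instruction, program_check_exception() now asks the generic report_bug() to classify the address; a WARN_ON record just logs and execution resumes past the 4-byte trap, while a BUG record still dies. Illustrative driver-side semantics:]

#include <linux/bug.h>
#include <linux/init.h>

/* Illustrative semantics under the new generic path: WARN_ON() traps,
 * report_bug() returns BUG_TRAP_TYPE_WARN, the handler bumps
 * regs->nip by 4, and the function keeps running. */
static int __init example_init(void)
{
	WARN_ON(1);	/* backtrace in the log, then continue here */
	return 0;	/* still reached */
}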
......@@ -62,11 +62,7 @@ SECTIONS
__stop___ex_table = .;
}
__bug_table : {
__start___bug_table = .;
*(__bug_table)
__stop___bug_table = .;
}
BUG_TABLE
/*
* Init sections discarded at runtime
......
......@@ -295,6 +295,63 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
return lmb_end_of_DRAM() - start;
}
/*
* Extract NUMA information from the ibm,dynamic-reconfiguration-memory
* node. This assumes n_mem_{addr,size}_cells have been set.
*/
static void __init parse_drconf_memory(struct device_node *memory)
{
const unsigned int *lm, *dm, *aa;
unsigned int ls, ld, la;
unsigned int n, aam, aalen;
unsigned long lmb_size, size;
int nid, default_nid = 0;
unsigned int start, ai, flags;
lm = get_property(memory, "ibm,lmb-size", &ls);
dm = get_property(memory, "ibm,dynamic-memory", &ld);
aa = get_property(memory, "ibm,associativity-lookup-arrays", &la);
if (!lm || !dm || !aa ||
ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
la < 2 * sizeof(unsigned int))
return;
lmb_size = read_n_cells(n_mem_size_cells, &lm);
n = *dm++; /* number of LMBs */
aam = *aa++; /* number of associativity lists */
aalen = *aa++; /* length of each associativity list */
if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
la < (aam * aalen + 2) * sizeof(unsigned int))
return;
for (; n != 0; --n) {
start = read_n_cells(n_mem_addr_cells, &dm);
ai = dm[2];
flags = dm[3];
dm += 4;
/* 0x80 == reserved, 0x8 = assigned to us */
if ((flags & 0x80) || !(flags & 0x8))
continue;
nid = default_nid;
/* flags & 0x40 means associativity index is invalid */
if (min_common_depth > 0 && min_common_depth <= aalen &&
(flags & 0x40) == 0 && ai < aam) {
/* this is like of_node_to_nid_single */
nid = aa[ai * aalen + min_common_depth - 1];
if (nid == 0xffff || nid >= MAX_NUMNODES)
nid = default_nid;
}
node_set_online(nid);
size = numa_enforce_memory_limit(start, lmb_size);
if (!size)
continue;
add_active_range(nid, start >> PAGE_SHIFT,
(start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
}
}
static int __init parse_numa_properties(void)
{
struct device_node *cpu = NULL;
......@@ -385,6 +442,14 @@ static int __init parse_numa_properties(void)
goto new_range;
}
/*
* Now do the same thing for each LMB listed in the ibm,dynamic-memory
* property in the ibm,dynamic-reconfiguration-memory node.
*/
memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (memory)
parse_drconf_memory(memory);
return 0;
}
......
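[Editor's note: the ibm,associativity-lookup-arrays property decoded above is a small 2-D table: two header cells (number of arrays, entries per array) followed by the arrays themselves; the NUMA node comes from row `ai`, column `min_common_depth - 1`. A hypothetical C rendering:]

/* Hypothetical shape of ibm,associativity-lookup-arrays: the code
 * above reads aam and aalen, then indexes
 * aa[ai * aalen + min_common_depth - 1] to get an LMB's node id. */
struct assoc_arrays {
	u32 n_arrays;		/* aam */
	u32 array_len;		/* aalen */
	u32 domains[];		/* n_arrays rows of array_len cells */
};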
......@@ -40,8 +40,6 @@
#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <asm/qe.h>
#include <asm/qe_ic.h>
#include <asm/of_platform.h>
#include <asm/mpc52xx.h>
......
......@@ -115,6 +115,7 @@ static struct sysdev_attribute attr_spu_temperature = {
static struct attribute *spu_attributes[] = {
&attr_spu_temperature.attr,
NULL,
};
static struct attribute_group spu_attribute_group = {
......@@ -135,6 +136,7 @@ static struct sysdev_attribute attr_ppe_temperature1 = {
static struct attribute *ppe_attributes[] = {
&attr_ppe_temperature0.attr,
&attr_ppe_temperature1.attr,
NULL,
};
static struct attribute_group ppe_attribute_group = {
......
......@@ -382,11 +382,14 @@ static irqreturn_t cbe_pm_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
int __init cbe_init_pm_irq(void)
static int __init cbe_init_pm_irq(void)
{
unsigned int irq;
int rc, node;
if (!machine_is(cell))
return 0;
for_each_node(node) {
irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
(node << IIC_IRQ_NODE_SHIFT));
......
......@@ -147,7 +147,7 @@ static int spufs_arch_notes_size(void)
struct fdtable *fdt = files_fdtable(current->files);
int size = 0, fd;
for (fd = 0; fd < fdt->max_fdset && fd < fdt->max_fds; fd++) {
for (fd = 0; fd < fdt->max_fds; fd++) {
if (FD_ISSET(fd, fdt->open_fds)) {
struct file *file = fcheck(fd);
......
......@@ -562,7 +562,7 @@ void __init maple_pci_init(void)
for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
if (np->name == NULL)
continue;
if (strcmp(np->name, "pci") == 0) {
if (!strcmp(np->name, "pci") || !strcmp(np->name, "pcie")) {
if (add_bridge(np) == 0)
of_node_get(np);
}
......
......@@ -60,6 +60,7 @@
#include <asm/of_device.h>
#include <asm/lmb.h>
#include <asm/mpic.h>
#include <asm/rtas.h>
#include <asm/udbg.h>
#include "maple.h"
......@@ -166,6 +167,16 @@ struct smp_ops_t maple_smp_ops = {
};
#endif /* CONFIG_SMP */
static void __init maple_use_rtas_reboot_and_halt_if_present(void)
{
if (rtas_service_present("system-reboot") &&
rtas_service_present("power-off")) {
ppc_md.restart = rtas_restart;
ppc_md.power_off = rtas_power_off;
ppc_md.halt = rtas_halt;
}
}
void __init maple_setup_arch(void)
{
/* init to some ~sane value until calibrate_delay() runs */
......@@ -181,6 +192,7 @@ void __init maple_setup_arch(void)
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
maple_use_rtas_reboot_and_halt_if_present();
printk(KERN_DEBUG "Using native/NAP idle loop\n");
}
......
......@@ -40,4 +40,15 @@ config PS3_USE_LPAR_ADDR
If you have any doubt, choose the default y.
config PS3_VUART
depends on PPC_PS3
bool "PS3 Virtual UART support"
default y
help
Include support for the PS3 Virtual UART.
This support is required for several system services
including the System Manager and AV Settings. In
general, all users will say Y.
endmenu
......@@ -10,6 +10,8 @@ obj-$(CONFIG_XICS) += xics.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
......@@ -337,6 +337,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
printk (KERN_ERR "EEH: Device driver ignored %d bad reads, panicking\n",
pdn->eeh_check_count);
dump_stack();
msleep(5000);
/* re-read the slot reset state */
if (read_slot_reset_state(pdn, rets) != 0)
......
......@@ -170,14 +170,19 @@ static void eeh_report_reset(struct pci_dev *dev, void *userdata)
static void eeh_report_resume(struct pci_dev *dev, void *userdata)
{
struct pci_driver *driver = dev->driver;
struct device_node *dn = pci_device_to_OF_node(dev);
dev->error_state = pci_channel_io_normal;
if (!driver)
return;
if (!driver->err_handler)
return;
if (!driver->err_handler->resume)
if ((PCI_DN(dn)->eeh_mode) & EEH_MODE_IRQ_DISABLED) {
PCI_DN(dn)->eeh_mode &= ~EEH_MODE_IRQ_DISABLED;
enable_irq(dev->irq);
}
if (!driver->err_handler ||
!driver->err_handler->resume)
return;
driver->err_handler->resume(dev);
......@@ -407,6 +412,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
if (rc)
result = PCI_ERS_RESULT_NEED_RESET;
else
result = PCI_ERS_RESULT_RECOVERED;
}
/* If any device has a hard failure, then shut off everything. */
......
/*
* pseries CPU Hotplug infrastructure.
*
* Split out from arch/powerpc/platforms/pseries/setup.c
* arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
*
* Peter Bergner, IBM March 2001.
* Copyright (C) 2001 IBM.
* Dave Engebretsen, Peter Bergner, and
* Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
* Plus various changes from other IBM teams...
*
* Copyright (C) 2006 Michael Ellerman, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/system.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/pSeries_reconfig.h>
#include "xics.h"
/* This version can't take the spinlock, because it never returns */
static struct rtas_args rtas_stop_self_args = {
.token = RTAS_UNKNOWN_SERVICE,
.nargs = 0,
.nret = 1,
.rets = &rtas_stop_self_args.args[0],
};
static void rtas_stop_self(void)
{
struct rtas_args *args = &rtas_stop_self_args;
local_irq_disable();
BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
enter_rtas(__pa(args));
panic("Alas, I survived.\n");
}
static void pseries_mach_cpu_die(void)
{
local_irq_disable();
idle_task_exit();
xics_teardown_cpu(0);
rtas_stop_self();
/* Should never get here... */
BUG();
for(;;);
}
static int qcss_tok; /* query-cpu-stopped-state token */
/* Get state of physical CPU.
* Return codes:
* 0 - The processor is in the RTAS stopped state
* 1 - stop-self is in progress
* 2 - The processor is not in the RTAS stopped state
* -1 - Hardware Error
* -2 - Hardware Busy, Try again later.
*/
static int query_cpu_stopped(unsigned int pcpu)
{
int cpu_status, status;
status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
if (status != 0) {
printk(KERN_ERR
"RTAS query-cpu-stopped-state failed: %i\n", status);
return status;
}
return cpu_status;
}
static int pseries_cpu_disable(void)
{
int cpu = smp_processor_id();
cpu_clear(cpu, cpu_online_map);
vdso_data->processorCount--;
/*fix boot_cpuid here*/
if (cpu == boot_cpuid)
boot_cpuid = any_online_cpu(cpu_online_map);
/* FIXME: abstract this to not be platform specific later on */
xics_migrate_irqs_away();
return 0;
}
static void pseries_cpu_die(unsigned int cpu)
{
int tries;
int cpu_status;
unsigned int pcpu = get_hard_smp_processor_id(cpu);
for (tries = 0; tries < 25; tries++) {
cpu_status = query_cpu_stopped(pcpu);
if (cpu_status == 0 || cpu_status == -1)
break;
msleep(200);
}
if (cpu_status != 0) {
printk("Querying DEAD? cpu %i (%i) shows %i\n",
cpu, pcpu, cpu_status);
}
/* Isolation and deallocation are definitely done by
* drslot_chrp_cpu. If they were not they would be
* done here. Change isolate state to Isolate and
* change allocation-state to Unusable.
*/
paca[cpu].cpu_start = 0;
}
/*
* Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
* here is that a cpu device node may represent up to two logical cpus
* in the SMT case. We must honor the assumption in other code that
* the logical ids for sibling SMT threads x and y are adjacent, such
* that x^1 == y and y^1 == x.
*/
static int pseries_add_processor(struct device_node *np)
{
unsigned int cpu;
cpumask_t candidate_map, tmp = CPU_MASK_NONE;
int err = -ENOSPC, len, nthreads, i;
const u32 *intserv;
intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return 0;
nthreads = len / sizeof(u32);
for (i = 0; i < nthreads; i++)
cpu_set(i, tmp);
lock_cpu_hotplug();
BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
/* Get a bitmap of unoccupied slots. */
cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
if (cpus_empty(candidate_map)) {
/* If we get here, it most likely means that NR_CPUS is
* less than the partition's max processors setting.
*/
printk(KERN_ERR "Cannot add cpu %s; this system configuration"
" supports %d logical cpus.\n", np->full_name,
cpus_weight(cpu_possible_map));
goto out_unlock;
}
while (!cpus_empty(tmp))
if (cpus_subset(tmp, candidate_map))
/* Found a range where we can insert the new cpu(s) */
break;
else
cpus_shift_left(tmp, tmp, nthreads);
if (cpus_empty(tmp)) {
printk(KERN_ERR "Unable to find space in cpu_present_map for"
" processor %s with %d thread(s)\n", np->name,
nthreads);
goto out_unlock;
}
for_each_cpu_mask(cpu, tmp) {
BUG_ON(cpu_isset(cpu, cpu_present_map));
cpu_set(cpu, cpu_present_map);
set_hard_smp_processor_id(cpu, *intserv++);
}
err = 0;
out_unlock:
unlock_cpu_hotplug();
return err;
}
/*
* Update the present map for a cpu node which is going away, and set
* the hard id in the paca(s) to -1 to be consistent with boot time
* convention for non-present cpus.
*/
static void pseries_remove_processor(struct device_node *np)
{
unsigned int cpu;
int len, nthreads, i;
const u32 *intserv;
intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return;
nthreads = len / sizeof(u32);
lock_cpu_hotplug();
for (i = 0; i < nthreads; i++) {
for_each_present_cpu(cpu) {
if (get_hard_smp_processor_id(cpu) != intserv[i])
continue;
BUG_ON(cpu_online(cpu));
cpu_clear(cpu, cpu_present_map);
set_hard_smp_processor_id(cpu, -1);
break;
}
if (cpu == NR_CPUS)
printk(KERN_WARNING "Could not find cpu to remove "
"with physical id 0x%x\n", intserv[i]);
}
unlock_cpu_hotplug();
}
static int pseries_smp_notifier(struct notifier_block *nb,
unsigned long action, void *node)
{
int err = NOTIFY_OK;
switch (action) {
case PSERIES_RECONFIG_ADD:
if (pseries_add_processor(node))
err = NOTIFY_BAD;
break;
case PSERIES_RECONFIG_REMOVE:
pseries_remove_processor(node);
break;
default:
err = NOTIFY_DONE;
break;
}
return err;
}
static struct notifier_block pseries_smp_nb = {
.notifier_call = pseries_smp_notifier,
};
static int __init pseries_cpu_hotplug_init(void)
{
rtas_stop_self_args.token = rtas_token("stop-self");
qcss_tok = rtas_token("query-cpu-stopped-state");
if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "CPU Hotplug not supported by firmware "
"- disabling.\n");
return 0;
}
ppc_md.cpu_die = pseries_mach_cpu_die;
smp_ops->cpu_disable = pseries_cpu_disable;
smp_ops->cpu_die = pseries_cpu_die;
/* Processors can be added/removed only on LPAR */
if (firmware_has_feature(FW_FEATURE_LPAR))
pSeries_reconfig_notifier_register(&pseries_smp_nb);
return 0;
}
arch_initcall(pseries_cpu_hotplug_init);
......@@ -347,21 +347,6 @@ static int __init pSeries_init_panel(void)
}
arch_initcall(pSeries_init_panel);
#ifdef CONFIG_HOTPLUG_CPU
static void pSeries_mach_cpu_die(void)
{
local_irq_disable();
idle_task_exit();
xics_teardown_cpu(0);
rtas_stop_self();
/* Should never get here... */
BUG();
for(;;);
}
#else
#define pSeries_mach_cpu_die NULL
#endif
static int pseries_set_dabr(unsigned long dabr)
{
return plpar_hcall_norets(H_SET_DABR, dabr);
......@@ -437,19 +422,14 @@ static int __init pSeries_probe_hypertas(unsigned long node,
if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL) != NULL)
powerpc_firmware_features |= FW_FEATURE_LPAR;
if (firmware_has_feature(FW_FEATURE_LPAR))
hpte_init_lpar();
else
hpte_init_native();
return 1;
}
static int __init pSeries_probe(void)
{
unsigned long root = of_get_flat_dt_root();
char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
"device_type", NULL);
char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
if (dtype == NULL)
return 0;
if (strcmp(dtype, "chrp"))
......@@ -467,6 +447,11 @@ static int __init pSeries_probe(void)
/* Now try to figure out if we are running on LPAR */
of_scan_flat_dt(pSeries_probe_hypertas, NULL);
if (firmware_has_feature(FW_FEATURE_LPAR))
hpte_init_lpar();
else
hpte_init_native();
DBG("Machine is%s LPAR !\n",
(powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
......@@ -561,7 +546,6 @@ define_machine(pseries) {
.power_off = rtas_power_off,
.halt = rtas_halt,
.panic = rtas_os_term,
.cpu_die = pSeries_mach_cpu_die,
.get_boot_time = rtas_get_boot_time,
.get_rtc_time = rtas_get_rtc_time,
.set_rtc_time = rtas_set_rtc_time,
......
......@@ -64,197 +64,6 @@ static cpumask_t of_spin_map;
extern void generic_secondary_smp_init(unsigned long);
#ifdef CONFIG_HOTPLUG_CPU
/* Get state of physical CPU.
* Return codes:
* 0 - The processor is in the RTAS stopped state
* 1 - stop-self is in progress
* 2 - The processor is not in the RTAS stopped state
* -1 - Hardware Error
* -2 - Hardware Busy, Try again later.
*/
static int query_cpu_stopped(unsigned int pcpu)
{
int cpu_status;
int status, qcss_tok;
qcss_tok = rtas_token("query-cpu-stopped-state");
if (qcss_tok == RTAS_UNKNOWN_SERVICE)
return -1;
status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
if (status != 0) {
printk(KERN_ERR
"RTAS query-cpu-stopped-state failed: %i\n", status);
return status;
}
return cpu_status;
}
static int pSeries_cpu_disable(void)
{
int cpu = smp_processor_id();
cpu_clear(cpu, cpu_online_map);
vdso_data->processorCount--;
/*fix boot_cpuid here*/
if (cpu == boot_cpuid)
boot_cpuid = any_online_cpu(cpu_online_map);
/* FIXME: abstract this to not be platform specific later on */
xics_migrate_irqs_away();
return 0;
}
static void pSeries_cpu_die(unsigned int cpu)
{
int tries;
int cpu_status;
unsigned int pcpu = get_hard_smp_processor_id(cpu);
for (tries = 0; tries < 25; tries++) {
cpu_status = query_cpu_stopped(pcpu);
if (cpu_status == 0 || cpu_status == -1)
break;
msleep(200);
}
if (cpu_status != 0) {
printk("Querying DEAD? cpu %i (%i) shows %i\n",
cpu, pcpu, cpu_status);
}
/* Isolation and deallocation are definitely done by
* drslot_chrp_cpu. If they were not they would be
* done here. Change isolate state to Isolate and
* change allocation-state to Unusable.
*/
paca[cpu].cpu_start = 0;
}
/*
* Update cpu_present_map and paca(s) for a new cpu node. The wrinkle
* here is that a cpu device node may represent up to two logical cpus
* in the SMT case. We must honor the assumption in other code that
* the logical ids for sibling SMT threads x and y are adjacent, such
* that x^1 == y and y^1 == x.
*/
static int pSeries_add_processor(struct device_node *np)
{
unsigned int cpu;
cpumask_t candidate_map, tmp = CPU_MASK_NONE;
int err = -ENOSPC, len, nthreads, i;
const u32 *intserv;
intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return 0;
nthreads = len / sizeof(u32);
for (i = 0; i < nthreads; i++)
cpu_set(i, tmp);
lock_cpu_hotplug();
BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
/* Get a bitmap of unoccupied slots. */
cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
if (cpus_empty(candidate_map)) {
/* If we get here, it most likely means that NR_CPUS is
* less than the partition's max processors setting.
*/
printk(KERN_ERR "Cannot add cpu %s; this system configuration"
" supports %d logical cpus.\n", np->full_name,
cpus_weight(cpu_possible_map));
goto out_unlock;
}
while (!cpus_empty(tmp))
if (cpus_subset(tmp, candidate_map))
/* Found a range where we can insert the new cpu(s) */
break;
else
cpus_shift_left(tmp, tmp, nthreads);
if (cpus_empty(tmp)) {
printk(KERN_ERR "Unable to find space in cpu_present_map for"
" processor %s with %d thread(s)\n", np->name,
nthreads);
goto out_unlock;
}
for_each_cpu_mask(cpu, tmp) {
BUG_ON(cpu_isset(cpu, cpu_present_map));
cpu_set(cpu, cpu_present_map);
set_hard_smp_processor_id(cpu, *intserv++);
}
err = 0;
out_unlock:
unlock_cpu_hotplug();
return err;
}
/*
* Update the present map for a cpu node which is going away, and set
* the hard id in the paca(s) to -1 to be consistent with boot time
* convention for non-present cpus.
*/
static void pSeries_remove_processor(struct device_node *np)
{
unsigned int cpu;
int len, nthreads, i;
const u32 *intserv;
intserv = get_property(np, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return;
nthreads = len / sizeof(u32);
lock_cpu_hotplug();
for (i = 0; i < nthreads; i++) {
for_each_present_cpu(cpu) {
if (get_hard_smp_processor_id(cpu) != intserv[i])
continue;
BUG_ON(cpu_online(cpu));
cpu_clear(cpu, cpu_present_map);
set_hard_smp_processor_id(cpu, -1);
break;
}
if (cpu == NR_CPUS)
printk(KERN_WARNING "Could not find cpu to remove "
"with physical id 0x%x\n", intserv[i]);
}
unlock_cpu_hotplug();
}
static int pSeries_smp_notifier(struct notifier_block *nb, unsigned long action, void *node)
{
int err = NOTIFY_OK;
switch (action) {
case PSERIES_RECONFIG_ADD:
if (pSeries_add_processor(node))
err = NOTIFY_BAD;
break;
case PSERIES_RECONFIG_REMOVE:
pSeries_remove_processor(node);
break;
default:
err = NOTIFY_DONE;
break;
}
return err;
}
static struct notifier_block pSeries_smp_nb = {
.notifier_call = pSeries_smp_notifier,
};
#endif /* CONFIG_HOTPLUG_CPU */
/**
* smp_startup_cpu() - start the given cpu
*
......@@ -422,15 +231,6 @@ static void __init smp_init_pseries(void)
DBG(" -> smp_init_pSeries()\n");
#ifdef CONFIG_HOTPLUG_CPU
smp_ops->cpu_disable = pSeries_cpu_disable;
smp_ops->cpu_die = pSeries_cpu_die;
/* Processors can be added/removed only on LPAR */
if (firmware_has_feature(FW_FEATURE_LPAR))
pSeries_reconfig_notifier_register(&pSeries_smp_nb);
#endif
/* Mark threads which are still spinning in hold loops. */
if (cpu_has_feature(CPU_FTR_SMT)) {
for_each_present_cpu(i) {
......
......@@ -5,7 +5,8 @@ endif
obj-$(CONFIG_MPIC) += mpic.o
obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
obj-$(CONFIG_PPC_MPC106) += grackle.o
obj-$(CONFIG_PPC_DCR) += dcr.o dcr-low.o
obj-$(CONFIG_PPC_DCR) += dcr.o
obj-$(CONFIG_PPC_DCR_NATIVE) += dcr-low.o
obj-$(CONFIG_U3_DART) += dart_iommu.o
obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
obj-$(CONFIG_FSL_SOC) += fsl_soc.o
......
/*
* "Indirect" DCR access
*
* Copyright (c) 2004 Eugene Surovegin <ebs@ebshome.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <asm/ppc_asm.h>
#include <asm/processor.h>
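/*
 * Dispatch trick: each DCR number owns a 16-byte table slot holding
 * "mfdcr r3,N; blr" followed by "mtdcr N,r4; blr".  The rlwinm below
 * computes (dcr << 4) masked to 10 bits -- the byte offset of that
 * slot -- so we branch straight into the matching stub through CTR.
 */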
#define DCR_ACCESS_PROLOG(table) \
rlwinm r3,r3,4,18,27; \
lis r5,table@h; \
ori r5,r5,table@l; \
add r3,r3,r5; \
mtctr r3; \
bctr
_GLOBAL(__mfdcr)
DCR_ACCESS_PROLOG(__mfdcr_table)
_GLOBAL(__mtdcr)
DCR_ACCESS_PROLOG(__mtdcr_table)
__mfdcr_table:
mfdcr r3,0; blr
__mtdcr_table:
mtdcr 0,r4; blr
dcr = 1
.rept 1023
mfdcr r3,dcr; blr
mtdcr dcr,r4; blr
dcr = dcr + 1
.endr
......@@ -223,23 +223,15 @@ static void qe_ic_mask_irq(unsigned int virq)
qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
temp & ~qe_ic_info[src].mask);
spin_unlock_irqrestore(&qe_ic_lock, flags);
}
static void qe_ic_mask_irq_and_ack(unsigned int virq)
{
struct qe_ic *qe_ic = qe_ic_from_irq(virq);
unsigned int src = virq_to_hw(virq);
unsigned long flags;
u32 temp;
spin_lock_irqsave(&qe_ic_lock, flags);
temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
temp & ~qe_ic_info[src].mask);
/* There is nothing to do for ack here, ack is handled in ISR */
/* Flush the above write before enabling interrupts; otherwise,
* spurious interrupts will sometimes happen. To be 100% sure
* that the write has reached the device before interrupts are
* enabled, the mask register would have to be read back; however,
* this is not required for correctness, only to avoid wasting
* time on a large number of spurious interrupts. In testing,
* a sync reduced the observed spurious interrupts to zero.
*/
mb();
spin_unlock_irqrestore(&qe_ic_lock, flags);
}
......@@ -248,7 +240,7 @@ static struct irq_chip qe_ic_irq_chip = {
.typename = " QEIC ",
.unmask = qe_ic_unmask_irq,
.mask = qe_ic_mask_irq,
.mask_ack = qe_ic_mask_irq_and_ack,
.mask_ack = qe_ic_mask_irq,
};
static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
......@@ -331,34 +323,22 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
return irq_linear_revmap(qe_ic->irqhost, irq);
}
/* FIXME: We mask all the QE Low interrupts while handling. We should
* let other interrupts come in, but BAD interrupts are generated */
void fastcall qe_ic_cascade_low(unsigned int irq, struct irq_desc *desc)
{
struct qe_ic *qe_ic = desc->handler_data;
struct irq_chip *chip = irq_desc[irq].chip;
unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
chip->mask_ack(irq);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
chip->unmask(irq);
}
/* FIXME: We mask all the QE High interrupts while handling. We should
* let other interrupts come in, but BAD interrupts are generated */
void fastcall qe_ic_cascade_high(unsigned int irq, struct irq_desc *desc)
{
struct qe_ic *qe_ic = desc->handler_data;
struct irq_chip *chip = irq_desc[irq].chip;
unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
chip->mask_ack(irq);
if (cascade_irq != NO_IRQ)
generic_handle_irq(cascade_irq);
chip->unmask(irq);
}
void __init qe_ic_init(struct device_node *node, unsigned int flags)
......
......@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <asm/of_device.h>
#include <asm/of_platform.h>
static int __init powerpc_flash_init(void)
{
......
......@@ -22,6 +22,7 @@
#include <linux/sysrq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bug.h>
#include <asm/ptrace.h>
#include <asm/string.h>
......@@ -35,7 +36,6 @@
#include <asm/cputable.h>
#include <asm/rtas.h>
#include <asm/sstep.h>
#include <asm/bug.h>
#include <asm/irq_regs.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
......@@ -1346,7 +1346,7 @@ static void backtrace(struct pt_regs *excp)
static void print_bug_trap(struct pt_regs *regs)
{
struct bug_entry *bug;
const struct bug_entry *bug;
unsigned long addr;
if (regs->msr & MSR_PR)
......@@ -1357,11 +1357,11 @@ static void print_bug_trap(struct pt_regs *regs)
bug = find_bug(regs->nip);
if (bug == NULL)
return;
if (bug->line & BUG_WARNING_TRAP)
if (is_warning_bug(bug))
return;
printf("kernel BUG in %s at %s:%d!\n",
bug->function, bug->file, (unsigned int)bug->line);
printf("kernel BUG at %s:%u!\n",
bug->file, bug->line);
}
void excprint(struct pt_regs *fp)
......
......@@ -879,7 +879,7 @@ pci_resource_to_bus(struct pci_dev *pdev, struct resource *res)
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
unsigned long *offset,
resource_size_t *offset,
enum pci_mmap_state mmap_state)
{
struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
......@@ -891,7 +891,9 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
/* If memory, add on the PCI bridge address offset */
if (mmap_state == pci_mmap_mem) {
#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
*offset += hose->pci_mem_offset;
#endif
res_bit = IORESOURCE_MEM;
} else {
io_offset = hose->io_base_virt - ___IO_BASE;
......@@ -1030,7 +1032,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state,
int write_combine)
{
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
struct resource *rp;
int ret;
......@@ -1132,21 +1134,42 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
resource_size_t *start, resource_size_t *end)
{
struct pci_controller *hose = pci_bus_to_hose(dev->bus->number);
unsigned long offset = 0;
resource_size_t offset = 0;
if (hose == NULL)
return;
if (rsrc->flags & IORESOURCE_IO)
offset = ___IO_BASE - hose->io_base_virt + hose->io_base_phys;
offset = (unsigned long)hose->io_base_virt - _IO_BASE;
/* We pass a fully fixed up address to userland for MMIO instead of
* a BAR value because X is lame and expects to be able to use that
* to pass to /dev/mem !
*
* That means that we'll have potentially 64 bits values where some
* userland apps only expect 32 (like X itself since it thinks only
* Sparc has 64 bits MMIO) but if we don't do that, we break it on
* 32 bits CHRPs :-(
*
* Hopefully, the sysfs interface is immune to that gunk. Once X
* has been fixed (and the fix spread enough), we can re-enable the
* 2 lines below and pass down a BAR value to userland. In that case
* we'll also have to re-enable the matching code in
* __pci_mmap_make_offset().
*
* BenH.
*/
#if 0
else if (rsrc->flags & IORESOURCE_MEM)
offset = hose->pci_mem_offset;
#endif
*start = rsrc->start + offset;
*end = rsrc->end + offset;
*start = rsrc->start - offset;
*end = rsrc->end - offset;
}
void __init
pci_init_resource(struct resource *res, unsigned long start, unsigned long end,
int flags, char *name)
void __init pci_init_resource(struct resource *res, resource_size_t start,
resource_size_t end, int flags, char *name)
{
res->start = start;
res->end = end;
......
obj-y += system-bus.o
obj-$(CONFIG_PS3_VUART) += vuart.o
/*
* PS3 virtual uart
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if !defined(_PS3_VUART_H)
#define _PS3_VUART_H
struct ps3_vuart_stats {
unsigned long bytes_written;
unsigned long bytes_read;
unsigned long tx_interrupts;
unsigned long rx_interrupts;
unsigned long disconnect_interrupts;
};
/**
* struct ps3_vuart_port_device - a device on a vuart port
*/
struct ps3_vuart_port_device {
enum ps3_match_id match_id;
struct device core;
/* private driver variables */
unsigned int port_number;
unsigned long interrupt_mask;
struct {
spinlock_t lock;
struct list_head head;
} tx_list;
struct {
unsigned long bytes_held;
spinlock_t lock;
struct list_head head;
} rx_list;
struct ps3_vuart_stats stats;
};
/**
* struct ps3_vuart_port_driver - a driver for a device on a vuart port
*/
struct ps3_vuart_port_driver {
enum ps3_match_id match_id;
struct device_driver core;
int (*probe)(struct ps3_vuart_port_device *);
int (*remove)(struct ps3_vuart_port_device *);
int (*tx_event)(struct ps3_vuart_port_device *dev);
int (*rx_event)(struct ps3_vuart_port_device *dev);
int (*disconnect_event)(struct ps3_vuart_port_device *dev);
/* int (*suspend)(struct ps3_vuart_port_device *, pm_message_t); */
/* int (*resume)(struct ps3_vuart_port_device *); */
};
int ps3_vuart_port_device_register(struct ps3_vuart_port_device *dev);
int ps3_vuart_port_driver_register(struct ps3_vuart_port_driver *drv);
void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv);
int ps3_vuart_write(struct ps3_vuart_port_device *dev,
const void* buf, unsigned int bytes);
int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
unsigned int bytes);
static inline struct ps3_vuart_port_driver *to_ps3_vuart_port_driver(
struct device_driver *_drv)
{
return container_of(_drv, struct ps3_vuart_port_driver, core);
}
static inline struct ps3_vuart_port_device *to_ps3_vuart_port_device(
struct device *_dev)
{
return container_of(_dev, struct ps3_vuart_port_device, core);
}
int ps3_vuart_write(struct ps3_vuart_port_device *dev, const void* buf,
unsigned int bytes);
int ps3_vuart_read(struct ps3_vuart_port_device *dev, void* buf,
unsigned int bytes);
#endif
......@@ -17,7 +17,6 @@ header-y += ipc.h
header-y += poll.h
header-y += shmparam.h
header-y += sockios.h
header-y += spu_info.h
header-y += ucontext.h
header-y += ioctl.h
header-y += linkage.h
......@@ -37,6 +36,7 @@ unifdef-y += posix_types.h
unifdef-y += ptrace.h
unifdef-y += seccomp.h
unifdef-y += signal.h
unifdef-y += spu_info.h
unifdef-y += termios.h
unifdef-y += types.h
unifdef-y += unistd.h
......@@ -13,23 +13,26 @@
#ifndef __ASSEMBLY__
struct bug_entry {
unsigned long bug_addr;
long line;
const char *file;
const char *function;
};
struct bug_entry *find_bug(unsigned long bugaddr);
/*
* If this bit is set in the line number it means that the trap
* is for WARN_ON rather than BUG or BUG_ON.
*/
#define BUG_WARNING_TRAP 0x1000000
#ifdef CONFIG_BUG
/* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and
sizeof(struct bug_entry), respectively */
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _EMIT_BUG_ENTRY \
".section __bug_table,\"a\"\n" \
"2:\t" PPC_LONG "1b, %0\n" \
"\t.short %1, %2\n" \
".org 2b+%3\n" \
".previous\n"
#else
#define _EMIT_BUG_ENTRY \
".section __bug_table,\"a\"\n" \
"2:\t" PPC_LONG "1b\n" \
"\t.short %2\n" \
".org 2b+%3\n" \
".previous\n"
#endif
/*
* BUG_ON() and WARN_ON() do their best to cooperate with compile-time
* optimisations. However depending on the complexity of the condition
......@@ -39,10 +42,10 @@ struct bug_entry *find_bug(unsigned long bugaddr);
#define BUG() do { \
__asm__ __volatile__( \
"1: twi 31,0,0\n" \
".section __bug_table,\"a\"\n" \
"\t"PPC_LONG" 1b,%0,%1,%2\n" \
".previous" \
: : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
_EMIT_BUG_ENTRY \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (0), "i" (sizeof(struct bug_entry))); \
for(;;) ; \
} while (0)
#define BUG_ON(x) do { \
......@@ -51,23 +54,21 @@ struct bug_entry *find_bug(unsigned long bugaddr);
BUG(); \
} else { \
__asm__ __volatile__( \
"1: "PPC_TLNEI" %0,0\n" \
".section __bug_table,\"a\"\n" \
"\t"PPC_LONG" 1b,%1,%2,%3\n" \
".previous" \
: : "r" ((long)(x)), "i" (__LINE__), \
"i" (__FILE__), "i" (__FUNCTION__)); \
"1: "PPC_TLNEI" %4,0\n" \
_EMIT_BUG_ENTRY \
: : "i" (__FILE__), "i" (__LINE__), "i" (0), \
"i" (sizeof(struct bug_entry)), \
"r" ((long)(x))); \
} \
} while (0)
#define __WARN() do { \
__asm__ __volatile__( \
"1: twi 31,0,0\n" \
".section __bug_table,\"a\"\n" \
"\t"PPC_LONG" 1b,%0,%1,%2\n" \
".previous" \
: : "i" (__LINE__ + BUG_WARNING_TRAP), \
"i" (__FILE__), "i" (__FUNCTION__)); \
_EMIT_BUG_ENTRY \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (BUGFLAG_WARNING), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#define WARN_ON(x) ({ \
......@@ -77,13 +78,12 @@ struct bug_entry *find_bug(unsigned long bugaddr);
__WARN(); \
} else { \
__asm__ __volatile__( \
"1: "PPC_TLNEI" %0,0\n" \
".section __bug_table,\"a\"\n" \
"\t"PPC_LONG" 1b,%1,%2,%3\n" \
".previous" \
: : "r" (__ret_warn_on), \
"i" (__LINE__ + BUG_WARNING_TRAP), \
"i" (__FILE__), "i" (__FUNCTION__)); \
"1: "PPC_TLNEI" %4,0\n" \
_EMIT_BUG_ENTRY \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (BUGFLAG_WARNING), \
"i" (sizeof(struct bug_entry)), \
"r" (__ret_warn_on)); \
} \
unlikely(__ret_warn_on); \
})
......
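[Editor's note: one WARN_ON() site now costs a single trap instruction inline plus one out-of-line record; with CONFIG_DEBUG_BUGVERBOSE the record _EMIT_BUG_ENTRY lays down is equivalent to the following (illustrative values):]

/* Illustrative __bug_table record emitted for a WARN_ON() site
 * (CONFIG_DEBUG_BUGVERBOSE=y; file and line are made up). */
static const struct bug_entry example_record = {
	.bug_addr = 0,			/* really: address of the tlnei */
	.file	  = "drivers/foo/bar.c",
	.line	  = 1234,
	.flags	  = BUGFLAG_WARNING,	/* 0 for BUG()/BUG_ON() */
};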
......@@ -126,6 +126,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
#define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000)
#define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000)
#define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000)
#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x0000000000800000)
/*
* Add the 64-bit processor unique features in the top half of the word;
......@@ -152,6 +153,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
#define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000)
#define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000)
#define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000)
#define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000)
#ifndef __ASSEMBLY__
......@@ -295,6 +297,9 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
#define CPU_FTRS_E300 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
CPU_FTR_COMMON)
#define CPU_FTRS_E300C2 (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE)
#define CPU_FTRS_8XX (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB)
......@@ -330,13 +335,14 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE)
CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
CPU_FTR_DSCR)
#define CPU_FTRS_POWER6X (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_MMCRA | CPU_FTR_SMT | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
CPU_FTR_PURR | CPU_FTR_CI_LARGE_PAGE | \
CPU_FTR_SPURR | CPU_FTR_REAL_LE)
CPU_FTR_SPURR | CPU_FTR_REAL_LE | CPU_FTR_DSCR)
#define CPU_FTRS_CELL (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
......@@ -364,7 +370,8 @@ enum {
CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_CLASSIC32 |
CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 |
CPU_FTRS_CLASSIC32 |
#else
CPU_FTRS_GENERIC_32 |
#endif
......@@ -403,7 +410,8 @@ enum {
CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_CLASSIC32 &
CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 &
CPU_FTRS_CLASSIC32 &
#else
CPU_FTRS_GENERIC_32 &
#endif
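The POSSIBLE mask is the OR of every feature any configured CPU may have, and the ALWAYS mask is the AND of the features every configured CPU has; cpu_has_feature() tests against both, so the compiler can constant-fold the check whenever the kernel configuration pins the answer. A sketch of a runtime test (the function and printk are illustrative only):

static void example_show_dscr(void)
{
	/* folds to a constant when the configured CPU set decides it */
	if (cpu_has_feature(CPU_FTR_DSCR))
		printk(KERN_INFO "DSCR = %lx\n", mfspr(SPRN_DSCR));
}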
......
......@@ -20,8 +20,7 @@
#ifndef _ASM_POWERPC_DCR_NATIVE_H
#define _ASM_POWERPC_DCR_NATIVE_H
#ifdef __KERNEL__
#include <asm/reg.h>
#ifndef __ASSEMBLY__
typedef struct {} dcr_host_t;
......@@ -32,7 +31,41 @@ typedef struct {} dcr_host_t;
#define dcr_read(host, dcr_n) mfdcr(dcr_n)
#define dcr_write(host, dcr_n, value) mtdcr(dcr_n, value)
/* Device Control Registers */
void __mtdcr(int reg, unsigned int val);
unsigned int __mfdcr(int reg);
#define mfdcr(rn) \
({unsigned int rval; \
if (__builtin_constant_p(rn)) \
asm volatile("mfdcr %0," __stringify(rn) \
: "=r" (rval)); \
else \
rval = __mfdcr(rn); \
rval;})
#define mtdcr(rn, v) \
do { \
if (__builtin_constant_p(rn)) \
asm volatile("mtdcr " __stringify(rn) ",%0" \
: : "r" (v)); \
else \
__mtdcr(rn, v); \
} while (0)
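These wrappers pick the code path at compile time: a constant DCR number is folded into an immediate-form mfdcr/mtdcr instruction (DCR numbers are instruction immediates, not register operands), while a run-time number falls back to the out-of-line __mfdcr()/__mtdcr() helpers. A sketch with a hypothetical DCR number:

#define DCRN_EXAMPLE	0x0b0	/* hypothetical, for illustration only */

static unsigned int example_dcr_toggle(int runtime_dcrn)
{
	unsigned int val = mfdcr(DCRN_EXAMPLE);	/* constant: immediate form */

	mtdcr(runtime_dcrn, val);		/* non-constant: __mtdcr() */
	return val;
}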
/* R/W of indirect DCRs makes use of the standard naming conventions for DCRs */
#define mfdcri(base, reg) \
({ \
mtdcr(base ## _CFGADDR, base ## _ ## reg); \
mfdcr(base ## _CFGDATA); \
})
#define mtdcri(base, reg, data) \
do { \
mtdcr(base ## _CFGADDR, base ## _ ## reg); \
mtdcr(base ## _CFGDATA, data); \
} while (0)
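The indirect forms rely on the token-pasting convention above: given a base FOO, the FOO_CFGADDR/FOO_CFGDATA pair selects and transfers the register named FOO_&lt;reg&gt;. A sketch with hypothetical register numbers:

/* hypothetical indirect DCR block following the naming convention */
#define DCRN_FOO_CFGADDR	0x00c
#define DCRN_FOO_CFGDATA	0x00d
#define DCRN_FOO_CTRL		0x01	/* index written into CFGADDR */

static void example_indirect(void)
{
	unsigned int ctrl = mfdcri(DCRN_FOO, CTRL);

	mtdcri(DCRN_FOO, CTRL, ctrl | 0x1);
}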
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DCR_NATIVE_H */
......
......@@ -20,6 +20,7 @@
#ifndef _ASM_POWERPC_DCR_H
#define _ASM_POWERPC_DCR_H
#ifdef __KERNEL__
#ifdef CONFIG_PPC_DCR
#ifdef CONFIG_PPC_DCR_NATIVE
#include <asm/dcr-native.h>
......@@ -38,5 +39,6 @@ extern unsigned int dcr_resource_len(struct device_node *np,
unsigned int index);
#endif /* CONFIG_PPC_MERGE */
#endif /* CONFIG_PPC_DCR */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DCR_H */
......@@ -107,25 +107,6 @@ static inline void local_irq_save_ptr(unsigned long *flags)
#endif /* CONFIG_PPC64 */
#define mask_irq(irq) \
({ \
irq_desc_t *desc = get_irq_desc(irq); \
if (desc->chip && desc->chip->disable) \
desc->chip->disable(irq); \
})
#define unmask_irq(irq) \
({ \
irq_desc_t *desc = get_irq_desc(irq); \
if (desc->chip && desc->chip->enable) \
desc->chip->enable(irq); \
})
#define ack_irq(irq) \
({ \
irq_desc_t *desc = get_irq_desc(irq); \
if (desc->chip && desc->chip->ack) \
desc->chip->ack(irq); \
})
/*
* interrupt-retrigger: should we handle this via lost interrupts and IPIs
* or should we not care like we do now ? --BenH.
......
......@@ -46,8 +46,6 @@ struct mod_arch_specific {
unsigned int num_bugs;
};
extern struct bug_entry *module_find_bug(unsigned long bugaddr);
/*
* Select ELF headers.
* Make empty section for module_frob_arch_sections to expand.
......
......@@ -31,12 +31,12 @@ struct pci_controller {
int last_busno;
void __iomem *io_base_virt;
unsigned long io_base_phys;
resource_size_t io_base_phys;
/* Some machines have a non 1:1 mapping of
* the PCI memory space in the CPU bus space
*/
unsigned long pci_mem_offset;
resource_size_t pci_mem_offset;
unsigned long pci_io_size;
struct pci_ops *ops;
......
......@@ -143,8 +143,13 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
#ifdef CONFIG_PPC64
/* pci_unmap_{single,page} is not a nop, thus... */
#if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE)
/*
* For 64-bit kernels, pci_unmap_{single,page} is not a nop.
* For 32-bit non-coherent kernels, pci_dma_sync_single_for_cpu() and
* so on are not nops either, and thus...
*/
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
......@@ -158,6 +163,20 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
#else /* 32-bit && coherent */
/* pci_unmap_{page,single} is a nop so... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME) (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif /* CONFIG_PPC64 || CONFIG_NOT_COHERENT_CACHE */
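The usual pattern, following Documentation/DMA-mapping.txt (the example_ring structure and functions are hypothetical): a driver declares the unmap state inside its own structure, and on 32-bit cache-coherent kernels, where unmap and sync are nops, the declarations compile away to nothing.

struct example_ring {
	void *cpu_buf;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(maplen)
};

static void example_map(struct pci_dev *pdev, struct example_ring *r,
			void *buf, size_t len)
{
	dma_addr_t dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);

	pci_unmap_addr_set(r, mapping, dma);
	pci_unmap_len_set(r, maplen, len);
}

static void example_unmap(struct pci_dev *pdev, struct example_ring *r)
{
	pci_unmap_single(pdev, pci_unmap_addr(r, mapping),
			 pci_unmap_len(r, maplen), PCI_DMA_TODEVICE);
}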
#ifdef CONFIG_PPC64
/* The PCI address space does not equal the physical memory address
* space (we have an IOMMU). The IDE and SCSI device layers use
* this boolean for bounce buffer decisions.
......@@ -172,14 +191,6 @@ int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
*/
#define PCI_DMA_BUS_IS_PHYS (1)
/* pci_unmap_{page,single} is a nop so... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME) (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif /* CONFIG_PPC64 */
extern void pcibios_resource_to_bus(struct pci_dev *dev,
......
......@@ -143,6 +143,7 @@
/* Special Purpose Registers (SPRNs)*/
#define SPRN_CTR 0x009 /* Count Register */
#define SPRN_DSCR 0x11
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
#define CTRL_CT 0xc0000000 /* current thread */
......@@ -163,6 +164,7 @@
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
#define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */
#define SPRN_SPURR 0x134 /* Scaled PURR */
#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
#define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */
#define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */
......
......@@ -159,6 +159,7 @@ extern struct rtas_t rtas;
extern void enter_rtas(unsigned long);
extern int rtas_token(const char *service);
extern int rtas_service_present(const char *service);
extern int rtas_call(int token, int, int, int *, ...);
extern void rtas_restart(char *cmd);
extern void rtas_power_off(void);
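rtas_call() takes the token, the number of input words, the number of return words (including the status word), a pointer for any extra outputs, and then the inputs themselves; it returns the RTAS status. A sketch of a simple call with no inputs (the wrapper name is hypothetical):

static int example_rtas_reboot(void)
{
	int token = rtas_token("system-reboot");

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;
	/* no inputs, one return word (the status), no extra outputs */
	return rtas_call(token, 0, 1, NULL);
}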
......@@ -221,8 +222,6 @@ extern int rtas_get_error_log_max(void);
extern spinlock_t rtas_data_buf_lock;
extern char rtas_data_buf[RTAS_DATA_BUF_SIZE];
extern void rtas_stop_self(void);
/* RMO buffer reserved for user-space RTAS use */
extern unsigned long rtas_rmo_buf;
......
......@@ -20,8 +20,8 @@ extern unsigned long pci_bus_mem_base_phys(unsigned int bus);
extern struct pci_controller* pcibios_alloc_controller(void);
/* Helper function for setting up resources */
extern void pci_init_resource(struct resource *res, unsigned long start,
unsigned long end, int flags, char *name);
extern void pci_init_resource(struct resource *res, resource_size_t start,
resource_size_t end, int flags, char *name);
/* Get the PCI host controller for a bus */
extern struct pci_controller* pci_bus_to_hose(int bus);
......@@ -50,12 +50,12 @@ struct pci_controller {
int bus_offset;
void __iomem *io_base_virt;
unsigned long io_base_phys;
resource_size_t io_base_phys;
/* Some machines (PReP) have a non 1:1 mapping of
* the PCI memory space in the CPU bus space
*/
unsigned long pci_mem_offset;
resource_size_t pci_mem_offset;
struct pci_ops *ops;
volatile unsigned int __iomem *cfg_addr;
......
......@@ -61,6 +61,27 @@ extern unsigned long pci_bus_to_phys(unsigned int ba, int busnr);
*/
#define PCI_DMA_BUS_IS_PHYS (1)
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
* pci_unmap_{page,single} are NOPs but pci_dma_sync_single_for_cpu()
* and so on are not, so...
*/
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME) \
((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME) \
((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
(((PTR)->LEN_NAME) = (VAL))
#else /* coherent */
/* pci_unmap_{page,single} is a nop so... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
......@@ -69,6 +90,8 @@ extern unsigned long pci_bus_to_phys(unsigned int ba, int busnr);
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif /* CONFIG_NOT_COHERENT_CACHE */
#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,
......
......@@ -9,41 +9,9 @@
#ifndef __ASM_PPC_REG_BOOKE_H__
#define __ASM_PPC_REG_BOOKE_H__
#ifndef __ASSEMBLY__
/* Device Control Registers */
void __mtdcr(int reg, unsigned int val);
unsigned int __mfdcr(int reg);
#define mfdcr(rn) \
({unsigned int rval; \
if (__builtin_constant_p(rn)) \
asm volatile("mfdcr %0," __stringify(rn) \
: "=r" (rval)); \
else \
rval = __mfdcr(rn); \
rval;})
#define mtdcr(rn, v) \
do { \
if (__builtin_constant_p(rn)) \
asm volatile("mtdcr " __stringify(rn) ",%0" \
: : "r" (v)); \
else \
__mtdcr(rn, v); \
} while (0)
/* R/W of indirect DCRs makes use of the standard naming conventions for DCRs */
#define mfdcri(base, reg) \
({ \
mtdcr(base ## _CFGADDR, base ## _ ## reg); \
mfdcr(base ## _CFGDATA); \
})
#define mtdcri(base, reg, data) \
do { \
mtdcr(base ## _CFGADDR, base ## _ ## reg); \
mtdcr(base ## _CFGDATA, data); \
} while (0)
#include <asm/dcr.h>
#ifndef __ASSEMBLY__
/* Performance Monitor Registers */
#define mfpmr(rn) ({unsigned int rval; \
asm volatile("mfpmr %0," __stringify(rn) \
......
......@@ -19,6 +19,7 @@
#define _FSL_DEVICE_H_
#include <linux/types.h>
#include <linux/phy.h>
/*
* Some conventions on how we handle peripherals on Freescale chips
......