Commit 919a6d10 authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (29 commits)
  powerpc/rtas: Fix watchdog driver temperature read functionality
  powerpc/mm: Fix potential access to freed pages when using hugetlbfs
  powerpc/440: Fix warning early debug code
  powerpc/of: Fix usage of dev_set_name() in of_device_alloc()
  powerpc/pasemi: Use raw spinlock in SMP TB sync
  powerpc: Use one common impl. of RTAS timebase sync and use raw spinlock
  powerpc/rtas: Turn rtas lock into a raw spinlock
  powerpc: Add irqtrace support for 32-bit powerpc
  powerpc/BSR: Fix BSR to allow mmap of small BSR on 64k kernel
  powerpc/BSR: add 4096 byte BSR size
  powerpc: Map more memory early on 601 processors
  powerpc/pmac: Fix DMA ops for MacIO devices
  powerpc/mm: Make k(un)map_atomic out of line
  powerpc: Fix mpic alloc warning
  powerpc: Fix output from show_regs
  powerpc/pmac: Fix issues with PowerMac "PowerSurge" SMP
  powerpc/amigaone: Limit ISA I/O range to 4k in the device tree
  powerpc/warp: Platform fix for i2c change
  powerpc: Have git ignore generated files from dtc compile
  powerpc/mpic: Fix mapping of "DCR" based MPIC variants
  ...
parents cf2acfb2 fd0cca75
4xx/Axon EMAC ethernet nodes
The EMAC ethernet controller is found in IBM and AMCC 4xx chips, and
also in the Axon bridge. To operate, it needs to interact with a
special McMAL DMA controller, and sometimes with an RGMII or ZMII
interface. In addition to the nodes and properties described
below, the node for the OPB bus on which the EMAC sits must have a
correct clock-frequency property.
i) The EMAC node itself
Required properties:
- device_type : "network"
- compatible : compatible list, contains 2 entries, first is
"ibm,emac-CHIP" where CHIP is the host ASIC (440gx,
405gp, Axon) and second is either "ibm,emac" or
"ibm,emac4". For Axon, thus, we have: "ibm,emac-axon",
"ibm,emac4"
- interrupts : <interrupt mapping for EMAC IRQ and WOL IRQ>
- interrupt-parent : optional, if needed for interrupt mapping
- reg : <registers mapping>
- local-mac-address : 6 bytes, MAC address
- mal-device : phandle of the associated McMAL node
- mal-tx-channel : 1 cell, index of the tx channel on McMAL associated
with this EMAC
- mal-rx-channel : 1 cell, index of the rx channel on McMAL associated
with this EMAC
- cell-index : 1 cell, hardware index of the EMAC cell on a given
ASIC (typically 0x0 and 0x1 for EMAC0 and EMAC1 on
each Axon chip)
- max-frame-size : 1 cell, maximum frame size supported in bytes
- rx-fifo-size : 1 cell, Rx fifo size in bytes for 10 and 100 Mb/sec
operations.
For Axon, 2048
- tx-fifo-size : 1 cell, Tx fifo size in bytes for 10 and 100 Mb/sec
operations.
For Axon, 2048.
- fifo-entry-size : 1 cell, size of a fifo entry (used to calculate
thresholds).
For Axon, 0x00000010
- mal-burst-size : 1 cell, MAL burst size (used to calculate thresholds)
in bytes.
For Axon, 0x00000100 (I think ...)
- phy-mode : string, mode of operations of the PHY interface.
Supported values are: "mii", "rmii", "smii", "rgmii",
"tbi", "gmii", "rtbi", "sgmii".
For Axon on CAB, it is "rgmii"
- mdio-device : 1 cell, required iff using shared MDIO registers
(440EP). phandle of the EMAC to use to drive the
MDIO lines for the PHY used by this EMAC.
- zmii-device : 1 cell, required iff connected to a ZMII. phandle of
the ZMII device node
- zmii-channel : 1 cell, required iff connected to a ZMII. Which ZMII
channel or 0xffffffff if ZMII is only used for MDIO.
- rgmii-device : 1 cell, required iff connected to an RGMII. phandle
of the RGMII device node.
For Axon: phandle of plb5/plb4/opb/rgmii
- rgmii-channel : 1 cell, required iff connected to an RGMII. Which
RGMII channel is used by this EMAC.
For Axon: present, whatever value is appropriate for each
EMAC, that is the content of the current (bogus) "phy-port"
property.
Optional properties:
- phy-address : 1 cell, optional, MDIO address of the PHY. If absent,
a search is performed.
- phy-map : 1 cell, optional, bitmap of addresses to probe the PHY
for, used if phy-address is absent. bit 0x00000001 is
MDIO address 0.
For Axon it can be absent, though my current driver
doesn't handle phy-address yet so for now, keep
0x00ffffff in it.
- rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
operations (if absent the value is the same as
rx-fifo-size). For Axon, either absent or 2048.
- tx-fifo-size-gige : 1 cell, Tx fifo size in bytes for 1000 Mb/sec
operations (if absent the value is the same as
tx-fifo-size). For Axon, either absent or 2048.
- tah-device : 1 cell, optional. If connected to a TAH engine for
offload, phandle of the TAH device node.
- tah-channel : 1 cell, optional. If appropriate, channel used on the
TAH engine.
Example:
	EMAC0: ethernet@40000800 {
		device_type = "network";
		compatible = "ibm,emac-440gp", "ibm,emac";
		interrupt-parent = <&UIC1>;
		interrupts = <1c 4 1d 4>;
		reg = <40000800 70>;
		local-mac-address = [00 04 AC E3 1B 1E];
		mal-device = <&MAL0>;
		mal-tx-channel = <0 1>;
		mal-rx-channel = <0>;
		cell-index = <0>;
		max-frame-size = <5dc>;
		rx-fifo-size = <1000>;
		tx-fifo-size = <800>;
		phy-mode = "rmii";
		phy-map = <00000001>;
		zmii-device = <&ZMII0>;
		zmii-channel = <0>;
	};
ii) McMAL node
Required properties:
- device_type : "dma-controller"
- compatible : compatible list, containing 2 entries, first is
"ibm,mcmal-CHIP" where CHIP is the host ASIC (like
emac) and the second is either "ibm,mcmal" or
"ibm,mcmal2".
For Axon, "ibm,mcmal-axon","ibm,mcmal2"
- interrupts : <interrupt mapping for the MAL interrupt sources:
5 sources: tx_eob, rx_eob, serr, txde, rxde>.
For Axon: This is _different_ from the current
firmware. We use the "delayed" interrupts for txeob
and rxeob. Thus we end up mapping those 5 MPIC
interrupts, all level positive sensitive: 10, 11, 32,
33, 34 (in decimal)
- dcr-reg : < DCR registers range >
- dcr-parent : if needed for dcr-reg
- num-tx-chans : 1 cell, number of Tx channels
- num-rx-chans : 1 cell, number of Rx channels
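The binding above ships without a sample node, so here is a minimal
sketch of a McMAL node, assuming a 440GP-class host ASIC. The dcr-reg
range, channel counts, interrupt parent and interrupt specifiers are
illustrative placeholders only and must be taken from the actual chip
documentation:
	MAL0: mcmal {
		device_type = "dma-controller";
		compatible = "ibm,mcmal-440gp", "ibm,mcmal";
		dcr-reg = <180 62>;
		num-tx-chans = <4>;
		num-rx-chans = <4>;
		interrupt-parent = <&UIC0>;
		/* tx_eob, rx_eob, serr, txde, rxde */
		interrupts = <a 4 b 4 c 4 d 4 e 4>;
	};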
iii) ZMII node
Required properties:
- compatible : compatible list, containing 2 entries, first is
"ibm,zmii-CHIP" where CHIP is the host ASIC (like
EMAC) and the second is "ibm,zmii".
For Axon, there is no ZMII node.
- reg : <registers mapping>
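A minimal ZMII node sketch follows; the host-ASIC suffix, register
address and size are placeholders (and, as noted above, Axon has no
ZMII node at all):
	ZMII0: emac-zmii@40000780 {
		compatible = "ibm,zmii-440gp", "ibm,zmii";
		reg = <40000780 c>;
	};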
iv) RGMII node
Required properties:
- compatible : compatible list, containing 2 entries, first is
"ibm,rgmii-CHIP" where CHIP is the host ASIC (like
EMAC) and the second is "ibm,rgmii".
For Axon, "ibm,rgmii-axon","ibm,rgmii"
- reg : <registers mapping>
- revision : as provided by the RGMII new version register if
available.
For Axon: 0x0000012a
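A minimal RGMII node sketch, using the Axon compatible strings and
revision value quoted above; the unit address and register size are
placeholders, not taken from real hardware:
	RGMII0: emac-rgmii@ef601500 {
		compatible = "ibm,rgmii-axon", "ibm,rgmii";
		reg = <ef601500 8>;
		revision = <0x0000012a>;
	};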
Specifying GPIO information for devices
=======================================
1) gpios property
-----------------
Nodes that make use of GPIOs should define them using the `gpios'
property, the format of which is:

	gpios = <&gpio-controller1-phandle gpio1-specifier
		 &gpio-controller2-phandle gpio2-specifier
		 0 /* holes are permitted, means no GPIO 3 */
		 &gpio-controller4-phandle gpio4-specifier
		 ...>;

Note that the gpio-specifier length is controller dependent. A
gpio-specifier may encode: bank, pin position inside the bank,
whether the pin is open-drain and whether the pin is logically inverted.
Example of the node using GPIOs:
	node {
		gpios = <&qe_pio_e 18 0>;
	};
In this example the gpio-specifier is "18 0": it encodes GPIO pin
number 18 and empty GPIO flags (0), as accepted by the "qe_pio_e"
gpio-controller.
2) gpio-controller nodes
------------------------
Every GPIO controller node must have the #gpio-cells property defined;
this information is used to translate gpio-specifiers.
Example of two SOC GPIO banks defined as gpio-controller nodes:
	qe_pio_a: gpio-controller@1400 {
		#gpio-cells = <2>;
		compatible = "fsl,qe-pario-bank-a", "fsl,qe-pario-bank";
		reg = <0x1400 0x18>;
		gpio-controller;
	};

	qe_pio_e: gpio-controller@1460 {
		#gpio-cells = <2>;
		compatible = "fsl,qe-pario-bank-e", "fsl,qe-pario-bank";
		reg = <0x1460 0x18>;
		gpio-controller;
	};
MDIO on GPIOs
Currently defined compatibles:
- virtual,mdio-gpio
MDC and MDIO lines connected to GPIO controllers are listed in the
gpios property as described in section VIII.1 in the following order:
MDC, MDIO.
Example:
	mdio {
		compatible = "virtual,mdio-gpio";
		#address-cells = <1>;
		#size-cells = <0>;

		gpios = <&qe_pio_a 11
			 &qe_pio_c 6>;
	};
PHY nodes
Required properties:
- device_type : Should be "ethernet-phy"
- interrupts : <a b> where a is the interrupt number and b is a
field that represents an encoding of the sense and level
information for the interrupt. This should be encoded based on
the information in section 2) depending on the type of interrupt
controller you have.
- interrupt-parent : the phandle for the interrupt controller that
services interrupts for this device.
- reg : The ID number for the phy, usually a small integer
- linux,phandle : phandle for this node; likely referenced by an
ethernet controller node.
Example:
	ethernet-phy@0 {
		linux,phandle = <2452000>;
		interrupt-parent = <40000>;
		interrupts = <35 1>;
		reg = <0>;
		device_type = "ethernet-phy";
	};
SPI (Serial Peripheral Interface) busses
SPI busses can be described with a node for the SPI master device
and a set of child nodes for each SPI slave on the bus. For this
discussion, it is assumed that the system's SPI controller is in
SPI master mode. This binding does not describe SPI controllers
in slave mode.
The SPI master node requires the following properties:
- #address-cells - number of cells required to define a chip select
address on the SPI bus.
- #size-cells - should be zero.
- compatible - name of SPI bus controller following generic names
recommended practice.
No other properties are required in the SPI bus node. It is assumed
that a driver for an SPI bus device will understand that it is an SPI bus.
However, the binding does not attempt to define the specific method for
assigning chip select numbers. Since SPI chip select configuration is
flexible and non-standardized, it is left out of this binding with the
assumption that board specific platform code will be used to manage
chip selects. Individual drivers can define additional properties to
support describing the chip select layout.
SPI slave nodes must be children of the SPI master node and can
contain the following properties.
- reg - (required) chip select address of device.
- compatible - (required) name of SPI device following generic names
recommended practice
- spi-max-frequency - (required) Maximum SPI clocking speed of device in Hz
- spi-cpol - (optional) Empty property indicating device requires
inverse clock polarity (CPOL) mode
- spi-cpha - (optional) Empty property indicating device requires
shifted clock phase (CPHA) mode
- spi-cs-high - (optional) Empty property indicating device requires
chip select active high
SPI example for an MPC5200 SPI bus:
	spi@f00 {
		#address-cells = <1>;
		#size-cells = <0>;
		compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
		reg = <0xf00 0x20>;
		interrupts = <2 13 0 2 14 0>;
		interrupt-parent = <&mpc5200_pic>;

		ethernet-switch@0 {
			compatible = "micrel,ks8995m";
			spi-max-frequency = <1000000>;
			reg = <0>;
		};

		codec@1 {
			compatible = "ti,tlv320aic26";
			spi-max-frequency = <100000>;
			reg = <1>;
		};
	};
USB EHCI controllers
Required properties:
- compatible : should be "usb-ehci".
- reg : should contain at least address and length of the standard EHCI
register set for the device. Optional platform-dependent registers
(debug-port or other) can also be specified here, but only after the
definition of the standard EHCI registers.
- interrupts : one EHCI interrupt should be described here.
If device registers are implemented in big endian mode, the device
node should have "big-endian-regs" property.
If controller implementation operates with big endian descriptors,
"big-endian-desc" property should be specified.
If both big endian registers and descriptors are used by the controller
implementation, "big-endian" property can be specified instead of having
both "big-endian-regs" and "big-endian-desc".
Example (Sequoia 440EPx):
	ehci@e0000300 {
		compatible = "ibm,usb-ehci-440epx", "usb-ehci";
		interrupt-parent = <&UIC0>;
		interrupts = <1a 4>;
		reg = <0 e0000300 90 0 e0000390 70>;
		big-endian;
	};
@@ -62,7 +62,6 @@ config HAVE_LATENCYTOP_SUPPORT

 config TRACE_IRQFLAGS_SUPPORT
 	bool
-	depends on PPC64
 	default y

 config LOCKDEP_SUPPORT
@@ -36,3 +36,13 @@ zImage.pseries
 zconf.h
 zlib.h
 zutil.h
+fdt.c
+fdt.h
+fdt_ro.c
+fdt_rw.c
+fdt_strerror.c
+fdt_sw.c
+fdt_wip.c
+libfdt.h
+libfdt_internal.h
@@ -70,8 +70,8 @@ isa@7 {
 	devsel-speed = <0x00000001>;
 	min-grant = <0>;
 	max-latency = <0>;
-	/* First 64k for I/O at 0x0 on PCI mapped to 0x0 on ISA. */
-	ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00010000>;
+	/* First 4k for I/O at 0x0 on PCI mapped to 0x0 on ISA. */
+	ranges = <0x00000001 0 0x01000000 0 0x00000000 0x00001000>;
 	interrupt-parent = <&i8259>;
 	#interrupt-cells = <2>;
 	#address-cells = <2>;
@@ -253,6 +253,7 @@ sdhci@2e000 {
 	/* Filled in by U-Boot */
 	clock-frequency = <0>;
 	status = "disabled";
+	sdhci,1-bit-only;
 };

 crypto@30000 {
@@ -598,8 +598,6 @@ typedef struct risc_timer_pram {
 #define CICR_IEN	((uint)0x00000080)	/* Int. enable */
 #define CICR_SPS	((uint)0x00000001)	/* SCC Spread */

-#define IMAP_ADDR	(get_immrbase())
-
 #define CPM_PIN_INPUT	0
 #define CPM_PIN_OUTPUT	1
 #define CPM_PIN_PRIMARY	0
@@ -309,7 +309,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
 			   size, direction);
 }

@@ -320,7 +322,9 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_device(dev, dma_handle,
+
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(dev, dma_handle,
 			   0, size, direction);
 }

@@ -331,7 +335,9 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

 	BUG_ON(!dma_ops);
-	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
+
+	if (dma_ops->sync_sg_for_cpu)
+		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
 }

 static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +347,9 @@ static inline void dma_sync_sg_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

 	BUG_ON(!dma_ops);
-	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+
+	if (dma_ops->sync_sg_for_device)
+		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
 }

 static inline void dma_sync_single_range_for_cpu(struct device *dev,
@@ -351,7 +359,9 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+
+	if (dma_ops->sync_single_range_for_cpu)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
 			   offset, size, direction);
 }

@@ -362,7 +372,9 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

 	BUG_ON(!dma_ops);
-	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+
+	if (dma_ops->sync_single_range_for_device)
+		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
 			   size, direction);
 }

 #else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
@@ -22,9 +22,7 @@
 #ifdef __KERNEL__

-#include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/highmem.h>
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>

@@ -62,6 +60,9 @@ extern pte_t *pkmap_page_table;
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
+extern void *kmap_atomic_prot(struct page *page, enum km_type type,
+			      pgprot_t prot);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);

 static inline void *kmap(struct page *page)
 {

@@ -79,62 +80,11 @@ static inline void kunmap(struct page *page)
 	kunmap_high(page);
 }

-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
-{
-	unsigned int idx;
-	unsigned long vaddr;
-
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
-	debug_kmap_atomic(type);
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(kmap_pte-idx)));
-#endif
-	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
-	local_flush_tlb_page(NULL, vaddr);
-
-	return (void*) vaddr;
-}
-
 static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
 	return kmap_atomic_prot(page, type, kmap_prot);
 }

-static inline void kunmap_atomic(void *kvaddr, enum km_type type)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
-		pagefault_enable();
-		return;
-	}
-
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_page(NULL, vaddr);
-#endif
-	pagefault_enable();
-}
-
 static inline struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long) ptr;

@@ -148,6 +98,7 @@ static inline struct page *kmap_atomic_to_page(void *ptr)
 	return pte_page(*pte);
 }

+
 #define flush_cache_kmaps()	flush_cache_all()

 #endif /* __KERNEL__ */
@@ -68,13 +68,13 @@ static inline int irqs_disabled_flags(unsigned long flags)
 #if defined(CONFIG_BOOKE)
 #define SET_MSR_EE(x)	mtmsr(x)
-#define local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
+#define raw_local_irq_restore(flags)	__asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
 #else
 #define SET_MSR_EE(x)	mtmsr(x)
-#define local_irq_restore(flags)	mtmsr(flags)
+#define raw_local_irq_restore(flags)	mtmsr(flags)
 #endif

-static inline void local_irq_disable(void)
+static inline void raw_local_irq_disable(void)
 {
 #ifdef CONFIG_BOOKE
 	__asm__ __volatile__("wrteei 0": : :"memory");

@@ -86,7 +86,7 @@ static inline void local_irq_disable(void)
 #endif
 }

-static inline void local_irq_enable(void)
+static inline void raw_local_irq_enable(void)
 {
 #ifdef CONFIG_BOOKE
 	__asm__ __volatile__("wrteei 1": : :"memory");

@@ -98,7 +98,7 @@ static inline void local_irq_enable(void)
 #endif
 }

-static inline void local_irq_save_ptr(unsigned long *flags)
+static inline void raw_local_irq_save_ptr(unsigned long *flags)
 {
 	unsigned long msr;
 	msr = mfmsr();

@@ -110,12 +110,12 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #endif
 }

-#define local_save_flags(flags)	((flags) = mfmsr())
-#define local_irq_save(flags)	local_irq_save_ptr(&flags)
-#define irqs_disabled()		((mfmsr() & MSR_EE) == 0)
+#define raw_local_save_flags(flags)	((flags) = mfmsr())
+#define raw_local_irq_save(flags)	raw_local_irq_save_ptr(&flags)
+#define raw_irqs_disabled()		((mfmsr() & MSR_EE) == 0)
+#define raw_irqs_disabled_flags(flags)	(((flags) & MSR_EE) == 0)

-#define hard_irq_enable()	local_irq_enable()
-#define hard_irq_disable()	local_irq_disable()
+#define hard_irq_disable()		raw_local_irq_disable()

 static inline int irqs_disabled_flags(unsigned long flags)
 {
@@ -47,7 +47,8 @@
  * generic accessors and iterators here
  */
 #define __real_pte(e,p)	((real_pte_t) { \
-	(e), pte_val(*((p) + PTRS_PER_PTE)) })
+	(e), ((e) & _PAGE_COMBO) ? \
+		(pte_val(*((p) + PTRS_PER_PTE))) : 0 })
 #define __rpte_to_hidx(r,index)	((pte_val((r).pte) & _PAGE_COMBO) ? \
 	(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
 #define __rpte_to_pte(r)	((r).pte)
@@ -58,7 +58,7 @@ struct rtas_t {
 	unsigned long entry;		/* physical address pointer */
 	unsigned long base;		/* physical address pointer */
 	unsigned long size;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct rtas_args args;
 	struct device_node *dev;	/* virtual address pointer */
 };

@@ -245,5 +245,8 @@ static inline u32 rtas_config_addr(int busno, int devfn, int reg)
 		(devfn << 8) | (reg & 0xff);
 }

+extern void __cpuinit rtas_give_timebase(void);
+extern void __cpuinit rtas_take_timebase(void);
+
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_RTAS_H */
@@ -191,11 +191,49 @@ transfer_to_handler_cont:
 	mflr	r9
 	lwz	r11,0(r9)		/* virtual address of handler */
 	lwz	r9,4(r9)		/* where to go when done */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	lis	r12,reenable_mmu@h
+	ori	r12,r12,reenable_mmu@l
+	mtspr	SPRN_SRR0,r12
+	mtspr	SPRN_SRR1,r10
+	SYNC
+	RFI
+reenable_mmu:				/* re-enable mmu so we can */
+	mfmsr	r10
+	lwz	r12,_MSR(r1)
+	xor	r10,r10,r12
+	andi.	r10,r10,MSR_EE		/* Did EE change? */
+	beq	1f
+
+	/* Save handler and return address into the 2 unused words
+	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
+	 * else can be recovered from the pt_regs except r3 which for
+	 * normal interrupts has been set to pt_regs and for syscalls
+	 * is an argument, so we temporarily use ORIG_GPR3 to save it
+	 */
+	stw	r9,8(r1)
+	stw	r11,12(r1)
+	stw	r3,ORIG_GPR3(r1)
+	bl	trace_hardirqs_off
+	lwz	r0,GPR0(r1)
+	lwz	r3,ORIG_GPR3(r1)
+	lwz	r4,GPR4(r1)
+	lwz	r5,GPR5(r1)
+	lwz	r6,GPR6(r1)
+	lwz	r7,GPR7(r1)
+	lwz	r8,GPR8(r1)
+	lwz	r9,8(r1)
+	lwz	r11,12(r1)
+1:	mtctr	r11
+	mtlr	r9
+	bctr				/* jump to handler */
+#else /* CONFIG_TRACE_IRQFLAGS */
 	mtspr	SPRN_SRR0,r11
 	mtspr	SPRN_SRR1,r10
 	mtlr	r9
 	SYNC
 	RFI				/* jump to handler, enable MMU */
+#endif /* CONFIG_TRACE_IRQFLAGS */

 #if defined (CONFIG_6xx) || defined(CONFIG_E500)
 4:	rlwinm	r12,r12,0,~_TLF_NAPPING

@@ -251,6 +289,31 @@ _GLOBAL(DoSyscall)
 #ifdef SHOW_SYSCALLS
 	bl	do_show_syscall
 #endif /* SHOW_SYSCALLS */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Return from syscalls can (and generally will) hard enable
+	 * interrupts. You aren't supposed to call a syscall with
+	 * interrupts disabled in the first place. However, to ensure
+	 * that we get it right vs. lockdep if it happens, we force
+	 * that hard enable here with appropriate tracing if we see
+	 * that we have been called with interrupts off
+	 */
+	mfmsr	r11
+	andi.	r12,r11,MSR_EE
+	bne+	1f
+	/* We came in with interrupts disabled, we enable them now */
+	bl	trace_hardirqs_on
+	mfmsr	r11
+	lwz	r0,GPR0(r1)
+	lwz	r3,GPR3(r1)
+	lwz	r4,GPR4(r1)
+	ori	r11,r11,MSR_EE
+	lwz	r5,GPR5(r1)
+	lwz	r6,GPR6(r1)
+	lwz	r7,GPR7(r1)
+	lwz	r8,GPR8(r1)
+	mtmsr	r11
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
 	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 	lwz	r11,TI_FLAGS(r10)
 	andi.	r11,r11,_TIF_SYSCALL_T_OR_A

@@ -275,6 +338,7 @@ ret_from_syscall:
 	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
 	/* disable interrupts so current_thread_info()->flags can't change */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
+	/* Note: We don't bother telling lockdep about it */
 	SYNC
 	MTMSRD(r10)
 	lwz	r9,TI_FLAGS(r12)

@@ -288,6 +352,19 @@ ret_from_syscall:
 	oris	r11,r11,0x1000	/* Set SO bit in CR */
 	stw	r11,_CCR(r1)
 syscall_exit_cont:
+	lwz	r8,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* If we are going to return from the syscall with interrupts
+	 * off, we trace that here. It shouldn't happen though but we
+	 * want to catch the bugger if it does, right?
+	 */
+	andi.	r10,r8,MSR_EE
+	bne+	1f
+	stw	r3,GPR3(r1)
+	bl	trace_hardirqs_off
+	lwz	r3,GPR3(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 	/* If the process has its own DBCR0 value, load it up.  The internal
 	   debug mode bit tells us that dbcr0 should be loaded. */

@@ -311,7 +388,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	mtlr	r4
 	mtcr	r5
 	lwz	r7,_NIP(r1)
-	lwz	r8,_MSR(r1)
 	FIX_SRR1(r8, r0)
 	lwz	r2,GPR2(r1)
 	lwz	r1,GPR1(r1)

@@ -394,7 +470,9 @@ syscall_exit_work:
 	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
 	beq	ret_from_except

-	/* Re-enable interrupts */
+	/* Re-enable interrupts. There is no need to trace that with
+	 * lockdep as we are supposed to have IRQs on at this point
+	 */
 	ori	r10,r10,MSR_EE
 	SYNC
 	MTMSRD(r10)

@@ -705,6 +783,7 @@ ret_from_except:
 	/* Hard-disable interrupts so that current_thread_info()->flags
 	 * can't change between when we test it and when we return
 	 * from the interrupt. */
+	/* Note: We don't bother telling lockdep about it */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 	SYNC			/* Some chip revs have problems here... */
 	MTMSRD(r10)		/* disable interrupts */

@@ -744,11 +823,24 @@ resume_kernel:
 	beq+	restore
 	andi.	r0,r3,MSR_EE	/* interrupts off? */
 	beq	restore		/* don't schedule if so */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Lockdep thinks irqs are enabled, we need to call
+	 * preempt_schedule_irq with IRQs off, so we inform lockdep
+	 * now that we -did- turn them off already
+	 */
+	bl	trace_hardirqs_off
+#endif
 1:	bl	preempt_schedule_irq
 	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
 	lwz	r3,TI_FLAGS(r9)
 	andi.	r0,r3,_TIF_NEED_RESCHED
 	bne-	1b
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* And now, to properly rebalance the above, we tell lockdep they
+	 * are being turned back on, which will happen when we return
+	 */
+	bl	trace_hardirqs_on
+#endif
 #else
 resume_kernel:
 #endif /* CONFIG_PREEMPT */

@@ -765,6 +857,28 @@ restore:
 	stw	r6,icache_44x_need_flush@l(r4)
 1:
 #endif  /* CONFIG_44x */
+
+	lwz	r9,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
+	 * off in this assembly code while peeking at TI_FLAGS() and such. However
+	 * we need to inform it if the exception turned interrupts off, and we
+	 * are about to turn them back on.
+	 *
+	 * The problem here sadly is that we don't know whether the exception was
+	 * one that turned interrupts off or not. So we always tell lockdep about
+	 * turning them on here when we go back to wherever we came from with EE
+	 * on, even if that may mean some redundant calls being tracked. Maybe later
+	 * we could encode what the exception did somewhere or test the exception
+	 * type in the pt_regs but that sounds overkill
+	 */
+	andi.	r10,r9,MSR_EE
+	beq	1f
+	bl	trace_hardirqs_on
+	lwz	r9,_MSR(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
 	lwz	r0,GPR0(r1)
 	lwz	r2,GPR2(r1)
 	REST_4GPRS(3, r1)

@@ -782,7 +896,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	stwcx.	r0,0,r1			/* to clear the reservation */

 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-	lwz	r9,_MSR(r1)
 	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
 	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

@@ -805,7 +918,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	MTMSRD(r10)		/* clear the RI bit */
 	.globl exc_exit_restart
 exc_exit_restart:
-	lwz	r9,_MSR(r1)
 	lwz	r12,_NIP(r1)
 	FIX_SRR1(r9,r10)
 	mtspr	SPRN_SRR0,r12

@@ -1035,11 +1147,18 @@ do_work:			/* r10 contains MSR_KERNEL here */
 	beq	do_user_signal

 do_resched:			/* r10 contains MSR_KERNEL here */
+	/* Note: We don't need to inform lockdep that we are enabling
+	 * interrupts here. As far as it knows, they are already enabled
+	 */
 	ori	r10,r10,MSR_EE
 	SYNC
 	MTMSRD(r10)		/* hard-enable interrupts */
 	bl	schedule
 recheck:
+	/* Note: And we don't tell it we are disabling them again,
+	 * either. Those disable/enable cycles used to peek at
+	 * TI_FLAGS aren't advertised.
+	 */
 	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
 	SYNC
 	MTMSRD(r10)		/* disable interrupts */
@@ -1124,9 +1124,8 @@ mmu_off:
 	RFI

 /*
- * Use the first pair of BAT registers to map the 1st 16MB
- * of RAM to PAGE_OFFSET.  From this point on we can't safely
- * call OF any more.
+ * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
+ * (we keep one for debugging) and on others, we use one 256M BAT.
 */
 initial_bats:
 	lis	r11,PAGE_OFFSET@h

@@ -1136,12 +1135,16 @@ initial_bats:
 	bne	4f
 	ori	r11,r11,4		/* set up BAT registers for 601 */
 	li	r8,0x7f			/* valid, block length = 8MB */
-	oris	r9,r11,0x800000@h	/* set up BAT reg for 2nd 8M */
-	oris	r10,r8,0x800000@h	/* set up BAT reg for 2nd 8M */
 	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
 	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
-	mtspr	SPRN_IBAT1U,r9
-	mtspr	SPRN_IBAT1L,r10
+	addis	r11,r11,0x800000@h
+	addis	r8,r8,0x800000@h
+	mtspr	SPRN_IBAT1U,r11
+	mtspr	SPRN_IBAT1L,r8
+	addis	r11,r11,0x800000@h
+	addis	r8,r8,0x800000@h
+	mtspr	SPRN_IBAT2U,r11
+	mtspr	SPRN_IBAT2L,r8
 	isync
 	blr
@@ -76,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np,
 	dev->dev.archdata.of_node = np;

 	if (bus_id)
-		dev_set_name(&dev->dev, bus_id);
+		dev_set_name(&dev->dev, "%s", bus_id);
 	else
 		of_device_make_bus_id(dev);
@@ -528,7 +528,7 @@ void show_regs(struct pt_regs * regs)
 	for (i = 0;  i < 32;  i++) {
 		if ((i % REGS_PER_LINE) == 0)
-			printk("\n" KERN_INFO "GPR%02d: ", i);
+			printk("\nGPR%02d: ", i);
 		printk(REG " ", regs->gpr[i]);
 		if (i == LAST_VOLATILE && !FULL_REGS(regs))
 			break;
@@ -38,9 +38,10 @@
 #include <asm/syscalls.h>
 #include <asm/smp.h>
 #include <asm/atomic.h>
+#include <asm/time.h>

 struct rtas_t rtas = {
-	.lock = SPIN_LOCK_UNLOCKED
+	.lock = __RAW_SPIN_LOCK_UNLOCKED
 };
 EXPORT_SYMBOL(rtas);

@@ -67,6 +68,28 @@ unsigned long rtas_rmo_buf;
 void (*rtas_flash_term_hook)(int);
 EXPORT_SYMBOL(rtas_flash_term_hook);

+/* RTAS uses home-made raw locking instead of spin_lock_irqsave
+ * because those can be called from within really nasty contexts
+ * such as having the timebase stopped which would lockup with
+ * normal locks and spinlock debugging enabled
+ */
+static unsigned long lock_rtas(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	__raw_spin_lock_flags(&rtas.lock, flags);
+	return flags;
+}
+
+static void unlock_rtas(unsigned long flags)
+{
+	__raw_spin_unlock(&rtas.lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
 /*
  * call_rtas_display_status and call_rtas_display_status_delay
  * are designed only for very early low-level debugging, which

@@ -79,7 +102,7 @@ static void call_rtas_display_status(char c)
 	if (!rtas.base)
 		return;

-	spin_lock_irqsave(&rtas.lock, s);
+	s = lock_rtas();

 	args->token = 10;
 	args->nargs = 1;

@@ -89,7 +112,7 @@ static void call_rtas_display_status(char c)
 	enter_rtas(__pa(args));

-	spin_unlock_irqrestore(&rtas.lock, s);
+	unlock_rtas(s);
 }

 static void call_rtas_display_status_delay(char c)

@@ -411,8 +434,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
 		return -1;

-	/* Gotta do something different here, use global lock for now... */
-	spin_lock_irqsave(&rtas.lock, s);
+	s = lock_rtas();

 	rtas_args = &rtas.args;
 	rtas_args->token = token;

@@ -439,8 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 		outputs[i] = rtas_args->rets[i+1];
 	ret = (nret > 0)? rtas_args->rets[0]: 0;

-	/* Gotta do something different here, use global lock for now... */
-	spin_unlock_irqrestore(&rtas.lock, s);
+	unlock_rtas(s);

 	if (buff_copy) {
 		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);

@@ -837,7 +858,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 	buff_copy = get_errorlog_buffer();

-	spin_lock_irqsave(&rtas.lock, flags);
+	flags = lock_rtas();

 	rtas.args = args;
 	enter_rtas(__pa(&rtas.args));

@@ -848,7 +869,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 	if (args.rets[0] == -1)
 		errbuf = __fetch_rtas_last_error(buff_copy);

-	spin_unlock_irqrestore(&rtas.lock, flags);
+	unlock_rtas(flags);

 	if (buff_copy) {
 		if (errbuf)

@@ -951,3 +972,33 @@ int __init early_init_dt_scan_rtas(unsigned long node,
 	/* break now */
 	return 1;
 }
+
+static raw_spinlock_t timebase_lock;
+static u64 timebase = 0;
+
+void __cpuinit rtas_give_timebase(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	hard_irq_disable();
+	__raw_spin_lock(&timebase_lock);
+	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
+	timebase = get_tb();
+	__raw_spin_unlock(&timebase_lock);
+
+	while (timebase)
+		barrier();
+	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
+	local_irq_restore(flags);
+}
+
+void __cpuinit rtas_take_timebase(void)
+{
+	while (!timebase)
+		barrier();
+	__raw_spin_lock(&timebase_lock);
+	set_tb(timebase >> 32, timebase & 0xffffffff);
+	timebase = 0;
+	__raw_spin_unlock(&timebase_lock);
+}
@@ -119,6 +119,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
  */
 notrace void __init machine_init(unsigned long dt_ptr)
 {
+	lockdep_init();
+
 	/* Enable early debugging if any specified (see udbg.h) */
 	udbg_early_init();
@@ -68,7 +68,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 /* SMP operations for this machine */
 struct smp_ops_t *smp_ops;

-static volatile unsigned int cpu_callin_map[NR_CPUS];
+/* Can't be static due to PowerMac hackery */
+volatile unsigned int cpu_callin_map[NR_CPUS];

 int smt_enabled_at_boot = 1;
@@ -219,7 +219,7 @@ void udbg_init_pas_realmode(void)
 #ifdef CONFIG_PPC_EARLY_DEBUG_44x
 #include <platforms/44x/44x.h>

-static int udbg_44x_as1_flush(void)
+static void udbg_44x_as1_flush(void)
 {
 	if (udbg_comport) {
 		while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
@@ -30,3 +30,4 @@ obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PPC_SUBPAGE_PROT)	+= subpage-prot.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
+obj-$(CONFIG_HIGHMEM)		+= highmem.o
/*
 * highmem.c: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 *
 * Reworked for PowerPC by various contributors. Moved from
 * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
 */

#include <linux/highmem.h>
#include <linux/module.h>

/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * gives a more generic (and caching) interface. But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	unsigned int idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
	local_flush_tlb_page(NULL, vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remap it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_page(NULL, vaddr);
#endif
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/of_gpio.h>
+#include <linux/of_i2c.h>

 #include <asm/machdep.h>
 #include <asm/prom.h>

@@ -65,7 +66,6 @@ define_machine(warp) {
 static u32 post_info;

-/* I am not sure this is the best place for this... */
 static int __init warp_post_info(void)
 {
 	struct device_node *np;

@@ -194,9 +194,9 @@ static int pika_setup_leds(void)
 	return 0;
 }

-static void pika_setup_critical_temp(struct i2c_client *client)
+static void pika_setup_critical_temp(struct device_node *np,
+				     struct i2c_client *client)
 {
-	struct device_node *np;
 	int irq, rc;

 	/* Do this before enabling critical temp interrupt since we

@@ -208,14 +208,7 @@ static void pika_setup_critical_temp(struct i2c_client *client)
 	i2c_smbus_write_byte_data(client, 2, 65); /* Thigh */
 	i2c_smbus_write_byte_data(client, 3, 0);  /* Tlow */

-	np = of_find_compatible_node(NULL, NULL, "adi,ad7414");
-	if (np == NULL) {
-		printk(KERN_ERR __FILE__ ": Unable to find ad7414\n");
-		return;
-	}
-
 	irq = irq_of_parse_and_map(np, 0);
-	of_node_put(np);
 	if (irq == NO_IRQ) {
 		printk(KERN_ERR __FILE__ ": Unable to get ad7414 irq\n");
 		return;

@@ -244,32 +237,24 @@ static inline void pika_dtm_check_fan(void __iomem *fpga)
 static int pika_dtm_thread(void __iomem *fpga)
 {
-	struct i2c_adapter *adap;
+	struct device_node *np;
 	struct i2c_client *client;

-	/* We loop in case either driver was compiled as a module and
-	 * has not been insmoded yet.
-	 */
-	while (!(adap = i2c_get_adapter(0))) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(HZ);
-	}
-
-	while (1) {
-		list_for_each_entry(client, &adap->clients, list)
-			if (client->addr == 0x4a)
-				goto found_it;
+	np = of_find_compatible_node(NULL, NULL, "adi,ad7414");
+	if (np == NULL)
+		return -ENOENT;

-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(HZ);
+	client = of_find_i2c_device_by_node(np);
+	if (client == NULL) {
+		of_node_put(np);
+		return -ENOENT;
 	}

-found_it:
-	pika_setup_critical_temp(client);
+	pika_setup_critical_temp(np, client);

-	i2c_put_adapter(adap);
+	of_node_put(np);

-	printk(KERN_INFO "PIKA DTM thread running.\n");
+	printk(KERN_INFO "Warp DTM thread running.\n");

 	while (!kthread_should_stop()) {
 		int val;

@@ -291,7 +276,6 @@ static int pika_dtm_thread(void __iomem *fpga)
 	return 0;
 }

-
 static int __init pika_dtm_start(void)
 {
 	struct task_struct *dtm_thread;
@@ -285,6 +285,7 @@ static struct of_device_id mpc85xx_ids[] = {
 	{ .type = "qe", },
 	{ .compatible = "fsl,qe", },
 	{ .compatible = "gianfar", },
+	{ .compatible = "fsl,rapidio-delta", },
 	{},
 };
@@ -52,20 +52,19 @@ smp_85xx_kick_cpu(int nr)
 	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

-	local_irq_save(flags);
-
 	np = of_get_cpu_node(nr, NULL);
 	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

 	if (cpu_rel_addr == NULL) {
 		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
-		local_irq_restore(flags);
 		return;
 	}

 	/* Map the spin table */
 	bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);

+	local_irq_save(flags);
+
 	out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
 	out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));

@@ -73,10 +72,10 @@ smp_85xx_kick_cpu(int nr)
 	while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
 		mdelay(1);

-	iounmap(bptr_vaddr);
-
 	local_irq_restore(flags);

+	iounmap(bptr_vaddr);
+
 	pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
 }
@@ -102,10 +102,11 @@ static struct of_device_id __initdata socrates_of_bus_ids[] = {
 	{},
 };

-static void __init socrates_init(void)
+static int __init socrates_publish_devices(void)
 {
-	of_platform_bus_probe(NULL, socrates_of_bus_ids, NULL);
+	return of_platform_bus_probe(NULL, socrates_of_bus_ids, NULL);
 }
+machine_device_initcall(socrates, socrates_publish_devices);

 /*
  * Called very early, device-tree isn't unflattened

@@ -124,7 +125,6 @@ define_machine(socrates) {
 	.name		= "Socrates",
 	.probe		= socrates_probe,
 	.setup_arch	= socrates_setup_arch,
-	.init		= socrates_init,
 	.init_IRQ	= socrates_pic_init,
 	.get_irq	= mpic_get_irq,
 	.restart	= fsl_rstcr_restart,
@@ -32,7 +32,6 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>

-#include <linux/of_platform.h>

 /* A few bit definitions needed for fixups on some boards */
 #define MPC85xx_L2CTL_L2E	0x80000000	/* L2 enable */
@@ -36,7 +36,6 @@
 #include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/paca.h>
-#include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/cputable.h>
 #include <asm/firmware.h>

@@ -140,31 +139,6 @@ static void __devinit smp_cell_setup_cpu(int cpu)
 	mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
 }

-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned long timebase = 0;
-
-static void __devinit cell_give_timebase(void)
-{
-	spin_lock(&timebase_lock);
-	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
-	timebase = get_tb();
-	spin_unlock(&timebase_lock);
-
-	while (timebase)
-		barrier();
-	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
-}
-
-static void __devinit cell_take_timebase(void)
-{
-	while (!timebase)
-		barrier();
-	spin_lock(&timebase_lock);
-	set_tb(timebase >> 32, timebase & 0xffffffff);
-	timebase = 0;
-	spin_unlock(&timebase_lock);
-}
-
 static void __devinit smp_cell_kick_cpu(int nr)
 {
 	BUG_ON(nr < 0 || nr >= NR_CPUS);

@@ -224,8 +198,8 @@ void __init smp_init_cell(void)
 	/* Non-lpar has additional take/give timebase */
 	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
-		smp_ops->give_timebase = cell_give_timebase;
-		smp_ops->take_timebase = cell_take_timebase;
+		smp_ops->give_timebase = rtas_give_timebase;
+		smp_ops->take_timebase = rtas_take_timebase;
 	}

 	DBG(" <- smp_init_cell()\n");
@@ -26,7 +26,6 @@
 #include <asm/io.h>
 #include <asm/prom.h>
 #include <asm/smp.h>
-#include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/mpic.h>
 #include <asm/rtas.h>

@@ -42,40 +41,12 @@ static void __devinit smp_chrp_setup_cpu(int cpu_nr)
 		mpic_setup_this_cpu();
 }

-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned int timebase_upper = 0, timebase_lower = 0;
-
-void __devinit smp_chrp_give_timebase(void)
-{
-	spin_lock(&timebase_lock);
-	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
-	timebase_upper = get_tbu();
-	timebase_lower = get_tbl();
-	spin_unlock(&timebase_lock);
-
-	while (timebase_upper || timebase_lower)
-		barrier();
-	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
-}
-
-void __devinit smp_chrp_take_timebase(void)
-{
-	while (!(timebase_upper || timebase_lower))
-		barrier();
-	spin_lock(&timebase_lock);
-	set_tb(timebase_upper, timebase_lower);
-	timebase_upper = 0;
-	timebase_lower = 0;
-	spin_unlock(&timebase_lock);
-
-	printk("CPU %i taken timebase\n", smp_processor_id());
-}
-
 /* CHRP with openpic */
 struct smp_ops_t chrp_smp_ops = {
 	.message_pass = smp_mpic_message_pass,
 	.probe = smp_mpic_probe,
 	.kick_cpu = smp_chrp_kick_cpu,
 	.setup_cpu = smp_chrp_setup_cpu,
-	.give_timebase = smp_chrp_give_timebase,
-	.take_timebase = smp_chrp_take_timebase,
+	.give_timebase = rtas_give_timebase,
+	.take_timebase = rtas_take_timebase,
 };
@@ -71,20 +71,25 @@ static void pas_restart(char *cmd)
 }

 #ifdef CONFIG_SMP
-static DEFINE_SPINLOCK(timebase_lock);
+static raw_spinlock_t timebase_lock;
 static unsigned long timebase;

 static void __devinit pas_give_timebase(void)
 {
-	spin_lock(&timebase_lock);
+	unsigned long flags;
+
+	local_irq_save(flags);
+	hard_irq_disable();
+	__raw_spin_lock(&timebase_lock);
 	mtspr(SPRN_TBCTL, TBCTL_FREEZE);
 	isync();
 	timebase = get_tb();
-	spin_unlock(&timebase_lock);
+	__raw_spin_unlock(&timebase_lock);

 	while (timebase)
 		barrier();
 	mtspr(SPRN_TBCTL, TBCTL_RESTART);
+	local_irq_restore(flags);
 }

 static void __devinit pas_take_timebase(void)
@@ -92,10 +97,10 @@ static void __devinit pas_take_timebase(void)
 	while (!timebase)
 		smp_rmb();

-	spin_lock(&timebase_lock);
+	__raw_spin_lock(&timebase_lock);
 	set_tb(timebase >> 32, timebase & 0xffffffff);
 	timebase = 0;
-	spin_unlock(&timebase_lock);
+	__raw_spin_unlock(&timebase_lock);
 }

 struct smp_ops_t pas_smp_ops = {
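All the timebase-sync variants in this series share one handshake: the giver freezes the clock, publishes a snapshot through a shared word, and spins until the taker zeroes it as the acknowledgement; only the freeze mechanism differs (an RTAS call vs. the TBCTL SPR). A self-contained model of just that handshake, for illustration (plain C11 with atomics standing in for the kernel's mb()/barrier() discipline; not kernel code):

#include <stdatomic.h>

static atomic_ulong timebase;	/* 0 means "no value published" */

static void give(unsigned long tb_snapshot)
{
	atomic_store(&timebase, tb_snapshot);	/* publish */
	while (atomic_load(&timebase) != 0)	/* wait for the taker's ack */
		;
	/* safe to unfreeze the clock source here */
}

static unsigned long take(void)
{
	unsigned long tb;

	while ((tb = atomic_load(&timebase)) == 0)	/* wait for a value */
		;
	atomic_store(&timebase, 0);	/* ack: releases the giver */
	return tb;
}

Note that, exactly like the kernel code it models, this uses 0 as the "empty" sentinel, so a genuine snapshot of 0 could not be handed over.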
@@ -103,11 +103,6 @@ unsigned long smu_cmdbuf_abs;
 EXPORT_SYMBOL(smu_cmdbuf_abs);
 #endif

-#ifdef CONFIG_SMP
-extern struct smp_ops_t psurge_smp_ops;
-extern struct smp_ops_t core99_smp_ops;
-#endif /* CONFIG_SMP */
-
 static void pmac_show_cpuinfo(struct seq_file *m)
 {
 	struct device_node *np;
@@ -341,34 +336,6 @@ static void __init pmac_setup_arch(void)
 	ROOT_DEV = DEFAULT_ROOT_DEVICE;
 #endif

-#ifdef CONFIG_SMP
-	/* Check for Core99 */
-	ic = of_find_node_by_name(NULL, "uni-n");
-	if (!ic)
-		ic = of_find_node_by_name(NULL, "u3");
-	if (!ic)
-		ic = of_find_node_by_name(NULL, "u4");
-	if (ic) {
-		of_node_put(ic);
-		smp_ops = &core99_smp_ops;
-	}
-#ifdef CONFIG_PPC32
-	else {
-		/*
-		 * We have to set bits in cpu_possible_map here since the
-		 * secondary CPU(s) aren't in the device tree, and
-		 * setup_per_cpu_areas only allocates per-cpu data for
-		 * CPUs in the cpu_possible_map.
-		 */
-		int cpu;
-
-		for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
-			cpu_set(cpu, cpu_possible_map);
-		smp_ops = &psurge_smp_ops;
-	}
-#endif
-#endif /* CONFIG_SMP */
-
 #ifdef CONFIG_ADB
 	if (strstr(cmd_line, "adb_sync")) {
 		extern int __adb_probe_sync;
@@ -512,6 +479,14 @@ static void __init pmac_init_early(void)
 #ifdef CONFIG_PPC64
 	iommu_init_early_dart();
 #endif
+
+	/* SMP Init has to be done early as we need to patch up
+	 * cpu_possible_map before interrupt stacks are allocated
+	 * or kaboom...
+	 */
+#ifdef CONFIG_SMP
+	pmac_setup_smp();
+#endif
 }

 static int __init pmac_declare_of_platform_devices(void)
@@ -64,10 +64,11 @@
 extern void __secondary_start_pmac_0(void);
 extern int pmac_pfunc_base_install(void);

-#ifdef CONFIG_PPC32
+static void (*pmac_tb_freeze)(int freeze);
+static u64 timebase;
+static int tb_req;

-/* Sync flag for HW tb sync */
-static volatile int sec_tb_reset = 0;
+#ifdef CONFIG_PPC32

 /*
  * Powersurge (old powermac SMP) support.
@@ -294,6 +295,9 @@ static int __init smp_psurge_probe(void)
 		psurge_quad_init();
 		/* All released cards using this HW design have 4 CPUs */
 		ncpus = 4;
+		/* Not sure how timebase sync works on those, let's use SW */
+		smp_ops->give_timebase = smp_generic_give_timebase;
+		smp_ops->take_timebase = smp_generic_take_timebase;
 	} else {
 		iounmap(quad_base);
 		if ((in_8(hhead_base + HHEAD_CONFIG) & 0x02) == 0) {
@@ -308,18 +312,15 @@ static int __init smp_psurge_probe(void)
 	psurge_start = ioremap(PSURGE_START, 4);
 	psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);

-	/*
-	 * This is necessary because OF doesn't know about the
+	/* This is necessary because OF doesn't know about the
 	 * secondary cpu(s), and thus there aren't nodes in the
 	 * device tree for them, and smp_setup_cpu_maps hasn't
-	 * set their bits in cpu_possible_map and cpu_present_map.
+	 * set their bits in cpu_present_map.
 	 */
 	if (ncpus > NR_CPUS)
 		ncpus = NR_CPUS;
-	for (i = 1; i < ncpus ; ++i) {
+	for (i = 1; i < ncpus ; ++i)
 		cpu_set(i, cpu_present_map);
-		set_hard_smp_processor_id(i, i);
-	}

 	if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
@@ -329,8 +330,14 @@ static int __init smp_psurge_probe(void)
 static void __init smp_psurge_kick_cpu(int nr)
 {
 	unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8;
-	unsigned long a;
-	int i;
+	unsigned long a, flags;
+	int i, j;
+
+	/* Defining this here is evil ... but I prefer hiding that
+	 * crap to avoid giving people ideas that they can do the
+	 * same.
+	 */
+	extern volatile unsigned int cpu_callin_map[NR_CPUS];

 	/* may need to flush here if secondary bats aren't setup */
 	for (a = KERNELBASE; a < KERNELBASE + 0x800000; a += 32)
@@ -339,47 +346,52 @@ static void __init smp_psurge_kick_cpu(int nr)
 	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu", 0x353);

+	/* This is going to freeze the timebase, we disable interrupts */
+	local_irq_save(flags);
+
 	out_be32(psurge_start, start);
 	mb();

 	psurge_set_ipi(nr);
+
 	/*
 	 * We can't use udelay here because the timebase is now frozen.
 	 */
 	for (i = 0; i < 2000; ++i)
-		barrier();
+		asm volatile("nop" : : : "memory");
 	psurge_clr_ipi(nr);

-	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
-}
-
-/*
- * With the dual-cpu powersurge board, the decrementers and timebases
- * of both cpus are frozen after the secondary cpu is started up,
- * until we give the secondary cpu another interrupt. This routine
- * uses this to get the timebases synchronized.
- * -- paulus.
- */
-static void __init psurge_dual_sync_tb(int cpu_nr)
-{
-	int t;
-
-	set_dec(tb_ticks_per_jiffy);
-	/* XXX fixme */
-	set_tb(0, 0);
-
-	if (cpu_nr > 0) {
+	/*
+	 * Also, because the timebase is frozen, we must not return to the
+	 * caller which will try to do udelay's etc... Instead, we wait -here-
+	 * for the CPU to callin.
+	 */
+	for (i = 0; i < 100000 && !cpu_callin_map[nr]; ++i) {
+		for (j = 1; j < 10000; j++)
+			asm volatile("nop" : : : "memory");
+		asm volatile("sync" : : : "memory");
+	}
+	if (!cpu_callin_map[nr])
+		goto stuck;
+
+	/* And we do the TB sync here too for standard dual CPU cards */
+	if (psurge_type == PSURGE_DUAL) {
+		while (!tb_req)
+			barrier();
+		tb_req = 0;
+		mb();
 		timebase = get_tb();
 		mb();
 		while (timebase)
 			barrier();
 		mb();
-		sec_tb_reset = 1;
-		return;
 	}
+ stuck:
+	/* now interrupt the secondary, restarting both TBs */
+	if (psurge_type == PSURGE_DUAL)
+		psurge_set_ipi(1);

-	/* wait for the secondary to have reset its TB before proceeding */
-	for (t = 10000000; t > 0 && !sec_tb_reset; --t)
-		;
-
-	/* now interrupt the secondary, starting both TBs */
-	psurge_set_ipi(1);
-}
+	if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354);
 }
 static struct irqaction psurge_irqaction = {
@@ -390,36 +402,35 @@ static struct irqaction psurge_irqaction = {
 static void __init smp_psurge_setup_cpu(int cpu_nr)
 {
-	if (cpu_nr == 0) {
-		/* If we failed to start the second CPU, we should still
-		 * send it an IPI to start the timebase & DEC or we might
-		 * have them stuck.
-		 */
-		if (num_online_cpus() < 2) {
-			if (psurge_type == PSURGE_DUAL)
-				psurge_set_ipi(1);
-			return;
-		}
-		/* reset the entry point so if we get another intr we won't
-		 * try to startup again */
-		out_be32(psurge_start, 0x100);
-		if (setup_irq(30, &psurge_irqaction))
-			printk(KERN_ERR "Couldn't get primary IPI interrupt");
-	}
-
-	if (psurge_type == PSURGE_DUAL)
-		psurge_dual_sync_tb(cpu_nr);
+	if (cpu_nr != 0)
+		return;
+
+	/* reset the entry point so if we get another intr we won't
+	 * try to startup again */
+	out_be32(psurge_start, 0x100);
+	if (setup_irq(30, &psurge_irqaction))
+		printk(KERN_ERR "Couldn't get primary IPI interrupt");
 }

 void __init smp_psurge_take_timebase(void)
 {
-	/* Dummy implementation */
+	if (psurge_type != PSURGE_DUAL)
+		return;
+
+	tb_req = 1;
+	mb();
+	while (!timebase)
+		barrier();
+	mb();
+	set_tb(timebase >> 32, timebase & 0xffffffff);
+	timebase = 0;
+	mb();
+	set_dec(tb_ticks_per_jiffy/2);
 }

 void __init smp_psurge_give_timebase(void)
 {
-	/* Dummy implementation */
+	/* Nothing to do here */
 }

 /* PowerSurge-style Macs */
@@ -437,9 +448,6 @@ struct smp_ops_t psurge_smp_ops = {
  * Core 99 and later support
  */

-static void (*pmac_tb_freeze)(int freeze);
-static u64 timebase;
-static int tb_req;

 static void smp_core99_give_timebase(void)
 {
@@ -478,7 +486,6 @@ static void __devinit smp_core99_take_timebase(void)
 	set_tb(timebase >> 32, timebase & 0xffffffff);
 	timebase = 0;
 	mb();
-	set_dec(tb_ticks_per_jiffy/2);

 	local_irq_restore(flags);
 }
@@ -920,3 +927,34 @@ struct smp_ops_t core99_smp_ops = {
 # endif
 #endif
 };
+
+void __init pmac_setup_smp(void)
+{
+	struct device_node *np;
+
+	/* Check for Core99 */
+	np = of_find_node_by_name(NULL, "uni-n");
+	if (!np)
+		np = of_find_node_by_name(NULL, "u3");
+	if (!np)
+		np = of_find_node_by_name(NULL, "u4");
+	if (np) {
+		of_node_put(np);
+		smp_ops = &core99_smp_ops;
+	}
+#ifdef CONFIG_PPC32
+	else {
+		/* We have to set bits in cpu_possible_map here since the
+		 * secondary CPU(s) aren't in the device tree. Various
+		 * things won't be initialized for CPUs not in the possible
+		 * map, so we really need to fix it up here.
+		 */
+		int cpu;
+
+		for (cpu = 1; cpu < 4 && cpu < NR_CPUS; ++cpu)
+			cpu_set(cpu, cpu_possible_map);
+		smp_ops = &psurge_smp_ops;
+	}
+#endif /* CONFIG_PPC32 */
+}
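Since pmac_init_early() in the setup code above now calls pmac_setup_smp(), a matching declaration must be visible there; presumably something like the following lives in the PowerMac platform's local header (the location and exact form are assumed, not shown in this diff):

#ifdef CONFIG_SMP
extern void __init pmac_setup_smp(void);	/* assumed declaration */
#endif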
@@ -35,7 +35,6 @@
 #include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/paca.h>
-#include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/cputable.h>
 #include <asm/firmware.h>
@@ -118,31 +117,6 @@ static void __devinit smp_xics_setup_cpu(int cpu)
 }
 #endif /* CONFIG_XICS */

-static DEFINE_SPINLOCK(timebase_lock);
-static unsigned long timebase = 0;
-
-static void __devinit pSeries_give_timebase(void)
-{
-	spin_lock(&timebase_lock);
-	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
-	timebase = get_tb();
-	spin_unlock(&timebase_lock);
-
-	while (timebase)
-		barrier();
-	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
-}
-
-static void __devinit pSeries_take_timebase(void)
-{
-	while (!timebase)
-		barrier();
-	spin_lock(&timebase_lock);
-	set_tb(timebase >> 32, timebase & 0xffffffff);
-	timebase = 0;
-	spin_unlock(&timebase_lock);
-}
-
 static void __devinit smp_pSeries_kick_cpu(int nr)
 {
 	BUG_ON(nr < 0 || nr >= NR_CPUS);
@@ -209,8 +183,8 @@ static void __init smp_init_pseries(void)
 	/* Non-lpar has additional take/give timebase */
 	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
-		smp_ops->give_timebase = pSeries_give_timebase;
-		smp_ops->take_timebase = pSeries_take_timebase;
+		smp_ops->give_timebase = rtas_give_timebase;
+		smp_ops->take_timebase = rtas_take_timebase;
 	}
 	pr_debug(" <- smp_init_pSeries()\n");
@@ -279,28 +279,29 @@ static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr,
 }

 #ifdef CONFIG_PPC_DCR
-static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
+static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node,
+			  struct mpic_reg_bank *rb,
 			  unsigned int offset, unsigned int size)
 {
 	const u32 *dbasep;

-	dbasep = of_get_property(mpic->irqhost->of_node, "dcr-reg", NULL);
+	dbasep = of_get_property(node, "dcr-reg", NULL);

-	rb->dhost = dcr_map(mpic->irqhost->of_node, *dbasep + offset, size);
+	rb->dhost = dcr_map(node, *dbasep + offset, size);
 	BUG_ON(!DCR_MAP_OK(rb->dhost));
 }

-static inline void mpic_map(struct mpic *mpic, phys_addr_t phys_addr,
-			    struct mpic_reg_bank *rb, unsigned int offset,
-			    unsigned int size)
+static inline void mpic_map(struct mpic *mpic, struct device_node *node,
+			    phys_addr_t phys_addr, struct mpic_reg_bank *rb,
+			    unsigned int offset, unsigned int size)
 {
 	if (mpic->flags & MPIC_USES_DCR)
-		_mpic_map_dcr(mpic, rb, offset, size);
+		_mpic_map_dcr(mpic, node, rb, offset, size);
 	else
 		_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
 }
 #else /* CONFIG_PPC_DCR */
-#define mpic_map(m,p,b,o,s)	_mpic_map_mmio(m,p,b,o,s)
+#define mpic_map(m,n,p,b,o,s)	_mpic_map_mmio(m,p,b,o,s)
 #endif /* !CONFIG_PPC_DCR */
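The reason for threading the device_node through explicitly: mpic_alloc() maps its register banks before mpic->irqhost is created, so on DCR-based MPICs the old mpic->irqhost->of_node dereference happened while irqhost was still NULL. A schematic of the ordering inside mpic_alloc() (a sketch under that assumption; the real function has many more steps):

struct mpic * __init mpic_alloc_sketch(struct device_node *node,
				       phys_addr_t paddr)
{
	struct mpic *mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);

	if (!mpic)
		return NULL;

	/* registers are mapped first; mpic->irqhost is still NULL here,
	 * which is why the node now has to be passed in explicitly */
	mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);

	/* the irq host only exists after this point */
	mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
				       256, &mpic_host_ops, 0);
	return mpic;
}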
@@ -1052,11 +1053,10 @@ struct mpic * __init mpic_alloc(struct device_node *node,
 	int intvec_top;
 	u64 paddr = phys_addr;

-	mpic = alloc_bootmem(sizeof(struct mpic));
+	mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
 	if (mpic == NULL)
 		return NULL;

-	memset(mpic, 0, sizeof(struct mpic));
 	mpic->name = name;
 	mpic->hc_irq = mpic_irq_chip;
@@ -1152,8 +1152,8 @@ struct mpic * __init mpic_alloc(struct device_node *node,
 	}

 	/* Map the global registers */
-	mpic_map(mpic, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
-	mpic_map(mpic, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
+	mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
+	mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);

 	/* Reset */
 	if (flags & MPIC_WANTS_RESET) {
@@ -1194,7 +1194,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
 	/* Map the per-CPU registers */
 	for (i = 0; i < mpic->num_cpus; i++) {
-		mpic_map(mpic, paddr, &mpic->cpuregs[i],
+		mpic_map(mpic, node, paddr, &mpic->cpuregs[i],
 			 MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE),
 			 0x1000);
 	}
@@ -1202,7 +1202,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
 	/* Initialize main ISU if none provided */
 	if (mpic->isu_size == 0) {
 		mpic->isu_size = mpic->num_sources;
-		mpic_map(mpic, paddr, &mpic->isus[0],
+		mpic_map(mpic, node, paddr, &mpic->isus[0],
 			 MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
 	}
 	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
@@ -1256,8 +1256,10 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
 	BUG_ON(isu_num >= MPIC_MAX_ISU);

-	mpic_map(mpic, paddr, &mpic->isus[isu_num], 0,
+	mpic_map(mpic, mpic->irqhost->of_node,
+		 paddr, &mpic->isus[isu_num], 0,
 		 MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
+
 	if ((isu_first + mpic->isu_size) > mpic->num_sources)
 		mpic->num_sources = isu_first + mpic->isu_size;
 }
@@ -112,6 +112,7 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
 {
 	unsigned long flags;
 	u8 mcn_shift = 0, dev_shift = 0;
+	u32 ret;

 	spin_lock_irqsave(&qe_lock, flags);
 	if (cmd == QE_RESET) {
@@ -139,11 +140,13 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
 	}

 	/* wait for the QE_CR_FLG to clear */
-	while(in_be32(&qe_immr->cp.cecr) & QE_CR_FLG)
-		cpu_relax();
+	ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
+				 100, 0);
+	/* On timeout (e.g. failure), the expression is false (ret == 0),
+	 * otherwise it is true (ret == 1). */

 	spin_unlock_irqrestore(&qe_lock, flags);

-	return 0;
+	return ret == 1;
 }
 EXPORT_SYMBOL(qe_issue_cmd);
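For reference, spin_event_timeout() is powerpc's bounded-poll helper (a macro in asm/delay.h). Its behavior as used above is roughly equivalent to this sketch, which is an illustration of the semantics rather than the real macro:

/* Sketch only: poll until cond() is true or timeout_us elapses,
 * returning the final truth value of cond() (0 means we timed out). */
static int bounded_poll(int (*cond)(void), unsigned long timeout_us)
{
	unsigned long start = get_tbl();
	unsigned long ticks = tb_ticks_per_usec * timeout_us;
	int ret;

	while (!(ret = cond()) && tb_ticks_since(start) <= ticks)
		cpu_relax();
	if (!ret)
		ret = cond();	/* one final check after the deadline */
	return ret;
}

So the hang-forever loop becomes a 100 microsecond bounded wait, and qe_issue_cmd() now reports whether the command actually completed instead of unconditionally returning 0.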
@@ -27,6 +27,7 @@
 #include <linux/cdev.h>
 #include <linux/list.h>
 #include <linux/mm.h>
+#include <asm/pgtable.h>
 #include <asm/io.h>

 /*
@@ -75,12 +76,13 @@ static struct class *bsr_class;
 static int bsr_major;

 enum {
 	BSR_8    = 0,
 	BSR_16   = 1,
 	BSR_64   = 2,
 	BSR_128  = 3,
-	BSR_UNKNOWN = 4,
-	BSR_MAX  = 5,
+	BSR_4096 = 4,
+	BSR_UNKNOWN = 5,
+	BSR_MAX  = 6,
 };

 static unsigned bsr_types[BSR_MAX];
@@ -117,15 +119,22 @@ static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	unsigned long size = vma->vm_end - vma->vm_start;
 	struct bsr_dev *dev = filp->private_data;
+	int ret;

-	if (size > dev->bsr_len || (size & (PAGE_SIZE-1)))
-		return -EINVAL;
-
-	vma->vm_flags |= (VM_IO | VM_DONTEXPAND);
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

-	if (io_remap_pfn_range(vma, vma->vm_start, dev->bsr_addr >> PAGE_SHIFT,
-			       size, vma->vm_page_prot))
+	/* check for the case of a small BSR device and map one 4k page for it */
+	if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
+		ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
+				   vma->vm_page_prot);
+	else if (size <= dev->bsr_len)
+		ret = io_remap_pfn_range(vma, vma->vm_start,
+					 dev->bsr_addr >> PAGE_SHIFT,
+					 size, vma->vm_page_prot);
+	else
+		return -EINVAL;
+
+	if (ret)
 		return -EAGAIN;

 	return 0;
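From a userspace point of view the contract this creates looks roughly like the following (hypothetical consumer; the device path, open flags, and helper name are illustrative, not from this diff):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map a small (< 64K) BSR region on a 64K-page kernel. The mmap length
 * is rounded up to one page, which matches the driver's size == PAGE_SIZE
 * test; the driver then backs the mapping with a single 4K HW page. */
static void *map_small_bsr(const char *path)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = open(path, O_RDWR);
	void *p;

	if (fd < 0)
		return NULL;
	p = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	return p == MAP_FAILED ? NULL : p;
}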
@@ -205,6 +214,11 @@ static int bsr_add_node(struct device_node *bn)
 		cur->bsr_stride = bsr_stride[i];
 		cur->bsr_dev    = MKDEV(bsr_major, i + total_bsr_devs);

+		/* if we have a bsr_len > 4k and less than PAGE_SIZE (64k pages)
+		 * we can only map 4k of it, so only advertise the 4k in sysfs */
+		if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
+			cur->bsr_len = 4096;
+
 		switch(cur->bsr_bytes) {
 		case 8:
 			cur->bsr_type = BSR_8;
@@ -218,9 +232,11 @@ static int bsr_add_node(struct device_node *bn)
 		case 128:
 			cur->bsr_type = BSR_128;
 			break;
+		case 4096:
+			cur->bsr_type = BSR_4096;
+			break;
 		default:
 			cur->bsr_type = BSR_UNKNOWN;
-			printk(KERN_INFO "unknown BSR size %d\n", cur->bsr_bytes);
 		}

 		cur->bsr_num = bsr_types[cur->bsr_type];
@@ -378,6 +378,17 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
 	dev->ofdev.dev.bus = &macio_bus_type;
 	dev->ofdev.dev.release = macio_release_dev;

+#ifdef CONFIG_PCI
+	/* Set the DMA ops to the ones from the PCI device, this could be
+	 * fishy if we didn't know that on PowerMac it's always direct ops
+	 * or iommu ops that will work fine
+	 */
+	dev->ofdev.dev.archdata.dma_ops =
+		chip->lbus.pdev->dev.archdata.dma_ops;
+	dev->ofdev.dev.archdata.dma_data =
+		chip->lbus.pdev->dev.archdata.dma_data;
+#endif /* CONFIG_PCI */
+
 #ifdef DEBUG
 	printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",
 	       dev, &dev->ofdev, &dev->ofdev.dev, &dev->ofdev.dev.kobj);
@@ -218,16 +218,14 @@ static void wdrtas_timer_keepalive(void)
  */
 static int wdrtas_get_temperature(void)
 {
-	long result;
+	int result;
 	int temperature = 0;

-	result = rtas_call(wdrtas_token_get_sensor_state, 2, 2,
-			   (void *)__pa(&temperature),
-			   WDRTAS_THERMAL_SENSOR, 0);
+	result = rtas_get_sensor(WDRTAS_THERMAL_SENSOR, 0, &temperature);

 	if (result < 0)
 		printk(KERN_WARNING "wdrtas: reading the thermal sensor "
-		       "faild: %li\n", result);
+		       "failed: %i\n", result);
 	else
 		temperature = ((temperature * 9) / 5) + 32; /* fahrenheit */
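The helper this switches to has the following signature (from asm/rtas.h); it returns 0 or a negative RTAS status and writes the raw sensor value, here degrees Celsius, through the pointer, which also fixes the old code's broken habit of passing a physical address as an RTAS output parameter:

extern int rtas_get_sensor(int sensor, int index, int *state);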
+dtc
+dtc-lexer.lex.c
+dtc-parser.tab.c
+dtc-parser.tab.h