Commit b286cedd authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'powerpc-4.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull more powerpc updates from Michael Ellerman:
 "Highlights include:

   - an update of the disassembly code used by xmon to the latest
     versions in binutils. We've received permission from all the
     authors of the relevant binutils changes to relicense their changes
     to the relevant files from GPLv3 to GPLv2, for inclusion in Linux.
     Thanks to Peter Bergner for doing the leg work to get permission
     from everyone.

   - addition of the "architected" Power9 CPU table entry, allowing us
     to boot in Power9 architected mode under a hypervisor.

   - updates to the Power9 PMU code.

   - implementation of clear_bit_unlock_is_negative_byte() to optimise
     unlock_page().

   - Freescale updates from Scott: "Highlights include 8xx breakpoints
     and perf, t1042rdb display support, and board updates."

  Thanks to:
    Al Viro, Andrew Donnellan, Aneesh Kumar K.V, Balbir Singh, Douglas
    Miller, Frédéric Weisbecker, Gavin Shan, Madhavan Srinivasan,
    Michael Roth, Nathan Fontenot, Naveen N. Rao, Nicholas Piggin, Peter
    Bergner, Paul E. McKenney, Rashmica Gupta, Russell Currey, Sahil
    Mehta, Stewart Smith"

* tag 'powerpc-4.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (48 commits)
  powerpc: Remove leftover cputime_to_nsecs call causing build error
  powerpc/mm/hash: Always clear UPRT and Host Radix bits when setting up CPU
  powerpc/optprobes: Fix TOC handling in optprobes trampoline
  powerpc/pseries: Advertise Hot Plug Event support to firmware
  cxl: fix nested locking hang during EEH hotplug
  powerpc/xmon: Dump memory in CPU endian format
  powerpc/pseries: Revert 'Auto-online hotplugged memory'
  powerpc/powernv: Make PCI non-optional
  powerpc/64: Implement clear_bit_unlock_is_negative_byte()
  powerpc/powernv: Remove unused variable in pnv_pci_sriov_disable()
  powerpc/kernel: Remove error message in pcibios_setup_phb_resources()
  powerpc/mm: Fix typo in set_pte_at()
  pci/hotplug/pnv-php: Disable MSI and PCI device properly
  pci/hotplug/pnv-php: Disable surprise hotplug capability on conflicts
  pci/hotplug/pnv-php: Remove WARN_ON() in pnv_php_put_slot()
  powerpc: Add POWER9 architected mode to cputable
  powerpc/perf: use is_kernel_addr macro in perf_get_misc_flags()
  powerpc/perf: Avoid FAB_*_MATCH checks for power9
  powerpc/perf: Add restrictions to PMC5 in power9 DD1
  powerpc/perf: Use Instruction Counter value
  ...
parents 522214d9 9f3768e0
...@@ -7483,18 +7483,24 @@ L: linuxppc-dev@lists.ozlabs.org ...@@ -7483,18 +7483,24 @@ L: linuxppc-dev@lists.ozlabs.org
Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/ Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
S: Supported S: Supported
F: Documentation/ABI/stable/sysfs-firmware-opal-*
F: Documentation/devicetree/bindings/powerpc/opal/
F: Documentation/devicetree/bindings/rtc/rtc-opal.txt
F: Documentation/devicetree/bindings/i2c/i2c-opal.txt
F: Documentation/powerpc/ F: Documentation/powerpc/
F: arch/powerpc/ F: arch/powerpc/
F: drivers/char/tpm/tpm_ibmvtpm* F: drivers/char/tpm/tpm_ibmvtpm*
F: drivers/crypto/nx/ F: drivers/crypto/nx/
F: drivers/crypto/vmx/ F: drivers/crypto/vmx/
F: drivers/i2c/busses/i2c-opal.c
F: drivers/net/ethernet/ibm/ibmveth.* F: drivers/net/ethernet/ibm/ibmveth.*
F: drivers/net/ethernet/ibm/ibmvnic.* F: drivers/net/ethernet/ibm/ibmvnic.*
F: drivers/pci/hotplug/pnv_php.c F: drivers/pci/hotplug/pnv_php.c
F: drivers/pci/hotplug/rpa* F: drivers/pci/hotplug/rpa*
F: drivers/rtc/rtc-opal.c
F: drivers/scsi/ibmvscsi/ F: drivers/scsi/ibmvscsi/
F: drivers/tty/hvc/hvc_opal.c
F: tools/testing/selftests/powerpc F: tools/testing/selftests/powerpc
N: opal
N: /pmac N: /pmac
N: powermac N: powermac
N: powernv N: powernv
......
...@@ -115,7 +115,7 @@ config PPC ...@@ -115,7 +115,7 @@ config PPC
select HAVE_PERF_REGS select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_IPC_PARSE_VERSION
select SPARSE_IRQ select SPARSE_IRQ
select IRQ_DOMAIN select IRQ_DOMAIN
......
/*
* Keymile kmcent2 Device Tree Source, based on T1040RDB DTS
*
* (C) Copyright 2016
* Valentin Longchamp, Keymile AG, valentin.longchamp@keymile.com
*
* Copyright 2014 - 2015 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
/include/ "t104xsi-pre.dtsi"
/ {
model = "keymile,kmcent2";
compatible = "keymile,kmcent2";
aliases {
front_phy = &front_phy;
};
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;
bman_fbpr: bman-fbpr {
size = <0 0x1000000>;
alignment = <0 0x1000000>;
};
qman_fqd: qman-fqd {
size = <0 0x400000>;
alignment = <0 0x400000>;
};
qman_pfdr: qman-pfdr {
size = <0 0x2000000>;
alignment = <0 0x2000000>;
};
};
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x04000000
1 0 0xf 0xfa000000 0x00010000
2 0 0xf 0xfb000000 0x00010000
4 0 0xf 0xc0000000 0x08000000
6 0 0xf 0xd0000000 0x08000000
7 0 0xf 0xd8000000 0x08000000>;
nor@0,0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "cfi-flash";
reg = <0x0 0x0 0x04000000>;
bank-width = <2>;
device-width = <2>;
};
nand@1,0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "fsl,ifc-nand";
reg = <0x1 0x0 0x10000>;
};
board-control@2,0 {
compatible = "keymile,qriox";
reg = <0x2 0x0 0x80>;
};
chassis-mgmt@6,0 {
compatible = "keymile,bfticu";
reg = <6 0 0x100>;
interrupt-controller;
interrupt-parent = <&mpic>;
interrupts = <11 1 0 0>;
#interrupt-cells = <1>;
};
};
memory {
device_type = "memory";
};
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
bportals: bman-portals@ff4000000 {
ranges = <0x0 0xf 0xf4000000 0x2000000>;
};
qportals: qman-portals@ff6000000 {
ranges = <0x0 0xf 0xf6000000 0x2000000>;
};
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
spi@110000 {
network-clock@1 {
compatible = "zarlink,zl30364";
reg = <1>;
spi-max-frequency = <1000000>;
};
};
sdhc@114000 {
status = "disabled";
};
i2c@118000 {
clock-frequency = <100000>;
mux@70 {
compatible = "nxp,pca9547";
reg = <0x70>;
#address-cells = <1>;
#size-cells = <0>;
i2c-mux-idle-disconnect;
i2c@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
eeprom@54 {
compatible = "24c02";
reg = <0x54>;
pagesize = <2>;
read-only;
label = "ddr3-spd";
};
};
i2c@7 {
reg = <7>;
#address-cells = <1>;
#size-cells = <0>;
temp-sensor@48 {
compatible = "national,lm75";
reg = <0x48>;
label = "SENSOR_0";
};
temp-sensor@4a {
compatible = "national,lm75";
reg = <0x4a>;
label = "SENSOR_2";
};
temp-sensor@4b {
compatible = "national,lm75";
reg = <0x4b>;
label = "SENSOR_3";
};
};
};
};
i2c@118100 {
clock-frequency = <100000>;
eeprom@50 {
compatible = "atmel,24c08";
reg = <0x50>;
pagesize = <16>;
};
eeprom@54 {
compatible = "atmel,24c08";
reg = <0x54>;
pagesize = <16>;
};
};
i2c@119000 {
status = "disabled";
};
i2c@119100 {
status = "disabled";
};
serial2: serial@11d500 {
status = "disabled";
};
serial3: serial@11d600 {
status = "disabled";
};
usb0: usb@210000 {
status = "disabled";
};
usb1: usb@211000 {
status = "disabled";
};
display@180000 {
status = "disabled";
};
sata@220000 {
status = "disabled";
};
sata@221000 {
status = "disabled";
};
fman@400000 {
ethernet@e0000 {
fixed-link = <0 1 1000 0 0>;
phy-connection-type = "sgmii";
};
ethernet@e2000 {
fixed-link = <1 1 1000 0 0>;
phy-connection-type = "sgmii";
};
ethernet@e4000 {
status = "disabled";
};
ethernet@e6000 {
status = "disabled";
};
ethernet@e8000 {
phy-handle = <&front_phy>;
phy-connection-type = "rgmii";
};
mdio0: mdio@fc000 {
front_phy: ethernet-phy@11 {
reg = <0x11>;
};
};
};
};
pci0: pcie@ffe240000 {
reg = <0xf 0xfe240000 0 0x10000>;
ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
pcie@0 {
ranges = <0x02000000 0 0xe0000000
0x02000000 0 0xe0000000
0 0x20000000
0x01000000 0 0x00000000
0x01000000 0 0x00000000
0 0x00010000>;
};
};
pci1: pcie@ffe250000 {
status = "disabled";
};
pci2: pcie@ffe260000 {
status = "disabled";
};
pci3: pcie@ffe270000 {
status = "disabled";
};
qe: qe@ffe140000 {
ranges = <0x0 0xf 0xfe140000 0x40000>;
reg = <0xf 0xfe140000 0 0x480>;
brg-frequency = <0>;
bus-frequency = <0>;
si1: si@700 {
compatible = "fsl,t1040-qe-si";
reg = <0x700 0x80>;
};
siram1: siram@1000 {
compatible = "fsl,t1040-qe-siram";
reg = <0x1000 0x800>;
};
ucc_hdlc: ucc@2000 {
device_type = "hdlc";
compatible = "fsl,ucc-hdlc";
rx-clock-name = "clk9";
tx-clock-name = "clk9";
fsl,tx-timeslot-mask = <0xfffffffe>;
fsl,rx-timeslot-mask = <0xfffffffe>;
fsl,siram-entry-id = <0>;
};
};
};
#include "t1040si-post.dtsi"
...@@ -83,6 +83,10 @@ flash@2 { ...@@ -83,6 +83,10 @@ flash@2 {
}; };
}; };
sdhc@114000 {
status = "disabled";
};
i2c@119000 { i2c@119000 {
status = "disabled"; status = "disabled";
}; };
......
CONFIG_PPC_85xx=y
CONFIG_SMP=y
CONFIG_NR_CPUS=8
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CGROUPS=y
CONFIG_CGROUP_SCHED=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MAC_PARTITION=y
CONFIG_CORENET_GENERIC=y
CONFIG_MPIC_MSGR=y
CONFIG_HIGHMEM=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_KEXEC=y
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEASPM is not set
CONFIG_PCI_MSI=y
CONFIG_ADVANCED_OPTIONS=y
CONFIG_LOWMEM_SIZE_BOOL=y
CONFIG_LOWMEM_SIZE=0x20000000
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_XFRM_SUB_POLICY=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_TIPC=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=y
CONFIG_NET_SCH_HTB=y
CONFIG_NET_SCH_HFSC=y
CONFIG_NET_SCH_PRIO=y
CONFIG_NET_SCH_MULTIQ=y
CONFIG_NET_SCH_RED=y
CONFIG_NET_SCH_SFQ=y
CONFIG_NET_SCH_TEQL=y
CONFIG_NET_SCH_TBF=y
CONFIG_NET_SCH_GRED=y
CONFIG_NET_CLS_BASIC=y
CONFIG_NET_CLS_TCINDEX=y
CONFIG_NET_CLS_U32=y
CONFIG_CLS_U32_PERF=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
CONFIG_NET_CLS_CGROUP=y
CONFIG_UEVENT_HELPER_PATH="/sbin/mdev"
CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_PHRAM=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_ECC_BCH=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_UBI=y
CONFIG_MTD_UBI_GLUEBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=2
CONFIG_BLK_DEV_RAM_SIZE=2048
CONFIG_EEPROM_AT24=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
# CONFIG_NET_VENDOR_ALTEON is not set
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_EXAR is not set
CONFIG_FSL_PQ_MDIO=y
CONFIG_FSL_XGMAC_MDIO=y
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set
CONFIG_MARVELL_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_FIXED_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_SERIO_LIBPS2=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_PPC_EPAPR_HV_BYTECHAN=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MUX=y
CONFIG_I2C_MUX_PCA954x=y
CONFIG_I2C_MPC=y
CONFIG_SPI=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
CONFIG_SPI_SPIDEV=m
CONFIG_PTP_1588_CLOCK=y
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_EDAC_MPC85XX=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_UIO=y
CONFIG_STAGING=y
CONFIG_CLK_QORIQ=y
CONFIG_EXT2_FS=y
CONFIG_NTFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_CRAMFS=y
CONFIG_SQUASHFS=y
CONFIG_SQUASHFS_XZ=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
CONFIG_CRC_ITU_T=m
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_SCHEDSTATS=y
CONFIG_RCU_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRYPTO_DEV_FSL_CAAM=y
...@@ -58,7 +58,6 @@ CONFIG_KEXEC_FILE=y ...@@ -58,7 +58,6 @@ CONFIG_KEXEC_FILE=y
CONFIG_IRQ_ALL_CPUS=y CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y CONFIG_MEMORY_HOTREMOVE=y
CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
CONFIG_KSM=y CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_PPC_64K_PAGES=y CONFIG_PPC_64K_PAGES=y
......
...@@ -154,6 +154,34 @@ static __inline__ int test_and_change_bit(unsigned long nr, ...@@ -154,6 +154,34 @@ static __inline__ int test_and_change_bit(unsigned long nr,
return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0; return test_and_change_bits(BIT_MASK(nr), addr + BIT_WORD(nr)) != 0;
} }
#ifdef CONFIG_PPC64
static __inline__ unsigned long clear_bit_unlock_return_word(int nr,
volatile unsigned long *addr)
{
unsigned long old, t;
unsigned long *p = (unsigned long *)addr + BIT_WORD(nr);
unsigned long mask = BIT_MASK(nr);
__asm__ __volatile__ (
PPC_RELEASE_BARRIER
"1:" PPC_LLARX(%0,0,%3,0) "\n"
"andc %1,%0,%2\n"
PPC405_ERR77(0,%3)
PPC_STLCX "%1,0,%3\n"
"bne- 1b\n"
: "=&r" (old), "=&r" (t)
: "r" (mask), "r" (p)
: "cc", "memory");
return old;
}
/* This is a special function for mm/filemap.c */
#define clear_bit_unlock_is_negative_byte(nr, addr) \
(clear_bit_unlock_return_word(nr, addr) & BIT_MASK(PG_waiters))
#endif /* CONFIG_PPC64 */
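/*
 * Illustrative sketch (not part of the original patch): roughly how a caller
 * such as mm/filemap.c's unlock_page() is expected to use this primitive --
 * drop PG_locked with release semantics and learn, in the same atomic op,
 * whether PG_waiters was set so the wake-up can be skipped when nobody is
 * waiting. The names below are recalled from that era's mm code, not quoted
 * from this commit:
 *
 *	void unlock_page(struct page *page)
 *	{
 *		page = compound_head(page);
 *		VM_BUG_ON_PAGE(!PageLocked(page), page);
 *		if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
 *			wake_up_page_bit(page, PG_locked);
 *	}
 */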
#include <asm-generic/bitops/non-atomic.h> #include <asm-generic/bitops/non-atomic.h>
static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr) static __inline__ void __clear_bit_unlock(int nr, volatile unsigned long *addr)
......
...@@ -57,6 +57,8 @@ struct pnv_php_slot { ...@@ -57,6 +57,8 @@ struct pnv_php_slot {
uint64_t id; uint64_t id;
char *name; char *name;
int slot_no; int slot_no;
unsigned int flags;
#define PNV_PHP_FLAG_BROKEN_PDC 0x1
struct kref kref; struct kref kref;
#define PNV_PHP_STATE_INITIALIZED 0 #define PNV_PHP_STATE_INITIALIZED 0
#define PNV_PHP_STATE_REGISTERED 1 #define PNV_PHP_STATE_REGISTERED 1
......
...@@ -505,7 +505,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601) ...@@ -505,7 +505,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define MTMSRD(r) mtmsrd r #define MTMSRD(r) mtmsrd r
#define MTMSR_EERI(reg) mtmsrd reg,1 #define MTMSR_EERI(reg) mtmsrd reg,1
#else #else
#define FIX_SRR1(ra, rb)
#ifndef CONFIG_40x #ifndef CONFIG_40x
#define RFI rfi #define RFI rfi
#else #else
......
...@@ -225,6 +225,7 @@ struct thread_struct { ...@@ -225,6 +225,7 @@ struct thread_struct {
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
unsigned long start_tb; /* Start purr when proc switched in */ unsigned long start_tb; /* Start purr when proc switched in */
unsigned long accum_tb; /* Total accumulated purr for process */ unsigned long accum_tb; /* Total accumulated purr for process */
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT #ifdef CONFIG_HAVE_HW_BREAKPOINT
struct perf_event *ptrace_bps[HBP_NUM]; struct perf_event *ptrace_bps[HBP_NUM];
/* /*
...@@ -233,7 +234,6 @@ struct thread_struct { ...@@ -233,7 +234,6 @@ struct thread_struct {
*/ */
struct perf_event *last_hit_ubp; struct perf_event *last_hit_ubp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */ struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
unsigned long trap_nr; /* last trap # on this thread */ unsigned long trap_nr; /* last trap # on this thread */
u8 load_fp; u8 load_fp;
......
...@@ -153,6 +153,7 @@ struct of_drconf_cell { ...@@ -153,6 +153,7 @@ struct of_drconf_cell {
#define OV5_XCMO 0x0440 /* Page Coalescing */ #define OV5_XCMO 0x0440 /* Page Coalescing */
#define OV5_TYPE1_AFFINITY 0x0580 /* Type 1 NUMA affinity */ #define OV5_TYPE1_AFFINITY 0x0580 /* Type 1 NUMA affinity */
#define OV5_PRRN 0x0540 /* Platform Resource Reassignment */ #define OV5_PRRN 0x0540 /* Platform Resource Reassignment */
#define OV5_HP_EVT 0x0604 /* Hot Plug Event support */
#define OV5_RESIZE_HPT 0x0601 /* Hash Page Table resizing */ #define OV5_RESIZE_HPT 0x0601 /* Hash Page Table resizing */
#define OV5_PFO_HW_RNG 0x1180 /* PFO Random Number Generator */ #define OV5_PFO_HW_RNG 0x1180 /* PFO Random Number Generator */
#define OV5_PFO_HW_842 0x1140 /* PFO Compression Accelerator */ #define OV5_PFO_HW_842 0x1140 /* PFO Compression Accelerator */
......
...@@ -552,7 +552,9 @@ ...@@ -552,7 +552,9 @@
#define SPRN_IBAT7U 0x236 /* Instruction BAT 7 Upper Register */ #define SPRN_IBAT7U 0x236 /* Instruction BAT 7 Upper Register */
#define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */ #define SPRN_ICMP 0x3D5 /* Instruction TLB Compare Register */
#define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */ #define SPRN_ICTC 0x3FB /* Instruction Cache Throttling Control Reg */
#ifndef SPRN_ICTRL
#define SPRN_ICTRL 0x3F3 /* 1011 7450 icache and interrupt ctrl */ #define SPRN_ICTRL 0x3F3 /* 1011 7450 icache and interrupt ctrl */
#endif
#define ICTRL_EICE 0x08000000 /* enable icache parity errs */ #define ICTRL_EICE 0x08000000 /* enable icache parity errs */
#define ICTRL_EDC 0x04000000 /* enable dcache parity errs */ #define ICTRL_EDC 0x04000000 /* enable dcache parity errs */
#define ICTRL_EICP 0x00000100 /* enable icache par. check */ #define ICTRL_EICP 0x00000100 /* enable icache par. check */
......
...@@ -28,6 +28,17 @@ ...@@ -28,6 +28,17 @@
/* Special MSR manipulation registers */ /* Special MSR manipulation registers */
#define SPRN_EIE 80 /* External interrupt enable (EE=1, RI=1) */ #define SPRN_EIE 80 /* External interrupt enable (EE=1, RI=1) */
#define SPRN_EID 81 /* External interrupt disable (EE=0, RI=1) */ #define SPRN_EID 81 /* External interrupt disable (EE=0, RI=1) */
#define SPRN_NRI 82 /* Non recoverable interrupt (EE=0, RI=0) */
/* Debug registers */
#define SPRN_CMPA 144
#define SPRN_COUNTA 150
#define SPRN_CMPE 152
#define SPRN_CMPF 153
#define SPRN_LCTRL1 156
#define SPRN_LCTRL2 157
#define SPRN_ICTRL 158
#define SPRN_BAR 159
/* Commands. Only the first few are available to the instruction cache. /* Commands. Only the first few are available to the instruction cache.
*/ */
......
...@@ -307,6 +307,7 @@ struct pseries_hp_errorlog { ...@@ -307,6 +307,7 @@ struct pseries_hp_errorlog {
union { union {
__be32 drc_index; __be32 drc_index;
__be32 drc_count; __be32 drc_count;
struct { __be32 count, index; } ic;
char drc_name[1]; char drc_name[1];
} _drc_u; } _drc_u;
}; };
...@@ -323,6 +324,7 @@ struct pseries_hp_errorlog { ...@@ -323,6 +324,7 @@ struct pseries_hp_errorlog {
#define PSERIES_HP_ELOG_ID_DRC_NAME 1 #define PSERIES_HP_ELOG_ID_DRC_NAME 1
#define PSERIES_HP_ELOG_ID_DRC_INDEX 2 #define PSERIES_HP_ELOG_ID_DRC_INDEX 2
#define PSERIES_HP_ELOG_ID_DRC_COUNT 3 #define PSERIES_HP_ELOG_ID_DRC_COUNT 3
#define PSERIES_HP_ELOG_ID_DRC_IC 4
struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
uint16_t section_id); uint16_t section_id);
......
This diff is collapsed.
...@@ -101,6 +101,8 @@ _GLOBAL(__setup_cpu_power9) ...@@ -101,6 +101,8 @@ _GLOBAL(__setup_cpu_power9)
mfspr r3,SPRN_LPCR mfspr r3,SPRN_LPCR
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
or r3, r3, r4 or r3, r3, r4
LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR)
andc r3, r3, r4
bl __init_LPCR bl __init_LPCR
bl __init_HFSCR bl __init_HFSCR
bl __init_tlb_power9 bl __init_tlb_power9
...@@ -122,6 +124,8 @@ _GLOBAL(__restore_cpu_power9) ...@@ -122,6 +124,8 @@ _GLOBAL(__restore_cpu_power9)
mfspr r3,SPRN_LPCR mfspr r3,SPRN_LPCR
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
or r3, r3, r4 or r3, r3, r4
LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR)
andc r3, r3, r4
bl __init_LPCR bl __init_LPCR
bl __init_HFSCR bl __init_HFSCR
bl __init_tlb_power9 bl __init_tlb_power9
......
...@@ -386,6 +386,23 @@ static struct cpu_spec __initdata cpu_specs[] = { ...@@ -386,6 +386,23 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8, .machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8", .platform = "power8",
}, },
{ /* 3.00-compliant processor, i.e. Power9 "architected" mode */
.pvr_mask = 0xffffffff,
.pvr_value = 0x0f000005,
.cpu_name = "POWER9 (architected)",
.cpu_features = CPU_FTRS_POWER9,
.cpu_user_features = COMMON_USER_POWER9,
.cpu_user_features2 = COMMON_USER2_POWER9,
.mmu_features = MMU_FTRS_POWER9,
.icache_bsize = 128,
.dcache_bsize = 128,
.oprofile_type = PPC_OPROFILE_INVALID,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.cpu_setup = __setup_cpu_power9,
.cpu_restore = __restore_cpu_power9,
.flush_tlb = __flush_tlb_power9,
.platform = "power9",
},
{ /* Power7 */ { /* Power7 */
.pvr_mask = 0xffff0000, .pvr_mask = 0xffff0000,
.pvr_value = 0x003f0000, .pvr_value = 0x003f0000,
......
...@@ -205,6 +205,9 @@ transfer_to_handler_cont: ...@@ -205,6 +205,9 @@ transfer_to_handler_cont:
mflr r9 mflr r9
lwz r11,0(r9) /* virtual address of handler */ lwz r11,0(r9) /* virtual address of handler */
lwz r9,4(r9) /* where to go when done */ lwz r9,4(r9) /* where to go when done */
#ifdef CONFIG_PPC_8xx_PERF_EVENT
mtspr SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
lis r12,reenable_mmu@h lis r12,reenable_mmu@h
ori r12,r12,reenable_mmu@l ori r12,r12,reenable_mmu@l
...@@ -292,7 +295,9 @@ stack_ovf: ...@@ -292,7 +295,9 @@ stack_ovf:
lis r9,StackOverflow@ha lis r9,StackOverflow@ha
addi r9,r9,StackOverflow@l addi r9,r9,StackOverflow@l
LOAD_MSR_KERNEL(r10,MSR_KERNEL) LOAD_MSR_KERNEL(r10,MSR_KERNEL)
FIX_SRR1(r10,r12)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
mtspr SPRN_NRI, r0
#endif
mtspr SPRN_SRR0,r9 mtspr SPRN_SRR0,r9
mtspr SPRN_SRR1,r10 mtspr SPRN_SRR1,r10
SYNC SYNC
...@@ -417,9 +422,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) ...@@ -417,9 +422,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
mtlr r4 mtlr r4
mtcr r5 mtcr r5
lwz r7,_NIP(r1) lwz r7,_NIP(r1)
FIX_SRR1(r8, r0)
lwz r2,GPR2(r1) lwz r2,GPR2(r1)
lwz r1,GPR1(r1) lwz r1,GPR1(r1)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
mtspr SPRN_NRI, r0
#endif
mtspr SPRN_SRR0,r7 mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r8 mtspr SPRN_SRR1,r8
SYNC SYNC
...@@ -699,6 +706,9 @@ fast_exception_return: ...@@ -699,6 +706,9 @@ fast_exception_return:
lwz r10,_LINK(r11) lwz r10,_LINK(r11)
mtlr r10 mtlr r10
REST_GPR(10, r11) REST_GPR(10, r11)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
mtspr SPRN_NRI, r0
#endif
mtspr SPRN_SRR1,r9 mtspr SPRN_SRR1,r9
mtspr SPRN_SRR0,r12 mtspr SPRN_SRR0,r12
REST_GPR(9, r11) REST_GPR(9, r11)
...@@ -947,7 +957,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX) ...@@ -947,7 +957,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
.globl exc_exit_restart .globl exc_exit_restart
exc_exit_restart: exc_exit_restart:
lwz r12,_NIP(r1) lwz r12,_NIP(r1)
FIX_SRR1(r9,r10)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
mtspr SPRN_NRI, r0
#endif
mtspr SPRN_SRR0,r12 mtspr SPRN_SRR0,r12
mtspr SPRN_SRR1,r9 mtspr SPRN_SRR1,r9
REST_4GPRS(9, r1) REST_4GPRS(9, r1)
...@@ -1290,7 +1302,6 @@ _GLOBAL(enter_rtas) ...@@ -1290,7 +1302,6 @@ _GLOBAL(enter_rtas)
1: tophys(r9,r1) 1: tophys(r9,r1)
lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */ lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
lwz r9,8(r9) /* original msr value */ lwz r9,8(r9) /* original msr value */
FIX_SRR1(r9,r0)
addi r1,r1,INT_FRAME_SIZE addi r1,r1,INT_FRAME_SIZE
li r0,0 li r0,0
mtspr SPRN_SPRG_RTAS,r0 mtspr SPRN_SPRG_RTAS,r0
......
...@@ -869,7 +869,6 @@ __secondary_start: ...@@ -869,7 +869,6 @@ __secondary_start:
/* enable MMU and jump to start_secondary */ /* enable MMU and jump to start_secondary */
li r4,MSR_KERNEL li r4,MSR_KERNEL
FIX_SRR1(r4,r5)
lis r3,start_secondary@h lis r3,start_secondary@h
ori r3,r3,start_secondary@l ori r3,r3,start_secondary@l
mtspr SPRN_SRR0,r3 mtspr SPRN_SRR0,r3
...@@ -977,7 +976,6 @@ start_here: ...@@ -977,7 +976,6 @@ start_here:
ori r4,r4,2f@l ori r4,r4,2f@l
tophys(r4,r4) tophys(r4,r4)
li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
FIX_SRR1(r3,r5)
mtspr SPRN_SRR0,r4 mtspr SPRN_SRR0,r4
mtspr SPRN_SRR1,r3 mtspr SPRN_SRR1,r3
SYNC SYNC
...@@ -1001,7 +999,6 @@ start_here: ...@@ -1001,7 +999,6 @@ start_here:
/* Now turn on the MMU for real! */ /* Now turn on the MMU for real! */
li r4,MSR_KERNEL li r4,MSR_KERNEL
FIX_SRR1(r4,r5)
lis r3,start_kernel@h lis r3,start_kernel@h
ori r3,r3,start_kernel@l ori r3,r3,start_kernel@l
mtspr SPRN_SRR0,r3 mtspr SPRN_SRR0,r3
......
...@@ -329,6 +329,12 @@ InstructionTLBMiss: ...@@ -329,6 +329,12 @@ InstructionTLBMiss:
mtspr SPRN_SPRG_SCRATCH2, r3 mtspr SPRN_SPRG_SCRATCH2, r3
#endif #endif
EXCEPTION_PROLOG_0 EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_8xx_PERF_EVENT
lis r10, (itlb_miss_counter - PAGE_OFFSET)@ha
lwz r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
addi r11, r11, 1
stw r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
#endif
/* If we are faulting a kernel address, we have to use the /* If we are faulting a kernel address, we have to use the
* kernel page tables. * kernel page tables.
...@@ -429,6 +435,12 @@ InstructionTLBMiss: ...@@ -429,6 +435,12 @@ InstructionTLBMiss:
DataStoreTLBMiss: DataStoreTLBMiss:
mtspr SPRN_SPRG_SCRATCH2, r3 mtspr SPRN_SPRG_SCRATCH2, r3
EXCEPTION_PROLOG_0 EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_8xx_PERF_EVENT
lis r10, (dtlb_miss_counter - PAGE_OFFSET)@ha
lwz r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
addi r11, r11, 1
stw r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
#endif
mfcr r3 mfcr r3
/* If we are faulting a kernel address, we have to use the /* If we are faulting a kernel address, we have to use the
...@@ -561,6 +573,7 @@ InstructionTLBError: ...@@ -561,6 +573,7 @@ InstructionTLBError:
andis. r10,r5,0x4000 andis. r10,r5,0x4000
beq+ 1f beq+ 1f
tlbie r4 tlbie r4
itlbie:
/* 0x400 is InstructionAccess exception, needed by bad_page_fault() */ /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
1: EXC_XFER_LITE(0x400, handle_page_fault) 1: EXC_XFER_LITE(0x400, handle_page_fault)
...@@ -585,6 +598,7 @@ DARFixed:/* Return from dcbx instruction bug workaround */ ...@@ -585,6 +598,7 @@ DARFixed:/* Return from dcbx instruction bug workaround */
andis. r10,r5,0x4000 andis. r10,r5,0x4000
beq+ 1f beq+ 1f
tlbie r4 tlbie r4
dtlbie:
1: li r10,RPN_PATTERN 1: li r10,RPN_PATTERN
mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */ mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
/* 0x300 is DataAccess exception, needed by bad_page_fault() */ /* 0x300 is DataAccess exception, needed by bad_page_fault() */
...@@ -602,8 +616,43 @@ DARFixed:/* Return from dcbx instruction bug workaround */ ...@@ -602,8 +616,43 @@ DARFixed:/* Return from dcbx instruction bug workaround */
* support of breakpoints and such. Someday I will get around to * support of breakpoints and such. Someday I will get around to
* using them. * using them.
*/ */
EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
. = 0x1c00
DataBreakpoint:
EXCEPTION_PROLOG_0
mfcr r10
mfspr r11, SPRN_SRR0
cmplwi cr0, r11, (dtlbie - PAGE_OFFSET)@l
cmplwi cr7, r11, (itlbie - PAGE_OFFSET)@l
beq- cr0, 11f
beq- cr7, 11f
EXCEPTION_PROLOG_1
EXCEPTION_PROLOG_2
addi r3,r1,STACK_FRAME_OVERHEAD
mfspr r4,SPRN_BAR
stw r4,_DAR(r11)
mfspr r5,SPRN_DSISR
EXC_XFER_EE(0x1c00, do_break)
11:
mtcr r10
EXCEPTION_EPILOG_0
rfi
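/*
 * Note (an interpretation, not text from the patch): the two cmplwi checks
 * above compare SRR0 against the dtlbie/itlbie labels planted next to the
 * tlbie instructions in the TLB error paths, so debug hits raised by those
 * tlbie instructions are quietly dismissed at 11: instead of being reported
 * as breakpoints.
 */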
#ifdef CONFIG_PPC_8xx_PERF_EVENT
. = 0x1d00
InstructionBreakpoint:
EXCEPTION_PROLOG_0
lis r10, (instruction_counter - PAGE_OFFSET)@ha
lwz r11, (instruction_counter - PAGE_OFFSET)@l(r10)
addi r11, r11, -1
stw r11, (instruction_counter - PAGE_OFFSET)@l(r10)
lis r10, 0xffff
ori r10, r10, 0x01
mtspr SPRN_COUNTA, r10
EXCEPTION_EPILOG_0
rfi
#else
EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE) EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
#endif
EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE) EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE) EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
...@@ -977,6 +1026,14 @@ initial_mmu: ...@@ -977,6 +1026,14 @@ initial_mmu:
lis r8, IDC_ENABLE@h lis r8, IDC_ENABLE@h
mtspr SPRN_DC_CST, r8 mtspr SPRN_DC_CST, r8
#endif #endif
/* Disable debug mode entry on breakpoints */
mfspr r8, SPRN_DER
#ifdef CONFIG_PPC_8xx_PERF_EVENT
rlwinm r8, r8, 0, ~0xc
#else
rlwinm r8, r8, 0, ~0x8
#endif
mtspr SPRN_DER, r8
blr blr
...@@ -1010,3 +1067,16 @@ cpu6_errata_word: ...@@ -1010,3 +1067,16 @@ cpu6_errata_word:
.space 16 .space 16
#endif #endif
#ifdef CONFIG_PPC_8xx_PERF_EVENT
.globl itlb_miss_counter
itlb_miss_counter:
.space 4
.globl dtlb_miss_counter
dtlb_miss_counter:
.space 4
.globl instruction_counter
instruction_counter:
.space 4
#endif
...@@ -211,9 +211,11 @@ int hw_breakpoint_handler(struct die_args *args) ...@@ -211,9 +211,11 @@ int hw_breakpoint_handler(struct die_args *args)
int rc = NOTIFY_STOP; int rc = NOTIFY_STOP;
struct perf_event *bp; struct perf_event *bp;
struct pt_regs *regs = args->regs; struct pt_regs *regs = args->regs;
#ifndef CONFIG_PPC_8xx
int stepped = 1; int stepped = 1;
struct arch_hw_breakpoint *info;
unsigned int instr; unsigned int instr;
#endif
struct arch_hw_breakpoint *info;
unsigned long dar = regs->dar; unsigned long dar = regs->dar;
/* Disable breakpoints during exception handling */ /* Disable breakpoints during exception handling */
...@@ -257,6 +259,7 @@ int hw_breakpoint_handler(struct die_args *args) ...@@ -257,6 +259,7 @@ int hw_breakpoint_handler(struct die_args *args)
(dar - bp->attr.bp_addr < bp->attr.bp_len))) (dar - bp->attr.bp_addr < bp->attr.bp_len)))
info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ; info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
#ifndef CONFIG_PPC_8xx
/* Do not emulate user-space instructions, instead single-step them */ /* Do not emulate user-space instructions, instead single-step them */
if (user_mode(regs)) { if (user_mode(regs)) {
current->thread.last_hit_ubp = bp; current->thread.last_hit_ubp = bp;
...@@ -280,6 +283,7 @@ int hw_breakpoint_handler(struct die_args *args) ...@@ -280,6 +283,7 @@ int hw_breakpoint_handler(struct die_args *args)
perf_event_disable_inatomic(bp); perf_event_disable_inatomic(bp);
goto out; goto out;
} }
#endif
/* /*
* As a policy, the callback is invoked in a 'trigger-after-execute' * As a policy, the callback is invoked in a 'trigger-after-execute'
* fashion * fashion
......
...@@ -65,6 +65,13 @@ optprobe_template_entry: ...@@ -65,6 +65,13 @@ optprobe_template_entry:
mfdsisr r5 mfdsisr r5
std r5,_DSISR(r1) std r5,_DSISR(r1)
/*
* We may get here from a module, so load the kernel TOC in r2.
* The original TOC gets restored when pt_regs is restored
* further below.
*/
ld r2,PACATOC(r13)
.global optprobe_template_op_address .global optprobe_template_op_address
optprobe_template_op_address: optprobe_template_op_address:
/* /*
......
...@@ -1560,16 +1560,10 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose, ...@@ -1560,16 +1560,10 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
/* Hookup PHB Memory resources */ /* Hookup PHB Memory resources */
for (i = 0; i < 3; ++i) { for (i = 0; i < 3; ++i) {
res = &hose->mem_resources[i]; res = &hose->mem_resources[i];
if (!res->flags) {
if (!res->flags)
if (i == 0)
printk(KERN_ERR "PCI: Memory resource 0 not set for "
"host bridge %s (domain %d)\n",
hose->dn->full_name, hose->global_number);
continue; continue;
}
offset = hose->mem_offset[i];
offset = hose->mem_offset[i];
pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i, pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
res, (unsigned long long)offset); res, (unsigned long long)offset);
......
...@@ -730,6 +730,28 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) ...@@ -730,6 +730,28 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
mtspr(SPRN_DABRX, dabrx); mtspr(SPRN_DABRX, dabrx);
return 0; return 0;
} }
#elif defined(CONFIG_PPC_8xx)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */
if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
lctrl1 |= 0xa0000;
else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
lctrl1 |= 0xf0000;
else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
lctrl2 = 0;
mtspr(SPRN_LCTRL2, 0);
mtspr(SPRN_CMPE, addr);
mtspr(SPRN_CMPF, addr + 4);
mtspr(SPRN_LCTRL1, lctrl1);
mtspr(SPRN_LCTRL2, lctrl2);
return 0;
}
#else #else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{ {
......
...@@ -839,7 +839,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { ...@@ -839,7 +839,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
0, 0,
#endif #endif
.associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN), .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
.bin_opts = OV5_FEAT(OV5_RESIZE_HPT),
.bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
.micro_checkpoint = 0, .micro_checkpoint = 0,
.reserved0 = 0, .reserved0 = 0,
.max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */ .max_cpus = cpu_to_be32(NR_CPUS), /* number of cores supported */
......
...@@ -113,14 +113,12 @@ void __init setup_tlb_core_data(void) ...@@ -113,14 +113,12 @@ void __init setup_tlb_core_data(void)
* If we have threads, we need either tlbsrx. * If we have threads, we need either tlbsrx.
* or e6500 tablewalk mode, or else TLB handlers * or e6500 tablewalk mode, or else TLB handlers
* will be racy and could produce duplicate entries. * will be racy and could produce duplicate entries.
* Should we panic instead?
*/ */
if (smt_enabled_at_boot >= 2 &&
    !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
    book3e_htw_mode != PPC_HTW_E6500) {
	/* Should we panic instead? */
	WARN_ONCE("%s: unsupported MMU configuration -- expect problems\n",
		  __func__);
WARN_ONCE(smt_enabled_at_boot >= 2 &&
	  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
	  book3e_htw_mode != PPC_HTW_E6500,
	  "%s: unsupported MMU configuration\n", __func__);
}
} }
} }
#endif #endif
......
...@@ -709,7 +709,7 @@ unsigned long long running_clock(void) ...@@ -709,7 +709,7 @@ unsigned long long running_clock(void)
* time and on a host which doesn't do any virtualisation TB *should* equal * time and on a host which doesn't do any virtualisation TB *should* equal
* VTB so it makes no difference anyway. * VTB so it makes no difference anyway.
*/ */
return local_clock() - cputime_to_nsecs(kcpustat_this_cpu->cpustat[CPUTIME_STEAL]);
return local_clock() - kcpustat_this_cpu->cpustat[CPUTIME_STEAL];
} }
#endif #endif
......
...@@ -193,9 +193,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, ...@@ -193,9 +193,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
*/ */
VM_WARN_ON(pte_present(*ptep) && !pte_protnone(*ptep)); VM_WARN_ON(pte_present(*ptep) && !pte_protnone(*ptep));
/*
 * Add the pte bit when tryint set a pte
 */
/* Add the pte bit when trying to set a pte */
pte = __pte(pte_val(pte) | _PAGE_PTE); pte = __pte(pte_val(pte) | _PAGE_PTE);
/* Note: mm->context.id might not yet have been assigned as /* Note: mm->context.id might not yet have been assigned as
......
...@@ -71,9 +71,9 @@ slb_miss_kernel_load_linear: ...@@ -71,9 +71,9 @@ slb_miss_kernel_load_linear:
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
b slb_finish_load
b .Lslb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
b slb_finish_load_1T
b .Lslb_finish_load_1T
1: 1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP #ifdef CONFIG_SPARSEMEM_VMEMMAP
...@@ -109,9 +109,9 @@ slb_miss_kernel_load_io: ...@@ -109,9 +109,9 @@ slb_miss_kernel_load_io:
addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
b slb_finish_load
b .Lslb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
b slb_finish_load_1T
b .Lslb_finish_load_1T
0: /* 0: /*
* For userspace addresses, make sure this is region 0. * For userspace addresses, make sure this is region 0.
...@@ -174,9 +174,9 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) ...@@ -174,9 +174,9 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
ld r9,PACACONTEXTID(r13) ld r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
cmpldi r10,0x1000 cmpldi r10,0x1000
bge slb_finish_load_1T
bge .Lslb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
b slb_finish_load
b .Lslb_finish_load
8: /* invalid EA - return an error indication */ 8: /* invalid EA - return an error indication */
crset 4*cr0+eq /* indicate failure */ crset 4*cr0+eq /* indicate failure */
...@@ -187,7 +187,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) ...@@ -187,7 +187,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
* *
* r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
*/ */
slb_finish_load:
.Lslb_finish_load:
rldimi r10,r9,ESID_BITS,0 rldimi r10,r9,ESID_BITS,0
ASM_VSID_SCRAMBLE(r10,r9,256M) ASM_VSID_SCRAMBLE(r10,r9,256M)
/* /*
...@@ -256,7 +256,7 @@ slb_compare_rr_to_size: ...@@ -256,7 +256,7 @@ slb_compare_rr_to_size:
* *
* r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
*/ */
slb_finish_load_1T:
.Lslb_finish_load_1T:
srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */ srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */
rldimi r10,r9,ESID_BITS_1T,0 rldimi r10,r9,ESID_BITS_1T,0
ASM_VSID_SCRAMBLE(r10,r9,1T) ASM_VSID_SCRAMBLE(r10,r9,1T)
...@@ -272,3 +272,11 @@ slb_finish_load_1T: ...@@ -272,3 +272,11 @@ slb_finish_load_1T:
clrrdi r3,r3,SID_SHIFT_1T /* clear out non-ESID bits */ clrrdi r3,r3,SID_SHIFT_1T /* clear out non-ESID bits */
b 7b b 7b
_ASM_NOKPROBE_SYMBOL(slb_allocate_realmode)
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_linear)
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_io)
_ASM_NOKPROBE_SYMBOL(slb_compare_rr_to_size)
#ifdef CONFIG_SPARSEMEM_VMEMMAP
_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_vmemmap)
#endif
/*
* Performance event support - PPC 8xx
*
* Copyright 2016 Christophe Leroy, CS Systemes d'Information
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#define PERF_8xx_ID_CPU_CYCLES 1
#define PERF_8xx_ID_HW_INSTRUCTIONS 2
#define PERF_8xx_ID_ITLB_LOAD_MISS 3
#define PERF_8xx_ID_DTLB_LOAD_MISS 4
#define C(x) PERF_COUNT_HW_CACHE_##x
#define DTLB_LOAD_MISS (C(DTLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
#define ITLB_LOAD_MISS (C(ITLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
extern unsigned long itlb_miss_counter, dtlb_miss_counter;
extern atomic_t instruction_counter;
static atomic_t insn_ctr_ref;
static s64 get_insn_ctr(void)
{
int ctr;
unsigned long counta;
do {
ctr = atomic_read(&instruction_counter);
counta = mfspr(SPRN_COUNTA);
} while (ctr != atomic_read(&instruction_counter));
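	/*
	 * Note: the 48-bit result combines the 32-bit software counter
	 * maintained by the 0x1d00 exception handler (upper bits) with the
	 * top half of COUNTA (low 16 bits); the loop above retries if the
	 * handler fired between the two reads.
	 */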
return ((s64)ctr << 16) | (counta >> 16);
}
static int event_type(struct perf_event *event)
{
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES)
return PERF_8xx_ID_CPU_CYCLES;
if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS)
return PERF_8xx_ID_HW_INSTRUCTIONS;
break;
case PERF_TYPE_HW_CACHE:
if (event->attr.config == ITLB_LOAD_MISS)
return PERF_8xx_ID_ITLB_LOAD_MISS;
if (event->attr.config == DTLB_LOAD_MISS)
return PERF_8xx_ID_DTLB_LOAD_MISS;
break;
case PERF_TYPE_RAW:
break;
default:
return -ENOENT;
}
return -EOPNOTSUPP;
}
static int mpc8xx_pmu_event_init(struct perf_event *event)
{
int type = event_type(event);
if (type < 0)
return type;
return 0;
}
static int mpc8xx_pmu_add(struct perf_event *event, int flags)
{
int type = event_type(event);
s64 val = 0;
if (type < 0)
return type;
switch (type) {
case PERF_8xx_ID_CPU_CYCLES:
val = get_tb();
break;
case PERF_8xx_ID_HW_INSTRUCTIONS:
if (atomic_inc_return(&insn_ctr_ref) == 1)
mtspr(SPRN_ICTRL, 0xc0080007);
val = get_insn_ctr();
break;
case PERF_8xx_ID_ITLB_LOAD_MISS:
val = itlb_miss_counter;
break;
case PERF_8xx_ID_DTLB_LOAD_MISS:
val = dtlb_miss_counter;
break;
}
local64_set(&event->hw.prev_count, val);
return 0;
}
static void mpc8xx_pmu_read(struct perf_event *event)
{
int type = event_type(event);
s64 prev, val = 0, delta = 0;
if (type < 0)
return;
do {
prev = local64_read(&event->hw.prev_count);
switch (type) {
case PERF_8xx_ID_CPU_CYCLES:
val = get_tb();
delta = 16 * (val - prev);
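		/*
		 * Note: the x16 scaling presumably reflects the 8xx timebase
		 * ticking once every 16 core clocks, so cycles are derived
		 * from the timebase delta (assumption inferred from this
		 * factor, not stated in the patch).
		 */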
break;
case PERF_8xx_ID_HW_INSTRUCTIONS:
val = get_insn_ctr();
delta = prev - val;
if (delta < 0)
delta += 0x1000000000000LL;
break;
case PERF_8xx_ID_ITLB_LOAD_MISS:
val = itlb_miss_counter;
delta = (s64)((s32)val - (s32)prev);
break;
case PERF_8xx_ID_DTLB_LOAD_MISS:
val = dtlb_miss_counter;
delta = (s64)((s32)val - (s32)prev);
break;
}
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
local64_add(delta, &event->count);
}
static void mpc8xx_pmu_del(struct perf_event *event, int flags)
{
mpc8xx_pmu_read(event);
if (event_type(event) != PERF_8xx_ID_HW_INSTRUCTIONS)
return;
/* If it was the last user, stop counting to avoid useless overhead */
if (atomic_dec_return(&insn_ctr_ref) == 0)
mtspr(SPRN_ICTRL, 7);
}
static struct pmu mpc8xx_pmu = {
.event_init = mpc8xx_pmu_event_init,
.add = mpc8xx_pmu_add,
.del = mpc8xx_pmu_del,
.read = mpc8xx_pmu_read,
.capabilities = PERF_PMU_CAP_NO_INTERRUPT |
PERF_PMU_CAP_NO_NMI,
};
static int init_mpc8xx_pmu(void)
{
mtspr(SPRN_ICTRL, 7);
mtspr(SPRN_CMPA, 0);
mtspr(SPRN_COUNTA, 0xffff);
return perf_pmu_register(&mpc8xx_pmu, "cpu", PERF_TYPE_RAW);
}
early_initcall(init_mpc8xx_pmu);
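/*
 * Usage sketch (not part of the original patch): once this PMU is
 * registered, the four events map onto standard perf names, so counting
 * along the lines of
 *
 *	perf stat -e cycles,instructions,iTLB-load-misses,dTLB-load-misses ./prog
 *
 * should work on an 8xx board. There is no sampling interrupt, hence the
 * PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_NMI capabilities above:
 * only counting mode is available.
 */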
...@@ -13,5 +13,7 @@ obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o ...@@ -13,5 +13,7 @@ obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o
obj-$(CONFIG_HV_PERF_CTRS) += hv-24x7.o hv-gpci.o hv-common.o obj-$(CONFIG_HV_PERF_CTRS) += hv-24x7.o hv-gpci.o hv-common.o
obj-$(CONFIG_PPC_8xx_PERF_EVENT) += 8xx-pmu.o
obj-$(CONFIG_PPC64) += $(obj64-y) obj-$(CONFIG_PPC64) += $(obj64-y)
obj-$(CONFIG_PPC32) += $(obj32-y) obj-$(CONFIG_PPC32) += $(obj32-y)
...@@ -57,6 +57,7 @@ struct cpu_hw_events { ...@@ -57,6 +57,7 @@ struct cpu_hw_events {
void *bhrb_context; void *bhrb_context;
struct perf_branch_stack bhrb_stack; struct perf_branch_stack bhrb_stack;
struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES]; struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
u64 ic_init;
}; };
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
...@@ -127,6 +128,10 @@ static inline void power_pmu_bhrb_disable(struct perf_event *event) {} ...@@ -127,6 +128,10 @@ static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {} static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { } static void pmao_restore_workaround(bool ebb) { }
static bool use_ic(u64 event)
{
return false;
}
#endif /* CONFIG_PPC32 */ #endif /* CONFIG_PPC32 */
static bool regs_use_siar(struct pt_regs *regs) static bool regs_use_siar(struct pt_regs *regs)
...@@ -243,7 +248,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs) ...@@ -243,7 +248,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
*/ */
if (ppmu->flags & PPMU_NO_SIPR) { if (ppmu->flags & PPMU_NO_SIPR) {
unsigned long siar = mfspr(SPRN_SIAR); unsigned long siar = mfspr(SPRN_SIAR);
if (siar >= PAGE_OFFSET)
if (is_kernel_addr(siar))
return PERF_RECORD_MISC_KERNEL; return PERF_RECORD_MISC_KERNEL;
return PERF_RECORD_MISC_USER; return PERF_RECORD_MISC_USER;
} }
...@@ -688,6 +693,15 @@ static void pmao_restore_workaround(bool ebb) ...@@ -688,6 +693,15 @@ static void pmao_restore_workaround(bool ebb)
mtspr(SPRN_PMC5, pmcs[4]); mtspr(SPRN_PMC5, pmcs[4]);
mtspr(SPRN_PMC6, pmcs[5]); mtspr(SPRN_PMC6, pmcs[5]);
} }
static bool use_ic(u64 event)
{
if (cpu_has_feature(CPU_FTR_POWER9_DD1) &&
(event == 0x200f2 || event == 0x300f2))
return true;
return false;
}
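/*
 * Note: 0x200f2 and 0x300f2 are PM_INST_DISP and PM_INST_DISP_ALT (added to
 * power9-events-list.h further down), so the DD1 instruction counter
 * workaround applies only to those two events.
 */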
#endif /* CONFIG_PPC64 */ #endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs); static void perf_event_interrupt(struct pt_regs *regs);
...@@ -1007,6 +1021,7 @@ static u64 check_and_compute_delta(u64 prev, u64 val) ...@@ -1007,6 +1021,7 @@ static u64 check_and_compute_delta(u64 prev, u64 val)
static void power_pmu_read(struct perf_event *event) static void power_pmu_read(struct perf_event *event)
{ {
s64 val, delta, prev; s64 val, delta, prev;
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
if (event->hw.state & PERF_HES_STOPPED) if (event->hw.state & PERF_HES_STOPPED)
return; return;
...@@ -1016,6 +1031,13 @@ static void power_pmu_read(struct perf_event *event) ...@@ -1016,6 +1031,13 @@ static void power_pmu_read(struct perf_event *event)
if (is_ebb_event(event)) { if (is_ebb_event(event)) {
val = read_pmc(event->hw.idx); val = read_pmc(event->hw.idx);
if (use_ic(event->attr.config)) {
val = mfspr(SPRN_IC);
if (val > cpuhw->ic_init)
val = val - cpuhw->ic_init;
else
val = val + (0 - cpuhw->ic_init);
}
local64_set(&event->hw.prev_count, val); local64_set(&event->hw.prev_count, val);
return; return;
} }
...@@ -1029,6 +1051,13 @@ static void power_pmu_read(struct perf_event *event) ...@@ -1029,6 +1051,13 @@ static void power_pmu_read(struct perf_event *event)
prev = local64_read(&event->hw.prev_count); prev = local64_read(&event->hw.prev_count);
barrier(); barrier();
val = read_pmc(event->hw.idx); val = read_pmc(event->hw.idx);
if (use_ic(event->attr.config)) {
val = mfspr(SPRN_IC);
if (val > cpuhw->ic_init)
val = val - cpuhw->ic_init;
else
val = val + (0 - cpuhw->ic_init);
}
delta = check_and_compute_delta(prev, val); delta = check_and_compute_delta(prev, val);
if (!delta) if (!delta)
return; return;
...@@ -1466,6 +1495,13 @@ static int power_pmu_add(struct perf_event *event, int ef_flags) ...@@ -1466,6 +1495,13 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
event->attr.branch_sample_type); event->attr.branch_sample_type);
} }
/*
* Workaround for POWER9 DD1 to use the Instruction Counter
* register value for instruction counting
*/
if (use_ic(event->attr.config))
cpuhw->ic_init = mfspr(SPRN_IC);
perf_pmu_enable(event->pmu); perf_pmu_enable(event->pmu);
local_irq_restore(flags); local_irq_restore(flags);
return ret; return ret;
......
...@@ -97,6 +97,28 @@ static unsigned long combine_shift(unsigned long pmc) ...@@ -97,6 +97,28 @@ static unsigned long combine_shift(unsigned long pmc)
return MMCR1_COMBINE_SHIFT(pmc); return MMCR1_COMBINE_SHIFT(pmc);
} }
static inline bool event_is_threshold(u64 event)
{
return (event >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
}
static bool is_thresh_cmp_valid(u64 event)
{
unsigned int cmp, exp;
/*
* Check the mantissa upper two bits are not zero, unless the
* exponent is also zero. See the THRESH_CMP_MANTISSA doc.
*/
cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
exp = cmp >> 7;
if (exp && (cmp & 0x60) == 0)
return false;
return true;
}
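/*
 * Worked example (added for clarity): thresh_cmp is a small floating-point
 * style field, exp = cmp >> 7, mantissa in the low 7 bits. cmp = 0x120
 * (exp 2, mantissa 0x20) is accepted because one of the two upper mantissa
 * bits (0x60) is set; cmp = 0x100 (exp 2, mantissa 0) is rejected;
 * cmp = 0x40 (exp 0) is always accepted.
 */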
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{ {
unsigned int unit, pmc, cache, ebb; unsigned int unit, pmc, cache, ebb;
...@@ -163,6 +185,12 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) ...@@ -163,6 +185,12 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT); value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
} }
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
mask |= CNST_THRESH_MASK;
value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
}
} else {
/* /*
* Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC, * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
* the threshold control bits are used for the match value. * the threshold control bits are used for the match value.
...@@ -171,21 +199,13 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) ...@@ -171,21 +199,13 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
mask |= CNST_FAB_MATCH_MASK; mask |= CNST_FAB_MATCH_MASK;
value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT); value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
} else { } else {
/*
 * Check the mantissa upper two bits are not zero, unless the
 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
 */
unsigned int cmp, exp;
cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
exp = cmp >> 7;
if (exp && (cmp & 0x60) == 0)
	return -1;
if (!is_thresh_cmp_valid(event))
	return -1;
mask |= CNST_THRESH_MASK; mask |= CNST_THRESH_MASK;
value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT); value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
} }
}
if (!pmc && ebb) if (!pmc && ebb)
/* EBB events must specify the PMC */ /* EBB events must specify the PMC */
...@@ -279,7 +299,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev, ...@@ -279,7 +299,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
* PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC, * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
* the threshold bits are used for the match value. * the threshold bits are used for the match value.
*/ */
if (event_is_fab_match(event[i])) {
if (!cpu_has_feature(CPU_FTR_ARCH_300) && event_is_fab_match(event[i])) {
mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) & mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT; EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
} else { } else {
...@@ -338,3 +358,39 @@ void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]) ...@@ -338,3 +358,39 @@ void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[])
if (pmc <= 3) if (pmc <= 3)
mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1)); mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
} }
static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int size)
{
int i, j;
for (i = 0; i < size; ++i) {
if (event < ev_alt[i][0])
break;
for (j = 0; j < MAX_ALT && ev_alt[i][j]; ++j)
if (event == ev_alt[i][j])
return i;
}
return -1;
}
int isa207_get_alternatives(u64 event, u64 alt[],
const unsigned int ev_alt[][MAX_ALT], int size)
{
int i, j, num_alt = 0;
u64 alt_event;
alt[num_alt++] = event;
i = find_alternative(event, ev_alt, size);
if (i >= 0) {
/* Filter out the original event, it's already in alt[0] */
for (j = 0; j < MAX_ALT; ++j) {
alt_event = ev_alt[i][j];
if (alt_event && alt_event != event)
alt[num_alt++] = alt_event;
}
}
return num_alt;
}
...@@ -222,6 +222,10 @@ ...@@ -222,6 +222,10 @@
CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \ CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
/*
* Lets restrict use of PMC5 for instruction counting.
*/
#define P9_DD1_TEST_ADDER (ISA207_TEST_ADDER | CNST_PMC_VAL(5))
/* Bits in MMCR1 for PowerISA v2.07 */ /* Bits in MMCR1 for PowerISA v2.07 */
#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1))) #define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
...@@ -260,5 +264,8 @@ int isa207_compute_mmcr(u64 event[], int n_ev, ...@@ -260,5 +264,8 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], unsigned long mmcr[], unsigned int hwc[], unsigned long mmcr[],
struct perf_event *pevents[]); struct perf_event *pevents[]);
void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]); void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]);
int isa207_get_alternatives(u64 event, u64 alt[],
const unsigned int ev_alt[][MAX_ALT], int size);
#endif #endif
...@@ -48,43 +48,12 @@ static const unsigned int event_alternatives[][MAX_ALT] = { ...@@ -48,43 +48,12 @@ static const unsigned int event_alternatives[][MAX_ALT] = {
{ PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL }, { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
}; };
/*
* Scan the alternatives table for a match and return the
* index into the alternatives table if found, else -1.
*/
static int find_alternative(u64 event)
{
int i, j;
for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
if (event < event_alternatives[i][0])
break;
for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
if (event == event_alternatives[i][j])
return i;
}
return -1;
}
static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[]) static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{ {
int i, j, num_alt = 0; int i, j, num_alt = 0;
u64 alt_event;
alt[num_alt++] = event;
i = find_alternative(event);
if (i >= 0) {
/* Filter out the original event, it's already in alt[0] */
for (j = 0; j < MAX_ALT; ++j) {
alt_event = event_alternatives[i][j];
if (alt_event && alt_event != event)
alt[num_alt++] = alt_event;
}
}
num_alt = isa207_get_alternatives(event, alt, event_alternatives,
(int)ARRAY_SIZE(event_alternatives));
if (flags & PPMU_ONLY_COUNT_RUN) { if (flags & PPMU_ONLY_COUNT_RUN) {
/* /*
* We're only counting in RUN state, so PM_CYC is equivalent to * We're only counting in RUN state, so PM_CYC is equivalent to
......
...@@ -53,3 +53,6 @@ EVENT(PM_ITLB_MISS, 0x400fc) ...@@ -53,3 +53,6 @@ EVENT(PM_ITLB_MISS, 0x400fc)
EVENT(PM_RUN_INST_CMPL, 0x500fa) EVENT(PM_RUN_INST_CMPL, 0x500fa)
/* Run_cycles */ /* Run_cycles */
EVENT(PM_RUN_CYC, 0x600f4) EVENT(PM_RUN_CYC, 0x600f4)
/* Instruction Dispatched */
EVENT(PM_INST_DISP, 0x200f2)
EVENT(PM_INST_DISP_ALT, 0x300f2)
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
* | | [ ] [ ] [ thresh_cmp ] [ thresh_ctl ] * | | [ ] [ ] [ thresh_cmp ] [ thresh_ctl ]
* | | | | | * | | | | |
* | | *- IFM (Linux) | thresh start/stop OR FAB match -* * | | *- IFM (Linux) | thresh start/stop -*
* | *- BHRB (Linux) *sm * | *- BHRB (Linux) *sm
* *- EBB (Linux) * *- EBB (Linux)
* *
...@@ -50,11 +50,9 @@ ...@@ -50,11 +50,9 @@
* MMCR1[31] = pmc4combine[1] * MMCR1[31] = pmc4combine[1]
* *
* if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
* # PM_MRK_FAB_RSP_MATCH * MMCR1[20:27] = thresh_ctl
* MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
* else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
* # PM_MRK_FAB_RSP_MATCH_CYC * MMCR1[20:27] = thresh_ctl
* MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
* else * else
* MMCRA[48:55] = thresh_ctl (THRESH START/END) * MMCRA[48:55] = thresh_ctl (THRESH START/END)
* *
...@@ -106,6 +104,21 @@ enum { ...@@ -106,6 +104,21 @@ enum {
/* PowerISA v2.07 format attribute structure*/ /* PowerISA v2.07 format attribute structure*/
extern struct attribute_group isa207_pmu_format_group; extern struct attribute_group isa207_pmu_format_group;
/* Table of alternatives, sorted by column 0 */
static const unsigned int power9_event_alternatives[][MAX_ALT] = {
{ PM_INST_DISP, PM_INST_DISP_ALT },
};
static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
int num_alt = 0;
num_alt = isa207_get_alternatives(event, alt, power9_event_alternatives,
(int)ARRAY_SIZE(power9_event_alternatives));
return num_alt;
}
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC); GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC); GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL); GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
...@@ -213,6 +226,17 @@ static const struct attribute_group *power9_pmu_attr_groups[] = { ...@@ -213,6 +226,17 @@ static const struct attribute_group *power9_pmu_attr_groups[] = {
NULL, NULL,
}; };
static int power9_generic_events_dd1[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_DISP,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_CMPL,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
[PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
[PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN,
};
static int power9_generic_events[] = { static int power9_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC, [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC, [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
...@@ -383,10 +407,11 @@ static struct power_pmu power9_isa207_pmu = { ...@@ -383,10 +407,11 @@ static struct power_pmu power9_isa207_pmu = {
.config_bhrb = power9_config_bhrb, .config_bhrb = power9_config_bhrb,
.bhrb_filter_map = power9_bhrb_filter_map, .bhrb_filter_map = power9_bhrb_filter_map,
.get_constraint = isa207_get_constraint, .get_constraint = isa207_get_constraint,
.get_alternatives = power9_get_alternatives,
.disable_pmc = isa207_disable_pmc, .disable_pmc = isa207_disable_pmc,
.flags = PPMU_NO_SIAR | PPMU_ARCH_207S, .flags = PPMU_NO_SIAR | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power9_generic_events), .n_generic = ARRAY_SIZE(power9_generic_events_dd1),
.generic_events = power9_generic_events, .generic_events = power9_generic_events_dd1,
.cache_events = &power9_cache_events, .cache_events = &power9_cache_events,
.attr_groups = power9_isa207_pmu_attr_groups, .attr_groups = power9_isa207_pmu_attr_groups,
.bhrb_nr = 32, .bhrb_nr = 32,
...@@ -396,11 +421,12 @@ static struct power_pmu power9_pmu = { ...@@ -396,11 +421,12 @@ static struct power_pmu power9_pmu = {
.name = "POWER9", .name = "POWER9",
.n_counter = MAX_PMU_COUNTERS, .n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS, .add_fields = ISA207_ADD_FIELDS,
.test_adder = ISA207_TEST_ADDER, .test_adder = P9_DD1_TEST_ADDER,
.compute_mmcr = isa207_compute_mmcr, .compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power9_config_bhrb, .config_bhrb = power9_config_bhrb,
.bhrb_filter_map = power9_bhrb_filter_map, .bhrb_filter_map = power9_bhrb_filter_map,
.get_constraint = isa207_get_constraint, .get_constraint = isa207_get_constraint,
.get_alternatives = power9_get_alternatives,
.disable_pmc = isa207_disable_pmc, .disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S, .flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power9_generic_events), .n_generic = ARRAY_SIZE(power9_generic_events),
...@@ -420,6 +446,11 @@ static int __init init_power9_pmu(void) ...@@ -420,6 +446,11 @@ static int __init init_power9_pmu(void)
return -ENODEV; return -ENODEV;
if (cpu_has_feature(CPU_FTR_POWER9_DD1)) { if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
/*
* Since PM_INST_CMPL may not provide the right counts in all
* sampling scenarios on POWER9 DD1, use PM_INST_DISP instead.
*/
EVENT_VAR(PM_INST_CMPL, _g).id = PM_INST_DISP;
rc = register_power_pmu(&power9_isa207_pmu); rc = register_power_pmu(&power9_isa207_pmu);
} else { } else {
rc = register_power_pmu(&power9_pmu); rc = register_power_pmu(&power9_pmu);
......
...@@ -22,6 +22,7 @@ obj-$(CONFIG_P1022_RDK) += p1022_rdk.o ...@@ -22,6 +22,7 @@ obj-$(CONFIG_P1022_RDK) += p1022_rdk.o
obj-$(CONFIG_P1023_RDB) += p1023_rdb.o obj-$(CONFIG_P1023_RDB) += p1023_rdb.o
obj-$(CONFIG_TWR_P102x) += twr_p102x.o obj-$(CONFIG_TWR_P102x) += twr_p102x.o
obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o
obj-$(CONFIG_FB_FSL_DIU) += t1042rdb_diu.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o obj-$(CONFIG_STX_GP3) += stx_gp3.o
obj-$(CONFIG_TQM85xx) += tqm85xx.o obj-$(CONFIG_TQM85xx) += tqm85xx.o
obj-$(CONFIG_SBC8548) += sbc8548.o obj-$(CONFIG_SBC8548) += sbc8548.o
......
...@@ -157,6 +157,7 @@ static const char * const boards[] __initconst = { ...@@ -157,6 +157,7 @@ static const char * const boards[] __initconst = {
"fsl,T1040RDB", "fsl,T1040RDB",
"fsl,T1042RDB", "fsl,T1042RDB",
"fsl,T1042RDB_PI", "fsl,T1042RDB_PI",
"keymile,kmcent2",
"keymile,kmcoge4", "keymile,kmcoge4",
"varisys,CYRUS", "varisys,CYRUS",
NULL NULL
......
/*
* T1042 platform DIU operation
*
* Copyright 2014 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <sysdev/fsl_soc.h>
/* DIU Pixel Clock Control Register (PIXCLKCR) offset in scfg */
#define CCSR_SCFG_PIXCLKCR 0x28
/* DIU Pixel Clock bits of the PIXCLKCR */
#define PIXCLKCR_PXCKEN 0x80000000
#define PIXCLKCR_PXCKINV 0x40000000
#define PIXCLKCR_PXCKDLY 0x0000FF00
#define PIXCLKCR_PXCLK_MASK 0x00FF0000
/* Some CPLD register definitions */
#define CPLD_DIUCSR 0x16
#define CPLD_DIUCSR_DVIEN 0x80
#define CPLD_DIUCSR_BACKLIGHT 0x0f
struct device_node *cpld_node;
/**
* t1042rdb_set_monitor_port: switch the output to a different monitor port
*/
static void t1042rdb_set_monitor_port(enum fsl_diu_monitor_port port)
{
static void __iomem *cpld_base;
cpld_base = of_iomap(cpld_node, 0);
if (!cpld_base) {
pr_err("%s: Could not map cpld registers\n", __func__);
goto exit;
}
switch (port) {
case FSL_DIU_PORT_DVI:
/* Enable the DVI (HDMI) port, disable the DFP and
* the backlight
*/
clrbits8(cpld_base + CPLD_DIUCSR, CPLD_DIUCSR_DVIEN);
break;
case FSL_DIU_PORT_LVDS:
/*
* LVDS also needs backlight enabled, otherwise the display
* will be blank.
*/
/* Enable the DFP port, disable the DVI */
setbits8(cpld_base + CPLD_DIUCSR, 0x01 << 8);
setbits8(cpld_base + CPLD_DIUCSR, 0x01 << 4);
setbits8(cpld_base + CPLD_DIUCSR, CPLD_DIUCSR_BACKLIGHT);
break;
default:
pr_err("%s: Unsupported monitor port %i\n", __func__, port);
}
iounmap(cpld_base);
exit:
of_node_put(cpld_node);
}
/**
* t1042rdb_set_pixel_clock: program the DIU's clock
* @pixclock: pixel clock in ps (picoseconds)
*/
static void t1042rdb_set_pixel_clock(unsigned int pixclock)
{
struct device_node *scfg_np;
void __iomem *scfg;
unsigned long freq;
u64 temp;
u32 pxclk;
scfg_np = of_find_compatible_node(NULL, NULL, "fsl,t1040-scfg");
if (!scfg_np) {
pr_err("%s: Missing scfg node. Can not display video.\n",
__func__);
return;
}
scfg = of_iomap(scfg_np, 0);
of_node_put(scfg_np);
if (!scfg) {
pr_err("%s: Could not map device. Can not display video.\n",
__func__);
return;
}
/* Convert pixclock into frequency */
temp = 1000000000000ULL;
do_div(temp, pixclock);
freq = temp;
/*
* 'pxclk' is the ratio of the platform clock to the pixel clock.
* This number is programmed into the PIXCLKCR register, and the valid
* range of values is 2-255.
*/
pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq);
pxclk = clamp_t(u32, pxclk, 2, 255);
/* Disable the pixel clock, and set it to non-inverted and no delay */
clrbits32(scfg + CCSR_SCFG_PIXCLKCR,
PIXCLKCR_PXCKEN | PIXCLKCR_PXCKDLY | PIXCLKCR_PXCLK_MASK);
/* Enable the clock and set the pxclk */
setbits32(scfg + CCSR_SCFG_PIXCLKCR, PIXCLKCR_PXCKEN | (pxclk << 16));
iounmap(scfg);
}
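As a rough worked example of the conversion above (the clock numbers are hypothetical, chosen only for illustration): a pixel period of 15384 ps is roughly a 65 MHz pixel clock, so with a 400 MHz platform clock the divider programmed into PIXCLKCR would be round(400 / 65) = 6. The same arithmetic as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical numbers: 400 MHz platform clock, 15384 ps pixel period. */
	uint64_t sys_freq = 400000000ULL;
	unsigned int pixclock_ps = 15384;

	/* Convert the pixel period in picoseconds into a frequency in Hz. */
	uint64_t freq = 1000000000000ULL / pixclock_ps;

	/* Ratio of platform clock to pixel clock, rounded, clamped to 2..255. */
	uint64_t pxclk = (sys_freq + freq / 2) / freq;
	if (pxclk < 2)
		pxclk = 2;
	if (pxclk > 255)
		pxclk = 255;

	printf("pixel clock ~%llu Hz, PIXCLKCR divider %llu\n",
	       (unsigned long long)freq, (unsigned long long)pxclk);
	return 0;
}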
/**
* t1042rdb_valid_monitor_port: validate the monitor port for sysfs
*/
static enum fsl_diu_monitor_port
t1042rdb_valid_monitor_port(enum fsl_diu_monitor_port port)
{
switch (port) {
case FSL_DIU_PORT_DVI:
case FSL_DIU_PORT_LVDS:
return port;
default:
return FSL_DIU_PORT_DVI; /* Dual-link LVDS is not supported */
}
}
static int __init t1042rdb_diu_init(void)
{
cpld_node = of_find_compatible_node(NULL, NULL, "fsl,t1042rdb-cpld");
if (!cpld_node)
return 0;
diu_ops.set_monitor_port = t1042rdb_set_monitor_port;
diu_ops.set_pixel_clock = t1042rdb_set_pixel_clock;
diu_ops.valid_monitor_port = t1042rdb_valid_monitor_port;
return 0;
}
early_initcall(t1042rdb_diu_init);
...@@ -172,6 +172,13 @@ config PPC_FPU ...@@ -172,6 +172,13 @@ config PPC_FPU
bool bool
default y if PPC64 default y if PPC64
config PPC_8xx_PERF_EVENT
bool "PPC 8xx perf events"
depends on PPC_8xx && PERF_EVENTS
help
This is Performance Events support for PPC 8xx. The 8xx doesn't
have a PMU but some events are emulated using 8xx features.
config FSL_EMB_PERFMON config FSL_EMB_PERFMON
bool "Freescale Embedded Perfmon" bool "Freescale Embedded Perfmon"
depends on E500 || PPC_83xx depends on E500 || PPC_83xx
......
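For what it's worth, enabling this on an 8xx board just means setting CONFIG_PPC_8xx_PERF_EVENT=y on top of CONFIG_PERF_EVENTS=y in the defconfig; as the dependency line above shows, the option is only visible on PPC_8xx builds.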
...@@ -683,23 +683,13 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data) ...@@ -683,23 +683,13 @@ size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
return ctx->ops->ibox_read(ctx, data); return ctx->ops->ibox_read(ctx, data);
} }
static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
struct spu_context *ctx = file->private_data;
return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}
/* interrupt-level ibox callback function. */ /* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu) void spufs_ibox_callback(struct spu *spu)
{ {
struct spu_context *ctx = spu->ctx; struct spu_context *ctx = spu->ctx;
if (!ctx) if (ctx)
return;
wake_up_all(&ctx->ibox_wq); wake_up_all(&ctx->ibox_wq);
kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
} }
/* /*
...@@ -794,7 +784,6 @@ static const struct file_operations spufs_ibox_fops = { ...@@ -794,7 +784,6 @@ static const struct file_operations spufs_ibox_fops = {
.open = spufs_pipe_open, .open = spufs_pipe_open,
.read = spufs_ibox_read, .read = spufs_ibox_read,
.poll = spufs_ibox_poll, .poll = spufs_ibox_poll,
.fasync = spufs_ibox_fasync,
.llseek = no_llseek, .llseek = no_llseek,
}; };
...@@ -832,26 +821,13 @@ size_t spu_wbox_write(struct spu_context *ctx, u32 data) ...@@ -832,26 +821,13 @@ size_t spu_wbox_write(struct spu_context *ctx, u32 data)
return ctx->ops->wbox_write(ctx, data); return ctx->ops->wbox_write(ctx, data);
} }
static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
struct spu_context *ctx = file->private_data;
int ret;
ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);
return ret;
}
/* interrupt-level wbox callback function. */ /* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu) void spufs_wbox_callback(struct spu *spu)
{ {
struct spu_context *ctx = spu->ctx; struct spu_context *ctx = spu->ctx;
if (!ctx) if (ctx)
return;
wake_up_all(&ctx->wbox_wq); wake_up_all(&ctx->wbox_wq);
kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
} }
/* /*
...@@ -944,7 +920,6 @@ static const struct file_operations spufs_wbox_fops = { ...@@ -944,7 +920,6 @@ static const struct file_operations spufs_wbox_fops = {
.open = spufs_pipe_open, .open = spufs_pipe_open,
.write = spufs_wbox_write, .write = spufs_wbox_write,
.poll = spufs_wbox_poll, .poll = spufs_wbox_poll,
.fasync = spufs_wbox_fasync,
.llseek = no_llseek, .llseek = no_llseek,
}; };
...@@ -1520,28 +1495,8 @@ void spufs_mfc_callback(struct spu *spu) ...@@ -1520,28 +1495,8 @@ void spufs_mfc_callback(struct spu *spu)
{ {
struct spu_context *ctx = spu->ctx; struct spu_context *ctx = spu->ctx;
if (!ctx) if (ctx)
return;
wake_up_all(&ctx->mfc_wq); wake_up_all(&ctx->mfc_wq);
pr_debug("%s %s\n", __func__, spu->name);
if (ctx->mfc_fasync) {
u32 free_elements, tagstatus;
unsigned int mask;
/* no need for spu_acquire in interrupt context */
free_elements = ctx->ops->get_mfc_free_elements(ctx);
tagstatus = ctx->ops->read_mfc_tagstatus(ctx);
mask = 0;
if (free_elements & 0xffff)
mask |= POLLOUT;
if (tagstatus & ctx->tagwait)
mask |= POLLIN;
kill_fasync(&ctx->mfc_fasync, SIGIO, mask);
}
} }
static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status) static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status)
...@@ -1803,13 +1758,6 @@ static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int data ...@@ -1803,13 +1758,6 @@ static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int data
return err; return err;
} }
static int spufs_mfc_fasync(int fd, struct file *file, int on)
{
struct spu_context *ctx = file->private_data;
return fasync_helper(fd, file, on, &ctx->mfc_fasync);
}
static const struct file_operations spufs_mfc_fops = { static const struct file_operations spufs_mfc_fops = {
.open = spufs_mfc_open, .open = spufs_mfc_open,
.release = spufs_mfc_release, .release = spufs_mfc_release,
...@@ -1818,7 +1766,6 @@ static const struct file_operations spufs_mfc_fops = { ...@@ -1818,7 +1766,6 @@ static const struct file_operations spufs_mfc_fops = {
.poll = spufs_mfc_poll, .poll = spufs_mfc_poll,
.flush = spufs_mfc_flush, .flush = spufs_mfc_flush,
.fsync = spufs_mfc_fsync, .fsync = spufs_mfc_fsync,
.fasync = spufs_mfc_fasync,
.mmap = spufs_mfc_mmap, .mmap = spufs_mfc_mmap,
.llseek = no_llseek, .llseek = no_llseek,
}; };
......
...@@ -102,9 +102,6 @@ struct spu_context { ...@@ -102,9 +102,6 @@ struct spu_context {
wait_queue_head_t stop_wq; wait_queue_head_t stop_wq;
wait_queue_head_t mfc_wq; wait_queue_head_t mfc_wq;
wait_queue_head_t run_wq; wait_queue_head_t run_wq;
struct fasync_struct *ibox_fasync;
struct fasync_struct *wbox_fasync;
struct fasync_struct *mfc_fasync;
u32 tagwait; u32 tagwait;
struct spu_context_ops *ops; struct spu_context_ops *ops;
struct work_struct reap_work; struct work_struct reap_work;
......
...@@ -5,7 +5,8 @@ config PPC_POWERNV ...@@ -5,7 +5,8 @@ config PPC_POWERNV
select PPC_XICS select PPC_XICS
select PPC_ICP_NATIVE select PPC_ICP_NATIVE
select PPC_P7_NAP select PPC_P7_NAP
select PPC_PCI_CHOICE if EMBEDDED select PCI
select PCI_MSI
select EPAPR_BOOT select EPAPR_BOOT
select PPC_INDIRECT_PIO select PPC_INDIRECT_PIO
select PPC_UDBG_16550 select PPC_UDBG_16550
......
...@@ -1468,14 +1468,12 @@ void pnv_pci_sriov_disable(struct pci_dev *pdev) ...@@ -1468,14 +1468,12 @@ void pnv_pci_sriov_disable(struct pci_dev *pdev)
struct pnv_phb *phb; struct pnv_phb *phb;
struct pnv_ioda_pe *pe; struct pnv_ioda_pe *pe;
struct pci_dn *pdn; struct pci_dn *pdn;
struct pci_sriov *iov;
u16 num_vfs, i; u16 num_vfs, i;
bus = pdev->bus; bus = pdev->bus;
hose = pci_bus_to_host(bus); hose = pci_bus_to_host(bus);
phb = hose->private_data; phb = hose->private_data;
pdn = pci_get_pdn(pdev); pdn = pci_get_pdn(pdev);
iov = pdev->sriov;
num_vfs = pdn->num_vfs; num_vfs = pdn->num_vfs;
/* Release VF PEs */ /* Release VF PEs */
......
...@@ -359,6 +359,12 @@ static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog) ...@@ -359,6 +359,12 @@ static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
case PSERIES_HP_ELOG_ID_DRC_INDEX: case PSERIES_HP_ELOG_ID_DRC_INDEX:
hp_elog->_drc_u.drc_index = hp_elog->_drc_u.drc_index =
be32_to_cpu(hp_elog->_drc_u.drc_index); be32_to_cpu(hp_elog->_drc_u.drc_index);
break;
case PSERIES_HP_ELOG_ID_DRC_IC:
hp_elog->_drc_u.ic.count =
be32_to_cpu(hp_elog->_drc_u.ic.count);
hp_elog->_drc_u.ic.index =
be32_to_cpu(hp_elog->_drc_u.ic.index);
} }
switch (hp_elog->resource) { switch (hp_elog->resource) {
...@@ -467,7 +473,33 @@ static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog) ...@@ -467,7 +473,33 @@ static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
if (!arg) if (!arg)
return -EINVAL; return -EINVAL;
if (sysfs_streq(arg, "index")) { if (sysfs_streq(arg, "indexed-count")) {
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
arg = strsep(cmd, " ");
if (!arg) {
pr_err("No DRC count specified.\n");
return -EINVAL;
}
if (kstrtou32(arg, 0, &count)) {
pr_err("Invalid DRC count specified.\n");
return -EINVAL;
}
arg = strsep(cmd, " ");
if (!arg) {
pr_err("No DRC Index specified.\n");
return -EINVAL;
}
if (kstrtou32(arg, 0, &index)) {
pr_err("Invalid DRC Index specified.\n");
return -EINVAL;
}
hp_elog->_drc_u.ic.count = cpu_to_be32(count);
hp_elog->_drc_u.ic.index = cpu_to_be32(index);
} else if (sysfs_streq(arg, "index")) {
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX; hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
arg = strsep(cmd, " "); arg = strsep(cmd, " ");
if (!arg) { if (!arg) {
......
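For reference, the tokens parsed above come from the string written to the pseries DLPAR control file in sysfs; an indexed-count request would look roughly like echo "memory remove indexed-count 2 0x80000010" > /sys/kernel/dlpar, where the count and DRC index values are purely illustrative.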
...@@ -320,6 +320,19 @@ static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb) ...@@ -320,6 +320,19 @@ static int dlpar_remove_device_tree_lmb(struct of_drconf_cell *lmb)
return dlpar_update_device_tree_lmb(lmb); return dlpar_update_device_tree_lmb(lmb);
} }
static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
unsigned long section_nr;
struct mem_section *mem_sect;
struct memory_block *mem_block;
section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
mem_sect = __nr_to_section(section_nr);
mem_block = find_memory_block(mem_sect);
return mem_block;
}
#ifdef CONFIG_MEMORY_HOTREMOVE #ifdef CONFIG_MEMORY_HOTREMOVE
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{ {
...@@ -407,19 +420,6 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb) ...@@ -407,19 +420,6 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb)
static int dlpar_add_lmb(struct of_drconf_cell *); static int dlpar_add_lmb(struct of_drconf_cell *);
static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
{
unsigned long section_nr;
struct mem_section *mem_sect;
struct memory_block *mem_block;
section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
mem_sect = __nr_to_section(section_nr);
mem_block = find_memory_block(mem_sect);
return mem_block;
}
static int dlpar_remove_lmb(struct of_drconf_cell *lmb) static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
{ {
struct memory_block *mem_block; struct memory_block *mem_block;
...@@ -601,6 +601,94 @@ static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop) ...@@ -601,6 +601,94 @@ static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop)
return rc; return rc;
} }
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index,
struct property *prop)
{
struct of_drconf_cell *lmbs;
u32 num_lmbs, *p;
int i, rc = 0, start_lmb_found;
int lmbs_available = 0, start_index = 0, end_index;
pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
lmbs_to_remove, drc_index);
if (lmbs_to_remove == 0)
return -EINVAL;
p = prop->value;
num_lmbs = *p++;
lmbs = (struct of_drconf_cell *)p;
start_lmb_found = 0;
/* Navigate to drc_index */
while (start_index < num_lmbs) {
if (lmbs[start_index].drc_index == drc_index) {
start_lmb_found = 1;
break;
}
start_index++;
}
if (!start_lmb_found)
return -EINVAL;
end_index = start_index + lmbs_to_remove;
/* Validate that there are enough LMBs to satisfy the request */
for (i = start_index; i < end_index; i++) {
if (lmbs[i].flags & DRCONF_MEM_RESERVED)
break;
lmbs_available++;
}
if (lmbs_available < lmbs_to_remove)
return -EINVAL;
for (i = start_index; i < end_index; i++) {
if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
continue;
rc = dlpar_remove_lmb(&lmbs[i]);
if (rc)
break;
lmbs[i].reserved = 1;
}
if (rc) {
pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");
for (i = start_index; i < end_index; i++) {
if (!lmbs[i].reserved)
continue;
rc = dlpar_add_lmb(&lmbs[i]);
if (rc)
pr_err("Failed to add LMB, drc index %x\n",
be32_to_cpu(lmbs[i].drc_index));
lmbs[i].reserved = 0;
}
rc = -EINVAL;
} else {
for (i = start_index; i < end_index; i++) {
if (!lmbs[i].reserved)
continue;
dlpar_release_drc(lmbs[i].drc_index);
pr_info("Memory at %llx (drc index %x) was hot-removed\n",
lmbs[i].base_addr, lmbs[i].drc_index);
lmbs[i].reserved = 0;
}
}
return rc;
}
#else #else
static inline int pseries_remove_memblock(unsigned long base, static inline int pseries_remove_memblock(unsigned long base,
unsigned int memblock_size) unsigned int memblock_size)
...@@ -628,9 +716,32 @@ static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop) ...@@ -628,9 +716,32 @@ static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
{ {
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static int dlpar_memory_readd_by_index(u32 drc_index, struct property *prop)
{
return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index,
struct property *prop)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */ #endif /* CONFIG_MEMORY_HOTREMOVE */
static int dlpar_online_lmb(struct of_drconf_cell *lmb)
{
struct memory_block *mem_block;
int rc;
mem_block = lmb_to_memblock(lmb);
if (!mem_block)
return -EINVAL;
rc = device_online(&mem_block->dev);
put_device(&mem_block->dev);
return rc;
}
static int dlpar_add_lmb(struct of_drconf_cell *lmb) static int dlpar_add_lmb(struct of_drconf_cell *lmb)
{ {
unsigned long block_sz; unsigned long block_sz;
...@@ -654,10 +765,18 @@ static int dlpar_add_lmb(struct of_drconf_cell *lmb) ...@@ -654,10 +765,18 @@ static int dlpar_add_lmb(struct of_drconf_cell *lmb)
/* Add the memory */ /* Add the memory */
rc = add_memory(nid, lmb->base_addr, block_sz); rc = add_memory(nid, lmb->base_addr, block_sz);
if (rc) if (rc) {
dlpar_remove_device_tree_lmb(lmb); dlpar_remove_device_tree_lmb(lmb);
else return rc;
}
rc = dlpar_online_lmb(lmb);
if (rc) {
remove_memory(nid, lmb->base_addr, block_sz);
dlpar_remove_device_tree_lmb(lmb);
} else {
lmb->flags |= DRCONF_MEM_ASSIGNED; lmb->flags |= DRCONF_MEM_ASSIGNED;
}
return rc; return rc;
} }
...@@ -776,6 +895,97 @@ static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop) ...@@ -776,6 +895,97 @@ static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
return rc; return rc;
} }
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index,
struct property *prop)
{
struct of_drconf_cell *lmbs;
u32 num_lmbs, *p;
int i, rc = 0, start_lmb_found;
int lmbs_available = 0, start_index = 0, end_index;
pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
lmbs_to_add, drc_index);
if (lmbs_to_add == 0)
return -EINVAL;
p = prop->value;
num_lmbs = *p++;
lmbs = (struct of_drconf_cell *)p;
start_lmb_found = 0;
/* Navigate to drc_index */
while (start_index < num_lmbs) {
if (lmbs[start_index].drc_index == drc_index) {
start_lmb_found = 1;
break;
}
start_index++;
}
if (!start_lmb_found)
return -EINVAL;
end_index = start_index + lmbs_to_add;
/* Validate that the LMBs in this range are not reserved */
for (i = start_index; i < end_index; i++) {
if (lmbs[i].flags & DRCONF_MEM_RESERVED)
break;
lmbs_available++;
}
if (lmbs_available < lmbs_to_add)
return -EINVAL;
for (i = start_index; i < end_index; i++) {
if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
continue;
rc = dlpar_acquire_drc(lmbs[i].drc_index);
if (rc)
break;
rc = dlpar_add_lmb(&lmbs[i]);
if (rc) {
dlpar_release_drc(lmbs[i].drc_index);
break;
}
lmbs[i].reserved = 1;
}
if (rc) {
pr_err("Memory indexed-count-add failed, removing any added LMBs\n");
for (i = start_index; i < end_index; i++) {
if (!lmbs[i].reserved)
continue;
rc = dlpar_remove_lmb(&lmbs[i]);
if (rc)
pr_err("Failed to remove LMB, drc index %x\n",
be32_to_cpu(lmbs[i].drc_index));
else
dlpar_release_drc(lmbs[i].drc_index);
}
rc = -EINVAL;
} else {
for (i = start_index; i < end_index; i++) {
if (!lmbs[i].reserved)
continue;
pr_info("Memory at %llx (drc index %x) was hot-added\n",
lmbs[i].base_addr, lmbs[i].drc_index);
lmbs[i].reserved = 0;
}
}
return rc;
}
int dlpar_memory(struct pseries_hp_errorlog *hp_elog) int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{ {
struct device_node *dn; struct device_node *dn;
...@@ -783,9 +993,6 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) ...@@ -783,9 +993,6 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
u32 count, drc_index; u32 count, drc_index;
int rc; int rc;
count = hp_elog->_drc_u.drc_count;
drc_index = hp_elog->_drc_u.drc_index;
lock_device_hotplug(); lock_device_hotplug();
dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
...@@ -802,22 +1009,39 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog) ...@@ -802,22 +1009,39 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
switch (hp_elog->action) { switch (hp_elog->action) {
case PSERIES_HP_ELOG_ACTION_ADD: case PSERIES_HP_ELOG_ACTION_ADD:
if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
count = hp_elog->_drc_u.drc_count;
rc = dlpar_memory_add_by_count(count, prop); rc = dlpar_memory_add_by_count(count, prop);
else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) } else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
drc_index = hp_elog->_drc_u.drc_index;
rc = dlpar_memory_add_by_index(drc_index, prop); rc = dlpar_memory_add_by_index(drc_index, prop);
else } else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
count = hp_elog->_drc_u.ic.count;
drc_index = hp_elog->_drc_u.ic.index;
rc = dlpar_memory_add_by_ic(count, drc_index, prop);
} else {
rc = -EINVAL; rc = -EINVAL;
}
break; break;
case PSERIES_HP_ELOG_ACTION_REMOVE: case PSERIES_HP_ELOG_ACTION_REMOVE:
if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
count = hp_elog->_drc_u.drc_count;
rc = dlpar_memory_remove_by_count(count, prop); rc = dlpar_memory_remove_by_count(count, prop);
else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) } else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
drc_index = hp_elog->_drc_u.drc_index;
rc = dlpar_memory_remove_by_index(drc_index, prop); rc = dlpar_memory_remove_by_index(drc_index, prop);
else } else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
count = hp_elog->_drc_u.ic.count;
drc_index = hp_elog->_drc_u.ic.index;
rc = dlpar_memory_remove_by_ic(count, drc_index, prop);
} else {
rc = -EINVAL; rc = -EINVAL;
}
break; break;
case PSERIES_HP_ELOG_ACTION_READD: case PSERIES_HP_ELOG_ACTION_READD:
drc_index = hp_elog->_drc_u.drc_index;
rc = dlpar_memory_readd_by_index(drc_index, prop); rc = dlpar_memory_readd_by_index(drc_index, prop);
break; break;
default: default:
......
...@@ -212,6 +212,10 @@ Commands:\n\ ...@@ -212,6 +212,10 @@ Commands:\n\
"\ "\
C checksum\n\ C checksum\n\
d dump bytes\n\ d dump bytes\n\
d1 dump 1 byte values\n\
d2 dump 2 byte values\n\
d4 dump 4 byte values\n\
d8 dump 8 byte values\n\
di dump instructions\n\ di dump instructions\n\
df dump float values\n\ df dump float values\n\
dd dump double values\n\ dd dump double values\n\
...@@ -2334,9 +2338,42 @@ static void dump_pacas(void) ...@@ -2334,9 +2338,42 @@ static void dump_pacas(void)
} }
#endif #endif
static void dump_by_size(unsigned long addr, long count, int size)
{
unsigned char temp[16];
int i, j;
u64 val;
count = ALIGN(count, 16);
for (i = 0; i < count; i += 16, addr += 16) {
printf(REG, addr);
if (mread(addr, temp, 16) != 16) {
printf("\nFaulted reading %d bytes from 0x"REG"\n", 16, addr);
return;
}
for (j = 0; j < 16; j += size) {
putchar(' ');
switch (size) {
case 1: val = temp[j]; break;
case 2: val = *(u16 *)&temp[j]; break;
case 4: val = *(u32 *)&temp[j]; break;
case 8: val = *(u64 *)&temp[j]; break;
default: val = 0;
}
printf("%0*lx", size * 2, val);
}
printf("\n");
}
}
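The point of dumping by element size is byte order: on a little-endian kernel a plain byte dump shows each word's bytes reversed relative to its numeric value, which is exactly what the new d1/d2/d4/d8 commands avoid. A small userspace illustration (not xmon itself) of the difference:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t val = 0x0123456789abcdefULL;
	unsigned char bytes[8];
	int i;

	memcpy(bytes, &val, sizeof(val));

	/* Byte-by-byte dump, as xmon's plain 'd' command would show it. */
	for (i = 0; i < 8; i++)
		printf("%02x ", bytes[i]);
	printf("\n");

	/* Value dump in CPU endianness, as the new 'd8' command shows it. */
	printf("%016llx\n", (unsigned long long)val);
	return 0;
}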
static void static void
dump(void) dump(void)
{ {
static char last[] = { "d?\n" };
int c; int c;
c = inchar(); c = inchar();
...@@ -2350,8 +2387,9 @@ dump(void) ...@@ -2350,8 +2387,9 @@ dump(void)
} }
#endif #endif
if ((isxdigit(c) && c != 'f' && c != 'd') || c == '\n') if (c == '\n')
termch = c; termch = c;
scanhex((void *)&adrs); scanhex((void *)&adrs);
if (termch != '\n') if (termch != '\n')
termch = 0; termch = 0;
...@@ -2383,10 +2421,24 @@ dump(void) ...@@ -2383,10 +2421,24 @@ dump(void)
ndump = 64; ndump = 64;
else if (ndump > MAX_DUMP) else if (ndump > MAX_DUMP)
ndump = MAX_DUMP; ndump = MAX_DUMP;
switch (c) {
case '8':
case '4':
case '2':
case '1':
ndump = ALIGN(ndump, 16);
dump_by_size(adrs, ndump, c - '0');
last[1] = c;
last_cmd = last;
break;
default:
prdump(adrs, ndump); prdump(adrs, ndump);
adrs += ndump;
last_cmd = "d\n"; last_cmd = "d\n";
} }
adrs += ndump;
}
} }
static void static void
......
...@@ -418,8 +418,9 @@ struct cxl_afu { ...@@ -418,8 +418,9 @@ struct cxl_afu {
struct dentry *debugfs; struct dentry *debugfs;
struct mutex contexts_lock; struct mutex contexts_lock;
spinlock_t afu_cntl_lock; spinlock_t afu_cntl_lock;
/* Used to block access to AFU config space while deconfigured */
struct rw_semaphore configured_rwsem; /* -1: AFU deconfigured/locked, >= 0: number of readers */
atomic_t configured_state;
/* AFU error buffer fields and bin attribute for sysfs */ /* AFU error buffer fields and bin attribute for sysfs */
u64 eb_len, eb_offset; u64 eb_len, eb_offset;
......
...@@ -268,8 +268,7 @@ struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice) ...@@ -268,8 +268,7 @@ struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
idr_init(&afu->contexts_idr); idr_init(&afu->contexts_idr);
mutex_init(&afu->contexts_lock); mutex_init(&afu->contexts_lock);
spin_lock_init(&afu->afu_cntl_lock); spin_lock_init(&afu->afu_cntl_lock);
init_rwsem(&afu->configured_rwsem); atomic_set(&afu->configured_state, -1);
down_write(&afu->configured_rwsem);
afu->prefault_mode = CXL_PREFAULT_NONE; afu->prefault_mode = CXL_PREFAULT_NONE;
afu->irqs_max = afu->adapter->user_irqs; afu->irqs_max = afu->adapter->user_irqs;
......
...@@ -1129,7 +1129,7 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc ...@@ -1129,7 +1129,7 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
if ((rc = cxl_native_register_psl_irq(afu))) if ((rc = cxl_native_register_psl_irq(afu)))
goto err2; goto err2;
up_write(&afu->configured_rwsem); atomic_set(&afu->configured_state, 0);
return 0; return 0;
err2: err2:
...@@ -1142,7 +1142,14 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc ...@@ -1142,7 +1142,14 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
static void pci_deconfigure_afu(struct cxl_afu *afu) static void pci_deconfigure_afu(struct cxl_afu *afu)
{ {
down_write(&afu->configured_rwsem); /*
* It's okay to deconfigure when the AFU is already locked; otherwise, wait
* until there are no readers.
*/
if (atomic_read(&afu->configured_state) != -1) {
while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1)
schedule();
}
cxl_native_release_psl_irq(afu); cxl_native_release_psl_irq(afu);
if (afu->adapter->native->sl_ops->release_serr_irq) if (afu->adapter->native->sl_ops->release_serr_irq)
afu->adapter->native->sl_ops->release_serr_irq(afu); afu->adapter->native->sl_ops->release_serr_irq(afu);
......
...@@ -83,6 +83,16 @@ static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus) ...@@ -83,6 +83,16 @@ static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
return phb ? phb->private_data : NULL; return phb ? phb->private_data : NULL;
} }
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
atomic_dec_if_positive(&afu->configured_state);
}
static bool cxl_afu_configured_get(struct cxl_afu *afu)
{
return atomic_inc_unless_negative(&afu->configured_state);
}
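Taken together with the deconfigure path shown earlier, configured_state behaves as a reader count with -1 acting as a "deconfigured/locked" value: readers take it with atomic_inc_unless_negative() and release it with atomic_dec_if_positive(), while deconfigure spins (calling schedule()) until the count drops to zero and it can flip it to -1. A minimal userspace analogue of the pattern using C11 atomics (only a sketch, not the cxl code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int configured_state = -1;	/* -1: locked, >= 0: reader count */

/* Reader entry: succeed only while the state is not negative. */
static bool configured_get(void)
{
	int cur = atomic_load(&configured_state);

	while (cur >= 0)
		if (atomic_compare_exchange_weak(&configured_state, &cur, cur + 1))
			return true;
	return false;
}

/* Reader exit: drop the count, never going below zero. */
static void configured_put(void)
{
	int cur = atomic_load(&configured_state);

	while (cur > 0)
		if (atomic_compare_exchange_weak(&configured_state, &cur, cur - 1))
			return;
}

int main(void)
{
	int expected = 0;

	atomic_store(&configured_state, 0);	/* "configure": allow readers in */

	if (configured_get()) {
		printf("reader in, state = %d\n", atomic_load(&configured_state));
		configured_put();
	}

	/* "deconfigure": wait until there are no readers, then lock at -1. */
	while (!atomic_compare_exchange_weak(&configured_state, &expected, -1))
		expected = 0;
	printf("locked, state = %d\n", atomic_load(&configured_state));
	return 0;
}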
static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn, static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
struct cxl_afu *afu, int *_record) struct cxl_afu *afu, int *_record)
{ {
...@@ -107,7 +117,7 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn, ...@@ -107,7 +117,7 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
afu = pci_bus_to_afu(bus); afu = pci_bus_to_afu(bus);
/* Grab a reader lock on afu. */ /* Grab a reader lock on afu. */
if (afu == NULL || !down_read_trylock(&afu->configured_rwsem)) if (afu == NULL || !cxl_afu_configured_get(afu))
return PCIBIOS_DEVICE_NOT_FOUND; return PCIBIOS_DEVICE_NOT_FOUND;
rc = cxl_pcie_config_info(bus, devfn, afu, &record); rc = cxl_pcie_config_info(bus, devfn, afu, &record);
...@@ -132,7 +142,7 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn, ...@@ -132,7 +142,7 @@ static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
} }
out: out:
up_read(&afu->configured_rwsem); cxl_afu_configured_put(afu);
return rc ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; return rc ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
} }
...@@ -144,7 +154,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn, ...@@ -144,7 +154,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
afu = pci_bus_to_afu(bus); afu = pci_bus_to_afu(bus);
/* Grab a reader lock on afu. */ /* Grab a reader lock on afu. */
if (afu == NULL || !down_read_trylock(&afu->configured_rwsem)) if (afu == NULL || !cxl_afu_configured_get(afu))
return PCIBIOS_DEVICE_NOT_FOUND; return PCIBIOS_DEVICE_NOT_FOUND;
rc = cxl_pcie_config_info(bus, devfn, afu, &record); rc = cxl_pcie_config_info(bus, devfn, afu, &record);
...@@ -166,7 +176,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn, ...@@ -166,7 +176,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
} }
out: out:
up_read(&afu->configured_rwsem); cxl_afu_configured_put(afu);
return rc ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL; return rc ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
} }
......
...@@ -35,9 +35,11 @@ static void pnv_php_register(struct device_node *dn); ...@@ -35,9 +35,11 @@ static void pnv_php_register(struct device_node *dn);
static void pnv_php_unregister_one(struct device_node *dn); static void pnv_php_unregister_one(struct device_node *dn);
static void pnv_php_unregister(struct device_node *dn); static void pnv_php_unregister(struct device_node *dn);
static void pnv_php_disable_irq(struct pnv_php_slot *php_slot) static void pnv_php_disable_irq(struct pnv_php_slot *php_slot,
bool disable_device)
{ {
struct pci_dev *pdev = php_slot->pdev; struct pci_dev *pdev = php_slot->pdev;
int irq = php_slot->irq;
u16 ctrl; u16 ctrl;
if (php_slot->irq > 0) { if (php_slot->irq > 0) {
...@@ -56,10 +58,14 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot) ...@@ -56,10 +58,14 @@ static void pnv_php_disable_irq(struct pnv_php_slot *php_slot)
php_slot->wq = NULL; php_slot->wq = NULL;
} }
if (disable_device || irq > 0) {
if (pdev->msix_enabled) if (pdev->msix_enabled)
pci_disable_msix(pdev); pci_disable_msix(pdev);
else if (pdev->msi_enabled) else if (pdev->msi_enabled)
pci_disable_msi(pdev); pci_disable_msi(pdev);
pci_disable_device(pdev);
}
} }
static void pnv_php_free_slot(struct kref *kref) static void pnv_php_free_slot(struct kref *kref)
...@@ -68,7 +74,7 @@ static void pnv_php_free_slot(struct kref *kref) ...@@ -68,7 +74,7 @@ static void pnv_php_free_slot(struct kref *kref)
struct pnv_php_slot, kref); struct pnv_php_slot, kref);
WARN_ON(!list_empty(&php_slot->children)); WARN_ON(!list_empty(&php_slot->children));
pnv_php_disable_irq(php_slot); pnv_php_disable_irq(php_slot, false);
kfree(php_slot->name); kfree(php_slot->name);
kfree(php_slot); kfree(php_slot);
} }
...@@ -76,7 +82,7 @@ static void pnv_php_free_slot(struct kref *kref) ...@@ -76,7 +82,7 @@ static void pnv_php_free_slot(struct kref *kref)
static inline void pnv_php_put_slot(struct pnv_php_slot *php_slot) static inline void pnv_php_put_slot(struct pnv_php_slot *php_slot)
{ {
if (WARN_ON(!php_slot)) if (!php_slot)
return; return;
kref_put(&php_slot->kref, pnv_php_free_slot); kref_put(&php_slot->kref, pnv_php_free_slot);
...@@ -430,9 +436,21 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan) ...@@ -430,9 +436,21 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan)
if (ret) if (ret)
return ret; return ret;
/* Proceed if there have nothing behind the slot */ /*
if (presence == OPAL_PCI_SLOT_EMPTY) * Proceed if there is nothing behind the slot. However,
* we should leave the slot in registered state at the
* beginning. Otherwise, the PCI devices inserted afterwards
* won't be probed and populated.
*/
if (presence == OPAL_PCI_SLOT_EMPTY) {
if (!php_slot->power_state_check) {
php_slot->power_state_check = true;
return 0;
}
goto scan; goto scan;
}
/* /*
* If the power supply to the slot is off, we can't detect * If the power supply to the slot is off, we can't detect
...@@ -705,10 +723,15 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data) ...@@ -705,10 +723,15 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
if (sts & PCI_EXP_SLTSTA_DLLSC) { if (sts & PCI_EXP_SLTSTA_DLLSC) {
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lsts); pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lsts);
added = !!(lsts & PCI_EXP_LNKSTA_DLLLA); added = !!(lsts & PCI_EXP_LNKSTA_DLLLA);
} else if (sts & PCI_EXP_SLTSTA_PDC) { } else if (!(php_slot->flags & PNV_PHP_FLAG_BROKEN_PDC) &&
(sts & PCI_EXP_SLTSTA_PDC)) {
ret = pnv_pci_get_presence_state(php_slot->id, &presence); ret = pnv_pci_get_presence_state(php_slot->id, &presence);
if (!ret) if (ret) {
dev_warn(&pdev->dev, "PCI slot [%s] error %d getting presence (0x%04x), to retry the operation.\n",
php_slot->name, ret, sts);
return IRQ_HANDLED; return IRQ_HANDLED;
}
added = !!(presence == OPAL_PCI_SLOT_PRESENT); added = !!(presence == OPAL_PCI_SLOT_PRESENT);
} else { } else {
return IRQ_NONE; return IRQ_NONE;
...@@ -752,6 +775,7 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data) ...@@ -752,6 +775,7 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
{ {
struct pci_dev *pdev = php_slot->pdev; struct pci_dev *pdev = php_slot->pdev;
u32 broken_pdc = 0;
u16 sts, ctrl; u16 sts, ctrl;
int ret; int ret;
...@@ -759,12 +783,21 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) ...@@ -759,12 +783,21 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name); php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name);
if (!php_slot->wq) { if (!php_slot->wq) {
dev_warn(&pdev->dev, "Cannot alloc workqueue\n"); dev_warn(&pdev->dev, "Cannot alloc workqueue\n");
pnv_php_disable_irq(php_slot); pnv_php_disable_irq(php_slot, true);
return; return;
} }
/* Check whether PDC (Presence Detect Changed) reporting is broken */
ret = of_property_read_u32(php_slot->dn, "ibm,slot-broken-pdc",
&broken_pdc);
if (!ret && broken_pdc)
php_slot->flags |= PNV_PHP_FLAG_BROKEN_PDC;
/* Clear pending interrupts */ /* Clear pending interrupts */
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts); pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts);
if (php_slot->flags & PNV_PHP_FLAG_BROKEN_PDC)
sts |= PCI_EXP_SLTSTA_DLLSC;
else
sts |= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); sts |= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts); pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts);
...@@ -772,16 +805,22 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) ...@@ -772,16 +805,22 @@ static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq)
ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED, ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED,
php_slot->name, php_slot); php_slot->name, php_slot);
if (ret) { if (ret) {
pnv_php_disable_irq(php_slot); pnv_php_disable_irq(php_slot, true);
dev_warn(&pdev->dev, "Error %d enabling IRQ %d\n", ret, irq); dev_warn(&pdev->dev, "Error %d enabling IRQ %d\n", ret, irq);
return; return;
} }
/* Enable the interrupts */ /* Enable the interrupts */
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl);
if (php_slot->flags & PNV_PHP_FLAG_BROKEN_PDC) {
ctrl &= ~PCI_EXP_SLTCTL_PDCE;
ctrl |= (PCI_EXP_SLTCTL_HPIE |
PCI_EXP_SLTCTL_DLLSCE);
} else {
ctrl |= (PCI_EXP_SLTCTL_HPIE | ctrl |= (PCI_EXP_SLTCTL_HPIE |
PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_PDCE |
PCI_EXP_SLTCTL_DLLSCE); PCI_EXP_SLTCTL_DLLSCE);
}
pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl); pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl);
/* The interrupt is initialized successfully when @irq is valid */ /* The interrupt is initialized successfully when @irq is valid */
...@@ -793,6 +832,14 @@ static void pnv_php_enable_irq(struct pnv_php_slot *php_slot) ...@@ -793,6 +832,14 @@ static void pnv_php_enable_irq(struct pnv_php_slot *php_slot)
struct pci_dev *pdev = php_slot->pdev; struct pci_dev *pdev = php_slot->pdev;
int irq, ret; int irq, ret;
/*
* The MSI/MSI-X interrupt might already have been claimed by another
* driver. Don't populate the surprise hotplug capability
* in that case.
*/
if (pci_dev_msi_enabled(pdev))
return;
ret = pci_enable_device(pdev); ret = pci_enable_device(pdev);
if (ret) { if (ret) {
dev_warn(&pdev->dev, "Error %d enabling device\n", ret); dev_warn(&pdev->dev, "Error %d enabling device\n", ret);
......