Commit 3cdeb9d1 authored by Linus Torvalds

Merge tag 'powerpc-4.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 - opal-prd mmap fix from Vaidy
 - set kernel taint for MCEs from Daniel
 - alignment exception description from Anton
 - ppc4xx_hsta_msi build fix from Daniel
 - opal-elog interrupt fix from Alistair
 - core_idle_state race fix from Shreyas
 - hv-24x7 lockdep fix from Sukadev
 - multiple cxl fixes from Daniel, Ian, Mikey & Maninder
 - update MAINTAINERS to point at shared tree

* tag 'powerpc-4.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  cxl: Check if afu is not null in cxl_slbia
  powerpc: Update MAINTAINERS to point at shared tree
  powerpc/perf/24x7: Fix lockdep warning
  cxl: Fix off by one error allowing subsequent mmap page to be accessed
  cxl: Fail mmap if requested mapping is larger than assigned problem state area
  cxl: Fix refcounting in kernel API
  powerpc/powernv: Fix race in updating core_idle_state
  powerpc/powernv: Fix opal-elog interrupt handler
  powerpc/ppc4xx_hsta_msi: Include ppc-pci.h to fix reference to hose_list
  powerpc: Add plain English description for alignment exception oopses
  cxl: Test the correct mmio space before unmapping
  powerpc: Set the correct kernel taint on machine check errors
  cxl/vphb.c: Use phb pointer after NULL check
  powerpc/powernv: Fix vma page prot flags in opal-prd driver
parents c4b5fd3f 2c069a11
@@ -6173,7 +6173,7 @@ M:	Michael Ellerman <mpe@ellerman.id.au>
 W:	http://www.penguinppc.org/
 L:	linuxppc-dev@lists.ozlabs.org
 Q:	http://patchwork.ozlabs.org/project/linuxppc-dev/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
 S:	Supported
 F:	Documentation/powerpc/
 F:	arch/powerpc/
......
@@ -51,6 +51,22 @@
 	.text
 
+/*
+ * Used by threads when the lock bit of core_idle_state is set.
+ * Threads will spin in HMT_LOW until the lock bit is cleared.
+ * r14 - pointer to core_idle_state
+ * r15 - used to load contents of core_idle_state
+ */
+core_idle_lock_held:
+	HMT_LOW
+3:	lwz	r15,0(r14)
+	andi.	r15,r15,PNV_CORE_IDLE_LOCK_BIT
+	bne	3b
+	HMT_MEDIUM
+	lwarx	r15,0,r14
+	blr
+
 /*
  * Pass requested state in r3:
  *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE
@@ -150,6 +166,10 @@ power7_enter_nap_mode:
 	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
 lwarx_loop1:
 	lwarx	r15,0,r14
+
+	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
+	bnel	core_idle_lock_held
+
 	andc	r15,r15,r7			/* Clear thread bit */
 	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
@@ -294,7 +314,7 @@ lwarx_loop2:
 	 * workaround undo code or resyncing timebase or restoring context
 	 * In either case loop until the lock bit is cleared.
 	 */
-	bne	core_idle_lock_held
+	bnel	core_idle_lock_held
 
 	cmpwi	cr2,r15,0
 	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
@@ -319,15 +339,6 @@ lwarx_loop2:
 	isync
 	b	common_exit
 
-core_idle_lock_held:
-	HMT_LOW
-core_idle_lock_loop:
-	lwz	r15,0(14)
-	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
-	bne	core_idle_lock_loop
-	HMT_MEDIUM
-	b	lwarx_loop2
-
 first_thread_in_subcore:
 	/* First thread in subcore to wakeup */
 	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
......
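The idle_power7.S change above serializes updates to core_idle_state behind PNV_CORE_IDLE_LOCK_BIT: a thread that sees the lock bit set spins at low priority and, because the branch is now bnel, returns straight to the lwarx to retry its reservation once the bit clears. A rough C analogue of that retry loop, sketched with GCC __atomic builtins rather than the actual lwarx/stwcx. assembly (the mask value and helper name below are illustrative, not taken from the kernel):

    /* Illustrative only: a C analogue of the fixed update loop, using GCC
     * __atomic builtins instead of lwarx/stwcx. The core_idle_state and
     * PNV_CORE_IDLE_LOCK_BIT names come from the patch; the bit value and
     * clear_thread_bit() helper are placeholders.
     */
    #include <stdint.h>

    #define PNV_CORE_IDLE_LOCK_BIT 0x100	/* placeholder value */

    static unsigned int core_idle_state;

    static void clear_thread_bit(unsigned int thread_bit)
    {
    	unsigned int old, new;

    	do {
    		old = __atomic_load_n(&core_idle_state, __ATOMIC_RELAXED);

    		/* Equivalent of branching to core_idle_lock_held: spin
    		 * while another thread holds the lock bit, then retry the
    		 * reservation with a fresh value.
    		 */
    		while (old & PNV_CORE_IDLE_LOCK_BIT)
    			old = __atomic_load_n(&core_idle_state,
    					      __ATOMIC_RELAXED);

    		new = old & ~thread_bit;
    	} while (!__atomic_compare_exchange_n(&core_idle_state, &old, new, 0,
    					      __ATOMIC_ACQ_REL,
    					      __ATOMIC_RELAXED));
    }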
@@ -297,6 +297,8 @@ long machine_check_early(struct pt_regs *regs)
 
 	__this_cpu_inc(irq_stat.mce_exceptions);
 
+	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
 	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
 		handled = cur_cpu_spec->machine_check_early(regs);
 	return handled;
......
@@ -529,6 +529,10 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 		printk(KERN_ALERT "Unable to handle kernel paging request for "
 			"instruction fetch\n");
 		break;
+	case 0x600:
+		printk(KERN_ALERT "Unable to handle kernel paging request for "
+			"unaligned access at address 0x%08lx\n", regs->dar);
+		break;
 	default:
 		printk(KERN_ALERT "Unable to handle kernel paging request for "
 			"unknown fault\n");
......
@@ -320,6 +320,8 @@ static struct attribute *device_str_attr_create_(char *name, char *str)
 	if (!attr)
 		return NULL;
 
+	sysfs_attr_init(&attr->attr.attr);
+
 	attr->var = str;
 	attr->attr.attr.name = name;
 	attr->attr.attr.mode = 0444;
......
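The hv-24x7 lockdep warning comes from sysfs attributes that are allocated at runtime: unlike attributes defined statically with __ATTR(), they have no lockdep class until sysfs_attr_init() is called on them. A minimal sketch of that pattern for a generic dynamically created device attribute (the example_show() callback and helper name are hypothetical):

    /* Sketch of the general rule the fix relies on, not the 24x7 code
     * itself: dynamically allocated sysfs attributes need sysfs_attr_init()
     * so lockdep has a key for them.
     */
    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/sysfs.h>

    static ssize_t example_show(struct device *dev,
    			    struct device_attribute *attr, char *buf)
    {
    	return sprintf(buf, "example\n");
    }

    static struct device_attribute *example_attr_create(const char *name)
    {
    	struct device_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);

    	if (!attr)
    		return NULL;

    	sysfs_attr_init(&attr->attr);	/* register a lockdep class for this attribute */
    	attr->attr.name = name;
    	attr->attr.mode = 0444;
    	attr->show = example_show;

    	return attr;
    }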
@@ -237,7 +237,7 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
 	return elog;
 }
 
-static void elog_work_fn(struct work_struct *work)
+static irqreturn_t elog_event(int irq, void *data)
 {
 	__be64 size;
 	__be64 id;
@@ -251,7 +251,7 @@ static void elog_work_fn(struct work_struct *work)
 	rc = opal_get_elog_size(&id, &size, &type);
 	if (rc != OPAL_SUCCESS) {
 		pr_err("ELOG: OPAL log info read failed\n");
-		return;
+		return IRQ_HANDLED;
 	}
 
 	elog_size = be64_to_cpu(size);
@@ -270,16 +270,10 @@ static void elog_work_fn(struct work_struct *work)
 	 * entries.
 	 */
 	if (kset_find_obj(elog_kset, name))
-		return;
+		return IRQ_HANDLED;
 
 	create_elog_obj(log_id, elog_size, elog_type);
-}
-
-static DECLARE_WORK(elog_work, elog_work_fn);
-
-static irqreturn_t elog_event(int irq, void *data)
-{
-	schedule_work(&elog_work);
 
 	return IRQ_HANDLED;
 }
@@ -304,8 +298,8 @@ int __init opal_elog_init(void)
 		return irq;
 	}
 
-	rc = request_irq(irq, elog_event,
-			IRQ_TYPE_LEVEL_HIGH, "opal-elog", NULL);
+	rc = request_threaded_irq(irq, NULL, elog_event,
+			IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "opal-elog", NULL);
 	if (rc) {
 		pr_err("%s: Can't request OPAL event irq (%d)\n",
 		       __func__, rc);
......
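The opal-elog change replaces the schedule_work() indirection with a threaded interrupt handler: passing a NULL primary handler to request_threaded_irq() runs elog_event() in process context, and IRQF_ONESHOT keeps the level-triggered line masked until that handler returns. A minimal sketch of the same conversion for a hypothetical device (the names below are made up):

    /* Minimal sketch of the request_threaded_irq() pattern used above; the
     * handler body, device name, and my_setup_irq() wrapper are hypothetical.
     */
    #include <linux/interrupt.h>

    static irqreturn_t my_event_thread(int irq, void *data)
    {
    	/* Runs in process context, so it may sleep (call firmware, allocate
    	 * memory, take mutexes) - exactly what the workqueue used to provide.
    	 */
    	return IRQ_HANDLED;
    }

    static int my_setup_irq(unsigned int irq, void *dev)
    {
    	/* NULL primary handler: the core acks the interrupt and wakes the
    	 * thread. IRQF_ONESHOT keeps the line masked until the thread
    	 * handler returns, which a level-triggered source requires.
    	 */
    	return request_threaded_irq(irq, NULL, my_event_thread,
    				    IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
    				    "my-event", dev);
    }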
@@ -112,6 +112,7 @@ static int opal_prd_open(struct inode *inode, struct file *file)
 static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	size_t addr, size;
+	pgprot_t page_prot;
 	int rc;
 
 	pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
@@ -125,13 +126,11 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!opal_prd_range_is_valid(addr, size))
 		return -EINVAL;
 
-	vma->vm_page_prot = __pgprot(pgprot_val(phys_mem_access_prot(file,
-						vma->vm_pgoff,
-						 size, vma->vm_page_prot))
-					| _PAGE_SPECIAL);
+	page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
+					 size, vma->vm_page_prot);
 
 	rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
-				vma->vm_page_prot);
+			     page_prot);
 
 	return rc;
 }
......
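The opal-prd fix stops ORing _PAGE_SPECIAL into the VMA protection and instead passes the pgprot returned by phys_mem_access_prot() straight to remap_pfn_range(). A hedged sketch of an mmap() handler following that pattern (the range-validation helper and driver names are hypothetical):

    /* Hedged sketch of a character-device mmap() handler using the fixed
     * pattern: derive the page protection from phys_mem_access_prot() and
     * hand it unmodified to remap_pfn_range().
     */
    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Hypothetical validity check for the device's physical window. */
    static bool my_range_is_valid(unsigned long pfn, unsigned long size)
    {
    	return true;	/* a real driver compares against its reserved range */
    }

    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	unsigned long size = vma->vm_end - vma->vm_start;
    	pgprot_t page_prot;

    	if (!my_range_is_valid(vma->vm_pgoff, size))
    		return -EINVAL;

    	/* Pick cacheability/protection appropriate for this physical range. */
    	page_prot = phys_mem_access_prot(file, vma->vm_pgoff, size,
    					 vma->vm_page_prot);

    	/* Map the physical pages into the caller's address space. */
    	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
    			       page_prot);
    }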
@@ -18,6 +18,7 @@
 #include <linux/pci.h>
 #include <linux/semaphore.h>
 #include <asm/msi_bitmap.h>
+#include <asm/ppc-pci.h>
 
 struct ppc4xx_hsta_msi {
 	struct device *dev;
......
@@ -23,6 +23,7 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
 
 	afu = cxl_pci_to_afu(dev);
 
+	get_device(&afu->dev);
 	ctx = cxl_context_alloc();
 	if (IS_ERR(ctx))
 		return ctx;
@@ -31,6 +32,7 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
 	rc = cxl_context_init(ctx, afu, false, NULL);
 	if (rc) {
 		kfree(ctx);
+		put_device(&afu->dev);
 		return ERR_PTR(-ENOMEM);
 	}
 	cxl_assign_psn_space(ctx);
@@ -60,6 +62,8 @@ int cxl_release_context(struct cxl_context *ctx)
 	if (ctx->status != CLOSED)
 		return -EBUSY;
 
+	put_device(&ctx->afu->dev);
+
 	cxl_context_free(ctx);
 
 	return 0;
@@ -159,7 +163,6 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
 	}
 
 	ctx->status = STARTED;
-	get_device(&ctx->afu->dev);
 out:
 	mutex_unlock(&ctx->status_mutex);
 	return rc;
@@ -175,12 +178,7 @@ EXPORT_SYMBOL_GPL(cxl_process_element);
 /* Stop a context.  Returns 0 on success, otherwise -Errno */
 int cxl_stop_context(struct cxl_context *ctx)
 {
-	int rc;
-
-	rc = __detach_context(ctx);
-
-	if (!rc)
-		put_device(&ctx->afu->dev);
-
-	return rc;
+	return __detach_context(ctx);
 }
 EXPORT_SYMBOL_GPL(cxl_stop_context);
......
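The cxl kernel-API refcounting fix pins the AFU device for the whole lifetime of a context: the reference is taken in cxl_dev_context_init() and dropped in cxl_release_context(), rather than being tied to start/stop, so releasing a context that was never started no longer drops a reference it does not hold. A generic sketch of that lifetime rule (struct my_ctx and its helpers are hypothetical):

    /* Generic sketch of the lifetime rule the fix enforces: take the device
     * reference when the context object is created and drop it exactly once
     * when the object is freed, never in intermediate start/stop paths.
     */
    #include <linux/device.h>
    #include <linux/slab.h>

    struct my_ctx {
    	struct device *dev;	/* parent device kept alive by this context */
    };

    static struct my_ctx *my_ctx_create(struct device *dev)
    {
    	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

    	if (!ctx)
    		return NULL;

    	ctx->dev = get_device(dev);	/* held for the context's lifetime */
    	return ctx;
    }

    static void my_ctx_release(struct my_ctx *ctx)
    {
    	put_device(ctx->dev);		/* balances the get in my_ctx_create() */
    	kfree(ctx);
    }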
@@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
 		area = ctx->afu->psn_phys;
-		if (offset > ctx->afu->adapter->ps_size)
+		if (offset >= ctx->afu->adapter->ps_size)
 			return VM_FAULT_SIGBUS;
 	} else {
 		area = ctx->psn_phys;
-		if (offset > ctx->psn_size)
+		if (offset >= ctx->psn_size)
 			return VM_FAULT_SIGBUS;
 	}
@@ -145,8 +145,16 @@ static const struct vm_operations_struct cxl_mmap_vmops = {
  */
 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
 {
+	u64 start = vma->vm_pgoff << PAGE_SHIFT;
 	u64 len = vma->vm_end - vma->vm_start;
-	len = min(len, ctx->psn_size);
+
+	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+		if (start + len > ctx->afu->adapter->ps_size)
+			return -EINVAL;
+	} else {
+		if (start + len > ctx->psn_size)
+			return -EINVAL;
+	}
 
 	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
 		/* make sure there is a valid per process space for this AFU */
......
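The context.c changes reject an mmap whose offset plus length would reach past the assigned problem state area (instead of clamping the length), and treat an offset equal to the area size as out of bounds at fault time. A small sketch of the same bounds check (region_size and the helper name stand in for ctx->psn_size or the adapter's ps_size; they are not cxl identifiers):

    /* Hedged sketch of the bounds check added in cxl_context_iomap():
     * refuse any mapping whose offset plus length would run past the
     * region, rather than silently clamping the length.
     */
    #include <linux/mm.h>

    static int my_iomap_check(struct vm_area_struct *vma, u64 region_size)
    {
    	u64 start = vma->vm_pgoff << PAGE_SHIFT;	/* requested byte offset */
    	u64 len = vma->vm_end - vma->vm_start;		/* requested length */

    	/* A page whose first byte sits at region_size is already out of
    	 * range, hence ">" here and ">=" in the fault handler.
    	 */
    	if (start + len > region_size)
    		return -EINVAL;

    	return 0;
    }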
@@ -73,7 +73,7 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
 	spin_lock(&adapter->afu_list_lock);
 	for (slice = 0; slice < adapter->slices; slice++) {
 		afu = adapter->afu[slice];
-		if (!afu->enabled)
+		if (!afu || !afu->enabled)
 			continue;
 		rcu_read_lock();
 		idr_for_each_entry(&afu->contexts_idr, ctx, id)
......
@@ -539,7 +539,7 @@ static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct p
 
 static void cxl_unmap_slice_regs(struct cxl_afu *afu)
 {
-	if (afu->p1n_mmio)
+	if (afu->p2n_mmio)
 		iounmap(afu->p2n_mmio);
 	if (afu->p1n_mmio)
 		iounmap(afu->p1n_mmio);
......
@@ -112,9 +112,10 @@ static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
 	unsigned long addr;
 
 	phb = pci_bus_to_host(bus);
-	afu = (struct cxl_afu *)phb->private_data;
 	if (phb == NULL)
 		return PCIBIOS_DEVICE_NOT_FOUND;
+	afu = (struct cxl_afu *)phb->private_data;
+
 	if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	if (offset >= (unsigned long)phb->cfg_data)
......