Commit 4b1c46a3 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] Make alignment exception always check exception table
  [POWERPC] Disallow kprobes on emulate_step and branch_taken
  [POWERPC] Make mmiowb's io_sync preempt safe
  [POWERPC] Make high hugepage areas preempt safe
  [POWERPC] Make current preempt-safe
  [POWERPC] qe_lib: qe_issue_cmd writes wrong value to CECDR
  [POWERPC] Use 4kB iommu pages even on 64kB-page systems
  [POWERPC] Fix oprofile support for e500 in arch/powerpc
  [POWERPC] Fix rmb() for e500-based machines it
  [POWERPC] Fix various offb issues
parents 30574b61 4393c4f6
...@@ -38,7 +38,6 @@ obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o ...@@ -38,7 +38,6 @@ obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o obj-$(CONFIG_TAU) += tau_6xx.o
obj32-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_32.o obj32-$(CONFIG_SOFTWARE_SUSPEND) += swsusp_32.o
obj32-$(CONFIG_MODULES) += module_32.o obj32-$(CONFIG_MODULES) += module_32.o
obj-$(CONFIG_E500) += perfmon_fsl_booke.o
ifeq ($(CONFIG_PPC_MERGE),y) ifeq ($(CONFIG_PPC_MERGE),y)
......
...@@ -182,7 +182,7 @@ int btext_initialize(struct device_node *np) ...@@ -182,7 +182,7 @@ int btext_initialize(struct device_node *np)
prop = get_property(np, "linux,bootx-linebytes", NULL); prop = get_property(np, "linux,bootx-linebytes", NULL);
if (prop == NULL) if (prop == NULL)
prop = get_property(np, "linebytes", NULL); prop = get_property(np, "linebytes", NULL);
if (prop) if (prop && *prop != 0xffffffffu)
pitch = *prop; pitch = *prop;
if (pitch == 1) if (pitch == 1)
pitch = 0x1000; pitch = 0x1000;
......
...@@ -47,6 +47,17 @@ static int novmerge = 0; ...@@ -47,6 +47,17 @@ static int novmerge = 0;
static int novmerge = 1; static int novmerge = 1;
#endif #endif
/*
 * Number of IOMMU pages needed to map @slen bytes starting at virtual
 * address @vaddr: measure from the IOMMU-page-aligned start of the
 * buffer to its aligned-up end, then divide by the IOMMU page size.
 */
static inline unsigned long iommu_num_pages(unsigned long vaddr,
					    unsigned long slen)
{
	unsigned long span;

	span = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	return span >> IOMMU_PAGE_SHIFT;
}
static int __init setup_iommu(char *str) static int __init setup_iommu(char *str)
{ {
if (!strcmp(str, "novmerge")) if (!strcmp(str, "novmerge"))
...@@ -178,10 +189,10 @@ static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page, ...@@ -178,10 +189,10 @@ static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
} }
entry += tbl->it_offset; /* Offset into real TCE table */ entry += tbl->it_offset; /* Offset into real TCE table */
ret = entry << PAGE_SHIFT; /* Set the return dma address */ ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */
/* Put the TCEs in the HW table */ /* Put the TCEs in the HW table */
ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK, ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
direction); direction);
...@@ -203,7 +214,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, ...@@ -203,7 +214,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long entry, free_entry; unsigned long entry, free_entry;
unsigned long i; unsigned long i;
entry = dma_addr >> PAGE_SHIFT; entry = dma_addr >> IOMMU_PAGE_SHIFT;
free_entry = entry - tbl->it_offset; free_entry = entry - tbl->it_offset;
if (((free_entry + npages) > tbl->it_size) || if (((free_entry + npages) > tbl->it_size) ||
...@@ -270,7 +281,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, ...@@ -270,7 +281,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Init first segment length for backout at failure */ /* Init first segment length for backout at failure */
outs->dma_length = 0; outs->dma_length = 0;
DBG("mapping %d elements:\n", nelems); DBG("sg mapping %d elements:\n", nelems);
spin_lock_irqsave(&(tbl->it_lock), flags); spin_lock_irqsave(&(tbl->it_lock), flags);
...@@ -285,9 +296,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, ...@@ -285,9 +296,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
} }
/* Allocate iommu entries for that segment */ /* Allocate iommu entries for that segment */
vaddr = (unsigned long)page_address(s->page) + s->offset; vaddr = (unsigned long)page_address(s->page) + s->offset;
npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK); npages = iommu_num_pages(vaddr, slen);
npages >>= PAGE_SHIFT; entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen); DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
...@@ -301,14 +311,14 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, ...@@ -301,14 +311,14 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
/* Convert entry to a dma_addr_t */ /* Convert entry to a dma_addr_t */
entry += tbl->it_offset; entry += tbl->it_offset;
dma_addr = entry << PAGE_SHIFT; dma_addr = entry << IOMMU_PAGE_SHIFT;
dma_addr |= s->offset; dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
DBG(" - %lx pages, entry: %lx, dma_addr: %lx\n", DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
npages, entry, dma_addr); npages, entry, dma_addr);
/* Insert into HW table */ /* Insert into HW table */
ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction); ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
/* If we are in an open segment, try merging */ /* If we are in an open segment, try merging */
if (segstart != s) { if (segstart != s) {
...@@ -323,7 +333,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, ...@@ -323,7 +333,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
DBG(" can't merge, new segment.\n"); DBG(" can't merge, new segment.\n");
} else { } else {
outs->dma_length += s->length; outs->dma_length += s->length;
DBG(" merged, new len: %lx\n", outs->dma_length); DBG(" merged, new len: %ux\n", outs->dma_length);
} }
} }
...@@ -367,9 +377,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, ...@@ -367,9 +377,8 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
if (s->dma_length != 0) { if (s->dma_length != 0) {
unsigned long vaddr, npages; unsigned long vaddr, npages;
vaddr = s->dma_address & PAGE_MASK; vaddr = s->dma_address & IOMMU_PAGE_MASK;
npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr) npages = iommu_num_pages(s->dma_address, s->dma_length);
>> PAGE_SHIFT;
__iommu_free(tbl, vaddr, npages); __iommu_free(tbl, vaddr, npages);
s->dma_address = DMA_ERROR_CODE; s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0; s->dma_length = 0;
...@@ -398,8 +407,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, ...@@ -398,8 +407,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
if (sglist->dma_length == 0) if (sglist->dma_length == 0)
break; break;
npages = (PAGE_ALIGN(dma_handle + sglist->dma_length) npages = iommu_num_pages(dma_handle,sglist->dma_length);
- (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
__iommu_free(tbl, dma_handle, npages); __iommu_free(tbl, dma_handle, npages);
sglist++; sglist++;
} }
...@@ -532,12 +540,11 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, ...@@ -532,12 +540,11 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
BUG_ON(direction == DMA_NONE); BUG_ON(direction == DMA_NONE);
uaddr = (unsigned long)vaddr; uaddr = (unsigned long)vaddr;
npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK); npages = iommu_num_pages(uaddr, size);
npages >>= PAGE_SHIFT;
if (tbl) { if (tbl) {
dma_handle = iommu_alloc(tbl, vaddr, npages, direction, dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
mask >> PAGE_SHIFT, 0); mask >> IOMMU_PAGE_SHIFT, 0);
if (dma_handle == DMA_ERROR_CODE) { if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) { if (printk_ratelimit()) {
printk(KERN_INFO "iommu_alloc failed, " printk(KERN_INFO "iommu_alloc failed, "
...@@ -545,7 +552,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, ...@@ -545,7 +552,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
tbl, vaddr, npages); tbl, vaddr, npages);
} }
} else } else
dma_handle |= (uaddr & ~PAGE_MASK); dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
} }
return dma_handle; return dma_handle;
...@@ -554,11 +561,14 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, ...@@ -554,11 +561,14 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction) size_t size, enum dma_data_direction direction)
{ {
unsigned int npages;
BUG_ON(direction == DMA_NONE); BUG_ON(direction == DMA_NONE);
if (tbl) if (tbl) {
iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) - npages = iommu_num_pages(dma_handle, size);
(dma_handle & PAGE_MASK)) >> PAGE_SHIFT); iommu_free(tbl, dma_handle, npages);
}
} }
/* Allocates a contiguous real buffer and creates mappings over it. /* Allocates a contiguous real buffer and creates mappings over it.
...@@ -570,11 +580,11 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, ...@@ -570,11 +580,11 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
{ {
void *ret = NULL; void *ret = NULL;
dma_addr_t mapping; dma_addr_t mapping;
unsigned int npages, order; unsigned int order;
unsigned int nio_pages, io_order;
struct page *page; struct page *page;
size = PAGE_ALIGN(size); size = PAGE_ALIGN(size);
npages = size >> PAGE_SHIFT;
order = get_order(size); order = get_order(size);
/* /*
...@@ -598,8 +608,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, ...@@ -598,8 +608,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
memset(ret, 0, size); memset(ret, 0, size);
/* Set up tces to cover the allocated range */ /* Set up tces to cover the allocated range */
mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL, nio_pages = size >> IOMMU_PAGE_SHIFT;
mask >> PAGE_SHIFT, order); io_order = get_iommu_order(size);
mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> IOMMU_PAGE_SHIFT, io_order);
if (mapping == DMA_ERROR_CODE) { if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order); free_pages((unsigned long)ret, order);
return NULL; return NULL;
...@@ -611,12 +623,13 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, ...@@ -611,12 +623,13 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
void iommu_free_coherent(struct iommu_table *tbl, size_t size, void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle) void *vaddr, dma_addr_t dma_handle)
{ {
unsigned int npages;
if (tbl) { if (tbl) {
unsigned int nio_pages;
size = PAGE_ALIGN(size);
nio_pages = size >> IOMMU_PAGE_SHIFT;
iommu_free(tbl, dma_handle, nio_pages);
size = PAGE_ALIGN(size); size = PAGE_ALIGN(size);
npages = size >> PAGE_SHIFT;
iommu_free(tbl, dma_handle, npages);
free_pages((unsigned long)vaddr, get_order(size)); free_pages((unsigned long)vaddr, get_order(size));
} }
} }
/* arch/powerpc/kernel/perfmon_fsl_booke.c
* Freescale Book-E Performance Monitor code
*
* Author: Andy Fleming
* Copyright (c) 2004 Freescale Semiconductor, Inc
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/reg.h>
#include <asm/xmon.h>
#include <asm/pmc.h>
static inline u32 get_pmlca(int ctr);
static inline void set_pmlca(int ctr, u32 pmlca);
/*
 * Read the PMLCA (local control A) register of performance-monitor
 * counter @ctr.  Only counters 0-3 exist; any other index is a fatal
 * caller bug and panics.
 */
static inline u32 get_pmlca(int ctr)
{
	u32 pmlca;

	if (ctr == 0)
		pmlca = mfpmr(PMRN_PMLCA0);
	else if (ctr == 1)
		pmlca = mfpmr(PMRN_PMLCA1);
	else if (ctr == 2)
		pmlca = mfpmr(PMRN_PMLCA2);
	else if (ctr == 3)
		pmlca = mfpmr(PMRN_PMLCA3);
	else
		panic("Bad ctr number\n");

	return pmlca;
}
/*
 * Write @pmlca to the PMLCA register of performance-monitor counter
 * @ctr.  Only counters 0-3 exist; any other index panics.
 */
static inline void set_pmlca(int ctr, u32 pmlca)
{
	if (ctr == 0)
		mtpmr(PMRN_PMLCA0, pmlca);
	else if (ctr == 1)
		mtpmr(PMRN_PMLCA1, pmlca);
	else if (ctr == 2)
		mtpmr(PMRN_PMLCA2, pmlca);
	else if (ctr == 3)
		mtpmr(PMRN_PMLCA3, pmlca);
	else
		panic("Bad ctr number\n");
}
/*
 * Put counter @ctr into a fully-stopped initial state: set the FC,
 * FCS, FCU, FCM1 and FCM0 bits in its PMLCA register and zero its
 * PMLCB register.  Panics on a counter index other than 0-3.
 */
void init_pmc_stop(int ctr)
{
u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
PMLCA_FCM1 | PMLCA_FCM0);
u32 pmlcb = 0;
switch (ctr) {
case 0:
mtpmr(PMRN_PMLCA0, pmlca);
mtpmr(PMRN_PMLCB0, pmlcb);
break;
case 1:
mtpmr(PMRN_PMLCA1, pmlca);
mtpmr(PMRN_PMLCB1, pmlcb);
break;
case 2:
mtpmr(PMRN_PMLCA2, pmlca);
mtpmr(PMRN_PMLCB2, pmlcb);
break;
case 3:
mtpmr(PMRN_PMLCA3, pmlca);
mtpmr(PMRN_PMLCB3, pmlcb);
break;
default:
panic("Bad ctr number!\n");
}
}
/*
 * Select the event counted by counter @ctr: replace the EVENT field
 * of its PMLCA register, leaving every other bit untouched.
 */
void set_pmc_event(int ctr, int event)
{
	u32 reg = get_pmlca(ctr);
	u32 ev = (event << PMLCA_EVENT_SHIFT) & PMLCA_EVENT_MASK;

	reg &= ~PMLCA_EVENT_MASK;
	set_pmlca(ctr, reg | ev);
}
/*
 * Configure mode filtering for counter @ctr: clear PMLCA_FCU when
 * @user is nonzero (otherwise set it), and likewise PMLCA_FCS for
 * @kernel.  All other PMLCA bits are preserved.
 */
void set_pmc_user_kernel(int ctr, int user, int kernel)
{
	u32 reg = get_pmlca(ctr);

	reg = user ? (reg & ~PMLCA_FCU) : (reg | PMLCA_FCU);
	reg = kernel ? (reg & ~PMLCA_FCS) : (reg | PMLCA_FCS);

	set_pmlca(ctr, reg);
}
/*
 * Configure mark filtering for counter @ctr: clear PMLCA_FCM0 when
 * @mark0 is nonzero (otherwise set it), and likewise PMLCA_FCM1 for
 * @mark1.  All other PMLCA bits are preserved.
 */
void set_pmc_marked(int ctr, int mark0, int mark1)
{
	u32 reg = get_pmlca(ctr);

	reg = mark0 ? (reg & ~PMLCA_FCM0) : (reg | PMLCA_FCM0);
	reg = mark1 ? (reg & ~PMLCA_FCM1) : (reg | PMLCA_FCM1);

	set_pmlca(ctr, reg);
}
/*
 * Unfreeze counter @ctr (clear PMLCA_FC) and set or clear its
 * condition-enable bit PMLCA_CE according to @enable.
 */
void pmc_start_ctr(int ctr, int enable)
{
	u32 reg = get_pmlca(ctr) & ~(PMLCA_FC | PMLCA_CE);

	if (enable)
		reg |= PMLCA_CE;

	set_pmlca(ctr, reg);
}
/*
 * Globally start the counters via PMGC0: clear the freeze-all bit
 * (PMGC0_FAC), always set PMGC0_FCECE, and set or clear the interrupt
 * enable PMGC0_PMIE according to @enable.
 */
void pmc_start_ctrs(int enable)
{
	u32 reg = mfpmr(PMRN_PMGC0);

	reg = (reg & ~(PMGC0_FAC | PMGC0_PMIE)) | PMGC0_FCECE;
	if (enable)
		reg |= PMGC0_PMIE;

	mtpmr(PMRN_PMGC0, reg);
}
/*
 * Globally stop the counters via PMGC0: set the freeze-all bit
 * (PMGC0_FAC) and clear both PMGC0_PMIE and PMGC0_FCECE.
 */
void pmc_stop_ctrs(void)
{
	u32 reg = mfpmr(PMRN_PMGC0);

	reg = (reg | PMGC0_FAC) & ~(PMGC0_PMIE | PMGC0_FCECE);
	mtpmr(PMRN_PMGC0, reg);
}
/*
 * Print the global control register PMGC0 and, for each of the four
 * counters, its PMC value and its PMLCA/PMLCB control registers.
 * Debug aid only; output goes to the kernel log via printk.
 */
void dump_pmcs(void)
{
printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
printk("pmc\t\tpmlca\t\tpmlcb\n");
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
}
/* Export the performance-monitor helpers so modules can use them. */
EXPORT_SYMBOL(init_pmc_stop);
EXPORT_SYMBOL(set_pmc_event);
EXPORT_SYMBOL(set_pmc_user_kernel);
EXPORT_SYMBOL(set_pmc_marked);
EXPORT_SYMBOL(pmc_start_ctr);
EXPORT_SYMBOL(pmc_start_ctrs);
EXPORT_SYMBOL(pmc_stop_ctrs);
EXPORT_SYMBOL(dump_pmcs);
...@@ -71,7 +71,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq) ...@@ -71,7 +71,7 @@ int reserve_pmc_hardware(perf_irq_t new_perf_irq)
} }
pmc_owner_caller = __builtin_return_address(0); pmc_owner_caller = __builtin_return_address(0);
perf_irq = new_perf_irq ? : dummy_perf; perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;
out: out:
spin_unlock(&pmc_owner_lock); spin_unlock(&pmc_owner_lock);
......
...@@ -843,7 +843,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) ...@@ -843,7 +843,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
void alignment_exception(struct pt_regs *regs) void alignment_exception(struct pt_regs *regs)
{ {
int fixed = 0; int sig, code, fixed = 0;
/* we don't implement logging of alignment exceptions */ /* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
...@@ -857,14 +857,16 @@ void alignment_exception(struct pt_regs *regs) ...@@ -857,14 +857,16 @@ void alignment_exception(struct pt_regs *regs)
/* Operand address was bad */ /* Operand address was bad */
if (fixed == -EFAULT) { if (fixed == -EFAULT) {
if (user_mode(regs)) sig = SIGSEGV;
_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar); code = SEGV_ACCERR;
else } else {
/* Search exception table */ sig = SIGBUS;
bad_page_fault(regs, regs->dar, SIGSEGV); code = BUS_ADRALN;
return;
} }
_exception(SIGBUS, regs, BUS_ADRALN, regs->dar); if (user_mode(regs))
_exception(sig, regs, code, regs->dar);
else
bad_page_fault(regs, regs->dar, sig);
} }
void StackOverflow(struct pt_regs *regs) void StackOverflow(struct pt_regs *regs)
......
...@@ -92,9 +92,9 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) ...@@ -92,9 +92,9 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
&tbl->it_index, &offset, &size); &tbl->it_index, &offset, &size);
/* TCE table size - measured in tce entries */ /* TCE table size - measured in tce entries */
tbl->it_size = size >> PAGE_SHIFT; tbl->it_size = size >> IOMMU_PAGE_SHIFT;
/* offset for VIO should always be 0 */ /* offset for VIO should always be 0 */
tbl->it_offset = offset >> PAGE_SHIFT; tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
tbl->it_busno = 0; tbl->it_busno = 0;
tbl->it_type = TCE_VB; tbl->it_type = TCE_VB;
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
* 2 of the License, or (at your option) any later version. * 2 of the License, or (at your option) any later version.
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h> #include <linux/ptrace.h>
#include <asm/sstep.h> #include <asm/sstep.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -25,7 +26,7 @@ extern char system_call_common[]; ...@@ -25,7 +26,7 @@ extern char system_call_common[];
/* /*
* Determine whether a conditional branch instruction would branch. * Determine whether a conditional branch instruction would branch.
*/ */
static int branch_taken(unsigned int instr, struct pt_regs *regs) static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{ {
unsigned int bo = (instr >> 21) & 0x1f; unsigned int bo = (instr >> 21) & 0x1f;
unsigned int bi; unsigned int bi;
...@@ -51,7 +52,7 @@ static int branch_taken(unsigned int instr, struct pt_regs *regs) ...@@ -51,7 +52,7 @@ static int branch_taken(unsigned int instr, struct pt_regs *regs)
* or -1 if the instruction is one that should not be stepped, * or -1 if the instruction is one that should not be stepped,
* such as an rfid, or a mtmsrd that would clear MSR_RI. * such as an rfid, or a mtmsrd that would clear MSR_RI.
*/ */
int emulate_step(struct pt_regs *regs, unsigned int instr) int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{ {
unsigned int opcode, rd; unsigned int opcode, rd;
unsigned long int imm; unsigned long int imm;
......
...@@ -480,9 +480,6 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas) ...@@ -480,9 +480,6 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
mm->context.high_htlb_areas |= newareas; mm->context.high_htlb_areas |= newareas;
/* update the paca copy of the context struct */
get_paca()->context = mm->context;
/* the context change must make it to memory before the flush, /* the context change must make it to memory before the flush,
* so that further SLB misses do the right thing. */ * so that further SLB misses do the right thing. */
mb(); mb();
......
...@@ -13,4 +13,4 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \ ...@@ -13,4 +13,4 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
oprofile-y := $(DRIVER_OBJS) common.o backtrace.o oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o
oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
oprofile-$(CONFIG_PPC32) += op_model_7450.o oprofile-$(CONFIG_6xx) += op_model_7450.o
...@@ -34,6 +34,11 @@ static void op_handle_interrupt(struct pt_regs *regs) ...@@ -34,6 +34,11 @@ static void op_handle_interrupt(struct pt_regs *regs)
model->handle_interrupt(regs, ctr); model->handle_interrupt(regs, ctr);
} }
/*
 * Per-CPU callback for on_each_cpu(): forwards to the active model's
 * cpu_setup hook with the shared counter configuration.  @dummy is
 * the unused on_each_cpu() argument.
 */
static void op_powerpc_cpu_setup(void *dummy)
{
model->cpu_setup(ctr);
}
static int op_powerpc_setup(void) static int op_powerpc_setup(void)
{ {
int err; int err;
...@@ -47,7 +52,7 @@ static int op_powerpc_setup(void) ...@@ -47,7 +52,7 @@ static int op_powerpc_setup(void)
model->reg_setup(ctr, &sys, model->num_counters); model->reg_setup(ctr, &sys, model->num_counters);
/* Configure the registers on all cpus. */ /* Configure the registers on all cpus. */
on_each_cpu(model->cpu_setup, NULL, 0, 1); on_each_cpu(op_powerpc_cpu_setup, NULL, 0, 1);
return 0; return 0;
} }
...@@ -142,7 +147,8 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) ...@@ -142,7 +147,8 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case PPC_OPROFILE_POWER4: case PPC_OPROFILE_POWER4:
model = &op_model_power4; model = &op_model_power4;
break; break;
#else #endif
#ifdef CONFIG_6xx
case PPC_OPROFILE_G4: case PPC_OPROFILE_G4:
model = &op_model_7450; model = &op_model_7450;
break; break;
......
...@@ -81,7 +81,7 @@ static void pmc_stop_ctrs(void) ...@@ -81,7 +81,7 @@ static void pmc_stop_ctrs(void)
/* Configures the counters on this CPU based on the global /* Configures the counters on this CPU based on the global
* settings */ * settings */
static void fsl7450_cpu_setup(void *unused) static void fsl7450_cpu_setup(struct op_counter_config *ctr)
{ {
/* freeze all counters */ /* freeze all counters */
pmc_stop_ctrs(); pmc_stop_ctrs();
......
...@@ -32,42 +32,152 @@ static unsigned long reset_value[OP_MAX_COUNTER]; ...@@ -32,42 +32,152 @@ static unsigned long reset_value[OP_MAX_COUNTER];
static int num_counters; static int num_counters;
static int oprofile_running; static int oprofile_running;
static inline unsigned int ctr_read(unsigned int i) static void init_pmc_stop(int ctr)
{ {
switch(i) { u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
case 0: PMLCA_FCM1 | PMLCA_FCM0);
return mfpmr(PMRN_PMC0); u32 pmlcb = 0;
case 1:
return mfpmr(PMRN_PMC1);
case 2:
return mfpmr(PMRN_PMC2);
case 3:
return mfpmr(PMRN_PMC3);
default:
return 0;
}
}
static inline void ctr_write(unsigned int i, unsigned int val) switch (ctr) {
{
switch(i) {
case 0: case 0:
mtpmr(PMRN_PMC0, val); mtpmr(PMRN_PMLCA0, pmlca);
mtpmr(PMRN_PMLCB0, pmlcb);
break; break;
case 1: case 1:
mtpmr(PMRN_PMC1, val); mtpmr(PMRN_PMLCA1, pmlca);
mtpmr(PMRN_PMLCB1, pmlcb);
break; break;
case 2: case 2:
mtpmr(PMRN_PMC2, val); mtpmr(PMRN_PMLCA2, pmlca);
mtpmr(PMRN_PMLCB2, pmlcb);
break; break;
case 3: case 3:
mtpmr(PMRN_PMC3, val); mtpmr(PMRN_PMLCA3, pmlca);
mtpmr(PMRN_PMLCB3, pmlcb);
break; break;
default: default:
break; panic("Bad ctr number!\n");
} }
} }
/*
 * Select the event counted by counter @ctr: replace the EVENT field
 * of its PMLCA register, leaving every other bit untouched.
 */
static void set_pmc_event(int ctr, int event)
{
u32 pmlca;
pmlca = get_pmlca(ctr);
pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
((event << PMLCA_EVENT_SHIFT) &
PMLCA_EVENT_MASK);
set_pmlca(ctr, pmlca);
}
/*
 * Configure mode filtering for counter @ctr: clear PMLCA_FCU when
 * @user is nonzero (otherwise set it), and likewise PMLCA_FCS for
 * @kernel.  All other PMLCA bits are preserved.
 */
static void set_pmc_user_kernel(int ctr, int user, int kernel)
{
u32 pmlca;
pmlca = get_pmlca(ctr);
if(user)
pmlca &= ~PMLCA_FCU;
else
pmlca |= PMLCA_FCU;
if(kernel)
pmlca &= ~PMLCA_FCS;
else
pmlca |= PMLCA_FCS;
set_pmlca(ctr, pmlca);
}
/*
 * Configure mark filtering for counter @ctr: clear PMLCA_FCM0 when
 * @mark0 is nonzero (otherwise set it), and likewise PMLCA_FCM1 for
 * @mark1.  All other PMLCA bits are preserved.
 */
static void set_pmc_marked(int ctr, int mark0, int mark1)
{
u32 pmlca = get_pmlca(ctr);
if(mark0)
pmlca &= ~PMLCA_FCM0;
else
pmlca |= PMLCA_FCM0;
if(mark1)
pmlca &= ~PMLCA_FCM1;
else
pmlca |= PMLCA_FCM1;
set_pmlca(ctr, pmlca);
}
/*
 * Unfreeze counter @ctr (clear PMLCA_FC) and set or clear its
 * condition-enable bit PMLCA_CE according to @enable.
 */
static void pmc_start_ctr(int ctr, int enable)
{
u32 pmlca = get_pmlca(ctr);
pmlca &= ~PMLCA_FC;
if (enable)
pmlca |= PMLCA_CE;
else
pmlca &= ~PMLCA_CE;
set_pmlca(ctr, pmlca);
}
/*
 * Globally start the counters via PMGC0: clear the freeze-all bit
 * (PMGC0_FAC), always set PMGC0_FCECE, and set or clear the interrupt
 * enable PMGC0_PMIE according to @enable.
 */
static void pmc_start_ctrs(int enable)
{
u32 pmgc0 = mfpmr(PMRN_PMGC0);
pmgc0 &= ~PMGC0_FAC;
pmgc0 |= PMGC0_FCECE;
if (enable)
pmgc0 |= PMGC0_PMIE;
else
pmgc0 &= ~PMGC0_PMIE;
mtpmr(PMRN_PMGC0, pmgc0);
}
/*
 * Globally stop the counters via PMGC0: set the freeze-all bit
 * (PMGC0_FAC) and clear both PMGC0_PMIE and PMGC0_FCECE.
 */
static void pmc_stop_ctrs(void)
{
u32 pmgc0 = mfpmr(PMRN_PMGC0);
pmgc0 |= PMGC0_FAC;
pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
mtpmr(PMRN_PMGC0, pmgc0);
}
/*
 * Print the global control register PMGC0 and, for each of the four
 * counters, its PMC value and its PMLCA/PMLCB control registers.
 * Debug aid only; output goes to the kernel log via printk.
 */
static void dump_pmcs(void)
{
printk("pmgc0: %x\n", mfpmr(PMRN_PMGC0));
printk("pmc\t\tpmlca\t\tpmlcb\n");
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC0),
mfpmr(PMRN_PMLCA0), mfpmr(PMRN_PMLCB0));
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC1),
mfpmr(PMRN_PMLCA1), mfpmr(PMRN_PMLCB1));
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC2),
mfpmr(PMRN_PMLCA2), mfpmr(PMRN_PMLCB2));
printk("%8x\t%8x\t%8x\n", mfpmr(PMRN_PMC3),
mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
}
/*
 * Per-CPU counter setup: stop all counters, then for each of the
 * num_counters counters reset it to the stopped state and program
 * its event and user/kernel filtering from the oprofile counter
 * config array @ctr.
 */
static void fsl_booke_cpu_setup(struct op_counter_config *ctr)
{
int i;
/* freeze all counters */
pmc_stop_ctrs();
for (i = 0;i < num_counters;i++) {
init_pmc_stop(i);
set_pmc_event(i, ctr[i].event);
set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
}
}
static void fsl_booke_reg_setup(struct op_counter_config *ctr, static void fsl_booke_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys, struct op_system_config *sys,
...@@ -77,23 +187,14 @@ static void fsl_booke_reg_setup(struct op_counter_config *ctr, ...@@ -77,23 +187,14 @@ static void fsl_booke_reg_setup(struct op_counter_config *ctr,
num_counters = num_ctrs; num_counters = num_ctrs;
/* freeze all counters */
pmc_stop_ctrs();
/* Our counters count up, and "count" refers to /* Our counters count up, and "count" refers to
* how much before the next interrupt, and we interrupt * how much before the next interrupt, and we interrupt
* on overflow. So we calculate the starting value * on overflow. So we calculate the starting value
* which will give us "count" until overflow. * which will give us "count" until overflow.
* Then we set the events on the enabled counters */ * Then we set the events on the enabled counters */
for (i = 0; i < num_counters; ++i) { for (i = 0; i < num_counters; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count; reset_value[i] = 0x80000000UL - ctr[i].count;
init_pmc_stop(i);
set_pmc_event(i, ctr[i].event);
set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
}
} }
static void fsl_booke_start(struct op_counter_config *ctr) static void fsl_booke_start(struct op_counter_config *ctr)
...@@ -105,8 +206,8 @@ static void fsl_booke_start(struct op_counter_config *ctr) ...@@ -105,8 +206,8 @@ static void fsl_booke_start(struct op_counter_config *ctr)
for (i = 0; i < num_counters; ++i) { for (i = 0; i < num_counters; ++i) {
if (ctr[i].enabled) { if (ctr[i].enabled) {
ctr_write(i, reset_value[i]); ctr_write(i, reset_value[i]);
/* Set Each enabled counterd to only /* Set each enabled counter to only
* count when the Mark bit is not set */ * count when the Mark bit is *not* set */
set_pmc_marked(i, 1, 0); set_pmc_marked(i, 1, 0);
pmc_start_ctr(i, 1); pmc_start_ctr(i, 1);
} else { } else {
...@@ -177,6 +278,7 @@ static void fsl_booke_handle_interrupt(struct pt_regs *regs, ...@@ -177,6 +278,7 @@ static void fsl_booke_handle_interrupt(struct pt_regs *regs,
struct op_powerpc_model op_model_fsl_booke = { struct op_powerpc_model op_model_fsl_booke = {
.reg_setup = fsl_booke_reg_setup, .reg_setup = fsl_booke_reg_setup,
.cpu_setup = fsl_booke_cpu_setup,
.start = fsl_booke_start, .start = fsl_booke_start,
.stop = fsl_booke_stop, .stop = fsl_booke_stop,
.handle_interrupt = fsl_booke_handle_interrupt, .handle_interrupt = fsl_booke_handle_interrupt,
......
...@@ -82,7 +82,7 @@ static inline int mmcra_must_set_sample(void) ...@@ -82,7 +82,7 @@ static inline int mmcra_must_set_sample(void)
return 0; return 0;
} }
static void power4_cpu_setup(void *unused) static void power4_cpu_setup(struct op_counter_config *ctr)
{ {
unsigned int mmcr0 = mmcr0_val; unsigned int mmcr0 = mmcr0_val;
unsigned long mmcra = mmcra_val; unsigned long mmcra = mmcra_val;
......
...@@ -102,7 +102,7 @@ static void rs64_reg_setup(struct op_counter_config *ctr, ...@@ -102,7 +102,7 @@ static void rs64_reg_setup(struct op_counter_config *ctr,
/* XXX setup user and kernel profiling */ /* XXX setup user and kernel profiling */
} }
static void rs64_cpu_setup(void *unused) static void rs64_cpu_setup(struct op_counter_config *ctr)
{ {
unsigned int mmcr0; unsigned int mmcr0;
......
...@@ -43,9 +43,6 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages, ...@@ -43,9 +43,6 @@ static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
u64 rc; u64 rc;
u64 tce, rpn; u64 tce, rpn;
index <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
while (npages--) { while (npages--) {
rpn = virt_to_abs(uaddr) >> TCE_SHIFT; rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
...@@ -75,9 +72,6 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages) ...@@ -75,9 +72,6 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
{ {
u64 rc; u64 rc;
npages <<= TCE_PAGE_FACTOR;
index <<= TCE_PAGE_FACTOR;
while (npages--) { while (npages--) {
rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0); rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
if (rc) if (rc)
...@@ -136,10 +130,9 @@ void iommu_table_getparms_iSeries(unsigned long busno, ...@@ -136,10 +130,9 @@ void iommu_table_getparms_iSeries(unsigned long busno,
panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms); panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
/* itc_size is in pages worth of table, it_size is in # of entries */ /* itc_size is in pages worth of table, it_size is in # of entries */
tbl->it_size = ((parms->itc_size * TCE_PAGE_SIZE) / tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
TCE_ENTRY_SIZE) >> TCE_PAGE_FACTOR;
tbl->it_busno = parms->itc_busno; tbl->it_busno = parms->itc_busno;
tbl->it_offset = parms->itc_offset >> TCE_PAGE_FACTOR; tbl->it_offset = parms->itc_offset;
tbl->it_index = parms->itc_index; tbl->it_index = parms->itc_index;
tbl->it_blocksize = 1; tbl->it_blocksize = 1;
tbl->it_type = virtbus ? TCE_VB : TCE_PCI; tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
......
...@@ -57,9 +57,6 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index, ...@@ -57,9 +57,6 @@ static void tce_build_pSeries(struct iommu_table *tbl, long index,
u64 *tcep; u64 *tcep;
u64 rpn; u64 rpn;
index <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
proto_tce = TCE_PCI_READ; // Read allowed proto_tce = TCE_PCI_READ; // Read allowed
if (direction != DMA_TO_DEVICE) if (direction != DMA_TO_DEVICE)
...@@ -82,9 +79,6 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages) ...@@ -82,9 +79,6 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{ {
u64 *tcep; u64 *tcep;
npages <<= TCE_PAGE_FACTOR;
index <<= TCE_PAGE_FACTOR;
tcep = ((u64 *)tbl->it_base) + index; tcep = ((u64 *)tbl->it_base) + index;
while (npages--) while (npages--)
...@@ -95,7 +89,6 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index) ...@@ -95,7 +89,6 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{ {
u64 *tcep; u64 *tcep;
index <<= TCE_PAGE_FACTOR;
tcep = ((u64 *)tbl->it_base) + index; tcep = ((u64 *)tbl->it_base) + index;
return *tcep; return *tcep;
...@@ -109,9 +102,6 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, ...@@ -109,9 +102,6 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
u64 proto_tce, tce; u64 proto_tce, tce;
u64 rpn; u64 rpn;
tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
proto_tce = TCE_PCI_READ; proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE) if (direction != DMA_TO_DEVICE)
...@@ -146,7 +136,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, ...@@ -146,7 +136,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
u64 rpn; u64 rpn;
long l, limit; long l, limit;
if (TCE_PAGE_FACTOR == 0 && npages == 1) if (npages == 1)
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction); direction);
...@@ -164,9 +154,6 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, ...@@ -164,9 +154,6 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
__get_cpu_var(tce_page) = tcep; __get_cpu_var(tce_page) = tcep;
} }
tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT; rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
proto_tce = TCE_PCI_READ; proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE) if (direction != DMA_TO_DEVICE)
...@@ -207,9 +194,6 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages ...@@ -207,9 +194,6 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages
{ {
u64 rc; u64 rc;
tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
while (npages--) { while (npages--) {
rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0); rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
...@@ -229,9 +213,6 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n ...@@ -229,9 +213,6 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
{ {
u64 rc; u64 rc;
tcenum <<= TCE_PAGE_FACTOR;
npages <<= TCE_PAGE_FACTOR;
rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages); rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
if (rc && printk_ratelimit()) { if (rc && printk_ratelimit()) {
...@@ -248,7 +229,6 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum) ...@@ -248,7 +229,6 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
u64 rc; u64 rc;
unsigned long tce_ret; unsigned long tce_ret;
tcenum <<= TCE_PAGE_FACTOR;
rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret); rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);
if (rc && printk_ratelimit()) { if (rc && printk_ratelimit()) {
...@@ -289,7 +269,7 @@ static void iommu_table_setparms(struct pci_controller *phb, ...@@ -289,7 +269,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
tbl->it_busno = phb->bus->number; tbl->it_busno = phb->bus->number;
/* Units of tce entries */ /* Units of tce entries */
tbl->it_offset = phb->dma_window_base_cur >> PAGE_SHIFT; tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;
/* Test if we are going over 2GB of DMA space */ /* Test if we are going over 2GB of DMA space */
if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) { if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
...@@ -300,7 +280,7 @@ static void iommu_table_setparms(struct pci_controller *phb, ...@@ -300,7 +280,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
phb->dma_window_base_cur += phb->dma_window_size; phb->dma_window_base_cur += phb->dma_window_size;
/* Set the tce table size - measured in entries */ /* Set the tce table size - measured in entries */
tbl->it_size = phb->dma_window_size >> PAGE_SHIFT; tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;
tbl->it_index = 0; tbl->it_index = 0;
tbl->it_blocksize = 16; tbl->it_blocksize = 16;
...@@ -325,8 +305,8 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb, ...@@ -325,8 +305,8 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
tbl->it_base = 0; tbl->it_base = 0;
tbl->it_blocksize = 16; tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI; tbl->it_type = TCE_PCI;
tbl->it_offset = offset >> PAGE_SHIFT; tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
tbl->it_size = size >> PAGE_SHIFT; tbl->it_size = size >> IOMMU_PAGE_SHIFT;
} }
static void iommu_bus_setup_pSeries(struct pci_bus *bus) static void iommu_bus_setup_pSeries(struct pci_bus *bus)
...@@ -522,8 +502,6 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev) ...@@ -522,8 +502,6 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
const void *dma_window = NULL; const void *dma_window = NULL;
struct pci_dn *pci; struct pci_dn *pci;
DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, pci_name(dev));
/* dev setup for LPAR is a little tricky, since the device tree might /* dev setup for LPAR is a little tricky, since the device tree might
* contain the dma-window properties per-device and not neccesarily * contain the dma-window properties per-device and not neccesarily
* for the bus. So we need to search upwards in the tree until we * for the bus. So we need to search upwards in the tree until we
...@@ -532,6 +510,9 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev) ...@@ -532,6 +510,9 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev)
*/ */
dn = pci_device_to_OF_node(dev); dn = pci_device_to_OF_node(dev);
DBG("iommu_dev_setup_pSeriesLP, dev %p (%s) %s\n",
dev, pci_name(dev), dn->full_name);
for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table; for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
pdn = pdn->parent) { pdn = pdn->parent) {
dma_window = get_property(pdn, "ibm,dma-window", NULL); dma_window = get_property(pdn, "ibm,dma-window", NULL);
......
...@@ -72,7 +72,6 @@ ...@@ -72,7 +72,6 @@
#define DART_PAGE_SHIFT 12 #define DART_PAGE_SHIFT 12
#define DART_PAGE_SIZE (1 << DART_PAGE_SHIFT) #define DART_PAGE_SIZE (1 << DART_PAGE_SHIFT)
#define DART_PAGE_FACTOR (PAGE_SHIFT - DART_PAGE_SHIFT)
#endif /* _POWERPC_SYSDEV_DART_H */ #endif /* _POWERPC_SYSDEV_DART_H */
...@@ -156,9 +156,6 @@ static void dart_build(struct iommu_table *tbl, long index, ...@@ -156,9 +156,6 @@ static void dart_build(struct iommu_table *tbl, long index,
DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr); DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
index <<= DART_PAGE_FACTOR;
npages <<= DART_PAGE_FACTOR;
dp = ((unsigned int*)tbl->it_base) + index; dp = ((unsigned int*)tbl->it_base) + index;
/* On U3, all memory is contigous, so we can move this /* On U3, all memory is contigous, so we can move this
...@@ -199,9 +196,6 @@ static void dart_free(struct iommu_table *tbl, long index, long npages) ...@@ -199,9 +196,6 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
DBG("dart: free at: %lx, %lx\n", index, npages); DBG("dart: free at: %lx, %lx\n", index, npages);
index <<= DART_PAGE_FACTOR;
npages <<= DART_PAGE_FACTOR;
dp = ((unsigned int *)tbl->it_base) + index; dp = ((unsigned int *)tbl->it_base) + index;
while (npages--) while (npages--)
...@@ -281,7 +275,7 @@ static void iommu_table_dart_setup(void) ...@@ -281,7 +275,7 @@ static void iommu_table_dart_setup(void)
iommu_table_dart.it_busno = 0; iommu_table_dart.it_busno = 0;
iommu_table_dart.it_offset = 0; iommu_table_dart.it_offset = 0;
/* it_size is in number of entries */ /* it_size is in number of entries */
iommu_table_dart.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR; iommu_table_dart.it_size = dart_tablesize / sizeof(u32);
/* Initialize the common IOMMU code */ /* Initialize the common IOMMU code */
iommu_table_dart.it_base = (unsigned long)dart_vbase; iommu_table_dart.it_base = (unsigned long)dart_vbase;
......
...@@ -122,8 +122,7 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input) ...@@ -122,8 +122,7 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
mcn_shift = QE_CR_MCN_NORMAL_SHIFT; mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
} }
out_be32(&qe_immr->cp.cecdr, out_be32(&qe_immr->cp.cecdr, cmd_input);
immrbar_virt_to_phys((void *)cmd_input));
out_be32(&qe_immr->cp.cecr, out_be32(&qe_immr->cp.cecr,
(cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32) (cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
mcn_protocol << mcn_shift)); mcn_protocol << mcn_shift));
......
...@@ -708,7 +708,7 @@ void single_step_exception(struct pt_regs *regs) ...@@ -708,7 +708,7 @@ void single_step_exception(struct pt_regs *regs)
void alignment_exception(struct pt_regs *regs) void alignment_exception(struct pt_regs *regs)
{ {
int fixed; int sig, code, fixed = 0;
fixed = fix_alignment(regs); fixed = fix_alignment(regs);
if (fixed == 1) { if (fixed == 1) {
...@@ -717,14 +717,16 @@ void alignment_exception(struct pt_regs *regs) ...@@ -717,14 +717,16 @@ void alignment_exception(struct pt_regs *regs)
return; return;
} }
if (fixed == -EFAULT) { if (fixed == -EFAULT) {
/* fixed == -EFAULT means the operand address was bad */ sig = SIGSEGV;
if (user_mode(regs)) code = SEGV_ACCERR;
_exception(SIGSEGV, regs, SEGV_ACCERR, regs->dar); } else {
else sig = SIGBUS;
bad_page_fault(regs, regs->dar, SIGSEGV); code = BUS_ADRALN;
return;
} }
_exception(SIGBUS, regs, BUS_ADRALN, regs->dar); if (user_mode(regs))
_exception(sig, regs, code, regs->dar);
else
bad_page_fault(regs, regs->dar, sig);
} }
void StackOverflow(struct pt_regs *regs) void StackOverflow(struct pt_regs *regs)
......
...@@ -157,7 +157,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, ...@@ -157,7 +157,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue)); out_le32(par->cmap_adr + 0xb4, (red << 16 | green << 8 | blue));
break; break;
case cmap_gxt2000: case cmap_gxt2000:
out_le32((unsigned __iomem *) par->cmap_adr + regno, out_le32(((unsigned __iomem *) par->cmap_adr) + regno,
(red << 16 | green << 8 | blue)); (red << 16 | green << 8 | blue));
break; break;
} }
...@@ -213,7 +213,7 @@ static int offb_blank(int blank, struct fb_info *info) ...@@ -213,7 +213,7 @@ static int offb_blank(int blank, struct fb_info *info)
out_le32(par->cmap_adr + 0xb4, 0); out_le32(par->cmap_adr + 0xb4, 0);
break; break;
case cmap_gxt2000: case cmap_gxt2000:
out_le32((unsigned __iomem *) par->cmap_adr + i, out_le32(((unsigned __iomem *) par->cmap_adr) + i,
0); 0);
break; break;
} }
...@@ -226,13 +226,23 @@ static int offb_blank(int blank, struct fb_info *info) ...@@ -226,13 +226,23 @@ static int offb_blank(int blank, struct fb_info *info)
static void __iomem *offb_map_reg(struct device_node *np, int index, static void __iomem *offb_map_reg(struct device_node *np, int index,
unsigned long offset, unsigned long size) unsigned long offset, unsigned long size)
{ {
struct resource r; const u32 *addrp;
u64 asize, taddr;
if (of_address_to_resource(np, index, &r)) unsigned int flags;
return 0;
if ((r.start + offset + size) > r.end) addrp = of_get_pci_address(np, index, &asize, &flags);
return 0; if (addrp == NULL)
return ioremap(r.start + offset, size); addrp = of_get_address(np, index, &asize, &flags);
if (addrp == NULL)
return NULL;
if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
return NULL;
if ((offset + size) > asize)
return NULL;
taddr = of_translate_address(np, addrp);
if (taddr == OF_BAD_ADDR)
return NULL;
return ioremap(taddr + offset, size);
} }
static void __init offb_init_fb(const char *name, const char *full_name, static void __init offb_init_fb(const char *name, const char *full_name,
...@@ -289,7 +299,6 @@ static void __init offb_init_fb(const char *name, const char *full_name, ...@@ -289,7 +299,6 @@ static void __init offb_init_fb(const char *name, const char *full_name,
par->cmap_type = cmap_unknown; par->cmap_type = cmap_unknown;
if (depth == 8) { if (depth == 8) {
/* Palette hacks disabled for now */
if (dp && !strncmp(name, "ATY,Rage128", 11)) { if (dp && !strncmp(name, "ATY,Rage128", 11)) {
par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff); par->cmap_adr = offb_map_reg(dp, 2, 0, 0x1fff);
if (par->cmap_adr) if (par->cmap_adr)
...@@ -313,7 +322,8 @@ static void __init offb_init_fb(const char *name, const char *full_name, ...@@ -313,7 +322,8 @@ static void __init offb_init_fb(const char *name, const char *full_name,
ioremap(base + 0x7ff000, 0x1000) + 0xcc0; ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
par->cmap_data = par->cmap_adr + 1; par->cmap_data = par->cmap_adr + 1;
par->cmap_type = cmap_m64; par->cmap_type = cmap_m64;
} else if (dp && device_is_compatible(dp, "pci1014,b7")) { } else if (dp && (device_is_compatible(dp, "pci1014,b7") ||
device_is_compatible(dp, "pci1014,21c"))) {
par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000); par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
if (par->cmap_adr) if (par->cmap_adr)
par->cmap_type = cmap_gxt2000; par->cmap_type = cmap_gxt2000;
...@@ -433,7 +443,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) ...@@ -433,7 +443,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
pp = get_property(dp, "linux,bootx-linebytes", &len); pp = get_property(dp, "linux,bootx-linebytes", &len);
if (pp == NULL) if (pp == NULL)
pp = get_property(dp, "linebytes", &len); pp = get_property(dp, "linebytes", &len);
if (pp && len == sizeof(u32)) if (pp && len == sizeof(u32) && (*pp != 0xffffffffu))
pitch = *pp; pitch = *pp;
else else
pitch = width * ((depth + 7) / 8); pitch = width * ((depth + 7) / 8);
...@@ -496,7 +506,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node) ...@@ -496,7 +506,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
offb_init_fb(no_real_node ? "bootx" : dp->name, offb_init_fb(no_real_node ? "bootx" : dp->name,
no_real_node ? "display" : dp->full_name, no_real_node ? "display" : dp->full_name,
width, height, depth, pitch, address, width, height, depth, pitch, address,
no_real_node ? dp : NULL); no_real_node ? NULL : dp);
} }
} }
......
...@@ -14,7 +14,17 @@ struct task_struct; ...@@ -14,7 +14,17 @@ struct task_struct;
#ifdef __powerpc64__ #ifdef __powerpc64__
#include <asm/paca.h> #include <asm/paca.h>
#define current (get_paca()->__current) static inline struct task_struct *get_current(void)
{
struct task_struct *task;
__asm__ __volatile__("ld %0,%1(13)"
: "=r" (task)
: "i" (offsetof(struct paca_struct, __current)));
return task;
}
#define current get_current()
#else #else
......
...@@ -163,8 +163,11 @@ extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count); ...@@ -163,8 +163,11 @@ extern void _outsl_ns(volatile u32 __iomem *port, const void *buf, long count);
static inline void mmiowb(void) static inline void mmiowb(void)
{ {
__asm__ __volatile__ ("sync" : : : "memory"); unsigned long tmp;
get_paca()->io_sync = 0;
__asm__ __volatile__("sync; li %0,0; stb %0,%1(13)"
: "=&r" (tmp) : "i" (offsetof(struct paca_struct, io_sync))
: "memory");
} }
/* /*
......
...@@ -22,17 +22,35 @@ ...@@ -22,17 +22,35 @@
#define _ASM_IOMMU_H #define _ASM_IOMMU_H
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <asm/types.h> #include <linux/compiler.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <asm/types.h>
#include <asm/bitops.h>
#define IOMMU_PAGE_SHIFT 12
#define IOMMU_PAGE_SIZE (ASM_CONST(1) << IOMMU_PAGE_SHIFT)
#define IOMMU_PAGE_MASK (~((1 << IOMMU_PAGE_SHIFT) - 1))
#define IOMMU_PAGE_ALIGN(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE)
#ifndef __ASSEMBLY__
/* Pure 2^n version of get_order */
static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
{
return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
}
#endif /* __ASSEMBLY__ */
/* /*
* IOMAP_MAX_ORDER defines the largest contiguous block * IOMAP_MAX_ORDER defines the largest contiguous block
* of dma space we can get. IOMAP_MAX_ORDER = 13 * of dma space we can get. IOMAP_MAX_ORDER = 13
* allows up to 2**12 pages (4096 * 4096) = 16 MB * allows up to 2**12 pages (4096 * 4096) = 16 MB
*/ */
#define IOMAP_MAX_ORDER 13 #define IOMAP_MAX_ORDER 13
struct iommu_table { struct iommu_table {
unsigned long it_busno; /* Bus number this table belongs to */ unsigned long it_busno; /* Bus number this table belongs to */
......
...@@ -42,7 +42,7 @@ struct op_powerpc_model { ...@@ -42,7 +42,7 @@ struct op_powerpc_model {
void (*reg_setup) (struct op_counter_config *, void (*reg_setup) (struct op_counter_config *,
struct op_system_config *, struct op_system_config *,
int num_counters); int num_counters);
void (*cpu_setup) (void *); void (*cpu_setup) (struct op_counter_config *);
void (*start) (struct op_counter_config *); void (*start) (struct op_counter_config *);
void (*stop) (void); void (*stop) (void);
void (*handle_interrupt) (struct pt_regs *, void (*handle_interrupt) (struct pt_regs *,
...@@ -121,7 +121,90 @@ static inline void ctr_write(unsigned int i, unsigned int val) ...@@ -121,7 +121,90 @@ static inline void ctr_write(unsigned int i, unsigned int val)
break; break;
} }
} }
#endif /* !CONFIG_FSL_BOOKE */ #else /* CONFIG_FSL_BOOKE */
static inline u32 get_pmlca(int ctr)
{
u32 pmlca;
switch (ctr) {
case 0:
pmlca = mfpmr(PMRN_PMLCA0);
break;
case 1:
pmlca = mfpmr(PMRN_PMLCA1);
break;
case 2:
pmlca = mfpmr(PMRN_PMLCA2);
break;
case 3:
pmlca = mfpmr(PMRN_PMLCA3);
break;
default:
panic("Bad ctr number\n");
}
return pmlca;
}
static inline void set_pmlca(int ctr, u32 pmlca)
{
switch (ctr) {
case 0:
mtpmr(PMRN_PMLCA0, pmlca);
break;
case 1:
mtpmr(PMRN_PMLCA1, pmlca);
break;
case 2:
mtpmr(PMRN_PMLCA2, pmlca);
break;
case 3:
mtpmr(PMRN_PMLCA3, pmlca);
break;
default:
panic("Bad ctr number\n");
}
}
static inline unsigned int ctr_read(unsigned int i)
{
switch(i) {
case 0:
return mfpmr(PMRN_PMC0);
case 1:
return mfpmr(PMRN_PMC1);
case 2:
return mfpmr(PMRN_PMC2);
case 3:
return mfpmr(PMRN_PMC3);
default:
return 0;
}
}
static inline void ctr_write(unsigned int i, unsigned int val)
{
switch(i) {
case 0:
mtpmr(PMRN_PMC0, val);
break;
case 1:
mtpmr(PMRN_PMC1, val);
break;
case 2:
mtpmr(PMRN_PMC2, val);
break;
case 3:
mtpmr(PMRN_PMC3, val);
break;
default:
break;
}
}
#endif /* CONFIG_FSL_BOOKE */
extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth); extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth);
......
...@@ -32,18 +32,5 @@ void release_pmc_hardware(void); ...@@ -32,18 +32,5 @@ void release_pmc_hardware(void);
void power4_enable_pmcs(void); void power4_enable_pmcs(void);
#endif #endif
#ifdef CONFIG_FSL_BOOKE
void init_pmc_stop(int ctr);
void set_pmc_event(int ctr, int event);
void set_pmc_user_kernel(int ctr, int user, int kernel);
void set_pmc_marked(int ctr, int mark0, int mark1);
void pmc_start_ctr(int ctr, int enable);
void pmc_start_ctrs(int enable);
void pmc_stop_ctrs(void);
void dump_pmcs(void);
extern struct op_powerpc_model op_model_fsl_booke;
#endif
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _POWERPC_PMC_H */ #endif /* _POWERPC_PMC_H */
...@@ -25,8 +25,8 @@ ...@@ -25,8 +25,8 @@
* *
* We have to use the sync instructions for mb(), since lwsync doesn't * We have to use the sync instructions for mb(), since lwsync doesn't
* order loads with respect to previous stores. Lwsync is fine for * order loads with respect to previous stores. Lwsync is fine for
* rmb(), though. Note that lwsync is interpreted as sync by * rmb(), though. Note that rmb() actually uses a sync on 32-bit
* 32-bit and older 64-bit CPUs. * architectures.
* *
* For wmb(), we use sync since wmb is used in drivers to order * For wmb(), we use sync since wmb is used in drivers to order
* stores to system memory with respect to writes to the device. * stores to system memory with respect to writes to the device.
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
* SMP since it is only used to order updates to system memory. * SMP since it is only used to order updates to system memory.
*/ */
#define mb() __asm__ __volatile__ ("sync" : : : "memory") #define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory") #define rmb() __asm__ __volatile__ (__stringify(LWSYNC) : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory") #define wmb() __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends() do { } while(0) #define read_barrier_depends() do { } while(0)
......
...@@ -22,6 +22,8 @@ ...@@ -22,6 +22,8 @@
#define _ASM_POWERPC_TCE_H #define _ASM_POWERPC_TCE_H
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <asm/iommu.h>
/* /*
* Tces come in two formats, one for the virtual bus and a different * Tces come in two formats, one for the virtual bus and a different
* format for PCI * format for PCI
...@@ -33,7 +35,6 @@ ...@@ -33,7 +35,6 @@
#define TCE_SHIFT 12 #define TCE_SHIFT 12
#define TCE_PAGE_SIZE (1 << TCE_SHIFT) #define TCE_PAGE_SIZE (1 << TCE_SHIFT)
#define TCE_PAGE_FACTOR (PAGE_SHIFT - TCE_SHIFT)
#define TCE_ENTRY_SIZE 8 /* each TCE is 64 bits */ #define TCE_ENTRY_SIZE 8 /* each TCE is 64 bits */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment