Commit 9850b6c6 authored by Viresh Kumar

arch: powerpc: Remove oprofile

The previous commit already disabled building oprofile; let's remove the
oprofile directory now.
Suggested-by: Christoph Hellwig <hch@infradead.org>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Robert Richter <rric@kernel.org>
Acked-by: William Cohen <wcohen@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
parent 7a3c90df
# SPDX-License-Identifier: GPL-2.0
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
obj-$(CONFIG_OPROFILE) += oprofile.o
DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
oprof.o cpu_buffer.o buffer_sync.o \
event_buffer.o oprofile_files.o \
oprofilefs.o oprofile_stats.o \
timer_int.o )
oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
oprofile-$(CONFIG_OPROFILE_CELL) += op_model_cell.o \
cell/spu_profiler.o cell/vma_map.o \
cell/spu_task_sync.o
oprofile-$(CONFIG_PPC_BOOK3S_64) += op_model_power4.o op_model_pa6t.o
oprofile-$(CONFIG_FSL_EMB_PERFMON) += op_model_fsl_emb.o
oprofile-$(CONFIG_PPC_BOOK3S_32) += op_model_7450.o
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2005 Brian Rogan <bcr6@cornell.edu>, IBM
 */
#include <linux/time.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/oprofile_impl.h>
#define STACK_SP(STACK) *(STACK)
#define STACK_LR64(STACK) *((unsigned long *)(STACK) + 2)
#define STACK_LR32(STACK) *((unsigned int *)(STACK) + 1)
#ifdef CONFIG_PPC64
#define STACK_LR(STACK) STACK_LR64(STACK)
#else
#define STACK_LR(STACK) STACK_LR32(STACK)
#endif
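/* These offsets mirror the PowerPC stack frame layouts: the 64-bit
 * ABI saves the link register at SP + 16 (the third unsigned long),
 * while the 32-bit ABI saves it at SP + 4 (the second word).
 */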
static unsigned int user_getsp32(unsigned int sp, int is_first)
{
unsigned int stack_frame[2];
void __user *p = compat_ptr(sp);
/*
 * If the copy below fails, the most likely reason is -EFAULT,
 * which means that we've done all that we can do from
 * interrupt context.
 */
if (copy_from_user_nofault(stack_frame, (void __user *)p,
sizeof(stack_frame)))
return 0;
if (!is_first)
oprofile_add_trace(STACK_LR32(stack_frame));
/*
* We do not enforce increasing stack addresses here because
* we may transition to a different stack, eg a signal handler.
*/
return STACK_SP(stack_frame);
}
#ifdef CONFIG_PPC64
static unsigned long user_getsp64(unsigned long sp, int is_first)
{
unsigned long stack_frame[3];
if (copy_from_user_nofault(stack_frame, (void __user *)sp,
sizeof(stack_frame)))
return 0;
if (!is_first)
oprofile_add_trace(STACK_LR64(stack_frame));
return STACK_SP(stack_frame);
}
#endif
static unsigned long kernel_getsp(unsigned long sp, int is_first)
{
unsigned long *stack_frame = (unsigned long *)sp;
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
return 0;
if (!is_first)
oprofile_add_trace(STACK_LR(stack_frame));
/*
* We do not enforce increasing stack addresses here because
* we might be transitioning from an interrupt stack to a kernel
* stack. validate_sp() is designed to understand this, so just
* use it.
*/
return STACK_SP(stack_frame);
}
void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
{
unsigned long sp = regs->gpr[1];
int first_frame = 1;
/* We ditch the top stack frame, so we need to loop one extra time */
depth += 1;
if (!user_mode(regs)) {
while (depth--) {
sp = kernel_getsp(sp, first_frame);
if (!sp)
break;
first_frame = 0;
}
} else {
#ifdef CONFIG_PPC64
if (!is_32bit_task()) {
while (depth--) {
sp = user_getsp64(sp, first_frame);
if (!sp)
break;
first_frame = 0;
}
return;
}
#endif
while (depth--) {
sp = user_getsp32(sp, first_frame);
if (!sp)
break;
first_frame = 0;
}
}
}
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Cell Broadband Engine OProfile Support
*
* (C) Copyright IBM Corporation 2006
*
* Author: Maynard Johnson <maynardj@us.ibm.com>
*/
#ifndef PR_UTIL_H
#define PR_UTIL_H
#include <linux/cpumask.h>
#include <linux/oprofile.h>
#include <asm/cell-pmu.h>
#include <asm/cell-regs.h>
#include <asm/spu.h>
/* Defines used for sync_start */
#define SKIP_GENERIC_SYNC 0
#define SYNC_START_ERROR -1
#define DO_GENERIC_SYNC 1
#define SPUS_PER_NODE 8
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
extern struct delayed_work spu_work;
extern int spu_prof_running;
#define TRACE_ARRAY_SIZE 1024
extern spinlock_t oprof_spu_smpl_arry_lck;
struct spu_overlay_info { /* map of sections within an SPU overlay */
unsigned int vma; /* SPU virtual memory address from elf */
unsigned int size; /* size of section from elf */
unsigned int offset; /* offset of section into elf file */
unsigned int buf;
};
struct vma_to_fileoffset_map { /* map of sections within an SPU program */
struct vma_to_fileoffset_map *next; /* list pointer */
unsigned int vma; /* SPU virtual memory address from elf */
unsigned int size; /* size of section from elf */
unsigned int offset; /* offset of section into elf file */
unsigned int guard_ptr;
unsigned int guard_val;
/*
* The guard pointer is an entry in the _ovly_buf_table,
* computed using ovly.buf as the index into the table. Since
* ovly.buf values begin at '1' to reference the first (or 0th)
* entry in the _ovly_buf_table, the computation subtracts 1
* from ovly.buf.
* The guard value is stored in the _ovly_buf_table entry and
* is an index (starting at 1) back to the _ovly_table entry
* that is pointing at this _ovly_buf_table entry. So, for
* example, for an overlay scenario with one overlay segment
* and two overlay sections:
* - Section 1 points to the first entry of the
* _ovly_buf_table, which contains a guard value
* of '1', referencing the first (index=0) entry of
* _ovly_table.
* - Section 2 points to the second entry of the
* _ovly_buf_table, which contains a guard value
* of '2', referencing the second (index=1) entry of
* _ovly_table.
*/
};
struct spu_buffer {
int last_guard_val;
int ctx_sw_seen;
unsigned long *buff;
unsigned int head, tail;
};
/* The three functions below are for maintaining and accessing
* the vma-to-fileoffset map.
*/
struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu,
unsigned long objectid);
unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map,
unsigned int vma, const struct spu *aSpu,
int *grd_val);
void vma_map_free(struct vma_to_fileoffset_map *map);
/*
* Entry point for SPU profiling.
* cycles_reset is the SPU_CYCLES count value specified by the user.
*/
int start_spu_profiling_cycles(unsigned int cycles_reset);
void start_spu_profiling_events(void);
void stop_spu_profiling_cycles(void);
void stop_spu_profiling_events(void);
/* add the necessary profiling hooks */
int spu_sync_start(void);
/* remove the hooks */
int spu_sync_stop(void);
/* Record SPU program counter samples to the oprofile event buffer. */
void spu_sync_buffer(int spu_num, unsigned int *samples,
int num_samples);
void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset);
#endif /* PR_UTIL_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cell Broadband Engine OProfile Support
*
* (C) Copyright IBM Corporation 2006
*
* Authors: Maynard Johnson <maynardj@us.ibm.com>
* Carl Love <carll@us.ibm.com>
*/
#include <linux/hrtimer.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cell-pmu.h>
#include <asm/time.h>
#include "pr_util.h"
#define SCALE_SHIFT 14
static u32 *samples;
/* spu_prof_running is a flag used to indicate if spu profiling is enabled
* or not. It is set by the routines start_spu_profiling_cycles() and
* start_spu_profiling_events(). The flag is cleared by the routines
* stop_spu_profiling_cycles() and stop_spu_profiling_events(). These
* routines are called via global_start() and global_stop() which are called in
* op_powerpc_start() and op_powerpc_stop(). These routines are called once
* per system as a result of the user starting/stopping oprofile. Hence, only
* one CPU per user at a time will be changing the value of spu_prof_running.
* In general, OProfile does not protect against multiple users trying to run
* OProfile at a time.
*/
int spu_prof_running;
static unsigned int profiling_interval;
#define NUM_SPU_BITS_TRBUF 16
#define SPUS_PER_TB_ENTRY 4
#define SPU_PC_MASK 0xFFFF
DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
static unsigned long oprof_spu_smpl_arry_lck_flags;
void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
{
unsigned long ns_per_cyc;
if (!freq_khz)
freq_khz = ppc_proc_freq/1000;
/* To calculate a timeout in nanoseconds, the basic
* formula is ns = cycles_reset * (NSEC_PER_SEC / cpu frequency).
* To avoid floating point math, we use the scale math
* technique as described in linux/jiffies.h. We use
* a scale factor of SCALE_SHIFT, which provides 4 decimal places
* of precision. This is close enough for the purpose at hand.
*
* The value of the timeout should be small enough that the hw
* trace buffer will not get more than about 1/3 full for the
* maximum user specified (the LFSR value) hw sampling frequency.
* This is to ensure the trace buffer will never fill even if the
* kernel thread scheduling varies under a heavy system load.
*/
ns_per_cyc = (USEC_PER_SEC << SCALE_SHIFT)/freq_khz;
profiling_interval = (ns_per_cyc * cycles_reset) >> SCALE_SHIFT;
}
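/* Illustrative example (assuming a 3.2 GHz Cell, i.e. freq_khz ==
 * 3200000) with cycles_reset == 100000:
 *   ns_per_cyc         = (1000000 << 14) / 3200000 = 5120
 *   profiling_interval = (5120 * 100000) >> 14     = 31250 ns
 * which matches 100000 cycles * 0.3125 ns/cycle = 31.25 us.
 */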
/*
* Extract SPU PC from trace buffer entry
*/
static void spu_pc_extract(int cpu, int entry)
{
/* the trace buffer is 128 bits */
u64 trace_buffer[2];
u64 spu_mask;
int spu;
spu_mask = SPU_PC_MASK;
/* Each SPU PC is 16 bits; hence, four spus in each of
* the two 64-bit buffer entries that make up the
* 128-bit trace_buffer entry. Process two 64-bit values
* simultaneously.
* trace[0] SPU PC contents are: 0 1 2 3
* trace[1] SPU PC contents are: 4 5 6 7
*/
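/* For example, if trace_buffer[0] == 0x0004000300020001, SPU 0's
 * 16-bit field is 0x0004 and SPU 3's is 0x0001; shifting each left
 * by 2 rebuilds the 18-bit program counters 0x10 and 0x4.
 */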
cbe_read_trace_buffer(cpu, trace_buffer);
for (spu = SPUS_PER_TB_ENTRY-1; spu >= 0; spu--) {
/* spu PC trace entry is upper 16 bits of the
* 18 bit SPU program counter
*/
samples[spu * TRACE_ARRAY_SIZE + entry]
= (spu_mask & trace_buffer[0]) << 2;
samples[(spu + SPUS_PER_TB_ENTRY) * TRACE_ARRAY_SIZE + entry]
= (spu_mask & trace_buffer[1]) << 2;
trace_buffer[0] = trace_buffer[0] >> NUM_SPU_BITS_TRBUF;
trace_buffer[1] = trace_buffer[1] >> NUM_SPU_BITS_TRBUF;
}
}
static int cell_spu_pc_collection(int cpu)
{
u32 trace_addr;
int entry;
/* process the collected SPU PC for the node */
entry = 0;
trace_addr = cbe_read_pm(cpu, trace_address);
while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
/* there is data in the trace buffer to process */
spu_pc_extract(cpu, entry);
entry++;
if (entry >= TRACE_ARRAY_SIZE)
/* spu_samples is full */
break;
trace_addr = cbe_read_pm(cpu, trace_address);
}
return entry;
}
static enum hrtimer_restart profile_spus(struct hrtimer *timer)
{
ktime_t kt;
int cpu, node, k, num_samples, spu_num;
if (!spu_prof_running)
goto stop;
for_each_online_cpu(cpu) {
if (cbe_get_hw_thread_id(cpu))
continue;
node = cbe_cpu_to_node(cpu);
/* There should only be one kernel thread at a time processing
 * the samples. In the very unlikely case that processing takes
 * a very long time and multiple kernel threads are started to
 * process the samples, make sure only one kernel thread works
 * on the samples array at a time. The sample array must be
 * loaded and then processed for a given cpu. The sample array
 * is not per cpu.
 */
spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
oprof_spu_smpl_arry_lck_flags);
num_samples = cell_spu_pc_collection(cpu);
if (num_samples == 0) {
spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
oprof_spu_smpl_arry_lck_flags);
continue;
}
for (k = 0; k < SPUS_PER_NODE; k++) {
spu_num = k + (node * SPUS_PER_NODE);
spu_sync_buffer(spu_num,
samples + (k * TRACE_ARRAY_SIZE),
num_samples);
}
spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
oprof_spu_smpl_arry_lck_flags);
}
smp_wmb(); /* ensure spu event buffer updates are written */
/* don't want events intermingled... */
kt = profiling_interval;
if (!spu_prof_running)
goto stop;
hrtimer_forward(timer, timer->base->get_time(), kt);
return HRTIMER_RESTART;
stop:
printk(KERN_INFO "SPU_PROF: spu-prof timer ending\n");
return HRTIMER_NORESTART;
}
static struct hrtimer timer;
/*
* Entry point for SPU cycle profiling.
* NOTE: SPU profiling is done system-wide, not per-CPU.
*
* cycles_reset is the count value specified by the user when
* setting up OProfile to count SPU_CYCLES.
*/
int start_spu_profiling_cycles(unsigned int cycles_reset)
{
ktime_t kt;
pr_debug("timer resolution: %lu\n", TICK_NSEC);
kt = profiling_interval;
hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_set_expires(&timer, kt);
timer.function = profile_spus;
/* Allocate arrays for collecting SPU PC samples */
samples = kcalloc(SPUS_PER_NODE * TRACE_ARRAY_SIZE, sizeof(u32),
GFP_KERNEL);
if (!samples)
return -ENOMEM;
spu_prof_running = 1;
hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
return 0;
}
/*
 * Entry point for SPU event profiling.
 * NOTE: SPU profiling is done system-wide, not per-CPU.
 */
void start_spu_profiling_events(void)
{
spu_prof_running = 1;
schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
return;
}
void stop_spu_profiling_cycles(void)
{
spu_prof_running = 0;
hrtimer_cancel(&timer);
kfree(samples);
pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
}
void stop_spu_profiling_events(void)
{
spu_prof_running = 0;
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cell Broadband Engine OProfile Support
*
* (C) Copyright IBM Corporation 2006
*
* Author: Maynard Johnson <maynardj@us.ibm.com>
*/
/* The code in this source file is responsible for generating
* vma-to-fileOffset maps for both overlay and non-overlay SPU
* applications.
*/
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/elf.h>
#include <linux/slab.h>
#include "pr_util.h"
void vma_map_free(struct vma_to_fileoffset_map *map)
{
while (map) {
struct vma_to_fileoffset_map *next = map->next;
kfree(map);
map = next;
}
}
unsigned int
vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma,
const struct spu *aSpu, int *grd_val)
{
/*
* Default the offset to the physical address + a flag value.
* Addresses of dynamically generated code can't be found in the vma
* map. For those addresses the flagged value will be sent on to
* the user space tools so they can be reported rather than just
* thrown away.
*/
u32 offset = 0x10000000 + vma;
u32 ovly_grd;
for (; map; map = map->next) {
if (vma < map->vma || vma >= map->vma + map->size)
continue;
if (map->guard_ptr) {
ovly_grd = *(u32 *)(aSpu->local_store + map->guard_ptr);
if (ovly_grd != map->guard_val)
continue;
*grd_val = ovly_grd;
}
offset = vma - map->vma + map->offset;
break;
}
return offset;
}
static struct vma_to_fileoffset_map *
vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma,
unsigned int size, unsigned int offset, unsigned int guard_ptr,
unsigned int guard_val)
{
struct vma_to_fileoffset_map *new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new) {
printk(KERN_ERR "SPU_PROF: %s, line %d: malloc failed\n",
__func__, __LINE__);
vma_map_free(map);
return NULL;
}
new->next = map;
new->vma = vma;
new->size = size;
new->offset = offset;
new->guard_ptr = guard_ptr;
new->guard_val = guard_val;
return new;
}
/* Parse SPE ELF header and generate a list of vma_maps.
* A pointer to the first vma_map in the generated list
* of vma_maps is returned. */
struct vma_to_fileoffset_map *create_vma_map(const struct spu *aSpu,
unsigned long __spu_elf_start)
{
static const unsigned char expected[EI_PAD] = {
[EI_MAG0] = ELFMAG0,
[EI_MAG1] = ELFMAG1,
[EI_MAG2] = ELFMAG2,
[EI_MAG3] = ELFMAG3,
[EI_CLASS] = ELFCLASS32,
[EI_DATA] = ELFDATA2MSB,
[EI_VERSION] = EV_CURRENT,
[EI_OSABI] = ELFOSABI_NONE
};
int grd_val;
struct vma_to_fileoffset_map *map = NULL;
void __user *spu_elf_start = (void __user *)__spu_elf_start;
struct spu_overlay_info ovly;
unsigned int overlay_tbl_offset = -1;
Elf32_Phdr __user *phdr_start;
Elf32_Shdr __user *shdr_start;
Elf32_Ehdr ehdr;
Elf32_Phdr phdr;
Elf32_Shdr shdr, shdr_str;
Elf32_Sym sym;
int i, j;
char name[32];
unsigned int ovly_table_sym = 0;
unsigned int ovly_buf_table_sym = 0;
unsigned int ovly_table_end_sym = 0;
unsigned int ovly_buf_table_end_sym = 0;
struct spu_overlay_info __user *ovly_table;
unsigned int n_ovlys;
/* Get and validate ELF header. */
if (copy_from_user(&ehdr, spu_elf_start, sizeof (ehdr)))
goto fail;
if (memcmp(ehdr.e_ident, expected, EI_PAD) != 0) {
printk(KERN_ERR "SPU_PROF: "
"%s, line %d: Unexpected e_ident parsing SPU ELF\n",
__func__, __LINE__);
goto fail;
}
if (ehdr.e_machine != EM_SPU) {
printk(KERN_ERR "SPU_PROF: "
"%s, line %d: Unexpected e_machine parsing SPU ELF\n",
__func__, __LINE__);
goto fail;
}
if (ehdr.e_type != ET_EXEC) {
printk(KERN_ERR "SPU_PROF: "
"%s, line %d: Unexpected e_type parsing SPU ELF\n",
__func__, __LINE__);
goto fail;
}
phdr_start = spu_elf_start + ehdr.e_phoff;
shdr_start = spu_elf_start + ehdr.e_shoff;
/* Traverse program headers. */
for (i = 0; i < ehdr.e_phnum; i++) {
if (copy_from_user(&phdr, phdr_start + i, sizeof(phdr)))
goto fail;
if (phdr.p_type != PT_LOAD)
continue;
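/* Bit 27 of p_flags appears to be the SPU ELF PF_OVERLAY flag;
 * skip overlay segments here, they are mapped via the overlay
 * tables parsed below.
 */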
if (phdr.p_flags & (1 << 27))
continue;
map = vma_map_add(map, phdr.p_vaddr, phdr.p_memsz,
phdr.p_offset, 0, 0);
if (!map)
goto fail;
}
pr_debug("SPU_PROF: Created non-overlay maps\n");
/* Traverse section table and search for overlay-related symbols. */
for (i = 0; i < ehdr.e_shnum; i++) {
if (copy_from_user(&shdr, shdr_start + i, sizeof(shdr)))
goto fail;
if (shdr.sh_type != SHT_SYMTAB)
continue;
if (shdr.sh_entsize != sizeof (sym))
continue;
if (copy_from_user(&shdr_str,
shdr_start + shdr.sh_link,
sizeof(shdr)))
goto fail;
if (shdr_str.sh_type != SHT_STRTAB)
goto fail;
for (j = 0; j < shdr.sh_size / sizeof (sym); j++) {
if (copy_from_user(&sym, spu_elf_start +
shdr.sh_offset +
j * sizeof (sym),
sizeof (sym)))
goto fail;
if (copy_from_user(name,
spu_elf_start + shdr_str.sh_offset +
sym.st_name,
20))
goto fail;
if (memcmp(name, "_ovly_table", 12) == 0)
ovly_table_sym = sym.st_value;
if (memcmp(name, "_ovly_buf_table", 16) == 0)
ovly_buf_table_sym = sym.st_value;
if (memcmp(name, "_ovly_table_end", 16) == 0)
ovly_table_end_sym = sym.st_value;
if (memcmp(name, "_ovly_buf_table_end", 20) == 0)
ovly_buf_table_end_sym = sym.st_value;
}
}
/* If we don't have overlays, we're done. */
if (ovly_table_sym == 0 || ovly_buf_table_sym == 0
|| ovly_table_end_sym == 0 || ovly_buf_table_end_sym == 0) {
pr_debug("SPU_PROF: No overlay table found\n");
goto out;
} else {
pr_debug("SPU_PROF: Overlay table found\n");
}
/* The _ovly_table symbol represents a table with one entry
 * per overlay section. The _ovly_buf_table symbol represents
 * a table with one entry per overlay region.
 * The struct spu_overlay_info gives the structure of the _ovly_table
 * entries. The structure of _ovly_buf_table is simply one
 * u32 word per entry.
 */
overlay_tbl_offset = vma_map_lookup(map, ovly_table_sym,
aSpu, &grd_val);
if (overlay_tbl_offset > 0x10000000) {
printk(KERN_ERR "SPU_PROF: "
"%s, line %d: Error finding SPU overlay table\n",
__func__, __LINE__);
goto fail;
}
ovly_table = spu_elf_start + overlay_tbl_offset;
n_ovlys = (ovly_table_end_sym -
ovly_table_sym) / sizeof (ovly);
/* Traverse overlay table. */
for (i = 0; i < n_ovlys; i++) {
if (copy_from_user(&ovly, ovly_table + i, sizeof (ovly)))
goto fail;
/* The ovly.vma/size/offset arguments are analogous to the same
* arguments used above for non-overlay maps. The final two
* args are referred to as the guard pointer and the guard
* value.
* The guard pointer is an entry in the _ovly_buf_table,
* computed using ovly.buf as the index into the table. Since
* ovly.buf values begin at '1' to reference the first (or 0th)
* entry in the _ovly_buf_table, the computation subtracts 1
* from ovly.buf.
* The guard value is stored in the _ovly_buf_table entry and
* is an index (starting at 1) back to the _ovly_table entry
* that is pointing at this _ovly_buf_table entry. So, for
* example, for an overlay scenario with one overlay segment
* and two overlay sections:
* - Section 1 points to the first entry of the
* _ovly_buf_table, which contains a guard value
* of '1', referencing the first (index=0) entry of
* _ovly_table.
* - Section 2 points to the second entry of the
* _ovly_buf_table, which contains a guard value
* of '2', referencing the second (index=1) entry of
* _ovly_table.
*/
map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset,
ovly_buf_table_sym + (ovly.buf-1) * 4, i+1);
if (!map)
goto fail;
}
goto out;
fail:
map = NULL;
out:
return map;
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PPC 64 oprofile support:
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
* PPC 32 oprofile support: (based on PPC 64 support)
* Copyright (C) Freescale Semiconductor, Inc 2004
* Author: Andy Fleming
*
* Based on alpha version.
*/
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <asm/ptrace.h>
#include <asm/pmc.h>
#include <asm/cputable.h>
#include <asm/oprofile_impl.h>
#include <asm/firmware.h>
static struct op_powerpc_model *model;
static struct op_counter_config ctr[OP_MAX_COUNTER];
static struct op_system_config sys;
static int op_per_cpu_rc;
static void op_handle_interrupt(struct pt_regs *regs)
{
model->handle_interrupt(regs, ctr);
}
static void op_powerpc_cpu_setup(void *dummy)
{
int ret;
ret = model->cpu_setup(ctr);
if (ret != 0)
op_per_cpu_rc = ret;
}
static int op_powerpc_setup(void)
{
int err;
op_per_cpu_rc = 0;
/* Grab the hardware */
err = reserve_pmc_hardware(op_handle_interrupt);
if (err)
return err;
/* Pre-compute the values to stuff in the hardware registers. */
op_per_cpu_rc = model->reg_setup(ctr, &sys, model->num_counters);
if (op_per_cpu_rc)
goto out;
/* Configure the registers on all cpus. If an error occurs on one
* of the cpus, op_per_cpu_rc will be set to the error */
on_each_cpu(op_powerpc_cpu_setup, NULL, 1);
out: if (op_per_cpu_rc) {
/* error on setup; release the performance counter hardware */
release_pmc_hardware();
}
return op_per_cpu_rc;
}
static void op_powerpc_shutdown(void)
{
release_pmc_hardware();
}
static void op_powerpc_cpu_start(void *dummy)
{
/* If any of the cpus returns an error, set the
 * global flag to the error so it can be returned
 * to the generic OProfile caller.
 */
int ret;
ret = model->start(ctr);
if (ret != 0)
op_per_cpu_rc = ret;
}
static int op_powerpc_start(void)
{
op_per_cpu_rc = 0;
if (model->global_start)
return model->global_start(ctr);
if (model->start) {
on_each_cpu(op_powerpc_cpu_start, NULL, 1);
return op_per_cpu_rc;
}
return -EIO; /* No start function is defined for this power architecture */
}
static inline void op_powerpc_cpu_stop(void *dummy)
{
model->stop();
}
static void op_powerpc_stop(void)
{
if (model->stop)
on_each_cpu(op_powerpc_cpu_stop, NULL, 1);
if (model->global_stop)
model->global_stop();
}
static int op_powerpc_create_files(struct dentry *root)
{
int i;
#ifdef CONFIG_PPC64
/*
* There is one mmcr0, mmcr1 and mmcra for setting the events for
* all of the counters.
*/
oprofilefs_create_ulong(root, "mmcr0", &sys.mmcr0);
oprofilefs_create_ulong(root, "mmcr1", &sys.mmcr1);
oprofilefs_create_ulong(root, "mmcra", &sys.mmcra);
#ifdef CONFIG_OPROFILE_CELL
/* Create a file the user tool can check to see what level of profiling
 * support exists with this kernel. Initialize the bit mask to indicate
 * what support the kernel has:
 * bit 0 - Supports SPU event profiling in addition to PPU
 * event and cycles; and SPU cycle profiling
 * bits 1-31 - Currently unused.
 *
 * If the file does not exist, then the kernel only supports SPU
 * cycle profiling, PPU event and cycle profiling.
 */
oprofilefs_create_ulong(root, "cell_support", &sys.cell_support);
sys.cell_support = 0x1; /* Note, the user OProfile tool must check
 * that this bit is set before attempting to
 * use SPU event profiling. Older kernels
 * will not have this file, hence the user
 * tool is not allowed to do SPU event
 * profiling on older kernels. Older kernels
 * will accept SPU events but the collected
 * data is garbage.
 */
#endif
#endif
for (i = 0; i < model->num_counters; ++i) {
struct dentry *dir;
char buf[4];
snprintf(buf, sizeof buf, "%d", i);
dir = oprofilefs_mkdir(root, buf);
oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
oprofilefs_create_ulong(dir, "event", &ctr[i].event);
oprofilefs_create_ulong(dir, "count", &ctr[i].count);
/*
* Classic PowerPC doesn't support per-counter
* control like this, but the options are
* expected, so they remain. For Freescale
* Book-E style performance monitors, we do
* support them.
*/
oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
oprofilefs_create_ulong(dir, "user", &ctr[i].user);
oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
}
oprofilefs_create_ulong(root, "enable_kernel", &sys.enable_kernel);
oprofilefs_create_ulong(root, "enable_user", &sys.enable_user);
/* Default to tracing both kernel and user */
sys.enable_kernel = 1;
sys.enable_user = 1;
return 0;
}
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
if (!cur_cpu_spec->oprofile_cpu_type)
return -ENODEV;
switch (cur_cpu_spec->oprofile_type) {
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_OPROFILE_CELL
case PPC_OPROFILE_CELL:
if (firmware_has_feature(FW_FEATURE_LPAR))
return -ENODEV;
model = &op_model_cell;
ops->sync_start = model->sync_start;
ops->sync_stop = model->sync_stop;
break;
#endif
case PPC_OPROFILE_POWER4:
model = &op_model_power4;
break;
case PPC_OPROFILE_PA6T:
model = &op_model_pa6t;
break;
#endif
#ifdef CONFIG_PPC_BOOK3S_32
case PPC_OPROFILE_G4:
model = &op_model_7450;
break;
#endif
#if defined(CONFIG_FSL_EMB_PERFMON)
case PPC_OPROFILE_FSL_EMB:
model = &op_model_fsl_emb;
break;
#endif
default:
return -ENODEV;
}
model->num_counters = cur_cpu_spec->num_pmcs;
ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
ops->create_files = op_powerpc_create_files;
ops->setup = op_powerpc_setup;
ops->shutdown = op_powerpc_shutdown;
ops->start = op_powerpc_start;
ops->stop = op_powerpc_stop;
ops->backtrace = op_powerpc_backtrace;
printk(KERN_DEBUG "oprofile: using %s performance monitoring.\n",
ops->cpu_type);
return 0;
}
void oprofile_arch_exit(void)
{
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/powerpc/oprofile/op_model_7450.c
*
* Freescale 745x/744x oprofile support, based on fsl_booke support
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
*
* Copyright (c) 2004 Freescale Semiconductor, Inc
*
* Author: Andy Fleming
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
*/
#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>
static unsigned long reset_value[OP_MAX_COUNTER];
static int oprofile_running;
static u32 mmcr0_val, mmcr1_val, mmcr2_val, num_pmcs;
#define MMCR0_PMC1_SHIFT 6
#define MMCR0_PMC2_SHIFT 0
#define MMCR1_PMC3_SHIFT 27
#define MMCR1_PMC4_SHIFT 22
#define MMCR1_PMC5_SHIFT 17
#define MMCR1_PMC6_SHIFT 11
#define mmcr0_event1(event) \
((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL)
#define mmcr0_event2(event) \
((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL)
#define mmcr1_event3(event) \
((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL)
#define mmcr1_event4(event) \
((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL)
#define mmcr1_event5(event) \
((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL)
#define mmcr1_event6(event) \
((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL)
#define MMCR0_INIT (MMCR0_FC | MMCR0_FCS | MMCR0_FCP | MMCR0_FCM1 | MMCR0_FCM0)
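/* MMCR0_INIT starts fully frozen: FC freezes all counters, FCS and
 * FCP freeze counting in supervisor and problem (user) state, and
 * FCM1/FCM0 freeze counting based on the mark bit. fsl7450_reg_setup()
 * clears FCS/FCP per the kernel/user settings; pmc_start_ctrs() clears
 * FC and FCM0.
 */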
/* Unfreezes the counters on this CPU, enables the interrupt,
* enables the counters to trigger the interrupt, and sets the
* counters to only count when the mark bit is not set.
*/
static void pmc_start_ctrs(void)
{
u32 mmcr0 = mfspr(SPRN_MMCR0);
mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0);
mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);
mtspr(SPRN_MMCR0, mmcr0);
}
/* Disables the counters on this CPU, and freezes them */
static void pmc_stop_ctrs(void)
{
u32 mmcr0 = mfspr(SPRN_MMCR0);
mmcr0 |= MMCR0_FC;
mmcr0 &= ~(MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);
mtspr(SPRN_MMCR0, mmcr0);
}
/* Configures the counters on this CPU based on the global
* settings */
static int fsl7450_cpu_setup(struct op_counter_config *ctr)
{
/* freeze all counters */
pmc_stop_ctrs();
mtspr(SPRN_MMCR0, mmcr0_val);
mtspr(SPRN_MMCR1, mmcr1_val);
if (num_pmcs > 4)
mtspr(SPRN_MMCR2, mmcr2_val);
return 0;
}
/* Configures the global settings for the counters on all CPUs. */
static int fsl7450_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
int num_ctrs)
{
int i;
num_pmcs = num_ctrs;
/* Our counters count up, and "count" refers to
* how much before the next interrupt, and we interrupt
* on overflow. So we calculate the starting value
* which will give us "count" until overflow.
* Then we set the events on the enabled counters */
for (i = 0; i < num_ctrs; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count;
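/* For example, ctr[i].count == 100000 (0x186a0) gives a start value
 * of 0x7ffe7960; after 100000 increments the counter reaches
 * 0x80000000, which classic_ctr_read() sees as a negative value and
 * which raises the overflow interrupt.
 */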
/* Set events for Counters 1 & 2 */
mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event)
| mmcr0_event2(ctr[1].event);
/* Setup user/kernel bits */
if (sys->enable_kernel)
mmcr0_val &= ~(MMCR0_FCS);
if (sys->enable_user)
mmcr0_val &= ~(MMCR0_FCP);
/* Set events for Counters 3-6 */
mmcr1_val = mmcr1_event3(ctr[2].event)
| mmcr1_event4(ctr[3].event);
if (num_ctrs > 4)
mmcr1_val |= mmcr1_event5(ctr[4].event)
| mmcr1_event6(ctr[5].event);
mmcr2_val = 0;
return 0;
}
/* Sets the counters on this CPU to the chosen values, and starts them */
static int fsl7450_start(struct op_counter_config *ctr)
{
int i;
mtmsr(mfmsr() | MSR_PMM);
for (i = 0; i < num_pmcs; ++i) {
if (ctr[i].enabled)
classic_ctr_write(i, reset_value[i]);
else
classic_ctr_write(i, 0);
}
/* Clear the freeze bit, and enable the interrupt.
* The counters won't actually start until the rfi clears
* the PMM bit */
pmc_start_ctrs();
oprofile_running = 1;
return 0;
}
/* Stop the counters on this CPU */
static void fsl7450_stop(void)
{
/* freeze counters */
pmc_stop_ctrs();
oprofile_running = 0;
mb();
}
/* Handle the interrupt on this CPU, and log a sample for each
* event that triggered the interrupt */
static void fsl7450_handle_interrupt(struct pt_regs *regs,
struct op_counter_config *ctr)
{
unsigned long pc;
int is_kernel;
int val;
int i;
/* set the PMM bit (see comment below) */
mtmsr(mfmsr() | MSR_PMM);
pc = mfspr(SPRN_SIAR);
is_kernel = is_kernel_addr(pc);
for (i = 0; i < num_pmcs; ++i) {
val = classic_ctr_read(i);
if (val < 0) {
if (oprofile_running && ctr[i].enabled) {
oprofile_add_ext_sample(pc, regs, i, is_kernel);
classic_ctr_write(i, reset_value[i]);
} else {
classic_ctr_write(i, 0);
}
}
}
/* The freeze bit was set by the interrupt. */
/* Clear the freeze bit, and reenable the interrupt.
* The counters won't actually start until the rfi clears
* the PMM bit */
pmc_start_ctrs();
}
struct op_powerpc_model op_model_7450 = {
.reg_setup = fsl7450_reg_setup,
.cpu_setup = fsl7450_cpu_setup,
.start = fsl7450_start,
.stop = fsl7450_stop,
.handle_interrupt = fsl7450_handle_interrupt,
};
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale Embedded oprofile support, based on ppc64 oprofile support
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
*
* Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
*
* Author: Andy Fleming
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
*/
#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/reg_fsl_emb.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>
static unsigned long reset_value[OP_MAX_COUNTER];
static int num_counters;
static int oprofile_running;
static inline u32 get_pmlca(int ctr)
{
u32 pmlca;
switch (ctr) {
case 0:
pmlca = mfpmr(PMRN_PMLCA0);
break;
case 1:
pmlca = mfpmr(PMRN_PMLCA1);
break;
case 2:
pmlca = mfpmr(PMRN_PMLCA2);
break;
case 3:
pmlca = mfpmr(PMRN_PMLCA3);
break;
case 4:
pmlca = mfpmr(PMRN_PMLCA4);
break;
case 5:
pmlca = mfpmr(PMRN_PMLCA5);
break;
default:
panic("Bad ctr number\n");
}
return pmlca;
}
static inline void set_pmlca(int ctr, u32 pmlca)
{
switch (ctr) {
case 0:
mtpmr(PMRN_PMLCA0, pmlca);
break;
case 1:
mtpmr(PMRN_PMLCA1, pmlca);
break;
case 2:
mtpmr(PMRN_PMLCA2, pmlca);
break;
case 3:
mtpmr(PMRN_PMLCA3, pmlca);
break;
case 4:
mtpmr(PMRN_PMLCA4, pmlca);
break;
case 5:
mtpmr(PMRN_PMLCA5, pmlca);
break;
default:
panic("Bad ctr number\n");
}
}
static inline unsigned int ctr_read(unsigned int i)
{
switch (i) {
case 0:
return mfpmr(PMRN_PMC0);
case 1:
return mfpmr(PMRN_PMC1);
case 2:
return mfpmr(PMRN_PMC2);
case 3:
return mfpmr(PMRN_PMC3);
case 4:
return mfpmr(PMRN_PMC4);
case 5:
return mfpmr(PMRN_PMC5);
default:
return 0;
}
}
static inline void ctr_write(unsigned int i, unsigned int val)
{
switch (i) {
case 0:
mtpmr(PMRN_PMC0, val);
break;
case 1:
mtpmr(PMRN_PMC1, val);
break;
case 2:
mtpmr(PMRN_PMC2, val);
break;
case 3:
mtpmr(PMRN_PMC3, val);
break;
case 4:
mtpmr(PMRN_PMC4, val);
break;
case 5:
mtpmr(PMRN_PMC5, val);
break;
default:
break;
}
}
static void init_pmc_stop(int ctr)
{
u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
PMLCA_FCM1 | PMLCA_FCM0);
u32 pmlcb = 0;
switch (ctr) {
case 0:
mtpmr(PMRN_PMLCA0, pmlca);
mtpmr(PMRN_PMLCB0, pmlcb);
break;
case 1:
mtpmr(PMRN_PMLCA1, pmlca);
mtpmr(PMRN_PMLCB1, pmlcb);
break;
case 2:
mtpmr(PMRN_PMLCA2, pmlca);
mtpmr(PMRN_PMLCB2, pmlcb);
break;
case 3:
mtpmr(PMRN_PMLCA3, pmlca);
mtpmr(PMRN_PMLCB3, pmlcb);
break;
case 4:
mtpmr(PMRN_PMLCA4, pmlca);
mtpmr(PMRN_PMLCB4, pmlcb);
break;
case 5:
mtpmr(PMRN_PMLCA5, pmlca);
mtpmr(PMRN_PMLCB5, pmlcb);
break;
default:
panic("Bad ctr number!\n");
}
}
static void set_pmc_event(int ctr, int event)
{
u32 pmlca;
pmlca = get_pmlca(ctr);
pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
((event << PMLCA_EVENT_SHIFT) &
PMLCA_EVENT_MASK);
set_pmlca(ctr, pmlca);
}
static void set_pmc_user_kernel(int ctr, int user, int kernel)
{
u32 pmlca;
pmlca = get_pmlca(ctr);
if (user)
pmlca &= ~PMLCA_FCU;
else
pmlca |= PMLCA_FCU;
if (kernel)
pmlca &= ~PMLCA_FCS;
else
pmlca |= PMLCA_FCS;
set_pmlca(ctr, pmlca);
}
static void set_pmc_marked(int ctr, int mark0, int mark1)
{
u32 pmlca = get_pmlca(ctr);
if (mark0)
pmlca &= ~PMLCA_FCM0;
else
pmlca |= PMLCA_FCM0;
if (mark1)
pmlca &= ~PMLCA_FCM1;
else
pmlca |= PMLCA_FCM1;
set_pmlca(ctr, pmlca);
}
static void pmc_start_ctr(int ctr, int enable)
{
u32 pmlca = get_pmlca(ctr);
pmlca &= ~PMLCA_FC;
if (enable)
pmlca |= PMLCA_CE;
else
pmlca &= ~PMLCA_CE;
set_pmlca(ctr, pmlca);
}
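/* PMGC0 is the embedded perfmon's global control register: FAC
 * freezes all counters, FCECE freezes them when an enabled condition
 * or event occurs, and PMIE enables the performance monitor interrupt.
 */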
static void pmc_start_ctrs(int enable)
{
u32 pmgc0 = mfpmr(PMRN_PMGC0);
pmgc0 &= ~PMGC0_FAC;
pmgc0 |= PMGC0_FCECE;
if (enable)
pmgc0 |= PMGC0_PMIE;
else
pmgc0 &= ~PMGC0_PMIE;
mtpmr(PMRN_PMGC0, pmgc0);
}
static void pmc_stop_ctrs(void)
{
u32 pmgc0 = mfpmr(PMRN_PMGC0);
pmgc0 |= PMGC0_FAC;
pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
mtpmr(PMRN_PMGC0, pmgc0);
}
static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
{
int i;
/* freeze all counters */
pmc_stop_ctrs();
for (i = 0; i < num_counters; i++) {
init_pmc_stop(i);
set_pmc_event(i, ctr[i].event);
set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
}
return 0;
}
static int fsl_emb_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
int num_ctrs)
{
int i;
num_counters = num_ctrs;
/* Our counters count up, and "count" refers to
* how much before the next interrupt, and we interrupt
* on overflow. So we calculate the starting value
* which will give us "count" until overflow.
* Then we set the events on the enabled counters */
for (i = 0; i < num_counters; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count;
return 0;
}
static int fsl_emb_start(struct op_counter_config *ctr)
{
int i;
mtmsr(mfmsr() | MSR_PMM);
for (i = 0; i < num_counters; ++i) {
if (ctr[i].enabled) {
ctr_write(i, reset_value[i]);
/* Set each enabled counter to only
* count when the Mark bit is *not* set */
set_pmc_marked(i, 1, 0);
pmc_start_ctr(i, 1);
} else {
ctr_write(i, 0);
/* Set the ctr to be stopped */
pmc_start_ctr(i, 0);
}
}
/* Clear the freeze bit, and enable the interrupt.
* The counters won't actually start until the rfi clears
* the PMM bit */
pmc_start_ctrs(1);
oprofile_running = 1;
pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
mfpmr(PMRN_PMGC0));
return 0;
}
static void fsl_emb_stop(void)
{
/* freeze counters */
pmc_stop_ctrs();
oprofile_running = 0;
pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
mfpmr(PMRN_PMGC0));
mb();
}
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
struct op_counter_config *ctr)
{
unsigned long pc;
int is_kernel;
int val;
int i;
pc = regs->nip;
is_kernel = is_kernel_addr(pc);
for (i = 0; i < num_counters; ++i) {
val = ctr_read(i);
if (val < 0) {
if (oprofile_running && ctr[i].enabled) {
oprofile_add_ext_sample(pc, regs, i, is_kernel);
ctr_write(i, reset_value[i]);
} else {
ctr_write(i, 0);
}
}
}
/* The freeze bit was set by the interrupt. */
/* Clear the freeze bit, and reenable the interrupt. The
* counters won't actually start until the rfi clears the PMM
* bit. The PMM bit should not be set until after the interrupt
* is cleared to avoid it getting lost in some hypervisor
* environments.
*/
mtmsr(mfmsr() | MSR_PMM);
pmc_start_ctrs(1);
}
struct op_powerpc_model op_model_fsl_emb = {
.reg_setup = fsl_emb_reg_setup,
.cpu_setup = fsl_emb_cpu_setup,
.start = fsl_emb_start,
.stop = fsl_emb_stop,
.handle_interrupt = fsl_emb_handle_interrupt,
};
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2007 PA Semi, Inc
*
* Author: Shashi Rao, PA Semi
*
* Maintained by: Olof Johansson <olof@lixom.net>
*
* Based on arch/powerpc/oprofile/op_model_power4.c
*/
#include <linux/oprofile.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/oprofile_impl.h>
#include <asm/reg.h>
static unsigned char oprofile_running;
/* mmcr values are set in pa6t_reg_setup, used in pa6t_cpu_setup */
static u64 mmcr0_val;
static u64 mmcr1_val;
/* inited in pa6t_reg_setup */
static u64 reset_value[OP_MAX_COUNTER];
static inline u64 ctr_read(unsigned int i)
{
switch (i) {
case 0:
return mfspr(SPRN_PA6T_PMC0);
case 1:
return mfspr(SPRN_PA6T_PMC1);
case 2:
return mfspr(SPRN_PA6T_PMC2);
case 3:
return mfspr(SPRN_PA6T_PMC3);
case 4:
return mfspr(SPRN_PA6T_PMC4);
case 5:
return mfspr(SPRN_PA6T_PMC5);
default:
printk(KERN_ERR "ctr_read called with bad arg %u\n", i);
return 0;
}
}
static inline void ctr_write(unsigned int i, u64 val)
{
switch (i) {
case 0:
mtspr(SPRN_PA6T_PMC0, val);
break;
case 1:
mtspr(SPRN_PA6T_PMC1, val);
break;
case 2:
mtspr(SPRN_PA6T_PMC2, val);
break;
case 3:
mtspr(SPRN_PA6T_PMC3, val);
break;
case 4:
mtspr(SPRN_PA6T_PMC4, val);
break;
case 5:
mtspr(SPRN_PA6T_PMC5, val);
break;
default:
printk(KERN_ERR "ctr_write called with bad arg %u\n", i);
break;
}
}
/* precompute the values to stuff in the hardware registers */
static int pa6t_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
int num_ctrs)
{
int pmc;
/*
* adjust the mmcr0.en[0-5] and mmcr0.inten[0-5] values obtained from the
* event_mappings file by turning off the counters that the user doesn't
* care about
*
* setup user and kernel profiling
*/
for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++)
if (!ctr[pmc].enabled) {
sys->mmcr0 &= ~(0x1UL << pmc);
sys->mmcr0 &= ~(0x1UL << (pmc+12));
pr_debug("turned off counter %u\n", pmc);
}
if (sys->enable_kernel)
sys->mmcr0 |= PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN;
else
sys->mmcr0 &= ~(PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN);
if (sys->enable_user)
sys->mmcr0 |= PA6T_MMCR0_PREN;
else
sys->mmcr0 &= ~PA6T_MMCR0_PREN;
/*
* The performance counter event settings are given in the mmcr0 and
* mmcr1 values passed from the user in the op_system_config
* structure (sys variable).
*/
mmcr0_val = sys->mmcr0;
mmcr1_val = sys->mmcr1;
pr_debug("mmcr0_val inited to %016lx\n", sys->mmcr0);
pr_debug("mmcr1_val inited to %016lx\n", sys->mmcr1);
for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) {
/* counters are 40 bit. Move to cputable at some point? */
reset_value[pmc] = (0x1UL << 39) - ctr[pmc].count;
pr_debug("reset_value for pmc%u inited to 0x%llx\n",
pmc, reset_value[pmc]);
}
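/* For example, ctr[pmc].count == 100000 (0x186a0) gives a start value
 * of 0x7ffffe7960; after 100000 events bit 39 sets, which is exactly
 * the overflow test (val & (0x1UL << 39)) in pa6t_handle_interrupt().
 */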
return 0;
}
/* configure registers on this cpu */
static int pa6t_cpu_setup(struct op_counter_config *ctr)
{
u64 mmcr0 = mmcr0_val;
u64 mmcr1 = mmcr1_val;
/* Default is all PMCs off */
mmcr0 &= ~(0x3FUL);
mtspr(SPRN_PA6T_MMCR0, mmcr0);
/* program selected programmable events in */
mtspr(SPRN_PA6T_MMCR1, mmcr1);
pr_debug("setup on cpu %d, mmcr0 %016lx\n", smp_processor_id(),
mfspr(SPRN_PA6T_MMCR0));
pr_debug("setup on cpu %d, mmcr1 %016lx\n", smp_processor_id(),
mfspr(SPRN_PA6T_MMCR1));
return 0;
}
static int pa6t_start(struct op_counter_config *ctr)
{
int i;
/* Hold off event counting until rfid */
u64 mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;
for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
if (ctr[i].enabled)
ctr_write(i, reset_value[i]);
else
ctr_write(i, 0UL);
mtspr(SPRN_PA6T_MMCR0, mmcr0);
oprofile_running = 1;
pr_debug("start on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
return 0;
}
static void pa6t_stop(void)
{
u64 mmcr0;
/* freeze counters */
mmcr0 = mfspr(SPRN_PA6T_MMCR0);
mmcr0 |= PA6T_MMCR0_FCM0;
mtspr(SPRN_PA6T_MMCR0, mmcr0);
oprofile_running = 0;
pr_debug("stop on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
}
/* handle the perfmon overflow vector */
static void pa6t_handle_interrupt(struct pt_regs *regs,
struct op_counter_config *ctr)
{
unsigned long pc = mfspr(SPRN_PA6T_SIAR);
int is_kernel = is_kernel_addr(pc);
u64 val;
int i;
u64 mmcr0;
/* disable perfmon counting until rfid */
mmcr0 = mfspr(SPRN_PA6T_MMCR0);
mtspr(SPRN_PA6T_MMCR0, mmcr0 | PA6T_MMCR0_HANDDIS);
/* Record samples. We've got one global bit for whether a sample
* was taken, so add it for any counter that triggered overflow.
*/
for (i = 0; i < cur_cpu_spec->num_pmcs; i++) {
val = ctr_read(i);
if (val & (0x1UL << 39)) { /* Overflow bit set */
if (oprofile_running && ctr[i].enabled) {
if (mmcr0 & PA6T_MMCR0_SIARLOG)
oprofile_add_ext_sample(pc, regs, i, is_kernel);
ctr_write(i, reset_value[i]);
} else {
ctr_write(i, 0UL);
}
}
}
/* Restore mmcr0 to a good known value since the PMI changes it */
mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;
mtspr(SPRN_PA6T_MMCR0, mmcr0);
}
struct op_powerpc_model op_model_pa6t = {
.reg_setup = pa6t_reg_setup,
.cpu_setup = pa6t_cpu_setup,
.start = pa6t_start,
.stop = pa6t_stop,
.handle_interrupt = pa6t_handle_interrupt,
};