Commit 36af1eb0 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64: Add HW PMC support to oprofile

From: Anton Blanchard <anton@samba.org>

Add HW PMC support to oprofile
parent 12c9ae0d
......@@ -12,6 +12,71 @@
/* PMC stuff */
/*
* Enabling PMCs will slow partition context switch times so we only do
* it the first time we write to the PMCs.
*/
static DEFINE_PER_CPU(char, pmcs_enabled);
#ifdef CONFIG_PPC_ISERIES
/*
 * iSeries stub: enabling the performance monitor counters is not yet
 * wired up for this platform.
 */
void ppc64_enable_pmcs(void)
{
/* XXX Implement for iseries */
}
#else
/*
 * Enable the hardware performance monitor counters on this CPU.
 * Deferred until first use because (per the comment above) enabled PMCs
 * slow partition context-switch times.  Runs once per CPU, gated by the
 * per-cpu pmcs_enabled flag.
 */
void ppc64_enable_pmcs(void)
{
unsigned long hid0;
unsigned long set, reset;
int ret;
/* Only need to enable them once */
if (__get_cpu_var(pmcs_enabled))
return;
__get_cpu_var(pmcs_enabled) = 1;
switch (systemcfg->platform) {
case PLATFORM_PSERIES:
/* Bare-metal pSeries: set HID0 bit 20 (IBM numbering from the
 * MSB of the 64-bit register) -- presumably the PMC-enable bit;
 * confirm against the POWER4 HID0 layout. */
hid0 = mfspr(HID0);
hid0 |= 1UL << (63 - 20);
/* POWER4 requires the following sequence */
asm volatile(
"sync\n"
"mtspr %1, %0\n"
"mfspr %0, %1\n"
"mfspr %0, %1\n"
"mfspr %0, %1\n"
"mfspr %0, %1\n"
"mfspr %0, %1\n"
"mfspr %0, %1\n"
"isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
"memory");
break;
case PLATFORM_PSERIES_LPAR:
/* In an LPAR the hypervisor owns HID0; ask it to enable the
 * perfmon facility via the H_PERFMON hcall. */
set = 1UL << 63;
reset = 0;
ret = plpar_hcall_norets(H_PERFMON, set, reset);
if (ret)
printk(KERN_ERR "H_PERFMON call returned %d",
ret);
break;
default:
break;
}
/* instruct hypervisor to maintain PMCs */
if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
/* NOTE(review): raw byte poke at offset 0xBB of the lppaca --
 * presumably the "maintain PMCs" flag; verify against the
 * xLpPaca layout rather than trusting the magic offset. */
char *ptr = (char *)&paca[smp_processor_id()].xLpPaca;
ptr[0xBB] = 1;
}
}
#endif
/* XXX convert to rusty's on_one_cpu */
static unsigned long run_on_cpu(unsigned long cpu,
unsigned long (*func)(unsigned long),
......@@ -38,6 +103,7 @@ static unsigned long read_##NAME(unsigned long junk) \
} \
static unsigned long write_##NAME(unsigned long val) \
{ \
ppc64_enable_pmcs(); \
mtspr(ADDRESS, val); \
return 0; \
} \
......
......@@ -449,16 +449,16 @@ SingleStepException(struct pt_regs *regs)
_exception(SIGTRAP, &info, regs);
}
/* Default PMC-exception handler: silently ignore the interrupt. */
static void dummy_perf(struct pt_regs *regs)
{
}
/* Hook a profiler (e.g. oprofile) installs to claim PMC exceptions. */
void (*perf_irq)(struct pt_regs *) = dummy_perf;
/*
 * Performance-monitor exception entry point.
 *
 * Dispatch to whichever handler is installed in perf_irq (dummy_perf by
 * default; oprofile replaces it at setup time).
 *
 * The rendered diff left the old body merged in: it also built a SIGTRAP
 * siginfo and called _exception() on every PMC overflow, which would
 * signal (and typically kill) whatever task happened to be running.
 * The commit's intent is to delegate entirely to perf_irq, so the
 * SIGTRAP delivery is removed.
 */
void
PerformanceMonitorException(struct pt_regs *regs)
{
	perf_irq(regs);
}
void
......
......@@ -6,4 +6,4 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
oprofilefs.o oprofile_stats.o \
timer_int.o )
# The old init.o stub is superseded by the real driver objects; the
# stale first assignment was dead (immediately overridden) and is dropped.
oprofile-y := $(DRIVER_OBJS) common.o op_model_rs64.o op_model_power4.o
/*
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
*
* Based on alpha version.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include "op_impl.h"
extern struct op_ppc64_model op_model_rs64;
extern struct op_ppc64_model op_model_power4;
static struct op_ppc64_model *model;
extern void (*perf_irq)(struct pt_regs *);
static void (*save_perf_irq)(struct pt_regs *);
static struct op_counter_config ctr[OP_MAX_COUNTER];
static struct op_system_config sys;
/* perf_irq hook: forward PMC exceptions to the selected CPU model. */
static void op_handle_interrupt(struct pt_regs *regs)
{
model->handle_interrupt(regs, ctr);
}
/*
 * oprofile "setup" callback: install our PMC interrupt handler, compute
 * the per-counter register values from the oprofilefs settings, then
 * program the PMU on every CPU.  Always succeeds (returns 0).
 */
static int op_ppc64_setup(void)
{
/* Install our interrupt handler into the existing hook. */
save_perf_irq = perf_irq;
perf_irq = op_handle_interrupt;
/* Ensure the new handler is visible before any CPU arms its counters. */
mb();
/* Pre-compute the values to stuff in the hardware registers. */
model->reg_setup(ctr, &sys, model->num_counters);
/* Configure the registers on all cpus. */
on_each_cpu(model->cpu_setup, NULL, 0, 1);
return 0;
}
/*
 * oprofile "shutdown" callback.  Deliberately a no-op: restoring
 * save_perf_irq before all pending PMC exceptions have drained could
 * leave an exception unhandled, so the handler is intentionally left
 * installed (the restore code is kept under #if 0 for reference).
 */
static void op_ppc64_shutdown(void)
{
/*
 * We need to be sure we have cleared all pending exceptions before
 * removing the interrupt handler. For the moment we play it safe and
 * leave it in
 */
#if 0
mb();
/* Remove our interrupt handler. We may be removing this module. */
perf_irq = save_perf_irq;
#endif
}
/* Per-CPU helper run via on_each_cpu(): start this CPU's counters. */
static void op_ppc64_cpu_start(void *dummy)
{
model->start(ctr);
}
/* oprofile "start" callback: start counting on every CPU.  Returns 0. */
static int op_ppc64_start(void)
{
on_each_cpu(op_ppc64_cpu_start, NULL, 0, 1);
return 0;
}
/*
 * Per-CPU helper run via on_each_cpu(): freeze this CPU's counters.
 * Dropped the `inline` qualifier: the function's address is taken for
 * the on_each_cpu() callback, so inlining never applies, and this now
 * matches its sibling op_ppc64_cpu_start.
 */
static void op_ppc64_cpu_stop(void *dummy)
{
	model->stop();
}
/* oprofile "stop" callback: freeze counting on every CPU. */
static void op_ppc64_stop(void)
{
on_each_cpu(op_ppc64_cpu_stop, NULL, 0, 1);
}
/*
 * Populate oprofilefs: one numbered directory per hardware counter
 * (enabled/event/count/kernel/user/unit_mask entries), plus global
 * enable_kernel / enable_user switches at the root.  Returns 0.
 */
static int op_ppc64_create_files(struct super_block *sb, struct dentry *root)
{
int i;
for (i = 0; i < model->num_counters; ++i) {
struct dentry *dir;
/* "%d" of a counter index; 3 bytes covers 0..99 plus NUL. */
char buf[3];
snprintf(buf, sizeof buf, "%d", i);
dir = oprofilefs_mkdir(sb, root, buf);
oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
/*
 * We dont support per counter user/kernel selection, but
 * we leave the entries because userspace expects them
 */
oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
}
oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
return 0;
}
/* Callback table handed to the generic oprofile core. */
static struct oprofile_operations oprof_ppc64_ops = {
.create_files = op_ppc64_create_files,
.setup = op_ppc64_setup,
.shutdown = op_ppc64_shutdown,
.start = op_ppc64_start,
.stop = op_ppc64_stop,
.cpu_type = NULL /* To be filled in below. */
};
/*
 * Architecture entry point: pick the counter model and userspace
 * cpu_type string from the processor version register (PVR), then hand
 * the filled-in operations table back to the oprofile core.
 *
 * Returns 0 on success, -ENODEV for unrecognized CPUs.
 */
int __init oprofile_arch_init(struct oprofile_operations **ops)
{
unsigned int pvr;
pvr = _get_PVR();
switch (PVR_VER(pvr)) {
case PV_630:
case PV_630p:
model = &op_model_rs64;
model->num_counters = 8;
oprof_ppc64_ops.cpu_type = "ppc64/power3";
break;
case PV_NORTHSTAR:
case PV_PULSAR:
case PV_ICESTAR:
case PV_SSTAR:
model = &op_model_rs64;
model->num_counters = 8;
oprof_ppc64_ops.cpu_type = "ppc64/rs64";
break;
case PV_POWER4:
case PV_POWER4p:
model = &op_model_power4;
model->num_counters = 8;
oprof_ppc64_ops.cpu_type = "ppc64/power4";
break;
case PV_GPUL:
model = &op_model_power4;
model->num_counters = 8;
oprof_ppc64_ops.cpu_type = "ppc64/970";
break;
case PV_POWER5:
/* POWER5 uses the POWER4-style PMU but exposes only 6 PMCs. */
model = &op_model_power4;
model->num_counters = 6;
oprof_ppc64_ops.cpu_type = "ppc64/power5";
break;
default:
return -ENODEV;
}
*ops = &oprof_ppc64_ops;
printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
oprof_ppc64_ops.cpu_type);
return 0;
}
/*
 * Nothing to undo: the perf_irq hook is intentionally left installed
 * (see op_ppc64_shutdown) and all other state is static.
 */
void oprofile_arch_exit(void)
{
}
/**
* @file init.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#include <linux/kernel.h>
#include <linux/oprofile.h>
#include <linux/init.h>
extern void timer_init(struct oprofile_operations ** ops);
/*
 * Old init.c stub: no hardware support here, so report -ENODEV and let
 * the generic core fall back (the timer_init extern above is unused).
 */
int __init oprofile_arch_init(struct oprofile_operations ** ops)
{
return -ENODEV;
}
/* Old init.c stub: nothing was initialized, nothing to tear down. */
void oprofile_arch_exit(void)
{
}
/*
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
*
* Based on alpha version.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef OP_IMPL_H
#define OP_IMPL_H 1
#define OP_MAX_COUNTER 8
#define MSR_PMM (1UL << (63 - 61))
/* freeze counters. set to 1 on a perfmon exception */
#define MMCR0_FC (1UL << (31 - 0))
/* freeze counters while MSR mark = 1 */
#define MMCR0_FCM1 (1UL << (31 - 3))
/* performance monitor exception enable */
#define MMCR0_PMXE (1UL << (31 - 5))
/* freeze counters on enabled condition or event */
#define MMCR0_FCECE (1UL << (31 - 6))
/* performance monitor alert has occurred, set to 0 after handling exception */
#define MMCR0_PMAO (1UL << (31 - 24))
/* PMC1 count enable*/
#define MMCR0_PMC1INTCONTROL (1UL << (31 - 16))
/* PMCn count enable*/
#define MMCR0_PMCNINTCONTROL (1UL << (31 - 17))
/* state of MSR HV when SIAR set */
#define MMCRA_SIHV (1UL << (63 - 35))
/* state of MSR PR when SIAR set */
#define MMCRA_SIPR (1UL << (63 - 36))
/* enable sampling */
#define MMCRA_SAMPLE_ENABLE (1UL << (63 - 63))
/* Per-counter configuration as set via oprofilefs. */
/* Per-counter configuration as set via oprofilefs. */
struct op_counter_config {
unsigned long valid;
unsigned long enabled;   /* count on this PMC if non-zero */
unsigned long event;     /* hardware event-select code */
unsigned long count;     /* events between samples (reset interval) */
unsigned long kernel;
/* We dont support per counter user/kernel selection */
unsigned long user;
unsigned long unit_mask;
};
/* System-wide configuration as set via oprofilefs. */
/* System-wide configuration as set via oprofilefs. */
struct op_system_config {
unsigned long enable_kernel;  /* profile kernel-mode execution */
unsigned long enable_user;    /* profile user-mode execution */
};
/* Per-arch configuration */
/* Per-arch configuration: the ops each PMU family must implement. */
struct op_ppc64_model {
/* translate oprofilefs settings into register values (once, globally) */
void (*reg_setup) (struct op_counter_config *,
struct op_system_config *,
int num_counters);
/* program the PMU on one CPU (run via on_each_cpu) */
void (*cpu_setup) (void *);
void (*start) (struct op_counter_config *);
void (*stop) (void);
void (*handle_interrupt) (struct pt_regs *,
struct op_counter_config *);
int num_counters;
};
/*
 * Read PMC i (0-based).  A switch is required because each SPRN_PMCn
 * must be a compile-time constant for mfspr.  Unknown indices read 0.
 */
static inline unsigned int ctr_read(unsigned int i)
{
switch(i) {
case 0:
return mfspr(SPRN_PMC1);
case 1:
return mfspr(SPRN_PMC2);
case 2:
return mfspr(SPRN_PMC3);
case 3:
return mfspr(SPRN_PMC4);
case 4:
return mfspr(SPRN_PMC5);
case 5:
return mfspr(SPRN_PMC6);
case 6:
return mfspr(SPRN_PMC7);
case 7:
return mfspr(SPRN_PMC8);
default:
return 0;
}
}
/*
 * Write PMC i (0-based).  Same compile-time-constant constraint as
 * ctr_read; out-of-range indices are silently ignored.
 */
static inline void ctr_write(unsigned int i, unsigned int val)
{
switch(i) {
case 0:
mtspr(SPRN_PMC1, val);
break;
case 1:
mtspr(SPRN_PMC2, val);
break;
case 2:
mtspr(SPRN_PMC3, val);
break;
case 3:
mtspr(SPRN_PMC4, val);
break;
case 4:
mtspr(SPRN_PMC5, val);
break;
case 5:
mtspr(SPRN_PMC6, val);
break;
case 6:
mtspr(SPRN_PMC7, val);
break;
case 7:
mtspr(SPRN_PMC8, val);
break;
default:
break;
}
}
#endif
/*
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/systemcfg.h>
#include <asm/rtas.h>
#define dbg(args...) printk(args)
#include "op_impl.h"
static unsigned long reset_value[OP_MAX_COUNTER];
static int num_counters;
/*
 * Pre-compute per-counter reload values.  Starting a counter at
 * 0x80000000 - count makes its sign bit set after `count` events,
 * which is what the interrupt handler's (val < 0) overflow test keys on.
 */
static void power4_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
int num_ctrs)
{
int i;
num_counters = num_ctrs;
for (i = 0; i < num_counters; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count;
/* XXX setup user and kernel profiling */
}
extern void ppc64_enable_pmcs(void);
/*
 * Program one CPU's PMU for POWER4-family profiling: freeze counters
 * first, then enable overflow exceptions and sampling while still
 * frozen; power4_start later drops the freeze bit.
 */
static void power4_cpu_setup(void *unused)
{
unsigned int mmcr0 = mfspr(SPRN_MMCR0);
unsigned long mmcra = mfspr(SPRN_MMCRA);
ppc64_enable_pmcs();
/* set the freeze bit */
mmcr0 |= MMCR0_FC;
mtspr(SPRN_MMCR0, mmcr0);
/* freeze-on-MSR-mark, exception enable, freeze-on-enabled-condition */
mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
mmcr0 |= MMCR0_PMC1INTCONTROL|MMCR0_PMCNINTCONTROL;
mtspr(SPRN_MMCR0, mmcr0);
mmcra |= MMCRA_SAMPLE_ENABLE;
mtspr(SPRN_MMCRA, mmcra);
dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
mfspr(SPRN_MMCR0));
dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
mfspr(SPRN_MMCR1));
dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(),
mfspr(SPRN_MMCRA));
}
/*
 * Load the reload values into enabled counters (disabled ones get 0)
 * and unfreeze.  MSR_PMM keeps counting suppressed until the rfid back
 * out of this context clears it.
 */
static void power4_start(struct op_counter_config *ctr)
{
int i;
unsigned int mmcr0;
/* set the PMM bit (see comment below) */
mtmsrd(mfmsr() | MSR_PMM);
for (i = 0; i < num_counters; ++i) {
if (ctr[i].enabled) {
ctr_write(i, reset_value[i]);
} else {
ctr_write(i, 0);
}
}
mmcr0 = mfspr(SPRN_MMCR0);
/*
 * We must clear the PMAO bit on some (GQ) chips. Just do it
 * all the time
 */
mmcr0 &= ~MMCR0_PMAO;
/*
 * now clear the freeze bit, counting will not start until we
 * rfid from this exception, because only at that point will
 * the PMM bit be cleared
 */
mmcr0 &= ~MMCR0_FC;
mtspr(SPRN_MMCR0, mmcr0);
dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
}
/* Freeze all counters on this CPU by setting MMCR0[FC]. */
static void power4_stop(void)
{
unsigned int mmcr0;
/* freeze counters */
mmcr0 = mfspr(SPRN_MMCR0);
mmcr0 |= MMCR0_FC;
mtspr(SPRN_MMCR0, mmcr0);
dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
mb();
}
/*
 * Fake functions used by canonicalize_pc: samples whose PC cannot be
 * attributed (hypervisor, RTAS, unknown kernel addresses) are credited
 * to these named symbols so they show up as distinct buckets in the
 * profile.  __attribute_used__ keeps the compiler from dropping them.
 */
static void __attribute_used__ hypervisor_bucket(void)
{
}
static void __attribute_used__ rtas_bucket(void)
{
}
static void __attribute_used__ kernel_unknown_bucket(void)
{
}
/* XXX Not currently working */
static int mmcra_has_sihv = 0;
/*
* On GQ and newer the MMCRA stores the HV and PR bits at the time
* the SIAR was sampled. We use that to work out if the SIAR was sampled in
* the hypervisor, our exception vectors or RTAS.
*/
/*
 * Fetch the sampled instruction address (SIAR) and canonicalize it:
 * samples taken in the hypervisor, the real-mode exception vectors,
 * RTAS, or otherwise unattributable low addresses are redirected to the
 * corresponding bucket symbol above.  Requires the MMCRA SIHV/SIPR
 * snapshot bits (mmcra_has_sihv) to do any of this; otherwise the raw
 * SIAR value is returned as-is.
 */
static unsigned long get_pc(void)
{
unsigned long pc = mfspr(SPRN_SIAR);
unsigned long mmcra;
/* Cant do much about it */
if (!mmcra_has_sihv)
return pc;
mmcra = mfspr(SPRN_MMCRA);
/* Were we in the hypervisor? */
if ((systemcfg->platform == PLATFORM_PSERIES_LPAR) &&
(mmcra & MMCRA_SIHV))
/* function descriptor madness */
return *((unsigned long *)hypervisor_bucket);
/* We were in userspace, nothing to do */
if (mmcra & MMCRA_SIPR)
return pc;
/* Were we in our exception vectors? */
if (pc < 0x4000UL)
return (unsigned long)__va(pc);
#ifdef CONFIG_PPC_PSERIES
/* Were we in RTAS? */
if (pc >= rtas.base && pc < (rtas.base + rtas.size))
/* function descriptor madness */
return *((unsigned long *)rtas_bucket);
#endif
/* Not sure where we were */
if (pc < KERNELBASE)
/* function descriptor madness */
return *((unsigned long *)kernel_unknown_bucket);
return pc;
}
/*
 * Decide whether the sample was taken in kernel context.  When the
 * MMCRA snapshot bits are available, trust SIPR (clear => not problem
 * state, i.e. kernel); otherwise fall back to an address-range test.
 */
static int get_kernel(unsigned long pc)
{
	if (mmcra_has_sihv)
		return (mfspr(SPRN_MMCRA) & MMCRA_SIPR) == 0;

	return pc >= KERNELBASE;
}
/*
 * PMC overflow handler (POWER4 family).  For each counter whose value
 * has its sign bit set (val < 0 after the unsigned->int assignment,
 * i.e. it crossed 0x80000000 and overflowed), log a sample and reload
 * it; then re-arm the exception and unfreeze.
 */
static void power4_handle_interrupt(struct pt_regs *regs,
struct op_counter_config *ctr)
{
unsigned long pc;
int is_kernel;
int val;
int i;
unsigned int cpu = smp_processor_id();
unsigned int mmcr0;
pc = get_pc();
is_kernel = get_kernel(pc);
/* set the PMM bit (see comment below) */
mtmsrd(mfmsr() | MSR_PMM);
for (i = 0; i < num_counters; ++i) {
val = ctr_read(i);
if (val < 0) {
if (ctr[i].enabled) {
oprofile_add_sample(pc, is_kernel, i, cpu);
ctr_write(i, reset_value[i]);
} else {
ctr_write(i, 0);
}
}
}
mmcr0 = mfspr(SPRN_MMCR0);
/* reset the perfmon trigger */
mmcr0 |= MMCR0_PMXE;
/*
 * We must clear the PMAO bit on some (GQ) chips. Just do it
 * all the time
 */
mmcr0 &= ~MMCR0_PMAO;
/*
 * now clear the freeze bit, counting will not start until we
 * rfid from this exception, because only at that point will
 * the PMM bit be cleared
 */
mmcr0 &= ~MMCR0_FC;
mtspr(SPRN_MMCR0, mmcr0);
}
/* POWER4/970/POWER5 model ops, selected by oprofile_arch_init(). */
struct op_ppc64_model op_model_power4 = {
.reg_setup = power4_reg_setup,
.cpu_setup = power4_cpu_setup,
.start = power4_start,
.stop = power4_stop,
.handle_interrupt = power4_handle_interrupt,
};
/*
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#define dbg(args...) printk(args)
#include "op_impl.h"
/*
 * Program the event-select bit-field for counter i (0-based) with event
 * code val.  The PMC1/PMC2 selectors live in MMCR0, PMC3-PMC8 in MMCR1;
 * each case below picks the target register and the field's shift/mask
 * (positions per the RS64/POWER3 perfmon register layout -- not
 * re-derived here).
 */
static void ctrl_write(unsigned int i, unsigned int val)
{
	unsigned int tmp;
	unsigned long shift, mask;

	dbg("ctrl_write %d %x\n", i, val);

	switch(i) {
	case 0:
		tmp = mfspr(SPRN_MMCR0);
		shift = 6;
		mask = 0x7F;
		break;
	case 1:
		tmp = mfspr(SPRN_MMCR0);
		shift = 0;
		mask = 0x3F;
		break;
	case 2:
		tmp = mfspr(SPRN_MMCR1);
		shift = 31 - 4;
		mask = 0x1F;
		break;
	case 3:
		tmp = mfspr(SPRN_MMCR1);
		shift = 31 - 9;
		mask = 0x1F;
		break;
	case 4:
		tmp = mfspr(SPRN_MMCR1);
		shift = 31 - 14;
		mask = 0x1F;
		break;
	case 5:
		tmp = mfspr(SPRN_MMCR1);
		shift = 31 - 19;
		mask = 0x1F;
		break;
	case 6:
		tmp = mfspr(SPRN_MMCR1);
		shift = 31 - 24;
		mask = 0x1F;
		break;
	case 7:
		tmp = mfspr(SPRN_MMCR1);
		shift = 31 - 28;
		mask = 0xF;
		break;
	default:
		/*
		 * There was no default case before, so an out-of-range
		 * counter index left tmp/shift/mask uninitialized
		 * (undefined behaviour) and then scribbled on MMCR1.
		 * Refuse instead.
		 */
		return;
	}

	/* Replace the selected field with the new event code. */
	tmp = tmp & ~(mask << shift);
	tmp |= val << shift;

	switch(i) {
	case 0:
	case 1:
		mtspr(SPRN_MMCR0, tmp);
		break;
	default:
		mtspr(SPRN_MMCR1, tmp);
	}

	dbg("ctrl_write mmcr0 %lx mmcr1 %lx\n", mfspr(SPRN_MMCR0),
	    mfspr(SPRN_MMCR1));
}
static unsigned long reset_value[OP_MAX_COUNTER];
static int num_counters;
/*
 * Pre-compute per-counter reload values: 0x80000000 - count makes the
 * counter's sign bit set after `count` events (the handler's overflow
 * test).  Identical scheme to power4_reg_setup.
 */
static void rs64_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
int num_ctrs)
{
int i;
num_counters = num_ctrs;
for (i = 0; i < num_counters; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count;
/* XXX setup user and kernel profiling */
}
/*
 * Program one CPU's PMU for RS64/POWER3 profiling: reset the MMCRs with
 * the freeze bit held, then enable overflow exceptions while frozen;
 * rs64_start later drops the freeze bit.
 */
static void rs64_cpu_setup(void *unused)
{
unsigned int mmcr0;
/* reset MMCR0 and set the freeze bit */
mmcr0 = MMCR0_FC;
mtspr(SPRN_MMCR0, mmcr0);
/* reset MMCR1, MMCRA */
mtspr(SPRN_MMCR1, 0);
/* MMCRA only exists on CPUs that advertise it */
if (cur_cpu_spec->cpu_features & CPU_FTR_MMCRA)
mtspr(SPRN_MMCRA, 0);
mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
/* Only applies to POWER3, but should be safe on RS64 */
mmcr0 |= MMCR0_PMC1INTCONTROL|MMCR0_PMCNINTCONTROL;
mtspr(SPRN_MMCR0, mmcr0);
dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
mfspr(SPRN_MMCR0));
dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
mfspr(SPRN_MMCR1));
}
/*
 * Load reload values and event selectors into enabled counters
 * (disabled ones get 0) and unfreeze.  MSR_PMM keeps counting
 * suppressed until the rfid back out of this context clears it.
 */
static void rs64_start(struct op_counter_config *ctr)
{
int i;
unsigned int mmcr0;
/* set the PMM bit (see comment below) */
mtmsrd(mfmsr() | MSR_PMM);
for (i = 0; i < num_counters; ++i) {
if (ctr[i].enabled) {
ctr_write(i, reset_value[i]);
ctrl_write(i, ctr[i].event);
} else {
ctr_write(i, 0);
}
}
mmcr0 = mfspr(SPRN_MMCR0);
/*
 * now clear the freeze bit, counting will not start until we
 * rfid from this exception, because only at that point will
 * the PMM bit be cleared
 */
mmcr0 &= ~MMCR0_FC;
mtspr(SPRN_MMCR0, mmcr0);
dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
}
/* Freeze all counters on this CPU by setting MMCR0[FC]. */
static void rs64_stop(void)
{
unsigned int mmcr0;
/* freeze counters */
mmcr0 = mfspr(SPRN_MMCR0);
mmcr0 |= MMCR0_FC;
mtspr(SPRN_MMCR0, mmcr0);
dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
mb();
}
/*
 * PMC overflow handler (RS64/POWER3).  For each counter whose value has
 * its sign bit set (val < 0 after the unsigned->int assignment, i.e. it
 * crossed 0x80000000), log a sample and reload it; then re-arm the
 * exception and unfreeze.  Unlike the POWER4 path, the raw SIAR is used
 * directly (no MMCRA SIHV/SIPR canonicalization on these CPUs).
 */
static void rs64_handle_interrupt(struct pt_regs *regs,
struct op_counter_config *ctr)
{
unsigned int mmcr0;
int val;
int i;
unsigned long pc = mfspr(SPRN_SIAR);
int is_kernel = (pc >= KERNELBASE);
unsigned int cpu = smp_processor_id();
/* set the PMM bit (see comment below) */
mtmsrd(mfmsr() | MSR_PMM);
for (i = 0; i < num_counters; ++i) {
val = ctr_read(i);
if (val < 0) {
if (ctr[i].enabled) {
oprofile_add_sample(pc, is_kernel, i, cpu);
ctr_write(i, reset_value[i]);
} else {
ctr_write(i, 0);
}
}
}
mmcr0 = mfspr(SPRN_MMCR0);
/* reset the perfmon trigger */
mmcr0 |= MMCR0_PMXE;
/*
 * now clear the freeze bit, counting will not start until we
 * rfid from this exception, because only at that point will
 * the PMM bit be cleared
 */
mmcr0 &= ~MMCR0_FC;
mtspr(SPRN_MMCR0, mmcr0);
}
/* RS64/POWER3 model ops, selected by oprofile_arch_init(). */
struct op_ppc64_model op_model_rs64 = {
.reg_setup = rs64_reg_setup,
.cpu_setup = rs64_cpu_setup,
.start = rs64_start,
.stop = rs64_stop,
.handle_interrupt = rs64_handle_interrupt,
};
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment