Commit 18280eda authored by David Daney, committed by Ralf Baechle

MIPS: Add code for new system 'paravirt'

For para-virtualized guests running under KVM or an equivalent hypervisor.
Signed-off-by: David Daney <david.daney@cavium.com>
Signed-off-by: Andreas Herrmann <andreas.herrmann@caviumnetworks.com>
Cc: linux-mips@linux-mips.org
Cc: James Hogan <james.hogan@imgtec.com>
Cc: kvm@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/7004/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 90dfdc7c
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Cavium, Inc.
*/
#ifndef __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H
#define cpu_has_4kex 1
#define cpu_has_3k_cache 0
#define cpu_has_tx39_cache 0
#define cpu_has_counter 1
#define cpu_has_llsc 1
/*
* We disable LL/SC on non-SMP systems, as it is faster to disable
* interrupts for atomic access than to use LL/SC.
*/
#ifdef CONFIG_SMP
# define kernel_uses_llsc 1
#else
# define kernel_uses_llsc 0
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define cpu_dcache_line_size() 128
#define cpu_icache_line_size() 128
#define cpu_has_octeon_cache 1
#define cpu_has_4k_cache 0
#else
#define cpu_has_octeon_cache 0
#define cpu_has_4k_cache 1
#endif
#endif /* __ASM_MACH_PARAVIRT_CPU_FEATURE_OVERRIDES_H */
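The kernel_uses_llsc override above encodes the comment's reasoning: on a uniprocessor guest the only thing an atomic operation can race with is an interrupt, so masking interrupts around a plain read-modify-write is cheaper than an LL/SC retry loop. A minimal sketch of that fallback pattern (illustrative only, not the kernel's asm/atomic.h; the helper name is made up and would need <linux/irqflags.h>):

static inline void pv_example_atomic_add(int i, int *v)	/* hypothetical helper */
{
	unsigned long flags;

	local_irq_save(flags);	/* no other CPU exists, so nothing can observe *v mid-update */
	*v += i;
	local_irq_restore(flags);
}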
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Cavium, Inc.
*/
#ifndef __ASM_MACH_PARAVIRT_IRQ_H__
#define __ASM_MACH_PARAVIRT_IRQ_H__
#define NR_IRQS 64
#define MIPS_CPU_IRQ_BASE 1
#define MIPS_IRQ_PCIA (MIPS_CPU_IRQ_BASE + 8)
#define MIPS_IRQ_MBOX0 (MIPS_CPU_IRQ_BASE + 32)
#define MIPS_IRQ_MBOX1 (MIPS_CPU_IRQ_BASE + 33)
#endif /* __ASM_MACH_PARAVIRT_IRQ_H__ */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Cavium, Inc
*/
#ifndef __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H
#define __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H
#define CP0_EBASE $15, 1
.macro kernel_entry_setup
mfc0 t0, CP0_EBASE
andi t0, t0, 0x3ff # CPUNum
beqz t0, 1f
# CPUs other than zero go to smp_bootstrap
j smp_bootstrap
1:
.endm
/*
* Do SMP slave processor setup necessary before we can safely execute
* C code.
*/
.macro smp_slave_setup
mfc0 t0, CP0_EBASE
andi t0, t0, 0x3ff # CPUNum
slti t1, t0, NR_CPUS
bnez t1, 1f
2:
di
wait
b 2b # Unknown CPU, loop forever.
1:
PTR_LA t1, paravirt_smp_sp
PTR_SLL t0, PTR_SCALESHIFT
PTR_ADDU t1, t1, t0
3:
PTR_L sp, 0(t1)
beqz sp, 3b # Spin until told to proceed.
PTR_LA t1, paravirt_smp_gp
PTR_ADDU t1, t1, t0
sync
PTR_L gp, 0(t1)
.endm
#endif /* __ASM_MACH_PARAVIRT_KERNEL_ENTRY_H */
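Rendered as C, the smp_slave_setup macro above is a simple park-and-release handshake (a readability sketch only; the real code must run before a usable stack or gp exists, which is why it stays in assembly):

	unsigned int cpunum = get_ebase_cpunum();	/* CP0 EBase.CPUNum */

	if (cpunum >= NR_CPUS)
		for (;;)
			;	/* unknown CPU: disable interrupts and park in 'wait' */

	while (paravirt_smp_sp[cpunum] == 0)
		;	/* spin until the boot CPU publishes our stack pointer */
	/* then sync and load gp; the boot CPU wrote gp first, sp last */

The matching release side is paravirt_boot_secondary() further down, which stores gp, issues smp_wmb(), and only then stores sp.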
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
* Copyright (C) 2013 Cavium Networks <support@caviumnetworks.com>
*/
#ifndef __ASM_MIPS_MACH_PARAVIRT_WAR_H
#define __ASM_MIPS_MACH_PARAVIRT_WAR_H
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
#endif /* __ASM_MIPS_MACH_PARAVIRT_WAR_H */
@@ -1250,17 +1250,13 @@ static void build_r4000_tlb_refill_handler(void)
 	unsigned int final_len;
 	struct mips_huge_tlb_info htlb_info __maybe_unused;
 	enum vmalloc64_mode vmalloc_mode __maybe_unused;
-#ifdef CONFIG_64BIT
-	bool is64bit = true;
-#else
-	bool is64bit = false;
-#endif

 	memset(tlb_handler, 0, sizeof(tlb_handler));
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 	memset(final_handler, 0, sizeof(final_handler));

-	if (is64bit && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
+	if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
 		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
 							  scratch_reg);
 		vmalloc_mode = refill_scratch;
...
#
# Makefile for MIPS para-virtualized specific kernel interface routines
# under Linux.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2013 Cavium, Inc.
#
obj-y := setup.o serial.o paravirt-irq.o
obj-$(CONFIG_SMP) += paravirt-smp.o
#
# Generic para-virtualized guest.
#
platform-$(CONFIG_MIPS_PARAVIRT) += paravirt/
cflags-$(CONFIG_MIPS_PARAVIRT) += \
-I$(srctree)/arch/mips/include/asm/mach-paravirt
load-$(CONFIG_MIPS_PARAVIRT) = 0xffffffff80010000
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Cavium, Inc.
*/
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <asm/io.h>
#define MBOX_BITS_PER_CPU 2
static int cpunum_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
return cpu_logical_map(cpu);
#else
return get_ebase_cpunum();
#endif
}
struct core_chip_data {
struct mutex core_irq_mutex;
bool current_en;
bool desired_en;
u8 bit;
};
static struct core_chip_data irq_core_chip_data[8];
static void irq_core_ack(struct irq_data *data)
{
struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
unsigned int bit = cd->bit;
/*
* We don't need to disable IRQs to make these atomic since
* they are already disabled earlier in the low level
* interrupt code.
*/
clear_c0_status(0x100 << bit);
/* The two user interrupts must be cleared manually. */
if (bit < 2)
clear_c0_cause(0x100 << bit);
}
static void irq_core_eoi(struct irq_data *data)
{
struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
/*
* We don't need to disable IRQs to make these atomic since
* they are already disabled earlier in the low level
* interrupt code.
*/
set_c0_status(0x100 << cd->bit);
}
static void irq_core_set_enable_local(void *arg)
{
struct irq_data *data = arg;
struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
unsigned int mask = 0x100 << cd->bit;
/*
* Interrupts are already disabled, so these are atomic.
*/
if (cd->desired_en)
set_c0_status(mask);
else
clear_c0_status(mask);
}
static void irq_core_disable(struct irq_data *data)
{
struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
cd->desired_en = false;
}
static void irq_core_enable(struct irq_data *data)
{
struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
cd->desired_en = true;
}
static void irq_core_bus_lock(struct irq_data *data)
{
struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
mutex_lock(&cd->core_irq_mutex);
}
static void irq_core_bus_sync_unlock(struct irq_data *data)
{
struct core_chip_data *cd = irq_data_get_irq_chip_data(data);
if (cd->desired_en != cd->current_en) {
on_each_cpu(irq_core_set_enable_local, data, 1);
cd->current_en = cd->desired_en;
}
mutex_unlock(&cd->core_irq_mutex);
}
static struct irq_chip irq_chip_core = {
.name = "Core",
.irq_enable = irq_core_enable,
.irq_disable = irq_core_disable,
.irq_ack = irq_core_ack,
.irq_eoi = irq_core_eoi,
.irq_bus_lock = irq_core_bus_lock,
.irq_bus_sync_unlock = irq_core_bus_sync_unlock,
.irq_cpu_online = irq_core_eoi,
.irq_cpu_offline = irq_core_ack,
.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static void __init irq_init_core(void)
{
int i;
int irq;
struct core_chip_data *cd;
/* Start with a clean slate */
clear_c0_status(ST0_IM);
clear_c0_cause(CAUSEF_IP0 | CAUSEF_IP1);
for (i = 0; i < ARRAY_SIZE(irq_core_chip_data); i++) {
cd = irq_core_chip_data + i;
cd->current_en = false;
cd->desired_en = false;
cd->bit = i;
mutex_init(&cd->core_irq_mutex);
irq = MIPS_CPU_IRQ_BASE + i;
switch (i) {
case 0: /* SW0 */
case 1: /* SW1 */
case 5: /* IP5 */
case 6: /* IP6 */
case 7: /* IP7 */
irq_set_chip_data(irq, cd);
irq_set_chip_and_handler(irq, &irq_chip_core,
handle_percpu_irq);
break;
default:
break;
}
}
}
static void __iomem *mips_irq_chip;
#define MIPS_IRQ_CHIP_NUM_BITS 0
#define MIPS_IRQ_CHIP_REGS 8
static int mips_irq_cpu_stride;
static int mips_irq_chip_reg_raw;
static int mips_irq_chip_reg_src;
static int mips_irq_chip_reg_en;
static int mips_irq_chip_reg_raw_w1s;
static int mips_irq_chip_reg_raw_w1c;
static int mips_irq_chip_reg_en_w1s;
static int mips_irq_chip_reg_en_w1c;
static void irq_pci_enable(struct irq_data *data)
{
u32 mask = 1u << data->irq;
__raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_en_w1s);
}
static void irq_pci_disable(struct irq_data *data)
{
u32 mask = 1u << data->irq;
__raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_en_w1c);
}
static void irq_pci_ack(struct irq_data *data)
{
}
static void irq_pci_mask(struct irq_data *data)
{
u32 mask = 1u << data->irq;
__raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_en_w1c);
}
static void irq_pci_unmask(struct irq_data *data)
{
u32 mask = 1u << data->irq;
__raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_en_w1s);
}
static struct irq_chip irq_chip_pci = {
.name = "PCI",
.irq_enable = irq_pci_enable,
.irq_disable = irq_pci_disable,
.irq_ack = irq_pci_ack,
.irq_mask = irq_pci_mask,
.irq_unmask = irq_pci_unmask,
};
static void irq_mbox_all(struct irq_data *data, void __iomem *base)
{
int cpu;
unsigned int mbox = data->irq - MIPS_IRQ_MBOX0;
u32 mask;
WARN_ON(mbox >= MBOX_BITS_PER_CPU);
for_each_online_cpu(cpu) {
unsigned int cpuid = cpunum_for_cpu(cpu);
mask = 1 << (cpuid * MBOX_BITS_PER_CPU + mbox);
__raw_writel(mask, base + (cpuid * mips_irq_cpu_stride));
}
}
static void irq_mbox_enable(struct irq_data *data)
{
irq_mbox_all(data, mips_irq_chip + mips_irq_chip_reg_en_w1s + sizeof(u32));
}
static void irq_mbox_disable(struct irq_data *data)
{
irq_mbox_all(data, mips_irq_chip + mips_irq_chip_reg_en_w1c + sizeof(u32));
}
static void irq_mbox_ack(struct irq_data *data)
{
u32 mask;
unsigned int mbox = data->irq - MIPS_IRQ_MBOX0;
WARN_ON(mbox >= MBOX_BITS_PER_CPU);
mask = 1 << (get_ebase_cpunum() * MBOX_BITS_PER_CPU + mbox);
__raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_raw_w1c + sizeof(u32));
}
void irq_mbox_ipi(int cpu, unsigned int actions)
{
unsigned int cpuid = cpunum_for_cpu(cpu);
u32 mask;
WARN_ON(actions >= (1 << MBOX_BITS_PER_CPU));
mask = actions << (cpuid * MBOX_BITS_PER_CPU);
__raw_writel(mask, mips_irq_chip + mips_irq_chip_reg_raw_w1s + sizeof(u32));
}
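Each CPU owns MBOX_BITS_PER_CPU (= 2) adjacent bits in the mailbox registers, so mailbox m of the CPU with EBase CPUNum n lives at bit n * 2 + m. A worked example with made-up values: signalling mailbox 1 (the SMP-call mailbox) on a CPU whose CPUNum is 3 writes mask = (1 << 1) << (3 * 2) = 0x80 to the raw W1S register, which irq_pci_dispatch() later decodes with (en >> (2 * cpuid)) & 3:

	irq_mbox_ipi(3, 1 << 1);	/* assumes logical cpu 3 maps to CPUNum 3 */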
static void irq_mbox_cpu_onoffline(struct irq_data *data, void __iomem *base)
{
unsigned int mbox = data->irq - MIPS_IRQ_MBOX0;
unsigned int cpuid = get_ebase_cpunum();
u32 mask;
WARN_ON(mbox >= MBOX_BITS_PER_CPU);
mask = 1 << (cpuid * MBOX_BITS_PER_CPU + mbox);
__raw_writel(mask, base + (cpuid * mips_irq_cpu_stride));
}
static void irq_mbox_cpu_online(struct irq_data *data)
{
irq_mbox_cpu_onoffline(data, mips_irq_chip + mips_irq_chip_reg_en_w1s + sizeof(u32));
}
static void irq_mbox_cpu_offline(struct irq_data *data)
{
irq_mbox_cpu_onoffline(data, mips_irq_chip + mips_irq_chip_reg_en_w1c + sizeof(u32));
}
static struct irq_chip irq_chip_mbox = {
.name = "MBOX",
.irq_enable = irq_mbox_enable,
.irq_disable = irq_mbox_disable,
.irq_ack = irq_mbox_ack,
.irq_cpu_online = irq_mbox_cpu_online,
.irq_cpu_offline = irq_mbox_cpu_offline,
.flags = IRQCHIP_ONOFFLINE_ENABLED,
};
static void __init irq_pci_init(void)
{
int i, stride;
u32 num_bits;
mips_irq_chip = ioremap(0x1e010000, 4096);
num_bits = __raw_readl(mips_irq_chip + MIPS_IRQ_CHIP_NUM_BITS);
stride = 8 * (1 + ((num_bits - 1) / 64));
pr_notice("mips_irq_chip: %u bits, reg stride: %d\n", num_bits, stride);
mips_irq_chip_reg_raw = MIPS_IRQ_CHIP_REGS + 0 * stride;
mips_irq_chip_reg_raw_w1s = MIPS_IRQ_CHIP_REGS + 1 * stride;
mips_irq_chip_reg_raw_w1c = MIPS_IRQ_CHIP_REGS + 2 * stride;
mips_irq_chip_reg_src = MIPS_IRQ_CHIP_REGS + 3 * stride;
mips_irq_chip_reg_en = MIPS_IRQ_CHIP_REGS + 4 * stride;
mips_irq_chip_reg_en_w1s = MIPS_IRQ_CHIP_REGS + 5 * stride;
mips_irq_chip_reg_en_w1c = MIPS_IRQ_CHIP_REGS + 6 * stride;
mips_irq_cpu_stride = stride * 4;
for (i = 0; i < 4; i++)
irq_set_chip_and_handler(i + MIPS_IRQ_PCIA, &irq_chip_pci, handle_level_irq);
for (i = 0; i < 2; i++)
irq_set_chip_and_handler(i + MIPS_IRQ_MBOX0, &irq_chip_mbox, handle_percpu_irq);
set_c0_status(STATUSF_IP2);
}
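To make the offsets above concrete, assume the controller reports num_bits = 64 (a made-up value). Then stride = 8 * (1 + (64 - 1) / 64) = 8, and the register map decodes as follows (a worked example, not extra hardware documentation):

	raw     =  8	raw_w1s = 16	raw_w1c = 24
	src     = 32	en      = 40
	en_w1s  = 48	en_w1c  = 56
	mips_irq_cpu_stride = 4 * 8 = 32 bytes per CPU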
static void irq_pci_dispatch(void)
{
unsigned int cpuid = get_ebase_cpunum();
u32 en;
en = __raw_readl(mips_irq_chip + mips_irq_chip_reg_src +
(cpuid * mips_irq_cpu_stride));
if (!en) {
en = __raw_readl(mips_irq_chip + mips_irq_chip_reg_src + (cpuid * mips_irq_cpu_stride) + sizeof(u32));
en = (en >> (2 * cpuid)) & 3;
if (!en)
spurious_interrupt();
else
do_IRQ(__ffs(en) + MIPS_IRQ_MBOX0); /* MBOX type */
} else {
do_IRQ(__ffs(en));
}
}
void __init arch_init_irq(void)
{
irq_init_core();
irq_pci_init();
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
int ip;
if (unlikely(!pending)) {
spurious_interrupt();
return;
}
ip = ffs(pending) - 1 - STATUSB_IP0;
if (ip == 2)
irq_pci_dispatch();
else
do_IRQ(MIPS_CPU_IRQ_BASE + ip);
}
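A worked example of the dispatch arithmetic above, using the standard MIPS Cause/Status bit positions: a device interrupt on IP2 sets bit 10, so ffs(pending) = 11 and ip = 11 - 1 - STATUSB_IP0 = 11 - 1 - 8 = 2, which takes the irq_pci_dispatch() path; the CPU timer on IP7 sets bit 15, giving ip = 7 and do_IRQ(MIPS_CPU_IRQ_BASE + 7).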
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Cavium, Inc.
*/
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/mipsregs.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/smp.h>
/*
* Writing the sp releases the CPU, so writes must be ordered, gp
* first, then sp.
*/
unsigned long paravirt_smp_sp[NR_CPUS];
unsigned long paravirt_smp_gp[NR_CPUS];
static int numcpus = 1;
static int __init set_numcpus(char *str)
{
int newval;
if (get_option(&str, &newval)) {
if (newval < 1 || newval >= NR_CPUS)
goto bad;
numcpus = newval;
return 0;
}
bad:
return -EINVAL;
}
early_param("numcpus", set_numcpus);
static void paravirt_smp_setup(void)
{
int id;
unsigned int cpunum = get_ebase_cpunum();
if (WARN_ON(cpunum >= NR_CPUS))
return;
/* The present CPUs are initially just the boot cpu (CPU 0). */
for (id = 0; id < NR_CPUS; id++) {
set_cpu_possible(id, id == 0);
set_cpu_present(id, id == 0);
}
__cpu_number_map[cpunum] = 0;
__cpu_logical_map[0] = cpunum;
for (id = 0; id < numcpus; id++) {
set_cpu_possible(id, true);
set_cpu_present(id, true);
__cpu_number_map[id] = id;
__cpu_logical_map[id] = id;
}
}
void irq_mbox_ipi(int cpu, unsigned int actions);
static void paravirt_send_ipi_single(int cpu, unsigned int action)
{
irq_mbox_ipi(cpu, action);
}
static void paravirt_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int cpu;
for_each_cpu_mask(cpu, *mask)
paravirt_send_ipi_single(cpu, action);
}
static void paravirt_init_secondary(void)
{
unsigned int sr;
sr = set_c0_status(ST0_BEV);
write_c0_ebase((u32)ebase);
sr |= STATUSF_IP2; /* Interrupt controller on IP2 */
write_c0_status(sr);
irq_cpu_online();
}
static void paravirt_smp_finish(void)
{
/* to generate the first CPU timer interrupt */
write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
local_irq_enable();
}
static void paravirt_cpus_done(void)
{
}
static void paravirt_boot_secondary(int cpu, struct task_struct *idle)
{
paravirt_smp_gp[cpu] = (unsigned long)task_thread_info(idle);
smp_wmb();
paravirt_smp_sp[cpu] = __KSTK_TOS(idle);
}
static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
{
scheduler_ipi();
return IRQ_HANDLED;
}
static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
{
smp_call_function_interrupt();
return IRQ_HANDLED;
}
static void paravirt_prepare_cpus(unsigned int max_cpus)
{
if (request_irq(MIPS_IRQ_MBOX0, paravirt_reched_interrupt,
IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
paravirt_reched_interrupt)) {
panic("Cannot request_irq for SchedulerIPI");
}
if (request_irq(MIPS_IRQ_MBOX1, paravirt_function_interrupt,
IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
paravirt_function_interrupt)) {
panic("Cannot request_irq for SMP-Call");
}
}
struct plat_smp_ops paravirt_smp_ops = {
.send_ipi_single = paravirt_send_ipi_single,
.send_ipi_mask = paravirt_send_ipi_mask,
.init_secondary = paravirt_init_secondary,
.smp_finish = paravirt_smp_finish,
.cpus_done = paravirt_cpus_done,
.boot_secondary = paravirt_boot_secondary,
.smp_setup = paravirt_smp_setup,
.prepare_cpus = paravirt_prepare_cpus,
};
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Cavium, Inc.
*/
#include <linux/kernel.h>
#include <linux/virtio_console.h>
#include <linux/kvm_para.h>
/*
* Emit one character to the boot console.
*/
int prom_putchar(char c)
{
kvm_hypercall3(KVM_HC_MIPS_CONSOLE_OUTPUT, 0 /* port 0 */,
(unsigned long)&c, 1 /* len == 1 */);
return 1;
}
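prom_putchar() is the per-character hook consumed by the MIPS early-console code, so each early boot character becomes a one-byte console-output hypercall. A trivial illustration of driving it (the helper is hypothetical):

static void early_puts(const char *s)	/* hypothetical, for illustration */
{
	while (*s)
		prom_putchar(*s++);
}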
#ifdef CONFIG_VIRTIO_CONSOLE
static int paravirt_put_chars(u32 vtermno, const char *buf, int count)
{
kvm_hypercall3(KVM_HC_MIPS_CONSOLE_OUTPUT, vtermno,
(unsigned long)buf, count);
return count;
}
static int __init paravirt_cons_init(void)
{
virtio_cons_early_init(paravirt_put_chars);
return 0;
}
core_initcall(paravirt_cons_init);
#endif
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2013 Cavium, Inc.
*/
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <asm/reboot.h>
#include <asm/bootinfo.h>
#include <asm/smp-ops.h>
#include <asm/time.h>
extern struct plat_smp_ops paravirt_smp_ops;
const char *get_system_type(void)
{
return "MIPS Para-Virtualized Guest";
}
void __init plat_time_init(void)
{
mips_hpt_frequency = kvm_hypercall0(KVM_HC_MIPS_GET_CLOCK_FREQ);
preset_lpj = mips_hpt_frequency / (2 * HZ);
}
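For example (made-up numbers), if the hypercall reports a 500 MHz count frequency and HZ = 100, then preset_lpj = 500000000 / (2 * 100) = 2500000, which lets calibrate_delay() skip the boot-time timing loop and use the pre-set value directly.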
static void pv_machine_halt(void)
{
kvm_hypercall0(KVM_HC_MIPS_EXIT_VM);
}
/*
* Early entry point for arch setup
*/
void __init prom_init(void)
{
int i;
int argc = fw_arg0;
char **argv = (char **)fw_arg1;
#ifdef CONFIG_32BIT
set_io_port_base(KSEG1ADDR(0x1e000000));
#else /* CONFIG_64BIT */
set_io_port_base(PHYS_TO_XKSEG_UNCACHED(0x1e000000));
#endif
for (i = 0; i < argc; i++) {
strlcat(arcs_cmdline, argv[i], COMMAND_LINE_SIZE);
if (i < argc - 1)
strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
}
_machine_halt = pv_machine_halt;
register_smp_ops(&paravirt_smp_ops);
}
void __init plat_mem_setup(void)
{
/* Do nothing, the "mem=???" parser handles our memory. */
}
void __init prom_free_prom_memory(void)
{
}