Commit 8b53ef33 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6: (29 commits)
  [IA64] BUG to BUG_ON changes
  [IA64] Fix typo/thinko in arch/ia64/sn/kernel/sn2/sn2_smp.c
  ia64: remove some warnings.
  ia64/xen: fix the link error.
  ia64/pv_ops/bp/xen: implemented binary patchable pv_cpu_ops.
  ia64/pv_ops/binary patch: define paravirt_dv_serialize_data() and suppress false positive warning.
  ia64/pv_ops/bp/module: support binary patching for kernel module.
  ia64/pv_ops: implement binary patching optimization for native.
  ia64/pv_op/binarypatch: add helper functions to support binary patching for paravirt_ops.
  ia64/pv_ops/xen/gate.S: xen gate page paravirtualization
  ia64/pv_ops: paravirtualize gate.S.
  ia64/pv_ops: move down __kernel_syscall_via_epc.
  ia64/pv_ops/xen: define xen specific gate page.
  ia64/pv_ops: gate page paravirtualization.
  ia64/pv_ops/xen/pv_time_ops: implement sched_clock.
  ia64/pv_ops/pv_time_ops: add sched_clock hook.
  ia64/pv_ops/xen: paravirtualize read/write ar.itc and ar.itm
  ia64/pv_ops: paravirtualize mov = ar.itc.
  ia64/pv_ops/pvchecker: support mov = ar.itc paravirtualization
  ia64/pv_ops: paravirtualize fsys.S.
  ...
parents ef5ddd3d 80a03e29
@@ -202,7 +202,11 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);
 #ifndef __ASSEMBLY__
 #if defined(CONFIG_PARAVIRT) && defined(__KERNEL__)
-#define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
+#ifdef ASM_SUPPORTED
+# define IA64_INTRINSIC_API(name)	paravirt_ ## name
+#else
+# define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
+#endif
 #define IA64_INTRINSIC_MACRO(name)	paravirt_ ## name
 #else
 #define IA64_INTRINSIC_API(name)	ia64_native_ ## name
...
@@ -87,7 +87,7 @@ get_mmu_context (struct mm_struct *mm)
 		/* re-check, now that we've got the lock: */
 		context = mm->context;
 		if (context == 0) {
-			cpus_clear(mm->cpu_vm_mask);
+			cpumask_clear(mm_cpumask(mm));
 			if (ia64_ctx.next >= ia64_ctx.limit) {
 				ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
 						ia64_ctx.max_ctx, ia64_ctx.next);
@@ -166,8 +166,8 @@ activate_context (struct mm_struct *mm)
 	do {
 		context = get_mmu_context(mm);
-		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
-			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 		reload_context(context);
 		/*
 		 * in the unlikely event of a TLB-flush by another thread,
...
@@ -16,6 +16,12 @@ struct mod_arch_specific {
 	struct elf64_shdr *got;		/* global offset table */
 	struct elf64_shdr *opd;		/* official procedure descriptors */
 	struct elf64_shdr *unwind;	/* unwind-table section */
+#ifdef CONFIG_PARAVIRT
+	struct elf64_shdr *paravirt_bundles;
+					/* paravirt_alt_bundle_patch table */
+	struct elf64_shdr *paravirt_insts;
+					/* paravirt_alt_inst_patch table */
+#endif
 	unsigned long gp;		/* global-pointer for module */

 	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
...
@@ -30,6 +30,9 @@
 #define __paravirt_work_processed_syscall_target \
 						ia64_work_processed_syscall

+#define paravirt_fsyscall_table			ia64_native_fsyscall_table
+#define paravirt_fsys_bubble_down		ia64_native_fsys_bubble_down
+
 #ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
 # define PARAVIRT_POISON	0xdeadbeefbaadf00d
 # define CLOBBER(clob)		\
@@ -74,6 +77,11 @@
 (pred)	mov reg = psr		\
 	CLOBBER(clob)

+#define MOV_FROM_ITC(pred, pred_clob, reg, clob)	\
+(pred)	mov reg = ar.itc	\
+	CLOBBER(clob)		\
+	CLOBBER_PRED(pred_clob)
+
 #define MOV_TO_IFA(reg, clob)	\
 	mov cr.ifa = reg	\
 	CLOBBER(clob)
@@ -158,6 +166,11 @@
 #define RSM_PSR_DT		\
 	rsm psr.dt

+#define RSM_PSR_BE_I(clob0, clob1)	\
+	rsm psr.be | psr.i	\
+	CLOBBER(clob0)		\
+	CLOBBER(clob1)
+
 #define SSM_PSR_DT_AND_SRLZ_I	\
 	ssm psr.dt		\
 	;;			\
...
/******************************************************************************
* arch/ia64/include/asm/native/inst.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#define __paravirt_start_gate_fsyscall_patchlist \
__ia64_native_start_gate_fsyscall_patchlist
#define __paravirt_end_gate_fsyscall_patchlist \
__ia64_native_end_gate_fsyscall_patchlist
#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
__ia64_native_start_gate_brl_fsys_bubble_down_patchlist
#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
__ia64_native_end_gate_brl_fsys_bubble_down_patchlist
#define __paravirt_start_gate_vtop_patchlist \
__ia64_native_start_gate_vtop_patchlist
#define __paravirt_end_gate_vtop_patchlist \
__ia64_native_end_gate_vtop_patchlist
#define __paravirt_start_gate_mckinley_e9_patchlist \
__ia64_native_start_gate_mckinley_e9_patchlist
#define __paravirt_end_gate_mckinley_e9_patchlist \
__ia64_native_end_gate_mckinley_e9_patchlist
@@ -180,6 +180,11 @@
 	IS_PRED_IN(pred)				\
 	IS_RREG_OUT(reg)				\
 	IS_RREG_CLOB(clob)
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob)	\
+	IS_PRED_IN(pred)				\
+	IS_PRED_CLOB(pred_clob)				\
+	IS_RREG_OUT(reg)				\
+	IS_RREG_CLOB(clob)
 #define MOV_TO_IFA(reg, clob)				\
 	IS_RREG_IN(reg)					\
 	IS_RREG_CLOB(clob)
@@ -246,6 +251,9 @@
 	IS_RREG_CLOB(clob2)
 #define RSM_PSR_DT				\
 	nop 0
+#define RSM_PSR_BE_I(clob0, clob1)	\
+	IS_RREG_CLOB(clob0)		\
+	IS_RREG_CLOB(clob1)
 #define SSM_PSR_DT_AND_SRLZ_I			\
 	nop 0
 #define BSW_0(clob0, clob1, clob2)		\
...
@@ -22,6 +22,56 @@
 #ifndef __ASM_PARAVIRT_H
 #define __ASM_PARAVIRT_H

+#ifndef __ASSEMBLY__
+/******************************************************************************
+ * fsys related addresses
+ */
+struct pv_fsys_data {
+	unsigned long *fsyscall_table;
+	void *fsys_bubble_down;
+};
+
+extern struct pv_fsys_data pv_fsys_data;
+
+unsigned long *paravirt_get_fsyscall_table(void);
+char *paravirt_get_fsys_bubble_down(void);
+
+/******************************************************************************
+ * patchlist addresses for gate page
+ */
+enum pv_gate_patchlist {
+	PV_GATE_START_FSYSCALL,
+	PV_GATE_END_FSYSCALL,
+	PV_GATE_START_BRL_FSYS_BUBBLE_DOWN,
+	PV_GATE_END_BRL_FSYS_BUBBLE_DOWN,
+	PV_GATE_START_VTOP,
+	PV_GATE_END_VTOP,
+	PV_GATE_START_MCKINLEY_E9,
+	PV_GATE_END_MCKINLEY_E9,
+};
+
+struct pv_patchdata {
+	unsigned long start_fsyscall_patchlist;
+	unsigned long end_fsyscall_patchlist;
+	unsigned long start_brl_fsys_bubble_down_patchlist;
+	unsigned long end_brl_fsys_bubble_down_patchlist;
+	unsigned long start_vtop_patchlist;
+	unsigned long end_vtop_patchlist;
+	unsigned long start_mckinley_e9_patchlist;
+	unsigned long end_mckinley_e9_patchlist;
+
+	void *gate_section;
+};
+
+extern struct pv_patchdata pv_patchdata;
+
+unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type);
+void *paravirt_get_gate_section(void);
+#endif
+
 #ifdef CONFIG_PARAVIRT_GUEST

 #define PARAVIRT_HYPERVISOR_TYPE_DEFAULT	0
@@ -68,6 +118,14 @@ struct pv_init_ops {
 	int (*arch_setup_nomca)(void);

 	void (*post_smp_prepare_boot_cpu)(void);
+#ifdef ASM_SUPPORTED
+	unsigned long (*patch_bundle)(void *sbundle, void *ebundle,
+				      unsigned long type);
+	unsigned long (*patch_inst)(unsigned long stag, unsigned long etag,
+				    unsigned long type);
+#endif
+	void (*patch_branch)(unsigned long tag, unsigned long type);
 };

 extern struct pv_init_ops pv_init_ops;
@@ -210,6 +268,8 @@ struct pv_time_ops {
 	int (*do_steal_accounting)(unsigned long *new_itm);

 	void (*clocksource_resume)(void);
+
+	unsigned long long (*sched_clock)(void);
 };

 extern struct pv_time_ops pv_time_ops;
@@ -227,6 +287,11 @@ paravirt_do_steal_accounting(unsigned long *new_itm)
 	return pv_time_ops.do_steal_accounting(new_itm);
 }

+static inline unsigned long long
+paravirt_sched_clock(void)
+{
+	return pv_time_ops.sched_clock();
+}
+
 #endif	/* !__ASSEMBLY__ */

 #else
...
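
Editor's note: the pv_time_ops hunk above adds a sched_clock hook that paravirt_sched_clock() dispatches through. As context, a minimal C sketch of how a guest side could wire the hook; xen_sched_clock_impl and xen_time_setup are illustrative names, not code from this commit:

#include <linux/init.h>
#include <asm/paravirt.h>
#include <asm/timex.h>

/* Hypothetical guest-side sched_clock: a real guest would subtract
 * stolen time here; this sketch just forwards to the native reader. */
static unsigned long long xen_sched_clock_impl(void)
{
	return ia64_native_sched_clock();
}

void __init xen_time_setup(void)
{
	/* install the hook that paravirt_sched_clock() dispatches to */
	pv_time_ops.sched_clock = xen_sched_clock_impl;
}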
/******************************************************************************
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef __ASM_PARAVIRT_PATCH_H
#define __ASM_PARAVIRT_PATCH_H
#ifdef __ASSEMBLY__
.section .paravirt_branches, "a"
.previous
#define PARAVIRT_PATCH_SITE_BR(type) \
{ \
[1:] ; \
br.cond.sptk.many 2f ; \
nop.b 0 ; \
nop.b 0;; ; \
} ; \
2: \
.xdata8 ".paravirt_branches", 1b, type
#else
#include <linux/stringify.h>
#include <asm/intrinsics.h>
/* for binary patch */
struct paravirt_patch_site_bundle {
void *sbundle;
void *ebundle;
unsigned long type;
};
/* label means the beginning of new bundle */
#define paravirt_alt_bundle(instr, privop) \
"\t998:\n" \
"\t" instr "\n" \
"\t999:\n" \
"\t.pushsection .paravirt_bundles, \"a\"\n" \
"\t.popsection\n" \
"\t.xdata8 \".paravirt_bundles\", 998b, 999b, " \
__stringify(privop) "\n"
struct paravirt_patch_bundle_elem {
const void *sbundle;
const void *ebundle;
unsigned long type;
};
struct paravirt_patch_site_inst {
unsigned long stag;
unsigned long etag;
unsigned long type;
};
#define paravirt_alt_inst(instr, privop) \
"\t[998:]\n" \
"\t" instr "\n" \
"\t[999:]\n" \
"\t.pushsection .paravirt_insts, \"a\"\n" \
"\t.popsection\n" \
"\t.xdata8 \".paravirt_insts\", 998b, 999b, " \
__stringify(privop) "\n"
struct paravirt_patch_site_branch {
unsigned long tag;
unsigned long type;
};
struct paravirt_patch_branch_target {
const void *entry;
unsigned long type;
};
void
__paravirt_patch_apply_branch(
unsigned long tag, unsigned long type,
const struct paravirt_patch_branch_target *entries,
unsigned int nr_entries);
void
paravirt_patch_reloc_br(unsigned long tag, const void *target);
void
paravirt_patch_reloc_brl(unsigned long tag, const void *target);
#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT)
unsigned long
ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
unsigned long
__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
const struct paravirt_patch_bundle_elem *elems,
unsigned long nelems,
const struct paravirt_patch_bundle_elem **found);
void
paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
const struct paravirt_patch_site_bundle *end);
void
paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
const struct paravirt_patch_site_inst *end);
void paravirt_patch_apply(void);
#else
#define paravirt_patch_apply_bundle(start, end) do { } while (0)
#define paravirt_patch_apply_inst(start, end) do { } while (0)
#define paravirt_patch_apply() do { } while (0)
#endif
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PARAVIRT_PATCH_H */
/*
* Local variables:
* mode: C
* c-set-style: "linux"
* c-basic-offset: 8
* tab-width: 8
* indent-tabs-mode: t
* End:
*/
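
Editor's note: the .xdata8 directives in paravirt_alt_bundle()/paravirt_alt_inst() above accumulate tables of patch sites, so applying the binary patches reduces to a walk over those tables. A sketch of that shape under the declarations above; apply_bundle_sites is an illustrative name, not the commit's actual implementation (pv_init_ops.patch_bundle is only available under ASM_SUPPORTED):

#include <linux/init.h>
#include <asm/paravirt.h>
#include <asm/paravirt_patch.h>

/* Walk the .paravirt_bundles site table built by paravirt_alt_bundle()
 * and let the active pv backend rewrite each bundle range in place. */
static void __init
apply_bundle_sites(const struct paravirt_patch_site_bundle *start,
		   const struct paravirt_patch_site_bundle *end)
{
	const struct paravirt_patch_site_bundle *p;

	for (p = start; p < end; p++)
		(*pv_init_ops.patch_bundle)(p->sbundle, p->ebundle, p->type);
}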
@@ -126,7 +126,8 @@ extern void identify_siblings (struct cpuinfo_ia64 *);
 extern int is_multithreading_enabled(void);

 extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi(cpumask_t mask);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask

 #else /* CONFIG_SMP */
...
@@ -40,5 +40,6 @@ get_cycles (void)
 }

 extern void ia64_cpu_local_tick (void);
+extern unsigned long long ia64_native_sched_clock (void);

 #endif /* _ASM_IA64_TIMEX_H */
@@ -112,11 +112,6 @@ void build_cpu_to_node_map(void);

 extern void arch_fix_phys_package_id(int num, u32 slot);

-#define pcibus_to_cpumask(bus)	(pcibus_to_node(bus) == -1 ?	\
-					CPU_MASK_ALL :		\
-					node_to_cpumask(pcibus_to_node(bus)) \
-				)
-
 #define cpumask_of_pcibus(bus)	(pcibus_to_node(bus) == -1 ?	\
 				 cpu_all_mask :			\
 				 cpumask_of_node(pcibus_to_node(bus)))
...
@@ -33,9 +33,6 @@
 #ifndef _ASM_IA64_XEN_HYPERVISOR_H
 #define _ASM_IA64_XEN_HYPERVISOR_H

-#ifdef CONFIG_XEN
-
-#include <linux/init.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h>	/* to compile feature.c */
 #include <xen/features.h>		/* to compile xen-netfront.c */
@@ -43,22 +40,32 @@
 /* xen_domain_type is set before executing any C code by early_xen_setup */
 enum xen_domain_type {
-	XEN_NATIVE,
-	XEN_PV_DOMAIN,
-	XEN_HVM_DOMAIN,
+	XEN_NATIVE,	/* running on bare hardware */
+	XEN_PV_DOMAIN,	/* running in a PV domain */
+	XEN_HVM_DOMAIN,	/* running in a Xen hvm domain */
 };

+#ifdef CONFIG_XEN
 extern enum xen_domain_type xen_domain_type;
+#else
+#define xen_domain_type		XEN_NATIVE
+#endif

 #define xen_domain()		(xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain()		(xen_domain_type == XEN_PV_DOMAIN)
-#define xen_initial_domain()	(xen_pv_domain() && \
+#define xen_pv_domain()		(xen_domain() &&			\
+				 xen_domain_type == XEN_PV_DOMAIN)
+#define xen_hvm_domain()	(xen_domain() &&			\
+				 xen_domain_type == XEN_HVM_DOMAIN)
+
+#ifdef CONFIG_XEN_DOM0
+#define xen_initial_domain()	(xen_pv_domain() &&			\
 				 (xen_start_info->flags & SIF_INITDOMAIN))
-#define xen_hvm_domain()	(xen_domain_type == XEN_HVM_DOMAIN)
+#else
+#define xen_initial_domain()	(0)
+#endif
+
+/* deprecated. remove this */
+#define is_running_on_xen()	(xen_domain_type == XEN_PV_DOMAIN)

+#ifdef CONFIG_XEN
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
@@ -74,16 +81,6 @@ void force_evtchn_callback(void);
 /* For setup_arch() in arch/ia64/kernel/setup.c */
 void xen_ia64_enable_opt_feature(void);

-#else /* CONFIG_XEN */
-
-#define xen_domain()		(0)
-#define xen_pv_domain()		(0)
-#define xen_initial_domain()	(0)
-#define xen_hvm_domain()	(0)
-#define is_running_on_xen()	(0)	/* deprecated. remove this */
-
 #endif
-
-#define is_initial_xendomain()	(0)	/* deprecated. remove this */

 #endif /* _ASM_IA64_XEN_HYPERVISOR_H */
@@ -33,6 +33,9 @@
 #define __paravirt_work_processed_syscall_target \
 						xen_work_processed_syscall

+#define paravirt_fsyscall_table			xen_fsyscall_table
+#define paravirt_fsys_bubble_down		xen_fsys_bubble_down
+
 #define MOV_FROM_IFA(reg)	\
 	movl reg = XSI_IFA;	\
 	;;			\
@@ -110,6 +113,27 @@
 .endm
 #define MOV_FROM_PSR(pred, reg, clob)	__MOV_FROM_PSR pred, reg, clob

+/* assuming ar.itc is read with interrupt disabled. */
+#define MOV_FROM_ITC(pred, pred_clob, reg, clob)		\
+(pred)	movl clob = XSI_ITC_OFFSET;				\
+	;;							\
+(pred)	ld8 clob = [clob];					\
+(pred)	mov reg = ar.itc;					\
+	;;							\
+(pred)	add reg = reg, clob;					\
+	;;							\
+(pred)	movl clob = XSI_ITC_LAST;				\
+	;;							\
+(pred)	ld8 clob = [clob];					\
+	;;							\
+(pred)	cmp.geu.unc pred_clob, p0 = clob, reg;			\
+	;;							\
+(pred_clob)	add reg = 1, clob;				\
+	;;							\
+(pred)	movl clob = XSI_ITC_LAST;				\
+	;;							\
+(pred)	st8 [clob] = reg
+
 #define MOV_TO_IFA(reg, clob)	\
 	movl clob = XSI_IFA;	\
@@ -362,6 +386,10 @@
 #define RSM_PSR_DT		\
 	XEN_HYPER_RSM_PSR_DT

+#define RSM_PSR_BE_I(clob0, clob1)	\
+	RSM_PSR_I(p0, clob0, clob1);	\
+	rum psr.be
+
 #define SSM_PSR_DT_AND_SRLZ_I	\
 	XEN_HYPER_SSM_PSR_DT
...
@@ -209,6 +209,15 @@ struct mapped_regs {
 			unsigned long krs[8];	/* kernel registers */
 			unsigned long tmp[16];	/* temp registers
 						   (e.g. for hyperprivops) */
+
+			/* itc paravirtualization
+			 * vAR.ITC = mAR.ITC + itc_offset
+			 * itc_last is the value most recently passed to
+			 * the guest OS, in order to prevent it from
+			 * going backwards.
+			 */
+			unsigned long itc_offset;
+			unsigned long itc_last;
 		};
 	};
 };
...
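
Editor's note: an illustrative C model of the rule the comment above describes, and which the xen MOV_FROM_ITC macro implements in assembly: the guest-visible ITC is the machine ITC plus itc_offset, clamped so it never moves backwards across reads. The names vitc_state and vitc_read are hypothetical, not part of this patch:

#include <stdint.h>

struct vitc_state {
	uint64_t itc_offset;	/* vAR.ITC = mAR.ITC + itc_offset */
	uint64_t itc_last;	/* last value handed to the guest */
};

static uint64_t vitc_read(struct vitc_state *s, uint64_t machine_itc)
{
	uint64_t vitc = machine_itc + s->itc_offset;

	if (vitc <= s->itc_last)	/* would go backwards: bump by one */
		vitc = s->itc_last + 1;
	s->itc_last = vitc;		/* remember what the guest saw */
	return vitc;
}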
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/* read ar.itc in advance, and use it before leaving bank 0 */
+#define XEN_ACCOUNT_GET_STAMP		\
+	MOV_FROM_ITC(pUStk, p6, r20, r2);
+#else
+#define XEN_ACCOUNT_GET_STAMP
+#endif
+
 /*
  * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  * the minimum state necessary that allows us to turn psr.ic back
@@ -123,7 +132,7 @@
 	;;							\
 .mem.offset 0,0; st8.spill [r16]=r2,16;				\
 .mem.offset 8,0; st8.spill [r17]=r3,16;				\
-	ACCOUNT_GET_STAMP					\
+	XEN_ACCOUNT_GET_STAMP					\
 	adds r2=IA64_PT_REGS_R16_OFFSET,r1;			\
 	;;							\
 	EXTRA;							\
...
/******************************************************************************
* arch/ia64/include/asm/xen/patchlist.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#define __paravirt_start_gate_fsyscall_patchlist \
__xen_start_gate_fsyscall_patchlist
#define __paravirt_end_gate_fsyscall_patchlist \
__xen_end_gate_fsyscall_patchlist
#define __paravirt_start_gate_brl_fsys_bubble_down_patchlist \
__xen_start_gate_brl_fsys_bubble_down_patchlist
#define __paravirt_end_gate_brl_fsys_bubble_down_patchlist \
__xen_end_gate_brl_fsys_bubble_down_patchlist
#define __paravirt_start_gate_vtop_patchlist \
__xen_start_gate_vtop_patchlist
#define __paravirt_end_gate_vtop_patchlist \
__xen_end_gate_vtop_patchlist
#define __paravirt_start_gate_mckinley_e9_patchlist \
__xen_start_gate_mckinley_e9_patchlist
#define __paravirt_end_gate_mckinley_e9_patchlist \
__xen_end_gate_mckinley_e9_patchlist
@@ -55,6 +55,8 @@
 #define XSI_BANK1_R16			(XSI_BASE + XSI_BANK1_R16_OFS)
 #define XSI_BANKNUM			(XSI_BASE + XSI_BANKNUM_OFS)
 #define XSI_IHA				(XSI_BASE + XSI_IHA_OFS)
+#define XSI_ITC_OFFSET			(XSI_BASE + XSI_ITC_OFFSET_OFS)
+#define XSI_ITC_LAST			(XSI_BASE + XSI_ITC_LAST_OFS)
 #endif

 #ifndef __ASSEMBLY__
@@ -67,7 +69,7 @@
  * may have different semantics depending on whether they are executed
  * at PL0 vs PL!=0.  When paravirtualized, these instructions mustn't
  * be allowed to execute directly, lest incorrect semantics result. */
-extern void xen_fc(unsigned long addr);
+extern void xen_fc(void *addr);
 extern unsigned long xen_thash(unsigned long addr);

 /* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
@@ -80,8 +82,10 @@ extern unsigned long xen_thash(unsigned long addr);
 extern unsigned long xen_get_cpuid(int index);
 extern unsigned long xen_get_pmd(int index);

+#ifndef ASM_SUPPORTED
 extern unsigned long xen_get_eflag(void);	/* see xen_ia64_getreg */
 extern void xen_set_eflag(unsigned long);	/* see xen_ia64_setreg */
+#endif

 /************************************************/
 /* Instructions paravirtualized for performance */
@@ -106,6 +110,7 @@ extern void xen_set_eflag(unsigned long);	/* see xen_ia64_setreg */
 #define xen_get_virtual_pend()		\
 	(*(((uint8_t *)XEN_MAPPEDREGS->interrupt_mask_addr) - 1))

+#ifndef ASM_SUPPORTED
 /* Although all privileged operations can be left to trap and will
  * be properly handled by Xen, some are frequent enough that we use
  * hyperprivops for performance. */
@@ -123,6 +128,7 @@ extern void xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
 			       unsigned long val4);
 extern void xen_set_kr(unsigned long index, unsigned long val);
 extern void xen_ptcga(unsigned long addr, unsigned long size);
+#endif /* !ASM_SUPPORTED */

 #endif /* !__ASSEMBLY__ */
...
@@ -5,7 +5,7 @@
 extra-y	:= head.o init_task.o vmlinux.lds

 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o	\
-	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o		\
+	 irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \
	 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
	 unwind.o mca.o mca_asm.o topology.o dma-mapping.o
@@ -36,7 +36,8 @@ obj-$(CONFIG_PCI_MSI)		+= msi_ia64.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
-obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o \
+				   paravirt_patch.o

 obj-$(CONFIG_IA64_ESI)		+= esi.o
 ifneq ($(CONFIG_IA64_ESI),)
@@ -45,35 +46,13 @@ endif
 obj-$(CONFIG_DMAR)		+= pci-dma.o
 obj-$(CONFIG_SWIOTLB)		+= pci-swiotlb.o

-# The gate DSO image is built using a special linker script.
-targets += gate.so gate-syms.o
-
-extra-y += gate.so gate-syms.o gate.lds gate.o
-
 # fp_emulate() expects f2-f5,f16-f31 to contain the user-level state.
 CFLAGS_traps.o  += -mfixed-range=f2-f5,f16-f31

-CPPFLAGS_gate.lds := -P -C -U$(ARCH)
-
-quiet_cmd_gate = GATE $@
-      cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
-
-GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
-		     $(call ld-option, -Wl$(comma)--hash-style=sysv)
-$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
-	$(call if_changed,gate)
-
-$(obj)/built-in.o: $(obj)/gate-syms.o
-$(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o
-
-GATECFLAGS_gate-syms.o = -r
-$(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
-	$(call if_changed,gate)
-
-# gate-data.o contains the gate DSO image as data in section .data.gate.
-# We must build gate.so before we can assemble it.
-# Note: kbuild does not track this dependency due to usage of .incbin
-$(obj)/gate-data.o: $(obj)/gate.so
+# The gate DSO image is built using a special linker script.
+include $(srctree)/arch/ia64/kernel/Makefile.gate
+# tell compiled for native
+CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_NATIVE

 # Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
 define sed-y
@@ -109,9 +88,9 @@ include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
 clean-files += $(objtree)/include/asm-ia64/nr-irqs.h

 #
-# native ivt.S and entry.S
+# native ivt.S, entry.S and fsys.S
 #
-ASM_PARAVIRT_OBJS = ivt.o entry.o
+ASM_PARAVIRT_OBJS = ivt.o entry.o fsys.o
 define paravirtualized_native
 AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
 AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK
...
# The gate DSO image is built using a special linker script.
targets += gate.so gate-syms.o
extra-y += gate.so gate-syms.o gate.lds gate.o
CPPFLAGS_gate.lds := -P -C -U$(ARCH)
quiet_cmd_gate = GATE $@
cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@
GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \
$(call ld-option, -Wl$(comma)--hash-style=sysv)
$(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE
	$(call if_changed,gate)

$(obj)/built-in.o: $(obj)/gate-syms.o
$(obj)/built-in.o: ld_flags += -R $(obj)/gate-syms.o

GATECFLAGS_gate-syms.o = -r
$(obj)/gate-syms.o: $(obj)/gate.lds $(obj)/gate.o FORCE
	$(call if_changed,gate)
# gate-data.o contains the gate DSO image as data in section .data.gate.
# We must build gate.so before we can assemble it.
# Note: kbuild does not track this dependency due to usage of .incbin
$(obj)/gate-data.o: $(obj)/gate.so
@@ -890,7 +890,7 @@ __init void prefill_possible_map(void)
 		possible, max((possible - available_cpus), 0));

 	for (i = 0; i < possible; i++)
-		cpu_set(i, cpu_possible_map);
+		set_cpu_possible(i, true);
 }

 int acpi_map_lsapic(acpi_handle handle, int *pcpu)
@@ -928,9 +928,9 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	buffer.length = ACPI_ALLOCATE_BUFFER;
 	buffer.pointer = NULL;

-	cpus_complement(tmp_map, cpu_present_map);
-	cpu = first_cpu(tmp_map);
-	if (cpu >= NR_CPUS)
+	cpumask_complement(&tmp_map, cpu_present_mask);
+	cpu = cpumask_first(&tmp_map);
+	if (cpu >= nr_cpu_ids)
 		return -EINVAL;

 	acpi_map_cpu2node(handle, cpu, physid);
...
@@ -316,5 +316,7 @@ void foo(void)
 	DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
 	DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
 	DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
+	DEFINE_MAPPED_REG_OFS(XSI_ITC_OFFSET_OFS, itc_offset);
+	DEFINE_MAPPED_REG_OFS(XSI_ITC_LAST_OFS, itc_last);
 #endif /* CONFIG_XEN */
 }
@@ -456,6 +456,7 @@ efi_map_pal_code (void)
 		 GRANULEROUNDDOWN((unsigned long) pal_vaddr),
 		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
 		 IA64_GRANULE_SHIFT);
+	paravirt_dv_serialize_data();
 	ia64_set_psr(psr);		/* restore psr */
 }
...
@@ -735,7 +735,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
 __paravirt_work_processed_syscall:
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	adds r2=PT(LOADRS)+16,r12
-(pUStk)	mov.m r22=ar.itc			// fetch time at leave
+	MOV_FROM_ITC(pUStk, p9, r22, r19)	// fetch time at leave
 	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
 	;;
 (p6)	ld4 r31=[r18]				// load current_thread_info()->flags
@@ -984,7 +984,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 	.pred.rel.mutex pUStk,pKStk
 	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
-(pUStk)	mov.m r22=ar.itc	// M  fetch time at leave
+	MOV_FROM_ITC(pUStk, p9, r22, r29)	// M  fetch time at leave
 	nop.i 0
 	;;
 #else
...
@@ -25,6 +25,7 @@
 #include <asm/unistd.h>

 #include "entry.h"
+#include "paravirt_inst.h"

 /*
  * See Documentation/ia64/fsys.txt for details on fsyscalls.
@@ -279,7 +280,7 @@ ENTRY(fsys_gettimeofday)
 (p9)	cmp.eq p13,p0 = 0,r30	// if mmio_ptr, clear p13 jitter control
 	;;
 	.pred.rel.mutex p8,p9
-(p8)	mov r2 = ar.itc		// CPU_TIMER. 36 clocks latency!!!
+	MOV_FROM_ITC(p8, p6, r2, r10)	// CPU_TIMER. 36 clocks latency!!!
 (p9)	ld8 r2 = [r30]		// MMIO_TIMER. Could also have latency issues..
 (p13)	ld8 r25 = [r19]		// get itc_lastcycle value
 	ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_sec
@@ -418,7 +419,7 @@ EX(.fail_efault, ld8 r14=[r33])			// r14 <- *set
 	mov r17=(1 << (SIGKILL - 1)) | (1 << (SIGSTOP - 1))
 	;;
-	rsm psr.i				// mask interrupt delivery
+	RSM_PSR_I(p0, r18, r19)			// mask interrupt delivery
 	mov ar.ccv=0
 	andcm r14=r14,r17			// filter out SIGKILL & SIGSTOP
@@ -491,7 +492,7 @@ EX(.fail_efault, ld8 r14=[r33])	// r14 <- *set
 #ifdef CONFIG_SMP
 	st4.rel [r31]=r0			// release the lock
 #endif
-	ssm psr.i
+	SSM_PSR_I(p0, p9, r31)
 	;;
 	srlz.d					// ensure psr.i is set again
@@ -513,7 +514,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
 #ifdef CONFIG_SMP
 	st4.rel [r31]=r0			// release the lock
 #endif
-	ssm psr.i
+	SSM_PSR_I(p0, p9, r17)
 	;;
 	srlz.d
 	br.sptk.many fsys_fallback_syscall	// with signal pending, do the heavy-weight syscall
@@ -521,7 +522,7 @@ EX(.fail_efault, (p15) st8 [r34]=r3)
 #ifdef CONFIG_SMP
 .lock_contention:
 	/* Rather than spinning here, fall back on doing a heavy-weight syscall. */
-	ssm psr.i
+	SSM_PSR_I(p0, p9, r17)
 	;;
 	srlz.d
 	br.sptk.many fsys_fallback_syscall
@@ -592,17 +593,17 @@ ENTRY(fsys_fallback_syscall)
 	adds r17=-1024,r15
 	movl r14=sys_call_table
 	;;
-	rsm psr.i
+	RSM_PSR_I(p0, r26, r27)
 	shladd r18=r17,3,r14
 	;;
 	ld8 r18=[r18]				// load normal (heavy-weight) syscall entry-point
-	mov r29=psr				// read psr (12 cyc load latency)
+	MOV_FROM_PSR(p0, r29, r26)		// read psr (12 cyc load latency)
 	mov r27=ar.rsc
 	mov r21=ar.fpsr
 	mov r26=ar.pfs
 END(fsys_fallback_syscall)
 	/* FALL THROUGH */
-GLOBAL_ENTRY(fsys_bubble_down)
+GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 	.prologue
 	.altrp b6
 	.body
@@ -640,7 +641,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
	 *
	 * PSR.BE : already is turned off in __kernel_syscall_via_epc()
	 * PSR.AC : don't care (kernel normally turns PSR.AC on)
-	 * PSR.I  : already turned off by the time fsys_bubble_down gets
+	 * PSR.I  : already turned off by the time paravirt_fsys_bubble_down gets
	 *	    invoked
	 * PSR.DFL: always 0 (kernel never turns it on)
	 * PSR.DFH: don't care --- kernel never touches f32-f127 on its own
@@ -650,7 +651,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
	 * PSR.DB : don't care --- kernel never enables kernel-level
	 *	    breakpoints
	 * PSR.TB : must be 0 already; if it wasn't zero on entry to
-	 *	    __kernel_syscall_via_epc, the branch to fsys_bubble_down
+	 *	    __kernel_syscall_via_epc, the branch to paravirt_fsys_bubble_down
	 *	    will trigger a taken branch; the taken-trap-handler then
	 *	    converts the syscall into a break-based system-call.
	 */
@@ -683,7 +684,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 	;;
 	mov ar.rsc=0				// M2 set enforced lazy mode, pl 0, LE, loadrs=0
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	mov.m r30=ar.itc			// M  get cycle for accounting
+	MOV_FROM_ITC(p0, p6, r30, r23)		// M  get cycle for accounting
 #else
 	nop.m 0
 #endif
@@ -734,21 +735,21 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 	mov rp=r14				// I0 set the real return addr
 	and r3=_TIF_SYSCALL_TRACEAUDIT,r3	// A
 	;;
-	ssm psr.i				// M2 we're on kernel stacks now, reenable irqs
+	SSM_PSR_I(p0, p6, r22)			// M2 we're on kernel stacks now, reenable irqs
 	cmp.eq p8,p0=r3,r0			// A
 (p10)	br.cond.spnt.many ia64_ret_from_syscall	// B return if bad call-frame or r15 is a NaT

 	nop.m 0
 (p8)	br.call.sptk.many b6=b6			// B (ignore return address)
 	br.cond.spnt ia64_trace_syscall		// B
-END(fsys_bubble_down)
+END(paravirt_fsys_bubble_down)

 	.rodata
 	.align 8
-	.globl fsyscall_table
+	.globl paravirt_fsyscall_table

-	data8 fsys_bubble_down
-fsyscall_table:
+	data8 paravirt_fsys_bubble_down
+paravirt_fsyscall_table:
 	data8 fsys_ni_syscall
 	data8 0				// exit		// 1025
 	data8 0				// read
@@ -1033,4 +1034,4 @@ fsyscall_table:
 	// fill in zeros for the remaining entries
 	.zero:
-	.space fsyscall_table + 8*NR_syscalls - .zero, 0
+	.space paravirt_fsyscall_table + 8*NR_syscalls - .zero, 0
@@ -13,6 +13,7 @@
 #include <asm/sigcontext.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
+#include "paravirt_inst.h"

 /*
  * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation,
@@ -48,87 +49,6 @@ GLOBAL_ENTRY(__kernel_syscall_via_break)
 }
 END(__kernel_syscall_via_break)

-/*
- * On entry:
- *	r11 = saved ar.pfs
- *	r15 = system call #
- *	b0  = saved return address
- *	b6  = return address
- * On exit:
- *	r11 = saved ar.pfs
- *	r15 = system call #
- *	b0  = saved return address
- *	all other "scratch" registers:	undefined
- *	all "preserved" registers:	same as on entry
- */
-
-GLOBAL_ENTRY(__kernel_syscall_via_epc)
-	.prologue
-	.altrp b6
-	.body
-{
-	/*
-	 * Note: the kernel cannot assume that the first two instructions in this
-	 * bundle get executed.  The remaining code must be safe even if
-	 * they do not get executed.
-	 */
-	adds r17=-1024,r15			// A
-	mov r10=0				// A    default to successful syscall execution
-	epc					// B	causes split-issue
-}
-	;;
-	rsm psr.be | psr.i			// M2 (5 cyc to srlz.d)
-	LOAD_FSYSCALL_TABLE(r14)		// X
-	;;
-	mov r16=IA64_KR(CURRENT)		// M2 (12 cyc)
-	shladd r18=r17,3,r14			// A
-	mov r19=NR_syscalls-1			// A
-	;;
-	lfetch [r18]				// M0|1
-	mov r29=psr				// M2 (12 cyc)
-	// If r17 is a NaT, p6 will be zero
-	cmp.geu p6,p7=r19,r17			// A    (sysnr > 0 && sysnr < 1024+NR_syscalls)?
-	;;
-	mov r21=ar.fpsr				// M2 (12 cyc)
-	tnat.nz p10,p9=r15			// I0
-	mov.i r26=ar.pfs			// I0 (would stall anyhow due to srlz.d...)
-	;;
-	srlz.d					// M0 (forces split-issue) ensure PSR.BE==0
-(p6)	ld8 r18=[r18]				// M0|1
-	nop.i 0
-	;;
-	nop.m 0
-(p6)	tbit.z.unc p8,p0=r18,0			// I0 (dual-issues with "mov b7=r18"!)
-	nop.i 0
-	;;
-(p8)	ssm psr.i
-(p6)	mov b7=r18				// I0
-(p8)	br.dptk.many b7				// B
-
-	mov r27=ar.rsc				// M2 (12 cyc)
-/*
- * brl.cond doesn't work as intended because the linker would convert this branch
- * into a branch to a PLT.  Perhaps there will be a way to avoid this with some
- * future version of the linker.  In the meantime, we just use an indirect branch
- * instead.
- */
-#ifdef CONFIG_ITANIUM
-(p6)	add r14=-8,r14				// r14 <- addr of fsys_bubble_down entry
-	;;
-(p6)	ld8 r14=[r14]				// r14 <- fsys_bubble_down
-	;;
-(p6)	mov b7=r14
-(p6)	br.sptk.many b7
-#else
-	BRL_COND_FSYS_BUBBLE_DOWN(p6)
-#endif
-	ssm psr.i
-	mov r10=-1
-(p10)	mov r8=EINVAL
-(p9)	mov r8=ENOSYS
-	FSYS_RETURN
-END(__kernel_syscall_via_epc)
-
 #	define ARG0_OFF		(16 + IA64_SIGFRAME_ARG0_OFFSET)
 #	define ARG1_OFF		(16 + IA64_SIGFRAME_ARG1_OFFSET)
 #	define ARG2_OFF		(16 + IA64_SIGFRAME_ARG2_OFFSET)
@@ -374,3 +294,92 @@ restore_rbs:
 	// invala not necessary as that will happen when returning to user-mode
 	br.cond.sptk back_from_restore_rbs
 END(__kernel_sigtramp)
+
+/*
+ * On entry:
+ *	r11 = saved ar.pfs
+ *	r15 = system call #
+ *	b0  = saved return address
+ *	b6  = return address
+ * On exit:
+ *	r11 = saved ar.pfs
+ *	r15 = system call #
+ *	b0  = saved return address
+ *	all other "scratch" registers:	undefined
+ *	all "preserved" registers:	same as on entry
+ */
+
+GLOBAL_ENTRY(__kernel_syscall_via_epc)
+	.prologue
+	.altrp b6
+	.body
+{
+	/*
+	 * Note: the kernel cannot assume that the first two instructions in this
+	 * bundle get executed.  The remaining code must be safe even if
+	 * they do not get executed.
+	 */
+	adds r17=-1024,r15			// A
+	mov r10=0				// A    default to successful syscall execution
+	epc					// B	causes split-issue
+}
+	;;
+	RSM_PSR_BE_I(r20, r22)			// M2 (5 cyc to srlz.d)
+	LOAD_FSYSCALL_TABLE(r14)		// X
+	;;
+	mov r16=IA64_KR(CURRENT)		// M2 (12 cyc)
+	shladd r18=r17,3,r14			// A
+	mov r19=NR_syscalls-1			// A
+	;;
+	lfetch [r18]				// M0|1
+	MOV_FROM_PSR(p0, r29, r8)		// M2 (12 cyc)
+	// If r17 is a NaT, p6 will be zero
+	cmp.geu p6,p7=r19,r17			// A    (sysnr > 0 && sysnr < 1024+NR_syscalls)?
+	;;
+	mov r21=ar.fpsr				// M2 (12 cyc)
+	tnat.nz p10,p9=r15			// I0
+	mov.i r26=ar.pfs			// I0 (would stall anyhow due to srlz.d...)
+	;;
+	srlz.d					// M0 (forces split-issue) ensure PSR.BE==0
+(p6)	ld8 r18=[r18]				// M0|1
+	nop.i 0
+	;;
+	nop.m 0
+(p6)	tbit.z.unc p8,p0=r18,0			// I0 (dual-issues with "mov b7=r18"!)
+	nop.i 0
+	;;
+	SSM_PSR_I(p8, p14, r25)
+(p6)	mov b7=r18				// I0
+(p8)	br.dptk.many b7				// B
+
+	mov r27=ar.rsc				// M2 (12 cyc)
+/*
+ * brl.cond doesn't work as intended because the linker would convert this branch
+ * into a branch to a PLT.  Perhaps there will be a way to avoid this with some
+ * future version of the linker.  In the meantime, we just use an indirect branch
+ * instead.
+ */
+#ifdef CONFIG_ITANIUM
+(p6)	add r14=-8,r14				// r14 <- addr of fsys_bubble_down entry
+	;;
+(p6)	ld8 r14=[r14]				// r14 <- fsys_bubble_down
+	;;
+(p6)	mov b7=r14
+(p6)	br.sptk.many b7
+#else
+	BRL_COND_FSYS_BUBBLE_DOWN(p6)
+#endif
+	SSM_PSR_I(p0, p14, r10)
+	mov r10=-1
+(p10)	mov r8=EINVAL
+(p9)	mov r8=ENOSYS
+	FSYS_RETURN
+#ifdef CONFIG_PARAVIRT
+	/*
+	 * pad to make the size of this symbol constant
+	 * independent of paravirtualization.
+	 */
+	.align PAGE_SIZE / 8
+#endif
+END(__kernel_syscall_via_epc)
@@ -7,6 +7,7 @@

 #include <asm/system.h>
+#include "paravirt_patchlist.h"

 SECTIONS
 {
@@ -33,21 +34,21 @@ SECTIONS
   . = GATE_ADDR + 0x600;

   .data.patch		: {
-    __start_gate_mckinley_e9_patchlist = .;
+    __paravirt_start_gate_mckinley_e9_patchlist = .;
     *(.data.patch.mckinley_e9)
-    __end_gate_mckinley_e9_patchlist = .;
+    __paravirt_end_gate_mckinley_e9_patchlist = .;

-    __start_gate_vtop_patchlist = .;
+    __paravirt_start_gate_vtop_patchlist = .;
     *(.data.patch.vtop)
-    __end_gate_vtop_patchlist = .;
+    __paravirt_end_gate_vtop_patchlist = .;

-    __start_gate_fsyscall_patchlist = .;
+    __paravirt_start_gate_fsyscall_patchlist = .;
     *(.data.patch.fsyscall_table)
-    __end_gate_fsyscall_patchlist = .;
+    __paravirt_end_gate_fsyscall_patchlist = .;

-    __start_gate_brl_fsys_bubble_down_patchlist = .;
+    __paravirt_start_gate_brl_fsys_bubble_down_patchlist = .;
     *(.data.patch.brl_fsys_bubble_down)
-    __end_gate_brl_fsys_bubble_down_patchlist = .;
+    __paravirt_end_gate_brl_fsys_bubble_down_patchlist = .;
   } :readable

   .IA_64.unwind_info	: { *(.IA_64.unwind_info*) }
...
@@ -1050,7 +1050,7 @@ END(ia64_delay_loop)
  * except that the multiplication and the shift are done with 128-bit
  * intermediate precision so that we can produce a full 64-bit result.
  */
-GLOBAL_ENTRY(sched_clock)
+GLOBAL_ENTRY(ia64_native_sched_clock)
 	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	mov.m r9=ar.itc		// fetch cycle-counter	(35 cyc)
 	;;
@@ -1066,7 +1066,13 @@ GLOBAL_ENTRY(ia64_native_sched_clock)
 	;;
 	shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
 	br.ret.sptk.many rp
-END(sched_clock)
+END(ia64_native_sched_clock)
+
+#ifndef CONFIG_PARAVIRT
+//unsigned long long
+//sched_clock(void) __attribute__((alias("ia64_native_sched_clock")));
+.global sched_clock
+sched_clock = ia64_native_sched_clock
+#endif

 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 GLOBAL_ENTRY(cycle_to_cputime)
...
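
Editor's note: the commented-out lines in the hunk above show the C form this assembler alias stands in for. As a sketch, this is what the !CONFIG_PARAVIRT case amounts to; it only compiles if the alias target is defined in the same translation unit, which ia64_native_sched_clock (defined in head.S) is not, and that is why the kernel expresses the alias in assembly instead:

/* C equivalent of the assembler alias (illustrative, won't link here
 * because the target lives in head.S, not in this translation unit). */
extern unsigned long long ia64_native_sched_clock(void);

unsigned long long sched_clock(void)
	__attribute__((alias("ia64_native_sched_clock")));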
@@ -804,7 +804,7 @@ ENTRY(break_fault)
 	///////////////////////////////////////////////////////////////////////
 	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-	mov.m r30=ar.itc			// M    get cycle for accounting
+	MOV_FROM_ITC(p0, p14, r30, r18)		// M    get cycle for accounting
 #else
 	mov b6=r30				// I0   setup syscall handler branch reg early
 #endif
...
@@ -1456,9 +1456,9 @@ ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
 		ia64_mca_cmc_int_handler(cmc_irq, arg);

-	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
+	cpuid = cpumask_next(cpuid+1, cpu_online_mask);

-	if (cpuid < NR_CPUS) {
+	if (cpuid < nr_cpu_ids) {
 		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
 	} else {
 		/* If no log record, switch out of polling mode */
@@ -1525,7 +1525,7 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
 		ia64_mca_cpe_int_handler(cpe_irq, arg);

-	for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
+	cpuid = cpumask_next(cpuid+1, cpu_online_mask);

 	if (cpuid < NR_CPUS) {
 		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
...
@@ -446,6 +446,14 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
 			mod->arch.opd = s;
 		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
 			mod->arch.unwind = s;
+#ifdef CONFIG_PARAVIRT
+		else if (strcmp(".paravirt_bundles",
+				secstrings + s->sh_name) == 0)
+			mod->arch.paravirt_bundles = s;
+		else if (strcmp(".paravirt_insts",
+				secstrings + s->sh_name) == 0)
+			mod->arch.paravirt_insts = s;
+#endif

 	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
 		printk(KERN_ERR "%s: sections missing\n", mod->name);
@@ -525,8 +533,7 @@ get_ltoff (struct module *mod, uint64_t value, int *okp)
 			goto found;

 	/* Not enough GOT entries? */
-	if (e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size))
-		BUG();
+	BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));

 	e->val = value;
 	++mod->arch.next_got_entry;
@@ -921,6 +928,30 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
 	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
 	if (mod->arch.unwind)
 		register_unwind_table(mod);
+#ifdef CONFIG_PARAVIRT
+	if (mod->arch.paravirt_bundles) {
+		struct paravirt_patch_site_bundle *start =
+			(struct paravirt_patch_site_bundle *)
+			mod->arch.paravirt_bundles->sh_addr;
+		struct paravirt_patch_site_bundle *end =
+			(struct paravirt_patch_site_bundle *)
+			(mod->arch.paravirt_bundles->sh_addr +
+			 mod->arch.paravirt_bundles->sh_size);
+
+		paravirt_patch_apply_bundle(start, end);
+	}
+	if (mod->arch.paravirt_insts) {
+		struct paravirt_patch_site_inst *start =
+			(struct paravirt_patch_site_inst *)
+			mod->arch.paravirt_insts->sh_addr;
+		struct paravirt_patch_site_inst *end =
+			(struct paravirt_patch_site_inst *)
+			(mod->arch.paravirt_insts->sh_addr +
+			 mod->arch.paravirt_insts->sh_size);
+
+		paravirt_patch_apply_inst(start, end);
+	}
+#endif
 	return 0;
 }
...
/******************************************************************************
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/bug.h>
#include <asm/paravirt.h>
#define DECLARE(name) \
extern unsigned long \
__ia64_native_start_gate_##name##_patchlist[]; \
extern unsigned long \
__ia64_native_end_gate_##name##_patchlist[]
DECLARE(fsyscall);
DECLARE(brl_fsys_bubble_down);
DECLARE(vtop);
DECLARE(mckinley_e9);
extern unsigned long __start_gate_section[];
#define ASSIGN(name) \
.start_##name##_patchlist = \
(unsigned long)__ia64_native_start_gate_##name##_patchlist, \
.end_##name##_patchlist = \
(unsigned long)__ia64_native_end_gate_##name##_patchlist
struct pv_patchdata pv_patchdata __initdata = {
ASSIGN(fsyscall),
ASSIGN(brl_fsys_bubble_down),
ASSIGN(vtop),
ASSIGN(mckinley_e9),
.gate_section = (void*)__start_gate_section,
};
unsigned long __init
paravirt_get_gate_patchlist(enum pv_gate_patchlist type)
{
#define CASE(NAME, name) \
case PV_GATE_START_##NAME: \
return pv_patchdata.start_##name##_patchlist; \
case PV_GATE_END_##NAME: \
return pv_patchdata.end_##name##_patchlist; \
switch (type) {
CASE(FSYSCALL, fsyscall);
CASE(BRL_FSYS_BUBBLE_DOWN, brl_fsys_bubble_down);
CASE(VTOP, vtop);
CASE(MCKINLEY_E9, mckinley_e9);
default:
BUG();
break;
}
return 0;
}
void * __init
paravirt_get_gate_section(void)
{
return pv_patchdata.gate_section;
}
/******************************************************************************
* linux/arch/ia64/xen/paravirt_patchlist.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#if defined(__IA64_GATE_PARAVIRTUALIZED_XEN)
#include <asm/xen/patchlist.h>
#else
#include <asm/native/patchlist.h>
#endif
...@@ -20,8 +20,11 @@ ...@@ -20,8 +20,11 @@
* *
*/ */
#include <linux/init.h>
#include <asm/asmmacro.h> #include <asm/asmmacro.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/paravirt_privop.h>
#include <asm/paravirt_patch.h>
#include "entry.h" #include "entry.h"
#define DATA8(sym, init_value) \ #define DATA8(sym, init_value) \
...@@ -32,29 +35,87 @@ ...@@ -32,29 +35,87 @@
data8 init_value ; \ data8 init_value ; \
.popsection .popsection
#define BRANCH(targ, reg, breg) \ #define BRANCH(targ, reg, breg, type) \
movl reg=targ ; \ PARAVIRT_PATCH_SITE_BR(PARAVIRT_PATCH_TYPE_BR_ ## type) ; \
;; \ ;; \
ld8 reg=[reg] ; \ movl reg=targ ; \
;; \ ;; \
mov breg=reg ; \ ld8 reg=[reg] ; \
;; \
mov breg=reg ; \
br.cond.sptk.many breg br.cond.sptk.many breg
#define BRANCH_PROC(sym, reg, breg) \ #define BRANCH_PROC(sym, reg, breg, type) \
DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
GLOBAL_ENTRY(paravirt_ ## sym) ; \ GLOBAL_ENTRY(paravirt_ ## sym) ; \
BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \
END(paravirt_ ## sym) END(paravirt_ ## sym)
#define BRANCH_PROC_UNWINFO(sym, reg, breg) \ #define BRANCH_PROC_UNWINFO(sym, reg, breg, type) \
DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \ DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
GLOBAL_ENTRY(paravirt_ ## sym) ; \ GLOBAL_ENTRY(paravirt_ ## sym) ; \
PT_REGS_UNWIND_INFO(0) ; \ PT_REGS_UNWIND_INFO(0) ; \
BRANCH(paravirt_ ## sym ## _targ, reg, breg) ; \ BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \
END(paravirt_ ## sym) END(paravirt_ ## sym)
BRANCH_PROC(switch_to, r22, b7) BRANCH_PROC(switch_to, r22, b7, SWITCH_TO)
BRANCH_PROC_UNWINFO(leave_syscall, r22, b7) BRANCH_PROC_UNWINFO(leave_syscall, r22, b7, LEAVE_SYSCALL)
BRANCH_PROC(work_processed_syscall, r2, b7) BRANCH_PROC(work_processed_syscall, r2, b7, WORK_PROCESSED_SYSCALL)
BRANCH_PROC_UNWINFO(leave_kernel, r22, b7) BRANCH_PROC_UNWINFO(leave_kernel, r22, b7, LEAVE_KERNEL)
#ifdef CONFIG_MODULES
#define __INIT_OR_MODULE .text
#define __INITDATA_OR_MODULE .data
#else
#define __INIT_OR_MODULE __INIT
#define __INITDATA_OR_MODULE __INITDATA
#endif /* CONFIG_MODULES */
__INIT_OR_MODULE
GLOBAL_ENTRY(paravirt_fc_i)
fc.i r32
br.ret.sptk.many rp
END(paravirt_fc_i)
__FINIT
__INIT_OR_MODULE
.align 32
GLOBAL_ENTRY(paravirt_nop_b_inst_bundle)
{
nop.b 0
nop.b 0
nop.b 0
}
END(paravirt_nop_b_inst_bundle)
__FINIT
/* NOTE: nop.[mfi] has the same format */
__INIT_OR_MODULE
GLOBAL_ENTRY(paravirt_nop_mfi_inst_bundle)
{
nop.m 0
nop.f 0
nop.i 0
}
END(paravirt_nop_mfi_inst_bundle)
__FINIT
__INIT_OR_MODULE
GLOBAL_ENTRY(paravirt_nop_bundle)
paravirt_nop_bundle_start:
{
nop 0
nop 0
nop 0
}
paravirt_nop_bundle_end:
END(paravirt_nop_bundle)
__FINIT
__INITDATA_OR_MODULE
.align 8
.global paravirt_nop_bundle_size
paravirt_nop_bundle_size:
data8 paravirt_nop_bundle_end - paravirt_nop_bundle_start
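A hedged sketch of how the nop templates exported above could be consumed from C; the helper name and the icache flush are illustrative assumptions, not code from this series.

#include <linux/string.h>
#include <asm/cacheflush.h>

extern const char paravirt_nop_bundle[];
extern const unsigned long paravirt_nop_bundle_size;

/* hypothetical: overwrite one 16-byte instruction bundle with nops */
static void nop_out_bundle(void *site)
{
	memcpy(site, paravirt_nop_bundle, paravirt_nop_bundle_size);
	flush_icache_range((unsigned long)site,
			   (unsigned long)site + paravirt_nop_bundle_size);
}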
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/paravirt.h>
#include <asm/patch.h> #include <asm/patch.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/sections.h> #include <asm/sections.h>
...@@ -169,16 +170,35 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) ...@@ -169,16 +170,35 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
ia64_srlz_i(); ia64_srlz_i();
} }
extern unsigned long ia64_native_fsyscall_table[NR_syscalls];
extern char ia64_native_fsys_bubble_down[];
struct pv_fsys_data pv_fsys_data __initdata = {
.fsyscall_table = (unsigned long *)ia64_native_fsyscall_table,
.fsys_bubble_down = (void *)ia64_native_fsys_bubble_down,
};
unsigned long * __init
paravirt_get_fsyscall_table(void)
{
return pv_fsys_data.fsyscall_table;
}
char * __init
paravirt_get_fsys_bubble_down(void)
{
return pv_fsys_data.fsys_bubble_down;
}
static void __init static void __init
patch_fsyscall_table (unsigned long start, unsigned long end) patch_fsyscall_table (unsigned long start, unsigned long end)
{ {
extern unsigned long fsyscall_table[NR_syscalls]; u64 fsyscall_table = (u64)paravirt_get_fsyscall_table();
s32 *offp = (s32 *) start; s32 *offp = (s32 *) start;
u64 ip; u64 ip;
while (offp < (s32 *) end) { while (offp < (s32 *) end) {
ip = (u64) ia64_imva((char *) offp + *offp); ip = (u64) ia64_imva((char *) offp + *offp);
ia64_patch_imm64(ip, (u64) fsyscall_table); ia64_patch_imm64(ip, fsyscall_table);
ia64_fc((void *) ip); ia64_fc((void *) ip);
++offp; ++offp;
} }
...@@ -189,7 +209,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end) ...@@ -189,7 +209,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end)
static void __init static void __init
patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
{ {
extern char fsys_bubble_down[]; u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down();
s32 *offp = (s32 *) start; s32 *offp = (s32 *) start;
u64 ip; u64 ip;
...@@ -207,13 +227,13 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) ...@@ -207,13 +227,13 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
void __init void __init
ia64_patch_gate (void) ia64_patch_gate (void)
{ {
# define START(name) ((unsigned long) __start_gate_##name##_patchlist) # define START(name) paravirt_get_gate_patchlist(PV_GATE_START_##name)
# define END(name) ((unsigned long)__end_gate_##name##_patchlist) # define END(name) paravirt_get_gate_patchlist(PV_GATE_END_##name)
patch_fsyscall_table(START(fsyscall), END(fsyscall)); patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL));
patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down)); patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN));
ia64_patch_vtop(START(vtop), END(vtop)); ia64_patch_vtop(START(VTOP), END(VTOP));
ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9)); ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9));
} }
void ia64_patch_phys_stack_reg(unsigned long val) void ia64_patch_phys_stack_reg(unsigned long val)
...@@ -229,7 +249,7 @@ void ia64_patch_phys_stack_reg(unsigned long val) ...@@ -229,7 +249,7 @@ void ia64_patch_phys_stack_reg(unsigned long val)
while (offp < end) { while (offp < end) {
ip = (u64) offp + *offp; ip = (u64) offp + *offp;
ia64_patch(ip, mask, imm); ia64_patch(ip, mask, imm);
ia64_fc(ip); ia64_fc((void *)ip);
++offp; ++offp;
} }
ia64_sync_i(); ia64_sync_i();
......
...@@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg) ...@@ -5603,7 +5603,7 @@ pfm_interrupt_handler(int irq, void *arg)
* /proc/perfmon interface, for debug only * /proc/perfmon interface, for debug only
*/ */
#define PFM_PROC_SHOW_HEADER ((void *)NR_CPUS+1) #define PFM_PROC_SHOW_HEADER ((void *)nr_cpu_ids+1)
static void * static void *
pfm_proc_start(struct seq_file *m, loff_t *pos) pfm_proc_start(struct seq_file *m, loff_t *pos)
...@@ -5612,7 +5612,7 @@ pfm_proc_start(struct seq_file *m, loff_t *pos) ...@@ -5612,7 +5612,7 @@ pfm_proc_start(struct seq_file *m, loff_t *pos)
return PFM_PROC_SHOW_HEADER; return PFM_PROC_SHOW_HEADER;
} }
while (*pos <= NR_CPUS) { while (*pos <= nr_cpu_ids) {
if (cpu_online(*pos - 1)) { if (cpu_online(*pos - 1)) {
return (void *)*pos; return (void *)*pos;
} }
......
...@@ -317,7 +317,7 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t ...@@ -317,7 +317,7 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
} }
n = data->cpu_check; n = data->cpu_check;
for (i = 0; i < NR_CPUS; i++) { for (i = 0; i < nr_cpu_ids; i++) {
if (cpu_isset(n, data->cpu_event)) { if (cpu_isset(n, data->cpu_event)) {
if (!cpu_online(n)) { if (!cpu_online(n)) {
cpu_clear(n, data->cpu_event); cpu_clear(n, data->cpu_event);
...@@ -326,7 +326,7 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t ...@@ -326,7 +326,7 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
cpu = n; cpu = n;
break; break;
} }
if (++n == NR_CPUS) if (++n == nr_cpu_ids)
n = 0; n = 0;
} }
...@@ -337,7 +337,7 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t ...@@ -337,7 +337,7 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
/* for next read, start checking at next CPU */ /* for next read, start checking at next CPU */
data->cpu_check = cpu; data->cpu_check = cpu;
if (++data->cpu_check == NR_CPUS) if (++data->cpu_check == nr_cpu_ids)
data->cpu_check = 0; data->cpu_check = 0;
snprintf(cmd, sizeof(cmd), "read %d\n", cpu); snprintf(cmd, sizeof(cmd), "read %d\n", cpu);
......
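The perfmon and salinfo hunks above replace the compile-time NR_CPUS bound with the runtime nr_cpu_ids. A minimal, self-contained sketch of the scanning idiom, assuming only standard cpumask APIs:

#include <linux/cpumask.h>

/* scan only CPU ids that can exist on this boot, not the config-time
 * maximum; cpu_online() filters out offline ids */
static int first_online_cpu(void)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (cpu_online(cpu))
			return cpu;
	return -1;
}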
...@@ -52,6 +52,7 @@ ...@@ -52,6 +52,7 @@
#include <asm/meminit.h> #include <asm/meminit.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/paravirt.h> #include <asm/paravirt.h>
#include <asm/paravirt_patch.h>
#include <asm/patch.h> #include <asm/patch.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -537,6 +538,7 @@ setup_arch (char **cmdline_p) ...@@ -537,6 +538,7 @@ setup_arch (char **cmdline_p)
paravirt_arch_setup_early(); paravirt_arch_setup_early();
ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
paravirt_patch_apply();
*cmdline_p = __va(ia64_boot_param->command_line); *cmdline_p = __va(ia64_boot_param->command_line);
strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
...@@ -730,10 +732,10 @@ static void * ...@@ -730,10 +732,10 @@ static void *
c_start (struct seq_file *m, loff_t *pos) c_start (struct seq_file *m, loff_t *pos)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map)) while (*pos < nr_cpu_ids && !cpu_online(*pos))
++*pos; ++*pos;
#endif #endif
return *pos < NR_CPUS ? cpu_data(*pos) : NULL; return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
} }
static void * static void *
...@@ -1016,8 +1018,7 @@ cpu_init (void) ...@@ -1016,8 +1018,7 @@ cpu_init (void)
| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
atomic_inc(&init_mm.mm_count); atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm; current->active_mm = &init_mm;
if (current->mm) BUG_ON(current->mm);
BUG();
ia64_mmu_init(ia64_imva(cpu_data)); ia64_mmu_init(ia64_imva(cpu_data));
ia64_mca_cpu_init(ia64_imva(cpu_data)); ia64_mca_cpu_init(ia64_imva(cpu_data));
......
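The BUG_ON() conversions here and in the sn files below all apply the same mechanical rule; a before/after sketch:

	/* before */
	if (current->mm)
		BUG();

	/* after: condition and assertion on one line */
	BUG_ON(current->mm);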
...@@ -166,11 +166,11 @@ send_IPI_allbutself (int op) ...@@ -166,11 +166,11 @@ send_IPI_allbutself (int op)
* Called with preemption disabled. * Called with preemption disabled.
*/ */
static inline void static inline void
send_IPI_mask(cpumask_t mask, int op) send_IPI_mask(const struct cpumask *mask, int op)
{ {
unsigned int cpu; unsigned int cpu;
for_each_cpu_mask(cpu, mask) { for_each_cpu(cpu, mask) {
send_IPI_single(cpu, op); send_IPI_single(cpu, op);
} }
} }
...@@ -316,7 +316,7 @@ void arch_send_call_function_single_ipi(int cpu) ...@@ -316,7 +316,7 @@ void arch_send_call_function_single_ipi(int cpu)
send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
} }
void arch_send_call_function_ipi(cpumask_t mask) void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{ {
send_IPI_mask(mask, IPI_CALL_FUNC); send_IPI_mask(mask, IPI_CALL_FUNC);
} }
......
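The IPI hunk above switches from passing cpumask_t by value to passing const struct cpumask * and iterating with for_each_cpu(). A self-contained sketch of the new convention:

#include <linux/cpumask.h>

static unsigned int count_cpus_in_mask(const struct cpumask *mask)
{
	unsigned int cpu, n = 0;

	for_each_cpu(cpu, mask)		/* visits each set bit in *mask */
		n++;
	return n;
}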
...@@ -581,14 +581,14 @@ smp_build_cpu_map (void) ...@@ -581,14 +581,14 @@ smp_build_cpu_map (void)
ia64_cpu_to_sapicid[0] = boot_cpu_id; ia64_cpu_to_sapicid[0] = boot_cpu_id;
cpus_clear(cpu_present_map); cpus_clear(cpu_present_map);
cpu_set(0, cpu_present_map); set_cpu_present(0, true);
cpu_set(0, cpu_possible_map); set_cpu_possible(0, true);
for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) { for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
sapicid = smp_boot_data.cpu_phys_id[i]; sapicid = smp_boot_data.cpu_phys_id[i];
if (sapicid == boot_cpu_id) if (sapicid == boot_cpu_id)
continue; continue;
cpu_set(cpu, cpu_present_map); set_cpu_present(cpu, true);
cpu_set(cpu, cpu_possible_map); set_cpu_possible(cpu, true);
ia64_cpu_to_sapicid[cpu] = sapicid; ia64_cpu_to_sapicid[cpu] = sapicid;
cpu++; cpu++;
} }
...@@ -626,12 +626,9 @@ smp_prepare_cpus (unsigned int max_cpus) ...@@ -626,12 +626,9 @@ smp_prepare_cpus (unsigned int max_cpus)
*/ */
if (!max_cpus) { if (!max_cpus) {
printk(KERN_INFO "SMP mode deactivated.\n"); printk(KERN_INFO "SMP mode deactivated.\n");
cpus_clear(cpu_online_map); init_cpu_online(cpumask_of(0));
cpus_clear(cpu_present_map); init_cpu_present(cpumask_of(0));
cpus_clear(cpu_possible_map); init_cpu_possible(cpumask_of(0));
cpu_set(0, cpu_online_map);
cpu_set(0, cpu_present_map);
cpu_set(0, cpu_possible_map);
return; return;
} }
} }
......
...@@ -50,6 +50,15 @@ EXPORT_SYMBOL(last_cli_ip); ...@@ -50,6 +50,15 @@ EXPORT_SYMBOL(last_cli_ip);
#endif #endif
#ifdef CONFIG_PARAVIRT
/* We need to define a real function for sched_clock, to override the
weak default version */
unsigned long long sched_clock(void)
{
return paravirt_sched_clock();
}
#endif
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT
static void static void
paravirt_clocksource_resume(void) paravirt_clocksource_resume(void)
......
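A hedged sketch of the dispatch the sched_clock hunk above relies on; the exact header definition is not visible in this merge, so the inline below is an assumption consistent with the .sched_clock member initialized in xen_time_ops later in the diff.

	/* assumed shape of the hook, for illustration only */
	static inline unsigned long long paravirt_sched_clock(void)
	{
		return pv_time_ops.sched_clock();
	}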
...@@ -169,6 +169,30 @@ SECTIONS ...@@ -169,6 +169,30 @@ SECTIONS
__end___mckinley_e9_bundles = .; __end___mckinley_e9_bundles = .;
} }
#if defined(CONFIG_PARAVIRT)
. = ALIGN(16);
.paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET)
{
__start_paravirt_bundles = .;
*(.paravirt_bundles)
__stop_paravirt_bundles = .;
}
. = ALIGN(16);
.paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET)
{
__start_paravirt_insts = .;
*(.paravirt_insts)
__stop_paravirt_insts = .;
}
. = ALIGN(16);
.paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET)
{
__start_paravirt_branches = .;
*(.paravirt_branches)
__stop_paravirt_branches = .;
}
#endif
#if defined(CONFIG_IA64_GENERIC) #if defined(CONFIG_IA64_GENERIC)
/* Machine Vector */ /* Machine Vector */
. = ALIGN(16); . = ALIGN(16);
...@@ -201,6 +225,12 @@ SECTIONS ...@@ -201,6 +225,12 @@ SECTIONS
__start_gate_section = .; __start_gate_section = .;
*(.data.gate) *(.data.gate)
__stop_gate_section = .; __stop_gate_section = .;
#ifdef CONFIG_XEN
. = ALIGN(PAGE_SIZE);
__xen_start_gate_section = .;
*(.data.gate.xen)
__xen_stop_gate_section = .;
#endif
} }
. = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose
* kernel data * kernel data
......
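The __start_*/__stop_* symbols added to the linker script above follow the usual pattern: C code sees them as the bounds of an array placed by the linker. A minimal sketch, with the size helper being illustrative:

	extern const char __start_paravirt_bundles[];
	extern const char __stop_paravirt_bundles[];

	/* illustrative: total bytes collected into .paravirt_bundles */
	static inline unsigned long paravirt_bundles_bytes(void)
	{
		return __stop_paravirt_bundles - __start_paravirt_bundles;
	}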
...@@ -70,7 +70,7 @@ static void kvm_flush_icache(unsigned long start, unsigned long len) ...@@ -70,7 +70,7 @@ static void kvm_flush_icache(unsigned long start, unsigned long len)
int l; int l;
for (l = 0; l < (len + 32); l += 32) for (l = 0; l < (len + 32); l += 32)
ia64_fc(start + l); ia64_fc((void *)(start + l));
ia64_sync_i(); ia64_sync_i();
ia64_srlz_i(); ia64_srlz_i();
......
...@@ -386,7 +386,7 @@ void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1, ...@@ -386,7 +386,7 @@ void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
else else
*rnat_addr = (*rnat_addr) & (~nat_mask); *rnat_addr = (*rnat_addr) & (~nat_mask);
ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore); ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
ia64_setreg(_IA64_REG_AR_RNAT, rnat); ia64_setreg(_IA64_REG_AR_RNAT, rnat);
} }
local_irq_restore(psr); local_irq_restore(psr);
......
...@@ -210,6 +210,7 @@ void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type) ...@@ -210,6 +210,7 @@ void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
phy_pte &= ~PAGE_FLAGS_RV_MASK; phy_pte &= ~PAGE_FLAGS_RV_MASK;
psr = ia64_clear_ic(); psr = ia64_clear_ic();
ia64_itc(type, va, phy_pte, itir_ps(itir)); ia64_itc(type, va, phy_pte, itir_ps(itir));
paravirt_dv_serialize_data();
ia64_set_psr(psr); ia64_set_psr(psr);
} }
...@@ -456,6 +457,7 @@ void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir, ...@@ -456,6 +457,7 @@ void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
phy_pte &= ~PAGE_FLAGS_RV_MASK; phy_pte &= ~PAGE_FLAGS_RV_MASK;
psr = ia64_clear_ic(); psr = ia64_clear_ic();
ia64_itc(type, ifa, phy_pte, ps); ia64_itc(type, ifa, phy_pte, ps);
paravirt_dv_serialize_data();
ia64_set_psr(psr); ia64_set_psr(psr);
} }
if (!(pte&VTLB_PTE_IO)) if (!(pte&VTLB_PTE_IO))
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/unistd.h> #include <asm/unistd.h>
#include <asm/mca.h> #include <asm/mca.h>
#include <asm/paravirt.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
...@@ -259,6 +260,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) ...@@ -259,6 +260,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
static void __init static void __init
setup_gate (void) setup_gate (void)
{ {
void *gate_section;
struct page *page; struct page *page;
/* /*
...@@ -266,10 +268,11 @@ setup_gate (void) ...@@ -266,10 +268,11 @@ setup_gate (void)
* headers etc. and once execute-only page to enable * headers etc. and once execute-only page to enable
* privilege-promotion via "epc": * privilege-promotion via "epc":
*/ */
page = virt_to_page(ia64_imva(__start_gate_section)); gate_section = paravirt_get_gate_section();
page = virt_to_page(ia64_imva(gate_section));
put_kernel_page(page, GATE_ADDR, PAGE_READONLY); put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL #ifdef HAVE_BUGGY_SEGREL
page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE)); page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else #else
put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
...@@ -633,8 +636,7 @@ mem_init (void) ...@@ -633,8 +636,7 @@ mem_init (void)
#endif #endif
#ifdef CONFIG_FLATMEM #ifdef CONFIG_FLATMEM
if (!mem_map) BUG_ON(!mem_map);
BUG();
max_mapnr = max_low_pfn; max_mapnr = max_low_pfn;
#endif #endif
...@@ -667,8 +669,8 @@ mem_init (void) ...@@ -667,8 +669,8 @@ mem_init (void)
* code can tell them apart. * code can tell them apart.
*/ */
for (i = 0; i < NR_syscalls; ++i) { for (i = 0; i < NR_syscalls; ++i) {
extern unsigned long fsyscall_table[NR_syscalls];
extern unsigned long sys_call_table[NR_syscalls]; extern unsigned long sys_call_table[NR_syscalls];
unsigned long *fsyscall_table = paravirt_get_fsyscall_table();
if (!fsyscall_table[i] || nolwsys) if (!fsyscall_table[i] || nolwsys)
fsyscall_table[i] = sys_call_table[i] | 1; fsyscall_table[i] = sys_call_table[i] | 1;
......
...@@ -309,7 +309,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, ...@@ -309,7 +309,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
preempt_disable(); preempt_disable();
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) { if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
platform_global_tlb_purge(mm, start, end, nbits); platform_global_tlb_purge(mm, start, end, nbits);
preempt_enable(); preempt_enable();
return; return;
......
...@@ -17,6 +17,7 @@ s/mov.*=.*cr\.iip/.warning \"cr.iip should not used directly\"/g ...@@ -17,6 +17,7 @@ s/mov.*=.*cr\.iip/.warning \"cr.iip should not used directly\"/g
s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not used directly\"/g
s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr s/mov.*=[^\.]*psr/.warning \"psr should not used directly\"/g # avoid ar.fpsr
s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not used directly\"/g
s/mov.*=.*ar\.itc.*/.warning \"ar.itc should not used directly\"/g
s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not used directly\"/g
s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not used directly\"/g
s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not used directly\"/g
......
...@@ -135,8 +135,7 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, ...@@ -135,8 +135,7 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
} }
war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
if (!war_list) BUG_ON(!war_list);
BUG();
SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
nasid, widget, __pa(war_list), 0, 0, 0 ,0); nasid, widget, __pa(war_list), 0, 0, 0 ,0);
...@@ -180,23 +179,20 @@ sn_common_hubdev_init(struct hubdev_info *hubdev) ...@@ -180,23 +179,20 @@ sn_common_hubdev_init(struct hubdev_info *hubdev)
sizeof(struct sn_flush_device_kernel *); sizeof(struct sn_flush_device_kernel *);
hubdev->hdi_flush_nasid_list.widget_p = hubdev->hdi_flush_nasid_list.widget_p =
kzalloc(size, GFP_KERNEL); kzalloc(size, GFP_KERNEL);
if (!hubdev->hdi_flush_nasid_list.widget_p) BUG_ON(!hubdev->hdi_flush_nasid_list.widget_p);
BUG();
for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
size = DEV_PER_WIDGET * size = DEV_PER_WIDGET *
sizeof(struct sn_flush_device_kernel); sizeof(struct sn_flush_device_kernel);
sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); sn_flush_device_kernel = kzalloc(size, GFP_KERNEL);
if (!sn_flush_device_kernel) BUG_ON(!sn_flush_device_kernel);
BUG();
dev_entry = sn_flush_device_kernel; dev_entry = sn_flush_device_kernel;
for (device = 0; device < DEV_PER_WIDGET; for (device = 0; device < DEV_PER_WIDGET;
device++, dev_entry++) { device++, dev_entry++) {
size = sizeof(struct sn_flush_device_common); size = sizeof(struct sn_flush_device_common);
dev_entry->common = kzalloc(size, GFP_KERNEL); dev_entry->common = kzalloc(size, GFP_KERNEL);
if (!dev_entry->common) BUG_ON(!dev_entry->common);
BUG();
if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST)) if (sn_prom_feature_available(PRF_DEVICE_FLUSH_LIST))
status = sal_get_device_dmaflush_list( status = sal_get_device_dmaflush_list(
hubdev->hdi_nasid, widget, device, hubdev->hdi_nasid, widget, device,
...@@ -326,8 +322,7 @@ sn_common_bus_fixup(struct pci_bus *bus, ...@@ -326,8 +322,7 @@ sn_common_bus_fixup(struct pci_bus *bus,
*/ */
controller->platform_data = kzalloc(sizeof(struct sn_platform_data), controller->platform_data = kzalloc(sizeof(struct sn_platform_data),
GFP_KERNEL); GFP_KERNEL);
if (controller->platform_data == NULL) BUG_ON(controller->platform_data == NULL);
BUG();
sn_platform_data = sn_platform_data =
(struct sn_platform_data *) controller->platform_data; (struct sn_platform_data *) controller->platform_data;
sn_platform_data->provider_soft = provider_soft; sn_platform_data->provider_soft = provider_soft;
......
...@@ -128,8 +128,7 @@ sn_legacy_pci_window_fixup(struct pci_controller *controller, ...@@ -128,8 +128,7 @@ sn_legacy_pci_window_fixup(struct pci_controller *controller,
{ {
controller->window = kcalloc(2, sizeof(struct pci_window), controller->window = kcalloc(2, sizeof(struct pci_window),
GFP_KERNEL); GFP_KERNEL);
if (controller->window == NULL) BUG_ON(controller->window == NULL);
BUG();
controller->window[0].offset = legacy_io; controller->window[0].offset = legacy_io;
controller->window[0].resource.name = "legacy_io"; controller->window[0].resource.name = "legacy_io";
controller->window[0].resource.flags = IORESOURCE_IO; controller->window[0].resource.flags = IORESOURCE_IO;
...@@ -168,8 +167,7 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count, ...@@ -168,8 +167,7 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
idx = controller->windows; idx = controller->windows;
new_count = controller->windows + count; new_count = controller->windows + count;
new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL); new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
if (new_window == NULL) BUG_ON(new_window == NULL);
BUG();
if (controller->window) { if (controller->window) {
memcpy(new_window, controller->window, memcpy(new_window, controller->window,
sizeof(struct pci_window) * controller->windows); sizeof(struct pci_window) * controller->windows);
...@@ -222,8 +220,7 @@ sn_io_slot_fixup(struct pci_dev *dev) ...@@ -222,8 +220,7 @@ sn_io_slot_fixup(struct pci_dev *dev)
(u64) __pa(pcidev_info), (u64) __pa(pcidev_info),
(u64) __pa(sn_irq_info)); (u64) __pa(sn_irq_info));
if (status) BUG_ON(status); /* Cannot get platform pci device information */
BUG(); /* Cannot get platform pci device information */
/* Copy over PIO Mapped Addresses */ /* Copy over PIO Mapped Addresses */
...@@ -307,8 +304,7 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) ...@@ -307,8 +304,7 @@ sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
prom_bussoft_ptr = __va(prom_bussoft_ptr); prom_bussoft_ptr = __va(prom_bussoft_ptr);
controller = kzalloc(sizeof(*controller), GFP_KERNEL); controller = kzalloc(sizeof(*controller), GFP_KERNEL);
if (!controller) BUG_ON(!controller);
BUG();
controller->segment = segment; controller->segment = segment;
/* /*
......
...@@ -732,8 +732,7 @@ void __init build_cnode_tables(void) ...@@ -732,8 +732,7 @@ void __init build_cnode_tables(void)
kl_config_hdr_t *klgraph_header; kl_config_hdr_t *klgraph_header;
nasid = cnodeid_to_nasid(node); nasid = cnodeid_to_nasid(node);
klgraph_header = ia64_sn_get_klconfig_addr(nasid); klgraph_header = ia64_sn_get_klconfig_addr(nasid);
if (klgraph_header == NULL) BUG_ON(klgraph_header == NULL);
BUG();
brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info); brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info);
while (brd) { while (brd) {
if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) { if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) {
...@@ -750,7 +749,7 @@ nasid_slice_to_cpuid(int nasid, int slice) ...@@ -750,7 +749,7 @@ nasid_slice_to_cpuid(int nasid, int slice)
{ {
long cpu; long cpu;
for (cpu = 0; cpu < NR_CPUS; cpu++) for (cpu = 0; cpu < nr_cpu_ids; cpu++)
if (cpuid_to_nasid(cpu) == nasid && if (cpuid_to_nasid(cpu) == nasid &&
cpuid_to_slice(cpu) == slice) cpuid_to_slice(cpu) == slice)
return cpu; return cpu;
......
...@@ -133,7 +133,7 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm) ...@@ -133,7 +133,7 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm)
unsigned long itc; unsigned long itc;
itc = ia64_get_itc(); itc = ia64_get_itc();
smp_flush_tlb_cpumask(mm->cpu_vm_mask); smp_flush_tlb_cpumask(*mm_cpumask(mm));
itc = ia64_get_itc() - itc; itc = ia64_get_itc() - itc;
__get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc; __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
__get_cpu_var(ptcstats).shub_ipi_flushes++; __get_cpu_var(ptcstats).shub_ipi_flushes++;
...@@ -182,7 +182,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, ...@@ -182,7 +182,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
nodes_clear(nodes_flushed); nodes_clear(nodes_flushed);
i = 0; i = 0;
for_each_cpu_mask(cpu, mm->cpu_vm_mask) { for_each_cpu(cpu, mm_cpumask(mm)) {
cnode = cpu_to_node(cpu); cnode = cpu_to_node(cpu);
node_set(cnode, nodes_flushed); node_set(cnode, nodes_flushed);
lcpu = cpu; lcpu = cpu;
...@@ -461,7 +461,7 @@ bool sn_cpu_disable_allowed(int cpu) ...@@ -461,7 +461,7 @@ bool sn_cpu_disable_allowed(int cpu)
static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
{ {
if (*offset < NR_CPUS) if (*offset < nr_cpu_ids)
return offset; return offset;
return NULL; return NULL;
} }
...@@ -469,7 +469,7 @@ static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) ...@@ -469,7 +469,7 @@ static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset)
static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset)
{ {
(*offset)++; (*offset)++;
if (*offset < NR_CPUS) if (*offset < nr_cpu_ids)
return offset; return offset;
return NULL; return NULL;
} }
...@@ -491,7 +491,7 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data) ...@@ -491,7 +491,7 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt); seq_printf(file, "# ptctest %d, flushopt %d\n", sn2_ptctest, sn2_flush_opt);
} }
if (cpu < NR_CPUS && cpu_online(cpu)) { if (cpu < nr_cpu_ids && cpu_online(cpu)) {
stat = &per_cpu(ptcstats, cpu); stat = &per_cpu(ptcstats, cpu);
seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
...@@ -554,7 +554,7 @@ static int __init sn2_ptc_init(void) ...@@ -554,7 +554,7 @@ static int __init sn2_ptc_init(void)
proc_sn2_ptc = proc_create(PTC_BASENAME, 0444, proc_sn2_ptc = proc_create(PTC_BASENAME, 0444,
NULL, &proc_sn2_ptc_operations); NULL, &proc_sn2_ptc_operations);
if (!&proc_sn2_ptc_operations) { if (!proc_sn2_ptc) {
printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
return -EINVAL; return -EINVAL;
} }
......
...@@ -275,8 +275,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb ...@@ -275,8 +275,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
/* get its interconnect topology */ /* get its interconnect topology */
sz = op->ports * sizeof(struct sn_hwperf_port_info); sz = op->ports * sizeof(struct sn_hwperf_port_info);
if (sz > sizeof(ptdata)) BUG_ON(sz > sizeof(ptdata));
BUG();
e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_ENUM_PORTS, nodeobj->id, sz, SN_HWPERF_ENUM_PORTS, nodeobj->id, sz,
(u64)&ptdata, 0, 0, NULL); (u64)&ptdata, 0, 0, NULL);
...@@ -310,8 +309,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb ...@@ -310,8 +309,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
if (router && (!found_cpu || !found_mem)) { if (router && (!found_cpu || !found_mem)) {
/* search for a node connected to the same router */ /* search for a node connected to the same router */
sz = router->ports * sizeof(struct sn_hwperf_port_info); sz = router->ports * sizeof(struct sn_hwperf_port_info);
if (sz > sizeof(ptdata)) BUG_ON(sz > sizeof(ptdata));
BUG();
e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
SN_HWPERF_ENUM_PORTS, router->id, sz, SN_HWPERF_ENUM_PORTS, router->id, sz,
(u64)&ptdata, 0, 0, NULL); (u64)&ptdata, 0, 0, NULL);
...@@ -612,7 +610,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) ...@@ -612,7 +610,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
if (cpu != SN_HWPERF_ARG_ANY_CPU) { if (cpu != SN_HWPERF_ARG_ANY_CPU) {
if (cpu >= NR_CPUS || !cpu_online(cpu)) { if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
r = -EINVAL; r = -EINVAL;
goto out; goto out;
} }
......
...@@ -256,9 +256,7 @@ void sn_dma_flush(u64 addr) ...@@ -256,9 +256,7 @@ void sn_dma_flush(u64 addr)
hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo; hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;
if (!hubinfo) { BUG_ON(!hubinfo);
BUG();
}
flush_nasid_list = &hubinfo->hdi_flush_nasid_list; flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
if (flush_nasid_list->widget_p == NULL) if (flush_nasid_list->widget_p == NULL)
......
...@@ -3,14 +3,29 @@ ...@@ -3,14 +3,29 @@
# #
obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \ obj-y := hypercall.o xenivt.o xensetup.o xen_pv_ops.o irq_xen.o \
hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o hypervisor.o xencomm.o xcom_hcall.o grant-table.o time.o suspend.o \
gate-data.o
obj-$(CONFIG_IA64_GENERIC) += machvec.o obj-$(CONFIG_IA64_GENERIC) += machvec.o
# The gate DSO image is built using a special linker script.
include $(srctree)/arch/ia64/kernel/Makefile.gate
# tell the build these files are compiled for xen
CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_XEN
AFLAGS_gate.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN -D__IA64_GATE_PARAVIRTUALIZED_XEN
# reuse the same source file as native.
$(obj)/gate.o: $(src)/../kernel/gate.S FORCE
$(call if_changed_dep,as_o_S)
$(obj)/gate.lds: $(src)/../kernel/gate.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN AFLAGS_xenivt.o += -D__IA64_ASM_PARAVIRTUALIZED_XEN
# xen multi compile # xen multi compile
ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S ASM_PARAVIRT_MULTI_COMPILE_SRCS = ivt.S entry.S fsys.S
ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o)) ASM_PARAVIRT_OBJS = $(addprefix xen-,$(ASM_PARAVIRT_MULTI_COMPILE_SRCS:.S=.o))
obj-y += $(ASM_PARAVIRT_OBJS) obj-y += $(ASM_PARAVIRT_OBJS)
define paravirtualized_xen define paravirtualized_xen
......
.section .data.gate.xen, "aw"
.incbin "arch/ia64/xen/gate.so"
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <asm/intrinsics.h> #include <asm/intrinsics.h>
#include <asm/xen/privop.h> #include <asm/xen/privop.h>
#ifdef __INTEL_COMPILER
/* /*
* Hypercalls without parameter. * Hypercalls without parameter.
*/ */
...@@ -72,6 +73,7 @@ GLOBAL_ENTRY(xen_set_rr0_to_rr4) ...@@ -72,6 +73,7 @@ GLOBAL_ENTRY(xen_set_rr0_to_rr4)
br.ret.sptk.many rp br.ret.sptk.many rp
;; ;;
END(xen_set_rr0_to_rr4) END(xen_set_rr0_to_rr4)
#endif
GLOBAL_ENTRY(xen_send_ipi) GLOBAL_ENTRY(xen_send_ipi)
mov r14=r32 mov r14=r32
......
...@@ -175,10 +175,58 @@ static void xen_itc_jitter_data_reset(void) ...@@ -175,10 +175,58 @@ static void xen_itc_jitter_data_reset(void)
} while (unlikely(ret != lcycle)); } while (unlikely(ret != lcycle));
} }
/* based on xen_sched_clock() in arch/x86/xen/time.c. */
/*
* This relies on HAVE_UNSTABLE_SCHED_CLOCK. If that cannot be defined,
* similar logic should be implemented here.
*/
/*
* Xen sched_clock implementation. Returns the number of unstolen
* nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
* states.
*/
static unsigned long long xen_sched_clock(void)
{
struct vcpu_runstate_info runstate;
unsigned long long now;
unsigned long long offset;
unsigned long long ret;
/*
* Ideally sched_clock should be called on a per-cpu basis
* anyway, so preemption should already be disabled, but that's
* not currently the case.
*/
preempt_disable();
/*
* Both ia64_native_sched_clock() and xen's runstate are
* based on ar.itc, so the difference between them is meaningful.
*/
now = ia64_native_sched_clock();
get_runstate_snapshot(&runstate);
WARN_ON(runstate.state != RUNSTATE_running);
offset = 0;
if (now > runstate.state_entry_time)
offset = now - runstate.state_entry_time;
ret = runstate.time[RUNSTATE_blocked] +
runstate.time[RUNSTATE_running] +
offset;
preempt_enable();
return ret;
}
struct pv_time_ops xen_time_ops __initdata = { struct pv_time_ops xen_time_ops __initdata = {
.init_missing_ticks_accounting = xen_init_missing_ticks_accounting, .init_missing_ticks_accounting = xen_init_missing_ticks_accounting,
.do_steal_accounting = xen_do_steal_accounting, .do_steal_accounting = xen_do_steal_accounting,
.clocksource_resume = xen_itc_jitter_data_reset, .clocksource_resume = xen_itc_jitter_data_reset,
.sched_clock = xen_sched_clock,
}; };
/* Called after suspend, to resume time. */ /* Called after suspend, to resume time. */
......
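A worked example of the unstolen-time arithmetic in xen_sched_clock() above, with illustrative numbers:

	/*
	 * now                       = 1,050,000 ns  (ia64_native_sched_clock)
	 * runstate.state_entry_time = 1,000,000 ns  -> offset = 50,000 ns
	 * time[RUNSTATE_running]    =   900,000 ns
	 * time[RUNSTATE_blocked]    =    60,000 ns
	 * sched_clock = 900,000 + 60,000 + 50,000 = 1,010,000 ns
	 */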