Commit a662b813 authored by Tony Luck, committed by Marcelo Tosatti

KVM: ia64: fix build breakage due to host spinlock change

Len Brown pointed out that allmodconfig is broken for
ia64 because of:

arch/ia64/kvm/vmm.c: In function 'vmm_spin_unlock':
arch/ia64/kvm/vmm.c:70: error: 'spinlock_t' has no member named 'raw_lock'

KVM has its own spinlock routines. It should not depend on the base kernel
spinlock_t type (which changed when ia64 switched to ticket locks). Define
its own vmm_spinlock_t type.
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent dab4b911
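
For context, here is a minimal standalone sketch of the decoupling this
commit performs. It is illustrative only: the local barrier() macro and the
vmm_spin_unlock_sketch() helper are stand-ins for the kernel's own barrier()
and the _vmm_raw_spin_unlock() macro shown in the diff below.

/* Compiler barrier: keep stores to the protected data from being
 * reordered past the releasing store (stand-in for the kernel's). */
#define barrier() __asm__ __volatile__("" ::: "memory")

/* KVM's own lock type: a bare 32-bit word, matching the layout the
 * VMM's hand-coded lock sequence assumes. The old code cast to the
 * kernel's spinlock_t and poked ->raw_lock.lock, a member that went
 * away when ia64 switched spinlock_t to ticket locks. */
typedef struct {
	volatile unsigned int lock;	/* 0 = unlocked */
} vmm_spinlock_t;

/* Release: order prior stores, then clear the word with a plain
 * store, mirroring how the VMM unlock sequence releases the lock. */
static void vmm_spin_unlock_sketch(vmm_spinlock_t *x)
{
	barrier();
	x->lock = 0;
}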
arch/ia64/kvm/vcpu.h
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define _vmm_raw_spin_lock(x)		do {}while(0)
 #define _vmm_raw_spin_unlock(x)	do {}while(0)
 #else
+typedef struct {
+	volatile unsigned int lock;
+} vmm_spinlock_t;
 #define _vmm_raw_spin_lock(x)						\
 	do {								\
 		__u32 *ia64_spinlock_ptr = (__u32 *) (x);		\
@@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn)

 #define _vmm_raw_spin_unlock(x)				\
 	do { barrier();					\
-		((spinlock_t *)x)->raw_lock.lock = 0; }	\
+		((vmm_spinlock_t *)x)->lock = 0; }	\
 	while (0)
 #endif

-void vmm_spin_lock(spinlock_t *lock);
-void vmm_spin_unlock(spinlock_t *lock);
+void vmm_spin_lock(vmm_spinlock_t *lock);
+void vmm_spin_unlock(vmm_spinlock_t *lock);
 enum {
 	I_TLB = 1,
 	D_TLB = 2
arch/ia64/kvm/vmm.c
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
 	return ;
 }

-void vmm_spin_lock(spinlock_t *lock)
+void vmm_spin_lock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_lock(lock);
 }

-void vmm_spin_unlock(spinlock_t *lock)
+void vmm_spin_unlock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_unlock(lock);
 }
arch/ia64/kvm/vtlb.c
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
 {
 	u64 i, dirty_pages = 1;
 	u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
-	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+	vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
 	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;

 	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
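
The calling pattern around this lock is the usual lock/unlock pairing; a
hedged sketch of the dirty-log update it protects (the loop body is
paraphrased, not the exact kernel source):

	vmm_spin_lock(lock);
	/* set the bits for base_gfn .. base_gfn + dirty_pages - 1
	 * in dirty_bitmap while holding the VMM-private lock */
	vmm_spin_unlock(lock);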