Commit 6f22bd32 authored by Alexander Graf

KVM: PPC: Book3S HV: Make HTAB code LE host aware

When running on an LE host all data structures are kept in little endian
byte order. However, the HTAB still needs to be maintained in big endian.

So every time we access the HTAB we need to make sure we do so in the right
byte order. Fix up all accesses to byte swap manually.
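
To make the swap pattern concrete, here is a minimal sketch of the access
discipline the patch establishes (illustrative only; the helper names
read_hpte_v/write_hpte_v are hypothetical and not from this commit):

/*
 * Sketch, not part of the patch: every load from the HTAB converts from
 * big endian to host order, and every store converts back.
 */
#include <asm/byteorder.h>

static unsigned long read_hpte_v(__be64 *hptep)
{
	return be64_to_cpu(hptep[0]);	/* HTAB doubleword -> host order */
}

static void write_hpte_v(__be64 *hptep, unsigned long v)
{
	hptep[0] = cpu_to_be64(v);	/* host order -> HTAB doubleword */
}

On a big-endian host cpu_to_be64/be64_to_cpu compile to no-ops, so the swaps
cost nothing there.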
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 8f6822c4
arch/powerpc/include/asm/kvm_book3s.h
@@ -162,9 +162,9 @@ extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
 			bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 			unsigned long *rmap, long pte_index, int realmode);
-extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
 			unsigned long pte_index);
-void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
 			unsigned long pte_index);
 extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
 			unsigned long *nb_ret);
arch/powerpc/include/asm/kvm_book3s_64.h
@@ -59,20 +59,29 @@ extern unsigned long kvm_rma_pages;
 /* These bits are reserved in the guest view of the HPTE */
 #define HPTE_GR_RESERVED	HPTE_GR_MODIFIED
 
-static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
+static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
 {
 	unsigned long tmp, old;
+	__be64 be_lockbit, be_bits;
+
+	/*
+	 * We load/store in native endian, but the HTAB is in big endian. If
+	 * we byte swap all data we apply on the PTE we're implicitly correct
+	 * again.
+	 */
+	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
+	be_bits = cpu_to_be64(bits);
 
 	asm volatile("	ldarx	%0,0,%2\n"
 		     "	and.	%1,%0,%3\n"
 		     "	bne	2f\n"
-		     "	ori	%0,%0,%4\n"
+		     "	or	%0,%0,%4\n"
 		     "	stdcx.	%0,0,%2\n"
 		     "	beq+	2f\n"
 		     "	mr	%1,%3\n"
 		     "2:	isync"
 		     : "=&r" (tmp), "=&r" (old)
-		     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
+		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
 		     : "cc", "memory");
 	return old == 0;
 }
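
For context, a hedged sketch of how a caller would use the reworked
try_lock_hpte (the caller shape and the hpt_virt field are assumptions about
the surrounding Book3S HV code of this era, not part of this diff):

/*
 * Hypothetical caller, simplified: the real code adds memory barriers
 * and error paths. pte_index indexes 16-byte HPTEs in the HTAB.
 */
__be64 *hptep = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
unsigned long v;

while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
	cpu_relax();

v = be64_to_cpu(hptep[0]);	/* host-order view of HPTE word 0 */
/* ... examine or modify the entry ... */
hptep[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK);	/* store also unlocks */

Because try_lock_hpte now byte swaps both the lock bit and the caller's bits
before touching the entry, the ldarx/stdcx. sequence itself never needs to
know the HTAB's byte order.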
(Two further file diffs are collapsed in this view.)