Commit 3e99675a authored by Nicolas Pitre, committed by Russell King

ARM: 7582/2: rename kvm_seq to vmalloc_seq so to avoid confusion with KVM

The kvm_seq value has nothing whatsoever to do with KVM, the Kernel-based Virtual Machine. Given that KVM support on ARM is imminent, it's best to rename kvm_seq to something that clearly identifies what it actually is: a sequence number for vmalloc section mappings.
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent ce7b1756
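
For context, the counter being renamed works like a lock-free sequence counter: a writer bumps a global counter after modifying the kernel's vmalloc page table entries, and each mm lazily re-copies those entries when it notices its own counter is stale. Below is a minimal userspace sketch of that resync pattern, under the assumption that a plain analogy is helpful; all names in it (master_seq, master_tbl, check_seq, etc.) are illustrative, not kernel API.

/*
 * Minimal userspace sketch of the sequence-counter resync pattern that
 * vmalloc_seq implements. The writer bumps master_seq after changing the
 * shared table; a reader copies the table and retries until the counter
 * is stable across the whole copy.
 */
#include <string.h>

#define NENTRIES 16

static unsigned int master_seq;            /* role of init_mm.context.vmalloc_seq */
static unsigned long master_tbl[NENTRIES]; /* role of init_mm's vmalloc PGD slots */

struct ctx {
	unsigned int seq;                  /* role of mm->context.vmalloc_seq */
	unsigned long tbl[NENTRIES];
};

/* Writer side: change the shared state, then publish a new sequence. */
static void master_update(unsigned int idx, unsigned long val)
{
	master_tbl[idx] = val;
	master_seq++;                      /* others notice the change */
}

/* Reader side: resync a stale context, retrying if a writer races us. */
static void check_seq(struct ctx *c)
{
	unsigned int seq;

	do {
		seq = master_seq;          /* snapshot before copying */
		memcpy(c->tbl, master_tbl, sizeof(c->tbl));
		c->seq = seq;
	} while (seq != master_seq);       /* a writer raced us: copy again */
}

On the fast path a caller first compares c->seq against master_seq and skips the copy when they match, which is exactly what check_and_switch_context() does in the diff below.
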
arch/arm/include/asm/mmu.h
@@ -7,7 +7,7 @@ typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	u64 id;
 #endif
-	unsigned int	kvm_seq;
+	unsigned int	vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
arch/arm/include/asm/mmu_context.h
@@ -20,7 +20,7 @@
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 
-void __check_kvm_seq(struct mm_struct *mm);
+void __check_vmalloc_seq(struct mm_struct *mm);
 
 #ifdef CONFIG_CPU_HAS_ASID
@@ -34,8 +34,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 static inline void check_and_switch_context(struct mm_struct *mm,
 					    struct task_struct *tsk)
 {
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	if (irqs_disabled())
 		/*
arch/arm/mm/context.c
@@ -186,8 +186,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	unsigned long flags;
 	unsigned int cpu = smp_processor_id();
 
-	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
-		__check_kvm_seq(mm);
+	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
+		__check_vmalloc_seq(mm);
 
 	/*
 	 * Required during context switch to avoid speculative page table
arch/arm/mm/ioremap.c
@@ -47,18 +47,18 @@ int ioremap_page(unsigned long virt, unsigned long phys,
 }
 EXPORT_SYMBOL(ioremap_page);
 
-void __check_kvm_seq(struct mm_struct *mm)
+void __check_vmalloc_seq(struct mm_struct *mm)
 {
 	unsigned int seq;
 
 	do {
-		seq = init_mm.context.kvm_seq;
+		seq = init_mm.context.vmalloc_seq;
 		memcpy(pgd_offset(mm, VMALLOC_START),
 		       pgd_offset_k(VMALLOC_START),
 		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
 					pgd_index(VMALLOC_START)));
-		mm->context.kvm_seq = seq;
-	} while (seq != init_mm.context.kvm_seq);
+		mm->context.vmalloc_seq = seq;
+	} while (seq != init_mm.context.vmalloc_seq);
 }
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
......@@ -89,13 +89,13 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
if (!pmd_none(pmd)) {
/*
* Clear the PMD from the page table, and
* increment the kvm sequence so others
* increment the vmalloc sequence so others
* notice this change.
*
* Note: this is still racy on SMP machines.
*/
pmd_clear(pmdp);
init_mm.context.kvm_seq++;
init_mm.context.vmalloc_seq++;
/*
* Free the page table, if there was one.
@@ -112,8 +112,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 	 * Ensure that the active_mm is up to date - we want to
 	 * catch any use-after-iounmap cases.
 	 */
-	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
-		__check_kvm_seq(current->active_mm);
+	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
+		__check_vmalloc_seq(current->active_mm);
 
 	flush_tlb_kernel_range(virt, end);
 }