Commit fce2ce95 authored by Peter Zijlstra, committed by Thomas Gleixner

x86/mm/cpa: Move CLFLUSH test into cpa_flush_array()

Rather than guarding every cpa_flush_array() caller with a CLFLUSH test,
move the test inside cpa_flush_array() itself.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Cc: Bin Yang <bin.yang@intel.com>
Cc: Mark Gross <mark.gross@intel.com>
Link: https://lkml.kernel.org/r/20180919085948.087848187@infradead.org
parent 5f464b33
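
The effect of the change can be illustrated outside the kernel. Below is a
minimal standalone C sketch of the pattern, using hypothetical stand-ins:
have_clflush() models static_cpu_has(X86_FEATURE_CLFLUSH), flush_array()
models cpa_flush_array(), and flush_everything() models the
cpa_flush_all()/WBINVD-style fallback. None of these are the real kernel
APIs; the stub bodies exist only to make the sketch compile and run.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for static_cpu_has(X86_FEATURE_CLFLUSH). */
static bool have_clflush(void)
{
	return true;
}

/* Hypothetical stand-in for the cpa_flush_all()/WBINVD fallback. */
static void flush_everything(void)
{
	printf("flush everything (full-cache fallback)\n");
}

/*
 * After the patch the fallback test lives inside the flush helper
 * itself, so every caller gets the CLFLUSH check for free ...
 */
static void flush_array(unsigned long baddr, int numpages)
{
	if (!have_clflush()) {
		flush_everything();
		return;
	}
	printf("flush %d page(s) at %#lx per cache line\n", numpages, baddr);
}

int main(void)
{
	/* ... and callers no longer need to guard the call themselves. */
	flush_array(0xffffffff81000000UL, 4);
	return 0;
}
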
@@ -328,6 +328,11 @@ static void cpa_flush_array(unsigned long baddr, unsigned long *start,
 
 	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
 
+	if (!static_cpu_has(X86_FEATURE_CLFLUSH)) {
+		cpa_flush_all(cache);
+		return;
+	}
+
 	flush_tlb_kernel_range(baddr, baddr + PAGE_SIZE * numpages);
 
 	if (!cache)
@@ -1756,19 +1761,19 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	cache = !!pgprot2cachemode(mask_set);
 
 	/*
-	 * On success we use CLFLUSH, when the CPU supports it to
-	 * avoid the WBINVD. If the CPU does not support it and in the
-	 * error case we fall back to cpa_flush_all (which uses
-	 * WBINVD):
+	 * On error; flush everything to be sure.
 	 */
-	if (!ret && boot_cpu_has(X86_FEATURE_CLFLUSH)) {
-		if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
-			cpa_flush_array(baddr, addr, numpages, cache,
-					cpa.flags, pages);
-		} else
-			cpa_flush_range(baddr, numpages, cache);
-	} else
+	if (ret) {
 		cpa_flush_all(cache);
+		goto out;
+	}
+
+	if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
+		cpa_flush_array(baddr, addr, numpages, cache,
+				cpa.flags, pages);
+	} else {
+		cpa_flush_range(baddr, numpages, cache);
+	}
 
 out:
 	return ret;
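
For comparison, here is a standalone C model of the flush selection that the
second hunk produces in change_page_attr_set_clr(): on error flush everything
and bail out, otherwise choose between the array and the range flush. The
function finish_flush(), the flush_*() stubs and the flag bit values are
hypothetical stand-ins for the sketch, not the kernel code.

#include <stdio.h>

/* Bit values chosen for the sketch only. */
#define CPA_ARRAY	0x1
#define CPA_PAGES_ARRAY	0x2

static void flush_all(void)   { printf("full flush (error fallback)\n"); }
static void flush_array(void) { printf("per-page array flush\n"); }
static void flush_range(void) { printf("linear range flush\n"); }

/*
 * Models the post-patch tail of change_page_attr_set_clr():
 * on error flush everything and bail out, otherwise pick the
 * array or the range flush.
 */
static int finish_flush(int ret, unsigned int flags)
{
	if (ret) {
		flush_all();
		goto out;
	}

	if (flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
		flush_array();
	else
		flush_range();
out:
	return ret;
}

int main(void)
{
	finish_flush(0, CPA_ARRAY);	/* success path: array flush */
	finish_flush(-22, 0);		/* error path: flush everything */
	return 0;
}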