Commit 2171787b authored by Pallipadi, Venkatesh's avatar Pallipadi, Venkatesh Committed by H. Peter Anvin

x86: avoid back to back on_each_cpu in cpa_flush_array

Cleanup cpa_flush_array() to avoid back to back on_each_cpu() calls.

[ Impact: optimizes fix 0af48f42 ]
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 46176b4f
...@@ -204,30 +204,19 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache) ...@@ -204,30 +204,19 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
} }
} }
/*
 * on_each_cpu() callback: execute WBINVD on the local CPU to write back
 * and invalidate its caches.  The void * argument exists only to satisfy
 * the on_each_cpu() callback signature and is ignored.
 */
static void wbinvd_local(void *unused)
{
wbinvd();
}
static void cpa_flush_array(unsigned long *start, int numpages, int cache, static void cpa_flush_array(unsigned long *start, int numpages, int cache,
int in_flags, struct page **pages) int in_flags, struct page **pages)
{ {
unsigned int i, level; unsigned int i, level;
unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
BUG_ON(irqs_disabled()); BUG_ON(irqs_disabled());
on_each_cpu(__cpa_flush_range, NULL, 1); on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
if (!cache) if (!cache || do_wbinvd)
return; return;
/* 4M threshold */
if (numpages >= 1024) {
if (boot_cpu_data.x86 >= 4)
on_each_cpu(wbinvd_local, NULL, 1);
return;
}
/* /*
* We only need to flush on one CPU, * We only need to flush on one CPU,
* clflush is a MESI-coherent instruction that * clflush is a MESI-coherent instruction that
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.