Commit 018d2ad0 authored by Andi Kleen, committed by Linus Torvalds

x86: change_page_attr bandaids

- Disable CLFLUSH again; it is still broken. Always do WBINVD.
- Always flush in the i386 case, not only when there are deferred pages.

These are both brute-force inefficient fixes, to be improved
next release cycle.

The changes to i386 are a little more extensive than strictly
needed (some dead code is added), but this brings the code closer to the
x86-64 version, and the dead code will be used soon.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 55181000
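For context, the trade-off this patch sidesteps: CLFLUSH writes back and invalidates one cache line at a time, so flushing only the pages whose attributes changed is much cheaper than WBINVD, which writes back and invalidates the entire cache of the CPU executing it. The sketch below is illustrative only and not part of the commit; SKETCH_PAGE_SIZE and SKETCH_CLFLUSH_STRIDE are assumed constants (the kernel takes the real stride from boot_cpu_data.x86_clflush_size), and it needs an x86 target with GCC-style inline asm.

	#define SKETCH_PAGE_SIZE	4096UL
	#define SKETCH_CLFLUSH_STRIDE	64UL	/* assumed; the real value comes from CPUID */

	/* Targeted flush: write back and invalidate only the lines of one page. */
	static void sketch_clflush_page(void *page)
	{
		unsigned long adr = (unsigned long)page;
		unsigned long i;

		for (i = 0; i < SKETCH_PAGE_SIZE; i += SKETCH_CLFLUSH_STRIDE)
			asm volatile("clflush (%0)" :: "r" (adr + i));
	}

	/* Brute-force flush: write back and invalidate the whole cache.
	 * Privileged instruction, so this only makes sense in kernel context. */
	static void sketch_wbinvd(void)
	{
		asm volatile("wbinvd" ::: "memory");
	}

With CLFLUSH disabled by this patch, both the i386 and x86-64 change_page_attr() flush paths fall back to the WBINVD route (on i386, broadcast to every CPU via on_each_cpu()).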
@@ -68,14 +68,23 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	return base;
 }
 
-static void flush_kernel_map(void *arg)
+static void cache_flush_page(struct page *p)
 {
-	unsigned long adr = (unsigned long)arg;
+	unsigned long adr = (unsigned long)page_address(p);
+	int i;
+	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		asm volatile("clflush (%0)" :: "r" (adr + i));
+}
+
+static void flush_kernel_map(void *arg)
+{
+	struct list_head *lh = (struct list_head *)arg;
+	struct page *p;
 
-	if (adr && cpu_has_clflush) {
-		int i;
-		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-			asm volatile("clflush (%0)" :: "r" (adr + i));
-	} else if (boot_cpu_data.x86_model >= 4)
+	/* High level code is not ready for clflush yet */
+	if (0 && cpu_has_clflush) {
+		list_for_each_entry (p, lh, lru)
+			cache_flush_page(p);
+	} else if (boot_cpu_data.x86_model >= 4)
 		wbinvd();
@@ -181,9 +190,9 @@ __change_page_attr(struct page *page, pgprot_t prot)
 	return 0;
 }
 
-static inline void flush_map(void *adr)
+static inline void flush_map(struct list_head *l)
 {
-	on_each_cpu(flush_kernel_map, adr, 1, 1);
+	on_each_cpu(flush_kernel_map, l, 1, 1);
 }
 
 /*
@@ -225,11 +234,8 @@ void global_flush_tlb(void)
 	spin_lock_irq(&cpa_lock);
 	list_replace_init(&df_list, &l);
 	spin_unlock_irq(&cpa_lock);
-	if (!cpu_has_clflush)
-		flush_map(NULL);
+	flush_map(&l);
 	list_for_each_entry_safe(pg, next, &l, lru) {
-		if (cpu_has_clflush)
-			flush_map(page_address(pg));
 		__free_page(pg);
 	}
 }
...
@@ -74,10 +74,11 @@ static void flush_kernel_map(void *arg)
 	struct page *pg;
 
 	/* When clflush is available always use it because it is
-	   much cheaper than WBINVD */
-	if (!cpu_has_clflush)
+	   much cheaper than WBINVD. Disable clflush for now because
+	   the high level code is not ready yet */
+	if (1 || !cpu_has_clflush)
 		asm volatile("wbinvd" ::: "memory");
-	list_for_each_entry(pg, l, lru) {
+	else list_for_each_entry(pg, l, lru) {
 		void *adr = page_address(pg);
 		if (cpu_has_clflush)
 			cache_flush_page(adr);
...