With the infrastructure added for CLFLUSH, change_page_attr() can now flush
only the TLB entries of the pages that actually changed instead of always
flushing the whole TLB.

Take care of the old Athlon (K7) erratum with large page flushing in the
32-bit version.

Signed-off-by: Andi Kleen <[EMAIL PROTECTED]>

---
 arch/x86/mm/pageattr_32.c |   15 ++++++++-------
 arch/x86/mm/pageattr_64.c |   10 +++++-----
 2 files changed, 13 insertions(+), 12 deletions(-)
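
For reference, a simplified sketch of the flush path after this change. This
is not the literal patched code: the function name is made up, the
on_each_cpu() setup, the struct flush/struct flush_arg definitions from the
earlier patches in this series, and the 32-bit x86_model check before WBINVD
are omitted, and the no-CLFLUSH case is folded into the full-flush path as
the pre-patch code effectively did.

/*
 * Sketch only: per-page cache and TLB flush when CLFLUSH is usable,
 * full cache and TLB flush otherwise.
 */
static void flush_kernel_map_sketch(void *arg)
{
	struct flush_arg *a = arg;
	struct flush *f;
	int full = a->full_flush || !cpu_has_clflush;

	if (full)
		wbinvd();		/* flush the whole cache ... */

	list_for_each_entry(f, &a->l, l) {
		if (full)
			continue;
		/* ... otherwise only the cache lines and the single
		 * TLB entry of each page that actually changed. */
		clflush_cache_range((void *)f->addr, PAGE_SIZE);
		__flush_tlb_one(f->addr);
	}

	/* Full flush requested, or work around the large page flush
	 * erratum on early Athlons (K7, family 6). */
	if (full ||
	    (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	     boot_cpu_data.x86 == 6))
		__flush_tlb_all();
}

On CPUs with CLFLUSH this avoids both the expensive WBINVD and the global TLB
flush when only a few pages change their attributes.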

Index: linux/arch/x86/mm/pageattr_32.c
===================================================================
--- linux.orig/arch/x86/mm/pageattr_32.c
+++ linux/arch/x86/mm/pageattr_32.c
@@ -97,19 +97,20 @@ static void flush_kernel_map(void *arg)
        struct flush_arg *a = (struct flush_arg *)arg;
        struct flush *f;
 
-       if (!cpu_has_clflush)
-               a->full_flush = 1;
-       if (a->full_flush && boot_cpu_data.x86_model >= 4)
+       if ((!cpu_has_clflush || a->full_flush) && boot_cpu_data.x86_model >= 4)
                wbinvd();
        list_for_each_entry(f, &a->l, l) {
                if (!a->full_flush)
                        clflush_cache_range((void *)f->addr, PAGE_SIZE);
+               if (!a->full_flush)
+                       __flush_tlb_one(f->addr);
        }
 
-       /* Flush all to work around Errata in early athlons regarding 
-        * large page flushing. 
-        */
-       __flush_tlb_all();      
+       /* Work around the large page flush erratum in early Athlons (K7) */
+       if (a->full_flush ||
+           (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+            boot_cpu_data.x86 == 6))
+               __flush_tlb_all();
 }
 
 static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) 
Index: linux/arch/x86/mm/pageattr_64.c
===================================================================
--- linux.orig/arch/x86/mm/pageattr_64.c
+++ linux/arch/x86/mm/pageattr_64.c
@@ -92,18 +92,18 @@ static void flush_kernel_map(void *arg)
        struct flush_arg *a = (struct flush_arg *)arg;
        struct flush *f;
 
-       if (!cpu_has_clflush)
-               a->full_flush = 1;
-
        /* When clflush is available always use it because it is
           much cheaper than WBINVD. */
-       if (a->full_flush)
+       if (a->full_flush || !cpu_has_clflush)
                asm volatile("wbinvd" ::: "memory");
        list_for_each_entry(f, &a->l, l) {
                if (!a->full_flush)
                        clflush_cache_range((void *)f->addr, PAGE_SIZE);
+               if (!a->full_flush)
+                       __flush_tlb_one(f->addr);
        }
-       __flush_tlb_all();
+       if (a->full_flush)
+               __flush_tlb_all();
 }
 
 /* both protected by init_mm.mmap_sem */