The vmalloc() code uses vmalloc_sync_all() to propagate changes made to
the global reference kernel PGD into the per-task PGDs.

This currently iterates the global pgd_list, but that list is not
needed: we can walk the task list under RCU instead.
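
In sketch form, the new walk looks like this (illustration only, not
part of the patch; sync_one_address() is a made-up wrapper name, while
for_each_process_thread(), vmalloc_sync_one() and pgd_lock are the real
ones used in the fault.c hunk below):

  /*
   * Sketch: propagate the kernel PMD for one vmalloc address into
   * every user mm by walking the task list under RCU, instead of
   * walking the old global pgd_list.
   */
  static void sync_one_address(unsigned long address)
  {
          struct task_struct *g, *p;

          rcu_read_lock();
          spin_lock(&pgd_lock);

          for_each_process_thread(g, p) {
                  pmd_t *pmd;

                  /* Kernel threads have no mm: */
                  if (!p->mm)
                          continue;

                  /* The pgt_lock is only used on Xen: */
                  spin_lock(&p->mm->page_table_lock);
                  pmd = vmalloc_sync_one(p->mm->pgd, address);
                  spin_unlock(&p->mm->page_table_lock);

                  /* Same early exit as the old pgd_list loop: */
                  if (!pmd)
                          break;
          }

          spin_unlock(&pgd_lock);
          rcu_read_unlock();
  }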

Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Andy Lutomirski <l...@amacapital.net>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Brian Gerst <brge...@gmail.com>
Cc: Denys Vlasenko <dvlas...@redhat.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Rik van Riel <r...@redhat.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Waiman Long <waiman.l...@hp.com>
Cc: linux...@vger.kernel.org
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/mm/fault.c   | 21 ++++++++++++++-------
 arch/x86/mm/init_64.c |  3 ++-
 2 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 50342825f221..2d587e548b59 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -235,24 +235,31 @@ void vmalloc_sync_all(void)
        for (address = VMALLOC_START & PMD_MASK;
             address >= TASK_SIZE && address < FIXADDR_TOP;
             address += PMD_SIZE) {
-               struct page *page;
 
+               struct task_struct *g, *p;
+
+               rcu_read_lock();
                spin_lock(&pgd_lock);
-               list_for_each_entry(page, &pgd_list, lru) {
+
+               for_each_process_thread(g, p) {
                        spinlock_t *pgt_lock;
-                       pmd_t *ret;
+                       pmd_t *pmd_ret;
 
-                       /* the pgt_lock only for Xen */
-                       pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+                       if (!p->mm)
+                               continue;
 
+                       /* The pgt_lock is only used on Xen: */
+                       pgt_lock = &p->mm->page_table_lock;
                        spin_lock(pgt_lock);
-                       ret = vmalloc_sync_one(page_address(page), address);
+                       pmd_ret = vmalloc_sync_one(p->mm->pgd, address);
                        spin_unlock(pgt_lock);
 
-                       if (!ret)
+                       if (!pmd_ret)
                                break;
                }
+
                spin_unlock(&pgd_lock);
+               rcu_read_unlock();
        }
 }
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index beee532b76a7..730560c4873e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -184,11 +184,12 @@ void sync_global_pgds(unsigned long start, unsigned long end)
                spin_lock(&pgd_lock);
 
                for_each_process_thread(g, p) {
-                       pgd_t *pgd = p->mm->pgd;
+                       pgd_t *pgd;
                        spinlock_t *pgt_lock;
 
                        if (!p->mm)
                                continue;
+                       pgd = p->mm->pgd;
 
                        /* The pgt_lock is only used by Xen: */
                        pgt_lock = &p->mm->page_table_lock;
-- 
2.1.4
