xen_mm_pin_all()/unpin_all() are used to implement full guest instance
suspend/restore. It's a stop-all method that needs to iterate through
all allocated pgds in the system to fix them up for Xen's use.

This code uses pgd_list, probably because it was an easy interface.

But we want to remove the pgd_list, so convert the code over to walk
all tasks in the system. This is an equivalent method.
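
For reference, the conversion follows the usual RCU-protected task-walk
idiom (a minimal sketch of the pattern only; the actual change is in the
diff below, which also skips kernel threads that have no ->mm):

	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		pgd_t *pgd;

		if (!p->mm)	/* kernel thread, no user page tables */
			continue;

		pgd = p->mm->pgd;
		/* operate on virt_to_page(pgd), e.g. pin/unpin it */
	}
	rcu_read_unlock();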

(As I don't use Xen this was only build-tested.)

Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Andy Lutomirski <l...@amacapital.net>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Brian Gerst <brge...@gmail.com>
Cc: Denys Vlasenko <dvlas...@redhat.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: Waiman Long <waiman.l...@hp.com>
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/xen/mmu.c | 34 ++++++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dd151b2045b0..87a8354435f8 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -853,18 +853,29 @@ static void xen_pgd_pin(struct mm_struct *mm)
  */
 void xen_mm_pin_all(void)
 {
-       struct page *page;
+       struct task_struct *g, *p;
 
+       rcu_read_lock();
        spin_lock(&pgd_lock);
 
-       list_for_each_entry(page, &pgd_list, lru) {
+       for_each_process_thread(g, p) {
+               struct page *page;
+               pgd_t *pgd;
+
+               if (!p->mm)
+                       continue;
+
+               pgd = p->mm->pgd;
+               page = virt_to_page(pgd);
+
                if (!PagePinned(page)) {
-                       __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
+                       __xen_pgd_pin(&init_mm, pgd);
                        SetPageSavePinned(page);
                }
        }
 
        spin_unlock(&pgd_lock);
+       rcu_read_unlock();
 }
 
 /*
@@ -967,19 +978,30 @@ static void xen_pgd_unpin(struct mm_struct *mm)
  */
 void xen_mm_unpin_all(void)
 {
-       struct page *page;
+       struct task_struct *g, *p;
 
+       rcu_read_lock();
        spin_lock(&pgd_lock);
 
-       list_for_each_entry(page, &pgd_list, lru) {
+       for_each_process_thread(g, p) {
+               struct page *page;
+               pgd_t *pgd;
+
+               if (!p->mm)
+                       continue;
+
+               pgd = p->mm->pgd;
+               page = virt_to_page(pgd);
+
                if (PageSavePinned(page)) {
                        BUG_ON(!PagePinned(page));
-                       __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
+                       __xen_pgd_unpin(&init_mm, pgd);
                        ClearPageSavePinned(page);
                }
        }
 
        spin_unlock(&pgd_lock);
+       rcu_read_unlock();
 }
 
 static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
-- 
2.1.4
