Commit-ID:  9db1bdb2eaa80a787f55806603b38d0068a89f1e
Gitweb:     http://git.kernel.org/tip/9db1bdb2eaa80a787f55806603b38d0068a89f1e
Author:     Johannes Weiner <han...@cmpxchg.org>
AuthorDate: Thu, 25 Oct 2012 12:49:51 +0200
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Mon, 29 Oct 2012 07:54:23 +0100

sched, numa, mm: Add memcg support to do_huge_pmd_numa_page()

Add memory control group (memcg) support to huge page migration in
do_huge_pmd_numa_page(): transfer the memcg charge from the old huge
page to the new one with mem_cgroup_prepare_migration() /
mem_cgroup_end_migration(), committing the transfer under the page
table lock.

Signed-off-by: Johannes Weiner <han...@cmpxchg.org>
Tested-by: Zhouping Liu <z...@redhat.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijls...@chello.nl>
Cc: Andrea Arcangeli <aarca...@redhat.com>
Cc: Rik van Riel <r...@redhat.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Link: http://lkml.kernel.org/n/tip-rdk9mgpoyhzlwh2xhlykv...@git.kernel.org
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 mm/huge_memory.c |   15 +++++++++++++++
 1 files changed, 15 insertions(+), 0 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7361fd9..1fc805e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -742,6 +742,7 @@ void do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                           unsigned int flags, pmd_t entry)
 {
        unsigned long haddr = address & HPAGE_PMD_MASK;
+       struct mem_cgroup *memcg = NULL;
        struct page *new_page = NULL;
        struct page *page = NULL;
        int node, lru;
@@ -832,6 +833,14 @@ migrate:
 
                return;
        }
+       /*
+        * Traditional migration needs to prepare the memcg charge
+        * transaction early to prevent the old page from being
+        * uncharged when installing migration entries.  Here we can
+        * save the potential rollback and start the charge transfer
+        * only when migration is already known to end successfully.
+        */
+       mem_cgroup_prepare_migration(page, new_page, &memcg);
 
        entry = mk_pmd(new_page, vma->vm_page_prot);
        entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
@@ -842,6 +851,12 @@ migrate:
        set_pmd_at(mm, haddr, pmd, entry);
        update_mmu_cache_pmd(vma, address, entry);
        page_remove_rmap(page);
+       /*
+        * Finish the charge transaction under the page table lock to
+        * prevent split_huge_page() from dividing up the charge
+        * before it's fully transferred to the new page.
+        */
+       mem_cgroup_end_migration(memcg, page, new_page, true);
        spin_unlock(&mm->page_table_lock);
 
        put_page(page);                 /* Drop the rmap reference */
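
For clarity, here is a condensed sketch of the ordering the two hunks
above establish, with the unmodified lines between them elided.  This is
kernel-internal code, not compilable on its own; page allocation, the
copy into new_page, and the error paths of do_huge_pmd_numa_page() are
omitted:

	struct mem_cgroup *memcg = NULL;

	/*
	 * mm->page_table_lock is already held here and is only dropped
	 * after the charge is committed (see the second hunk).  The new
	 * huge page has already been filled, so migration can no longer
	 * fail.  Unlike the generic migration path, which must prepare
	 * the charge transaction before installing migration entries and
	 * may have to roll it back, the transfer is started only now
	 * that success is guaranteed.
	 */
	mem_cgroup_prepare_migration(page, new_page, &memcg);

	entry = mk_pmd(new_page, vma->vm_page_prot);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);

	/* ... unmodified lines between the two hunks ... */

	set_pmd_at(mm, haddr, pmd, entry);
	update_mmu_cache_pmd(vma, address, entry);
	page_remove_rmap(page);

	/*
	 * Commit the charge to new_page while still holding the page
	 * table lock, so a concurrent split_huge_page() cannot divide
	 * the charge before it has fully moved over; migration_ok is
	 * always true on this path.
	 */
	mem_cgroup_end_migration(memcg, page, new_page, true);
	spin_unlock(&mm->page_table_lock);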