Currently smaps reports zero for many fields of VM_HUGETLB vmas, which is
inconvenient when we want to know per-task or per-vma base hugetlb usage.
This patch fills in these fields by introducing smaps_hugetlb_range().

before patch:

  Size:              20480 kB
  Rss:                   0 kB
  Pss:                   0 kB
  Shared_Clean:          0 kB
  Shared_Dirty:          0 kB
  Private_Clean:         0 kB
  Private_Dirty:         0 kB
  Referenced:            0 kB
  Anonymous:             0 kB
  AnonHugePages:         0 kB
  Swap:                  0 kB
  KernelPageSize:     2048 kB
  MMUPageSize:        2048 kB
  Locked:                0 kB
  VmFlags: rd wr mr mw me de ht

after patch:

  Size:              20480 kB
  Rss:               18432 kB
  Pss:               18432 kB
  Shared_Clean:          0 kB
  Shared_Dirty:          0 kB
  Private_Clean:         0 kB
  Private_Dirty:     18432 kB
  Referenced:        18432 kB
  Anonymous:         18432 kB
  AnonHugePages:         0 kB
  Swap:                  0 kB
  KernelPageSize:     2048 kB
  MMUPageSize:        2048 kB
  Locked:                0 kB
  VmFlags: rd wr mr mw me de ht
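
The following is a minimal userspace sketch (not part of this patch) that
reproduces the numbers above. It assumes the default huge page size is 2MB
and that at least 10 huge pages are reserved via /proc/sys/vm/nr_hugepages:
it maps ten huge pages anonymously with MAP_HUGETLB, dirties nine of them,
and dumps the process's smaps.

  #define _GNU_SOURCE
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/mman.h>

  #define HPAGE_SIZE  (2UL << 20)   /* assumed 2MB default huge page size */
  #define NR_HPAGES   10            /* Size: 20480 kB */
  #define NR_DIRTY    9             /* expected Rss/Anonymous: 18432 kB */

  int main(void)
  {
          char cmd[64];
          char *p = mmap(NULL, NR_HPAGES * HPAGE_SIZE,
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

          if (p == MAP_FAILED) {
                  perror("mmap");
                  return 1;
          }
          /* Fault in and dirty 9 of the 10 huge pages. */
          memset(p, 1, NR_DIRTY * HPAGE_SIZE);

          /* Dump smaps; the hugetlb vma is the one with "ht" in VmFlags. */
          snprintf(cmd, sizeof(cmd), "cat /proc/%d/smaps", getpid());
          system(cmd);

          munmap(p, NR_HPAGES * HPAGE_SIZE);
          return 0;
  }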

Signed-off-by: Naoya Horiguchi <n-horigu...@ah.jp.nec.com>
Acked-by: Jörn Engel <jo...@logfs.org>
---
 fs/proc/task_mmu.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git v4.2-rc4.orig/fs/proc/task_mmu.c v4.2-rc4/fs/proc/task_mmu.c
index ca1e091881d4..c7218603306d 100644
--- v4.2-rc4.orig/fs/proc/task_mmu.c
+++ v4.2-rc4/fs/proc/task_mmu.c
@@ -610,12 +610,39 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
        seq_putc(m, '\n');
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
+                                unsigned long addr, unsigned long end,
+                                struct mm_walk *walk)
+{
+       struct mem_size_stats *mss = walk->private;
+       struct vm_area_struct *vma = walk->vma;
+       struct page *page = NULL;
+
+       if (pte_present(*pte)) {
+               page = vm_normal_page(vma, addr, *pte);
+       } else if (is_swap_pte(*pte)) {
+               swp_entry_t swpent = pte_to_swp_entry(*pte);
+
+               if (is_migration_entry(swpent))
+                       page = migration_entry_to_page(swpent);
+       }
+       if (page)
+               smaps_account(mss, page, huge_page_size(hstate_vma(vma)),
+                             pte_young(*pte), pte_dirty(*pte));
+       return 0;
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
 static int show_smap(struct seq_file *m, void *v, int is_pid)
 {
        struct vm_area_struct *vma = v;
        struct mem_size_stats mss;
        struct mm_walk smaps_walk = {
                .pmd_entry = smaps_pte_range,
+#ifdef CONFIG_HUGETLB_PAGE
+               .hugetlb_entry = smaps_hugetlb_range,
+#endif
                .mm = vma->vm_mm,
                .private = &mss,
        };
-- 
2.4.3
