This appears to help cached git diff performance by about 5% on a
POWER9 (with a 32MB dentry cache hash).

  Profiling git diff dTLB misses with a vanilla kernel:

  81.75%  git      [kernel.vmlinux]    [k] __d_lookup_rcu
   7.21%  git      [kernel.vmlinux]    [k] strncpy_from_user
   1.77%  git      [kernel.vmlinux]    [k] find_get_entry
   1.59%  git      [kernel.vmlinux]    [k] kmem_cache_free

            40,168      dTLB-miss
       0.100342754 seconds time elapsed

After this patch (and the subsequent powerpc HUGE_VMAP patches), the
dentry cache hash gets mapped with 2MB pages:

             2,987      dTLB-miss
       0.095933138 seconds time elapsed

The elapsed time improvement isn't too scientific but it seems
consistent, and the dTLB miss count certainly improves by an order of
magnitude. My laptop takes a lot of misses here too, so x86 would be
interesting to test; I think it should just work there.
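
For reference, the figures above are perf stat / perf report style
output. The invocation below is only an assumed example for anyone
wanting to reproduce this (the dTLB event name differs between POWER9
and x86, so check perf list first); it is not necessarily the exact
command used for the numbers quoted:

  # assumed example, not necessarily the exact command used above
  perf stat -e dTLB-load-misses -r 5 -- git diff    # miss count + elapsed time
  perf record -e dTLB-load-misses -- git diff       # per-symbol breakdown
  perf report --stdio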

---
 include/linux/vmalloc.h |  1 +
 mm/vmalloc.c            | 87 +++++++++++++++++++++++++++--------------
 2 files changed, 59 insertions(+), 29 deletions(-)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index c6eebb839552..029635560306 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -42,6 +42,7 @@ struct vm_struct {
        unsigned long           size;
        unsigned long           flags;
        struct page             **pages;
+       unsigned int            page_shift;
        unsigned int            nr_pages;
        phys_addr_t             phys_addr;
        const void              *caller;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e5e9e1fcac01..c9ba88768bca 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -216,32 +216,34 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
  * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
  */
 static int vmap_page_range_noflush(unsigned long start, unsigned long end,
-                                  pgprot_t prot, struct page **pages)
+                                  pgprot_t prot, struct page **pages,
+                                  unsigned int page_shift)
 {
-       pgd_t *pgd;
-       unsigned long next;
        unsigned long addr = start;
-       int err = 0;
-       int nr = 0;
+       unsigned int i, nr = (end - start) >> (PAGE_SHIFT + page_shift);
 
-       BUG_ON(addr >= end);
-       pgd = pgd_offset_k(addr);
-       do {
-               next = pgd_addr_end(addr, end);
-               err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
+       for (i = 0; i < nr; i++) {
+               int err;
+
+               err = ioremap_page_range(addr,
+                                       addr + (PAGE_SIZE << page_shift),
+                                       __pa(page_address(pages[i])), prot);
                if (err)
                        return err;
-       } while (pgd++, addr = next, addr != end);
+
+               addr += PAGE_SIZE << page_shift;
+       }
 
        return nr;
 }
 
 static int vmap_page_range(unsigned long start, unsigned long end,
-                          pgprot_t prot, struct page **pages)
+                          pgprot_t prot, struct page **pages,
+                          unsigned int page_shift)
 {
        int ret;
 
-       ret = vmap_page_range_noflush(start, end, prot, pages);
+       ret = vmap_page_range_noflush(start, end, prot, pages, page_shift);
        flush_cache_vmap(start, end);
        return ret;
 }
@@ -1189,7 +1191,7 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
                addr = va->va_start;
                mem = (void *)addr;
        }
-       if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
+       if (vmap_page_range(addr, addr + size, prot, pages, 0) < 0) {
                vm_unmap_ram(mem, count);
                return NULL;
        }
@@ -1305,7 +1307,7 @@ void __init vmalloc_init(void)
 int map_kernel_range_noflush(unsigned long addr, unsigned long size,
                             pgprot_t prot, struct page **pages)
 {
-       return vmap_page_range_noflush(addr, addr + size, prot, pages);
+       return vmap_page_range_noflush(addr, addr + size, prot, pages, 0);
 }
 
 /**
@@ -1352,7 +1354,7 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
        unsigned long end = addr + get_vm_area_size(area);
        int err;
 
-       err = vmap_page_range(addr, end, prot, pages);
+       err = vmap_page_range(addr, end, prot, pages, 0);
 
        return err > 0 ? 0 : err;
 }
@@ -1395,8 +1397,9 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
                return NULL;
 
        if (flags & VM_IOREMAP)
-               align = 1ul << clamp_t(int, get_count_order_long(size),
-                                      PAGE_SHIFT, IOREMAP_MAX_ORDER);
+               align = max(align,
+                               1ul << clamp_t(int, get_count_order_long(size),
+                                      PAGE_SHIFT, IOREMAP_MAX_ORDER));
 
        area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
        if (unlikely(!area))
@@ -1608,7 +1611,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
                        struct page *page = area->pages[i];
 
                        BUG_ON(!page);
-                       __free_pages(page, 0);
+                       __free_pages(page, area->page_shift);
                }
 
                kvfree(area->pages);
@@ -1751,14 +1754,17 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                 pgprot_t prot, int node)
 {
        struct page **pages;
+       unsigned long addr = (unsigned long)area->addr;
+       unsigned long size = get_vm_area_size(area);
+       unsigned int page_shift = area->page_shift;
+       unsigned int shift = page_shift + PAGE_SHIFT;
        unsigned int nr_pages, array_size, i;
        const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
        const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
        const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
-                                       0 :
-                                       __GFP_HIGHMEM;
+                                       0 : __GFP_HIGHMEM;
 
-       nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
+       nr_pages = size >> shift;
        array_size = (nr_pages * sizeof(struct page *));
 
        area->nr_pages = nr_pages;
@@ -1779,10 +1785,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        for (i = 0; i < area->nr_pages; i++) {
                struct page *page;
 
-               if (node == NUMA_NO_NODE)
-                       page = alloc_page(alloc_mask|highmem_mask);
-               else
-                       page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
+               page = alloc_pages_node(node,
+                               alloc_mask|highmem_mask, page_shift);
 
                if (unlikely(!page)) {
                        /* Successfully allocated i pages, free them in __vunmap() */
@@ -1794,8 +1798,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                        cond_resched();
        }
 
-       if (map_vm_area(area, prot, pages))
+       if (vmap_page_range(addr, addr + size, prot, pages, page_shift) < 0)
                goto fail;
+
        return area->addr;
 
 fail:
@@ -1832,19 +1837,35 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
        struct vm_struct *area;
        void *addr;
        unsigned long real_size = size;
+       unsigned long real_align = align;
+       unsigned long size_per_node;
+       unsigned int shift;
 
        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > totalram_pages())
                goto fail;
 
+       size_per_node = size;
+       if (node == NUMA_NO_NODE)
+               size_per_node /= num_online_nodes();
+       if (size_per_node >= PMD_SIZE)
+               shift = PMD_SHIFT;
+       else
+               shift = PAGE_SHIFT;
+again:
+       align = max(real_align, 1UL << shift);
+       size = ALIGN(real_size, align);
+
        area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
                                vm_flags, start, end, node, gfp_mask, caller);
        if (!area)
                goto fail;
 
+       area->page_shift = shift - PAGE_SHIFT;
+
        addr = __vmalloc_area_node(area, gfp_mask, prot, node);
        if (!addr)
-               return NULL;
+               goto fail;
 
        /*
         * In this function, newly allocated vm_struct has VM_UNINITIALIZED
@@ -1858,8 +1879,16 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
        return addr;
 
 fail:
-       warn_alloc(gfp_mask, NULL,
+       if (shift == PMD_SHIFT) {
+               shift = PAGE_SHIFT;
+               goto again;
+       }
+
+       if (!area) {
+               /* Warn for area allocation, page allocations already warn */
+               warn_alloc(gfp_mask, NULL,
                          "vmalloc: allocation failure: %lu bytes", real_size);
+       }
        return NULL;
 }
 
-- 
2.20.1
