Author: will
Date: Sat Nov 11 23:30:58 2017
New Revision: 325728
URL: https://svnweb.freebsd.org/changeset/base/325728

Log:
  libkvm: add kvm_walk_pages API.
  
  This API allows callers to enumerate all known pages, including any
  direct map and kernel map virtual addresses, physical addresses, size,
  offset into the core, and protection configured.
  
  For architectures that support direct map addresses, also generate pages
  for any direct map only addresses that are not associated with kernel
  map addresses.
  
  Fix page size portability issue left behind from previous kvm page table
  lookup interface.
  
  Reviewed by:  jhb
  Sponsored by: Backtrace I/O
  Differential Revision:        https://reviews.freebsd.org/D12279

Modified:
  head/lib/libkvm/kvm.c
  head/lib/libkvm/kvm.h
  head/lib/libkvm/kvm_aarch64.h
  head/lib/libkvm/kvm_arm.h
  head/lib/libkvm/kvm_i386.h
  head/lib/libkvm/kvm_minidump_aarch64.c
  head/lib/libkvm/kvm_minidump_amd64.c
  head/lib/libkvm/kvm_minidump_arm.c
  head/lib/libkvm/kvm_minidump_i386.c
  head/lib/libkvm/kvm_minidump_mips.c
  head/lib/libkvm/kvm_mips.h
  head/lib/libkvm/kvm_private.c
  head/lib/libkvm/kvm_private.h

Modified: head/lib/libkvm/kvm.c
==============================================================================
--- head/lib/libkvm/kvm.c       Sat Nov 11 22:50:14 2017        (r325727)
+++ head/lib/libkvm/kvm.c       Sat Nov 11 23:30:58 2017        (r325728)
@@ -49,6 +49,7 @@ static char sccsid[] = "@(#)kvm.c     8.2 (Berkeley) 2/13/
 #include <sys/linker.h>
 #include <sys/pcpu.h>
 #include <sys/stat.h>
+#include <sys/mman.h>
 
 #include <net/vnet.h>
 
@@ -299,6 +300,10 @@ kvm_close(kvm_t *kd)
                free((void *)kd->argv);
        if (kd->pt_map != NULL)
                free(kd->pt_map);
+       if (kd->page_map != NULL)
+               free(kd->page_map);
+       if (kd->sparse_map != MAP_FAILED)
+               munmap(kd->sparse_map, kd->pt_sparse_size);
        free((void *)kd);
 
        return (error);
@@ -486,4 +491,14 @@ kvm_native(kvm_t *kd)
        if (ISALIVE(kd))
                return (1);
        return (kd->arch->ka_native(kd));
+}
+
+int
+kvm_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *closure)
+{
+
+       if (kd->arch->ka_walk_pages == NULL)
+               return (0);
+
+       return (kd->arch->ka_walk_pages(kd, cb, closure));
 }

Modified: head/lib/libkvm/kvm.h
==============================================================================
--- head/lib/libkvm/kvm.h       Sat Nov 11 22:50:14 2017        (r325727)
+++ head/lib/libkvm/kvm.h       Sat Nov 11 23:30:58 2017        (r325728)
@@ -36,6 +36,7 @@
 #include <sys/cdefs.h>
 #include <sys/types.h>
 #include <nlist.h>
+#include <vm/vm.h>
 
 /* Default version symbol. */
 #define        VRS_SYM         "_version"
@@ -73,7 +74,19 @@ struct kvm_swap {
        u_int   ksw_reserved2;
 };
 
+struct kvm_page {
+       unsigned int version;
+       u_long paddr;
+       u_long kmap_vaddr;
+       u_long dmap_vaddr;
+       vm_prot_t prot;
+       u_long offset;
+       size_t len;
+       /* end of version 1 */
+};
+
 #define SWIF_DEV_PREFIX        0x0002
+#define        LIBKVM_WALK_PAGES_VERSION       1
 
 __BEGIN_DECLS
 int      kvm_close(kvm_t *);
@@ -104,6 +117,9 @@ ssize_t       kvm_read(kvm_t *, unsigned long, void *, siz
 ssize_t          kvm_read_zpcpu(kvm_t *, unsigned long, void *, size_t, int);
 ssize_t          kvm_read2(kvm_t *, kvaddr_t, void *, size_t);
 ssize_t          kvm_write(kvm_t *, unsigned long, const void *, size_t);
+
+typedef int kvm_walk_pages_cb_t(struct kvm_page *, void *);
+int kvm_walk_pages(kvm_t *, kvm_walk_pages_cb_t *, void *);
 __END_DECLS
 
 #endif /* !_KVM_H_ */

Modified: head/lib/libkvm/kvm_aarch64.h
==============================================================================
--- head/lib/libkvm/kvm_aarch64.h       Sat Nov 11 22:50:14 2017        (r325727)
+++ head/lib/libkvm/kvm_aarch64.h       Sat Nov 11 23:30:58 2017        (r325728)
@@ -40,7 +40,13 @@ typedef uint64_t     aarch64_pte_t;
 #define        AARCH64_PAGE_SIZE       (1 << AARCH64_PAGE_SHIFT)
 #define        AARCH64_PAGE_MASK       (AARCH64_PAGE_SIZE - 1)
 
+/* Source: arm64/include/pte.h */
 #define        AARCH64_ATTR_MASK       0xfff0000000000fff
+#define        AARCH64_ATTR_UXN        (1UL << 54)
+#define        AARCH64_ATTR_PXN        (1UL << 53)
+#define        AARCH64_ATTR_XN         (AARCH64_ATTR_PXN | AARCH64_ATTR_UXN)
+#define        AARCH64_ATTR_AP(x)      ((x) << 6)
+#define        AARCH64_ATTR_AP_RO      (1 << 1)
 
 #define        AARCH64_ATTR_DESCR_MASK 3
 

Modified: head/lib/libkvm/kvm_arm.h
==============================================================================
--- head/lib/libkvm/kvm_arm.h   Sat Nov 11 22:50:14 2017        (r325727)
+++ head/lib/libkvm/kvm_arm.h   Sat Nov 11 23:30:58 2017        (r325728)
@@ -53,6 +53,19 @@ typedef uint32_t     arm_pt_entry_t;
 #define        ARM_L2_S_OFFSET (ARM_L2_S_SIZE - 1)
 #define        ARM_L2_S_FRAME  (~ARM_L2_S_OFFSET)
 #define        ARM_L2_S_SHIFT  12
+#define        ARM_L2_TEX1     0x00000080
+#define        ARM_PTE2_RO     ARM_L2_TEX1
+#define        ARM_L2_NX       0x00000001
+#define        ARM_PTE2_NX     ARM_L2_NX
+
+/*
+ * Note: L2_S_PROT_W differs depending on whether the system is generic or
+ *       xscale.  This isn't easily accessible in this context, so use an
+ *       approximation of 'xscale' which is a subset of 'generic'.
+ */
+#define        ARM_L2_AP0(x)   ((x) << 4)
+#define        ARM_AP_W        0x01
+#define        ARM_L2_S_PROT_W (ARM_L2_AP0(ARM_AP_W))
 
 #define        ARM_L1_TYPE_INV 0x00            /* Invalid (fault) */
 #define        ARM_L1_TYPE_C   0x01            /* Coarse L2 */

Modified: head/lib/libkvm/kvm_i386.h
==============================================================================
--- head/lib/libkvm/kvm_i386.h  Sat Nov 11 22:50:14 2017        (r325727)
+++ head/lib/libkvm/kvm_i386.h  Sat Nov 11 23:30:58 2017        (r325728)
@@ -53,8 +53,11 @@ typedef      uint64_t        i386_pde_pae_t;
 #define        I386_NBPDR_PAE          (1 << I386_PDRSHIFT_PAE)
 #define        I386_PAGE_PS_MASK_PAE   (I386_NBPDR_PAE - 1)
 
+/* Source: i386/include/pmap.h */
 #define        I386_PG_V               0x001
+#define        I386_PG_RW              0x002
 #define        I386_PG_PS              0x080
+#define        I386_PG_NX              (1ULL << 63)
 #define        I386_PG_FRAME_PAE       (0x000ffffffffff000ull)
 #define        I386_PG_PS_FRAME_PAE    (0x000fffffffe00000ull)
 #define        I386_PG_FRAME           (0xfffff000)

Modified: head/lib/libkvm/kvm_minidump_aarch64.c
==============================================================================
--- head/lib/libkvm/kvm_minidump_aarch64.c      Sat Nov 11 22:50:14 2017        (r325727)
+++ head/lib/libkvm/kvm_minidump_aarch64.c      Sat Nov 11 23:30:58 2017        (r325728)
@@ -50,9 +50,16 @@ __FBSDID("$FreeBSD$");
 
 struct vmstate {
        struct minidumphdr hdr;
-       uint64_t *page_map;
 };
 
+static aarch64_pte_t
+_aarch64_pte_get(kvm_t *kd, u_long pteindex)
+{
+       aarch64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));
+
+       return le64toh(*pte);
+}
+
 static int
 _aarch64_minidump_probe(kvm_t *kd)
 {
@@ -66,7 +73,6 @@ _aarch64_minidump_freevtop(kvm_t *kd)
 {
        struct vmstate *vm = kd->vmst;
 
-       free(vm->page_map);
        free(vm);
        kd->vmst = NULL;
 }
@@ -116,30 +122,13 @@ _aarch64_minidump_initvtop(kvm_t *kd)
            aarch64_round_page(vmst->hdr.pmapsize);
        if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
            AARCH64_PAGE_SIZE, sizeof(uint64_t)) == -1) {
-               _kvm_err(kd, kd->program, "cannot load core bitmap");
                return (-1);
        }
        off += aarch64_round_page(vmst->hdr.bitmapsize);
 
-       vmst->page_map = _kvm_malloc(kd, vmst->hdr.pmapsize);
-       if (vmst->page_map == NULL) {
-               _kvm_err(kd, kd->program,
-                   "cannot allocate %d bytes for page_map",
-                   vmst->hdr.pmapsize);
+       if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
                return (-1);
        }
-       /* This is the end of the dump, savecore may have truncated it. */
-       /*
-        * XXX: This doesn't make sense.  The pmap is not at the end,
-        * and if it is truncated we don't have any actual data (it's
-        * all stored after the bitmap and pmap.  -- jhb
-        */
-       if (pread(kd->pmfd, vmst->page_map, vmst->hdr.pmapsize, off) <
-           AARCH64_PAGE_SIZE) {
-               _kvm_err(kd, kd->program, "cannot read %d bytes for page_map",
-                   vmst->hdr.pmapsize);
-               return (-1);
-       }
        off += aarch64_round_page(vmst->hdr.pmapsize);
 
        return (0);
@@ -161,7 +150,7 @@ _aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t 
        if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
                a = (va - vm->hdr.dmapbase + vm->hdr.dmapphys) &
                    ~AARCH64_PAGE_MASK;
-               ofs = _kvm_pt_find(kd, a);
+               ofs = _kvm_pt_find(kd, a, AARCH64_PAGE_SIZE);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
                            "direct map address 0x%jx not in minidump",
@@ -172,16 +161,16 @@ _aarch64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t 
                return (AARCH64_PAGE_SIZE - offset);
        } else if (va >= vm->hdr.kernbase) {
                l3_index = (va - vm->hdr.kernbase) >> AARCH64_L3_SHIFT;
-               if (l3_index >= vm->hdr.pmapsize / sizeof(*vm->page_map))
+               if (l3_index >= vm->hdr.pmapsize / sizeof(l3))
                        goto invalid;
-               l3 = le64toh(vm->page_map[l3_index]);
+               l3 = _aarch64_pte_get(kd, l3_index);
                if ((l3 & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE) {
                        _kvm_err(kd, kd->program,
                            "_aarch64_minidump_vatop: pde not valid");
                        goto invalid;
                }
                a = l3 & ~AARCH64_ATTR_MASK;
-               ofs = _kvm_pt_find(kd, a);
+               ofs = _kvm_pt_find(kd, a, AARCH64_PAGE_SIZE);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program, "_aarch64_minidump_vatop: "
                            "physical address 0x%jx not in minidump",
@@ -225,12 +214,73 @@ _aarch64_native(kvm_t *kd __unused)
 #endif
 }
 
+static vm_prot_t
+_aarch64_entry_to_prot(aarch64_pte_t pte)
+{
+       vm_prot_t prot = VM_PROT_READ;
+
+       /* Source: arm64/arm64/pmap.c:pmap_protect() */
+       if ((pte & AARCH64_ATTR_AP(AARCH64_ATTR_AP_RO)) == 0)
+               prot |= VM_PROT_WRITE;
+       if ((pte & AARCH64_ATTR_XN) == 0)
+               prot |= VM_PROT_EXECUTE;
+       return prot;
+}
+
+static int
+_aarch64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
+{
+       struct vmstate *vm = kd->vmst;
+       u_long nptes = vm->hdr.pmapsize / sizeof(aarch64_pte_t);
+       u_long bmindex, dva, pa, pteindex, va;
+       struct kvm_bitmap bm;
+       vm_prot_t prot;
+       int ret = 0;
+
+       if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
+               return (0);
+
+       for (pteindex = 0; pteindex < nptes; pteindex++) {
+               aarch64_pte_t pte = _aarch64_pte_get(kd, pteindex);
+
+               if ((pte & AARCH64_ATTR_DESCR_MASK) != AARCH64_L3_PAGE)
+                       continue;
+
+               va = vm->hdr.kernbase + (pteindex << AARCH64_L3_SHIFT);
+               pa = pte & ~AARCH64_ATTR_MASK;
+               dva = vm->hdr.dmapbase + pa;
+               if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
+                   _aarch64_entry_to_prot(pte), AARCH64_PAGE_SIZE, 0)) {
+                       goto out;
+               }
+       }
+
+       while (_kvm_bitmap_next(&bm, &bmindex)) {
+               pa = bmindex * AARCH64_PAGE_SIZE;
+               dva = vm->hdr.dmapbase + pa;
+               if (vm->hdr.dmapend < (dva + AARCH64_PAGE_SIZE))
+                       break;
+               va = 0;
+               prot = VM_PROT_READ | VM_PROT_WRITE;
+               if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
+                   prot, AARCH64_PAGE_SIZE, 0)) {
+                       goto out;
+               }
+       }
+       ret = 1;
+
+out:
+       _kvm_bitmap_deinit(&bm);
+       return (ret);
+}
+
 static struct kvm_arch kvm_aarch64_minidump = {
        .ka_probe = _aarch64_minidump_probe,
        .ka_initvtop = _aarch64_minidump_initvtop,
        .ka_freevtop = _aarch64_minidump_freevtop,
        .ka_kvatop = _aarch64_minidump_kvatop,
        .ka_native = _aarch64_native,
+       .ka_walk_pages = _aarch64_minidump_walk_pages,
 };
 
 KVM_ARCH(kvm_aarch64_minidump);

Modified: head/lib/libkvm/kvm_minidump_amd64.c
==============================================================================
--- head/lib/libkvm/kvm_minidump_amd64.c        Sat Nov 11 22:50:14 2017        (r325727)
+++ head/lib/libkvm/kvm_minidump_amd64.c        Sat Nov 11 23:30:58 2017        (r325728)
@@ -46,12 +46,59 @@ __FBSDID("$FreeBSD$");
 #include "kvm_amd64.h"
 
 #define        amd64_round_page(x)     roundup2((kvaddr_t)(x), AMD64_PAGE_SIZE)
+#define        VM_IS_V1(vm)            (vm->hdr.version == 1)
+#define        VA_OFF(vm, va)          \
+       (VM_IS_V1(vm) ? ((va) & (AMD64_PAGE_SIZE - 1)) : ((va) & AMD64_PAGE_MASK))
 
 struct vmstate {
        struct minidumphdr hdr;
-       amd64_pte_t *page_map;
 };
 
+static vm_prot_t
+_amd64_entry_to_prot(uint64_t entry)
+{
+       vm_prot_t prot = VM_PROT_READ;
+
+       if ((entry & PG_RW) != 0)
+               prot |= VM_PROT_WRITE;
+       if ((entry & PG_NX) == 0)
+               prot |= VM_PROT_EXECUTE;
+       return prot;
+}
+
+/*
+ * Version 2 minidumps use page directory entries, while version 1 use page
+ * table entries.
+ */
+
+static amd64_pde_t
+_amd64_pde_get(kvm_t *kd, u_long pdeindex)
+{
+       amd64_pde_t *pde = _kvm_pmap_get(kd, pdeindex, sizeof(*pde));
+
+       return le64toh(*pde);
+}
+
+static amd64_pte_t
+_amd64_pte_get(kvm_t *kd, u_long pteindex)
+{
+       amd64_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));
+
+       return le64toh(*pte);
+}
+
+/* Get the first page table entry for a given page directory index. */
+static amd64_pte_t *
+_amd64_pde_first_pte(kvm_t *kd, u_long pdeindex)
+{
+       u_long *pa;
+
+       pa = _kvm_pmap_get(kd, pdeindex, sizeof(amd64_pde_t));
+       if (pa == NULL)
+               return NULL;
+       return _kvm_map_get(kd, *pa & AMD64_PG_FRAME, AMD64_PAGE_SIZE);
+}
+
 static int
 _amd64_minidump_probe(kvm_t *kd)
 {
@@ -65,7 +112,6 @@ _amd64_minidump_freevtop(kvm_t *kd)
 {
        struct vmstate *vm = kd->vmst;
 
-       free(vm->page_map);
        free(vm);
        kd->vmst = NULL;
 }
@@ -116,23 +162,13 @@ _amd64_minidump_initvtop(kvm_t *kd)
            amd64_round_page(vmst->hdr.pmapsize);
        if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
            AMD64_PAGE_SIZE, sizeof(uint64_t)) == -1) {
-               _kvm_err(kd, kd->program, "cannot load core bitmap");
                return (-1);
        }
        off += amd64_round_page(vmst->hdr.bitmapsize);
 
-       vmst->page_map = _kvm_malloc(kd, vmst->hdr.pmapsize);
-       if (vmst->page_map == NULL) {
-               _kvm_err(kd, kd->program, "cannot allocate %d bytes for page_map",
-                   vmst->hdr.pmapsize);
+       if (_kvm_pmap_init(kd, vmst->hdr.pmapsize, off) == -1) {
                return (-1);
        }
-       if (pread(kd->pmfd, vmst->page_map, vmst->hdr.pmapsize, off) !=
-           (ssize_t)vmst->hdr.pmapsize) {
-               _kvm_err(kd, kd->program, "cannot read %d bytes for page_map",
-                   vmst->hdr.pmapsize);
-               return (-1);
-       }
        off += amd64_round_page(vmst->hdr.pmapsize);
 
        return (0);
@@ -153,16 +189,16 @@ _amd64_minidump_vatop_v1(kvm_t *kd, kvaddr_t va, off_t
 
        if (va >= vm->hdr.kernbase) {
                pteindex = (va - vm->hdr.kernbase) >> AMD64_PAGE_SHIFT;
-               if (pteindex >= vm->hdr.pmapsize / sizeof(*vm->page_map))
+               if (pteindex >= vm->hdr.pmapsize / sizeof(pte))
                        goto invalid;
-               pte = le64toh(vm->page_map[pteindex]);
+               pte = _amd64_pte_get(kd, pteindex);
                if ((pte & AMD64_PG_V) == 0) {
                        _kvm_err(kd, kd->program,
                            "_amd64_minidump_vatop_v1: pte not valid");
                        goto invalid;
                }
                a = pte & AMD64_PG_FRAME;
-               ofs = _kvm_pt_find(kd, a);
+               ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program,
            "_amd64_minidump_vatop_v1: physical address 0x%jx not in minidump",
@@ -173,7 +209,7 @@ _amd64_minidump_vatop_v1(kvm_t *kd, kvaddr_t va, off_t
                return (AMD64_PAGE_SIZE - offset);
        } else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
                a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK;
-               ofs = _kvm_pt_find(kd, a);
+               ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program,
     "_amd64_minidump_vatop_v1: direct map address 0x%jx not in minidump",
@@ -212,9 +248,9 @@ _amd64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *p
 
        if (va >= vm->hdr.kernbase) {
                pdeindex = (va - vm->hdr.kernbase) >> AMD64_PDRSHIFT;
-               if (pdeindex >= vm->hdr.pmapsize / sizeof(*vm->page_map))
+               if (pdeindex >= vm->hdr.pmapsize / sizeof(pde))
                        goto invalid;
-               pde = le64toh(vm->page_map[pdeindex]);
+               pde = _amd64_pde_get(kd, pdeindex);
                if ((pde & AMD64_PG_V) == 0) {
                        _kvm_err(kd, kd->program,
                            "_amd64_minidump_vatop: pde not valid");
@@ -223,7 +259,7 @@ _amd64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *p
                if ((pde & AMD64_PG_PS) == 0) {
                        a = pde & AMD64_PG_FRAME;
                        /* TODO: Just read the single PTE */
-                       ofs = _kvm_pt_find(kd, a);
+                       ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
                        if (ofs == -1) {
                                _kvm_err(kd, kd->program,
                                    "cannot find page table entry for %ju",
@@ -250,7 +286,7 @@ _amd64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *p
                        a = pde & AMD64_PG_PS_FRAME;
                        a += (va & AMD64_PDRMASK) ^ offset;
                }
-               ofs = _kvm_pt_find(kd, a);
+               ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program,
            "_amd64_minidump_vatop: physical address 0x%jx not in minidump",
@@ -261,7 +297,7 @@ _amd64_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *p
                return (AMD64_PAGE_SIZE - offset);
        } else if (va >= vm->hdr.dmapbase && va < vm->hdr.dmapend) {
                a = (va - vm->hdr.dmapbase) & ~AMD64_PAGE_MASK;
-               ofs = _kvm_pt_find(kd, a);
+               ofs = _kvm_pt_find(kd, a, AMD64_PAGE_SIZE);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program,
            "_amd64_minidump_vatop: direct map address 0x%jx not in minidump",
@@ -297,12 +333,99 @@ _amd64_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *
                return (_amd64_minidump_vatop(kd, va, pa));
 }
 
+static int
+_amd64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
+{
+       struct vmstate *vm = kd->vmst;
+       u_long npdes = vm->hdr.pmapsize / sizeof(amd64_pde_t);
+       u_long bmindex, dva, pa, pdeindex, va;
+       struct kvm_bitmap bm;
+       int ret = 0;
+       vm_prot_t prot;
+       unsigned int pgsz = AMD64_PAGE_SIZE;
+
+       if (vm->hdr.version < 2)
+               return (0);
+
+       if (!_kvm_bitmap_init(&bm, vm->hdr.bitmapsize, &bmindex))
+               return (0);
+
+       for (pdeindex = 0; pdeindex < npdes; pdeindex++) {
+               pd_entry_t pde = _amd64_pde_get(kd, pdeindex);
+               pt_entry_t *ptes;
+               u_long i;
+
+               va = vm->hdr.kernbase + (pdeindex << AMD64_PDRSHIFT);
+               if ((pde & PG_V) == 0)
+                       continue;
+
+               if ((pde & AMD64_PG_PS) != 0) {
+                       /*
+                        * Large page.  Iterate on each 4K page section
+                        * within this page.  This differs from 4K pages in
+                        * that every page here uses the same PDE to
+                        * generate permissions.
+                        */
+                       pa = pde & AMD64_PG_PS_FRAME +
+                           ((va & AMD64_PDRMASK) ^ VA_OFF(vm, va));
+                       dva = vm->hdr.dmapbase + pa;
+                       _kvm_bitmap_set(&bm, pa, AMD64_PAGE_SIZE);
+                       if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
+                           _amd64_entry_to_prot(pde), AMD64_NBPDR, pgsz)) {
+                               goto out;
+                       }
+                       continue;
+               }
+
+               /* 4K pages: pde references another page of entries. */
+               ptes = _amd64_pde_first_pte(kd, pdeindex);
+               /* Ignore page directory pages that were not dumped. */
+               if (ptes == NULL)
+                       continue;
+
+               for (i = 0; i < NPTEPG; i++) {
+                       pt_entry_t pte = (u_long)ptes[i];
+
+                       pa = pte & AMD64_PG_FRAME;
+                       dva = vm->hdr.dmapbase + pa;
+                       if ((pte & PG_V) != 0) {
+                               _kvm_bitmap_set(&bm, pa, AMD64_PAGE_SIZE);
+                               if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
+                                   _amd64_entry_to_prot(pte), pgsz, 0)) {
+                                       goto out;
+                               }
+                       }
+                       va += AMD64_PAGE_SIZE;
+               }
+       }
+
+       while (_kvm_bitmap_next(&bm, &bmindex)) {
+               pa = bmindex * AMD64_PAGE_SIZE;
+               dva = vm->hdr.dmapbase + pa;
+               if (vm->hdr.dmapend < (dva + pgsz))
+                       break;
+               va = 0;
+               /* amd64/pmap.c: create_pagetables(): dmap always R|W. */
+               prot = VM_PROT_READ | VM_PROT_WRITE;
+               if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva, prot, pgsz, 0)) {
+                       goto out;
+               }
+       }
+
+       ret = 1;
+
+out:
+       _kvm_bitmap_deinit(&bm);
+       return (ret);
+}
+
 static struct kvm_arch kvm_amd64_minidump = {
        .ka_probe = _amd64_minidump_probe,
        .ka_initvtop = _amd64_minidump_initvtop,
        .ka_freevtop = _amd64_minidump_freevtop,
        .ka_kvatop = _amd64_minidump_kvatop,
        .ka_native = _amd64_native,
+       .ka_walk_pages = _amd64_minidump_walk_pages,
 };
 
 KVM_ARCH(kvm_amd64_minidump);

Modified: head/lib/libkvm/kvm_minidump_arm.c
==============================================================================
--- head/lib/libkvm/kvm_minidump_arm.c  Sat Nov 11 22:50:14 2017        (r325727)
+++ head/lib/libkvm/kvm_minidump_arm.c  Sat Nov 11 23:30:58 2017        (r325728)
@@ -51,10 +51,17 @@ __FBSDID("$FreeBSD$");
 
 struct vmstate {
        struct          minidumphdr hdr;
-       void            *ptemap;
        unsigned char   ei_data;
 };
 
+static arm_pt_entry_t
+_arm_pte_get(kvm_t *kd, u_long pteindex)
+{
+       arm_pt_entry_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));
+
+       return _kvm32toh(kd, *pte);
+}
+
 static int
 _arm_minidump_probe(kvm_t *kd)
 {
@@ -68,7 +75,6 @@ _arm_minidump_freevtop(kvm_t *kd)
 {
        struct vmstate *vm = kd->vmst;
 
-       free(vm->ptemap);
        free(vm);
        kd->vmst = NULL;
 }
@@ -122,24 +128,13 @@ _arm_minidump_initvtop(kvm_t *kd)
            arm_round_page(vmst->hdr.ptesize);
        if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
            ARM_PAGE_SIZE, sizeof(uint32_t)) == -1) {
-               _kvm_err(kd, kd->program, "cannot load core bitmap");
                return (-1);
        }
        off += arm_round_page(vmst->hdr.bitmapsize);
 
-       vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize);
-       if (vmst->ptemap == NULL) {
-               _kvm_err(kd, kd->program, "cannot allocate %d bytes for "
-                   "ptemap", vmst->hdr.ptesize);
+       if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) {
                return (-1);
        }
-
-       if (pread(kd->pmfd, vmst->ptemap, vmst->hdr.ptesize, off) !=
-           (ssize_t)vmst->hdr.ptesize) {
-               _kvm_err(kd, kd->program, "cannot read %d bytes for ptemap",
-                   vmst->hdr.ptesize);
-               return (-1);
-       }
        off += arm_round_page(vmst->hdr.ptesize);
 
        return (0);
@@ -153,7 +148,6 @@ _arm_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa
        arm_physaddr_t offset, a;
        kvaddr_t pteindex;
        off_t ofs;
-       arm_pt_entry_t *ptemap;
 
        if (ISALIVE(kd)) {
                _kvm_err(kd, 0, "_arm_minidump_kvatop called in live kernel!");
@@ -161,13 +155,12 @@ _arm_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa
        }
 
        vm = kd->vmst;
-       ptemap = vm->ptemap;
 
        if (va >= vm->hdr.kernbase) {
                pteindex = (va - vm->hdr.kernbase) >> ARM_PAGE_SHIFT;
-               if (pteindex >= vm->hdr.ptesize / sizeof(*ptemap))
+               if (pteindex >= vm->hdr.ptesize / sizeof(pte))
                        goto invalid;
-               pte = _kvm32toh(kd, ptemap[pteindex]);
+               pte = _arm_pte_get(kd, pteindex);
                if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_INV) {
                        _kvm_err(kd, kd->program,
                            "_arm_minidump_kvatop: pte not valid");
@@ -190,7 +183,7 @@ _arm_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa
                        a = pte & ARM_L2_S_FRAME;
                }
 
-               ofs = _kvm_pt_find(kd, a);
+               ofs = _kvm_pt_find(kd, a, ARM_PAGE_SIZE);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program, "_arm_minidump_kvatop: "
                            "physical address 0x%jx not in minidump",
@@ -209,12 +202,69 @@ invalid:
        return (0);
 }
 
+static vm_prot_t
+_arm_entry_to_prot(kvm_t *kd, arm_pt_entry_t pte)
+{
+       struct vmstate *vm = kd->vmst;
+       vm_prot_t prot = VM_PROT_READ;
+
+       /* Source: arm/arm/pmap-v4.c:pmap_fault_fixup() */
+       if (vm->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4) {
+               if (pte & ARM_L2_S_PROT_W)
+                       prot |= VM_PROT_WRITE;
+               return prot;
+       }
+
+       /* Source: arm/arm/pmap-v6.c:pmap_protect() */
+       if ((pte & ARM_PTE2_RO) == 0)
+               prot |= VM_PROT_WRITE;
+       if ((pte & ARM_PTE2_NX) == 0)
+               prot |= VM_PROT_EXECUTE;
+       return prot;
+}
+
+static int
+_arm_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
+{
+       struct vmstate *vm = kd->vmst;
+       u_long nptes = vm->hdr.ptesize / sizeof(arm_pt_entry_t);
+       u_long dva, pa, pteindex, va;
+
+       for (pteindex = 0; pteindex < nptes; pteindex++) {
+               arm_pt_entry_t pte = _arm_pte_get(kd, pteindex);
+
+               if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_INV)
+                       continue;
+
+               va = vm->hdr.kernbase + (pteindex << ARM_PAGE_SHIFT);
+               if ((pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_L) {
+                       /* 64K page */
+                       pa = (pte & ARM_L2_L_FRAME) +
+                           (va & ARM_L2_L_OFFSET & ARM_L2_S_FRAME);
+               } else {
+                       if (vm->hdr.mmuformat == MINIDUMP_MMU_FORMAT_V4 &&
+                           (pte & ARM_L2_TYPE_MASK) == ARM_L2_TYPE_T) {
+                               continue;
+                       }
+                       /* 4K page */
+                       pa = pte & ARM_L2_S_FRAME;
+               }
+
+               dva = 0; /* no direct map on this platform */
+               if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
+                   _arm_entry_to_prot(kd, pte), ARM_PAGE_SIZE, 0))
+                       return (0);
+       }
+       return (1);
+}
+
 static struct kvm_arch kvm_arm_minidump = {
        .ka_probe = _arm_minidump_probe,
        .ka_initvtop = _arm_minidump_initvtop,
        .ka_freevtop = _arm_minidump_freevtop,
        .ka_kvatop = _arm_minidump_kvatop,
        .ka_native = _arm_native,
+       .ka_walk_pages = _arm_minidump_walk_pages,
 };
 
 KVM_ARCH(kvm_arm_minidump);

Modified: head/lib/libkvm/kvm_minidump_i386.c
==============================================================================
--- head/lib/libkvm/kvm_minidump_i386.c Sat Nov 11 22:50:14 2017        (r325727)
+++ head/lib/libkvm/kvm_minidump_i386.c Sat Nov 11 23:30:58 2017        (r325728)
@@ -49,9 +49,24 @@ __FBSDID("$FreeBSD$");
 
 struct vmstate {
        struct minidumphdr hdr;
-       void *ptemap;
 };
 
+static i386_pte_pae_t
+_i386_pte_pae_get(kvm_t *kd, u_long pteindex)
+{
+       i386_pte_pae_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));
+
+       return le64toh(*pte);
+}
+
+static i386_pte_t
+_i386_pte_get(kvm_t *kd, u_long pteindex)
+{
+       i386_pte_t *pte = _kvm_pmap_get(kd, pteindex, sizeof(*pte));
+
+       return le32toh(*pte);
+}
+
 static int
 _i386_minidump_probe(kvm_t *kd)
 {
@@ -65,7 +80,6 @@ _i386_minidump_freevtop(kvm_t *kd)
 {
        struct vmstate *vm = kd->vmst;
 
-       free(vm->ptemap);
        free(vm);
        kd->vmst = NULL;
 }
@@ -110,21 +124,13 @@ _i386_minidump_initvtop(kvm_t *kd)
            i386_round_page(vmst->hdr.ptesize);
        if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
            I386_PAGE_SIZE, sizeof(uint32_t)) == -1) {
-               _kvm_err(kd, kd->program, "cannot load core bitmap");
                return (-1);
        }
        off += i386_round_page(vmst->hdr.bitmapsize);
 
-       vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize);
-       if (vmst->ptemap == NULL) {
-               _kvm_err(kd, kd->program, "cannot allocate %d bytes for ptemap", vmst->hdr.ptesize);
+       if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) {
                return (-1);
        }
-       if (pread(kd->pmfd, vmst->ptemap, vmst->hdr.ptesize, off) !=
-           (ssize_t)vmst->hdr.ptesize) {
-               _kvm_err(kd, kd->program, "cannot read %d bytes for ptemap", vmst->hdr.ptesize);
-               return (-1);
-       }
        off += i386_round_page(vmst->hdr.ptesize);
 
        return (0);
@@ -139,24 +145,22 @@ _i386_minidump_vatop_pae(kvm_t *kd, kvaddr_t va, off_t
        kvaddr_t pteindex;
        i386_physaddr_pae_t a;
        off_t ofs;
-       i386_pte_pae_t *ptemap;
 
        vm = kd->vmst;
-       ptemap = vm->ptemap;
        offset = va & I386_PAGE_MASK;
 
        if (va >= vm->hdr.kernbase) {
                pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT;
-               if (pteindex >= vm->hdr.ptesize / sizeof(*ptemap))
+               if (pteindex >= vm->hdr.ptesize / sizeof(pte))
                        goto invalid;
-               pte = le64toh(ptemap[pteindex]);
+               pte = _i386_pte_pae_get(kd, pteindex);
                if ((pte & I386_PG_V) == 0) {
                        _kvm_err(kd, kd->program,
                            "_i386_minidump_vatop_pae: pte not valid");
                        goto invalid;
                }
                a = pte & I386_PG_FRAME_PAE;
-               ofs = _kvm_pt_find(kd, a);
+               ofs = _kvm_pt_find(kd, a, I386_PAGE_SIZE);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program,
            "_i386_minidump_vatop_pae: physical address 0x%jx not in minidump",
@@ -186,24 +190,22 @@ _i386_minidump_vatop(kvm_t *kd, kvaddr_t va, off_t *pa
        kvaddr_t pteindex;
        i386_physaddr_t a;
        off_t ofs;
-       i386_pte_t *ptemap;
 
        vm = kd->vmst;
-       ptemap = vm->ptemap;
        offset = va & I386_PAGE_MASK;
 
        if (va >= vm->hdr.kernbase) {
                pteindex = (va - vm->hdr.kernbase) >> I386_PAGE_SHIFT;
-               if (pteindex >= vm->hdr.ptesize / sizeof(*ptemap))
+               if (pteindex >= vm->hdr.ptesize / sizeof(pte))
                        goto invalid;
-               pte = le32toh(ptemap[pteindex]);
+               pte = _i386_pte_get(kd, pteindex);
                if ((pte & I386_PG_V) == 0) {
                        _kvm_err(kd, kd->program,
                            "_i386_minidump_vatop: pte not valid");
                        goto invalid;
                }
                a = pte & I386_PG_FRAME;
-               ofs = _kvm_pt_find(kd, a);
+               ofs = _kvm_pt_find(kd, a, I386_PAGE_SIZE);
                if (ofs == -1) {
                        _kvm_err(kd, kd->program,
            "_i386_minidump_vatop: physical address 0x%jx not in minidump",
@@ -238,12 +240,95 @@ _i386_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *p
                return (_i386_minidump_vatop(kd, va, pa));
 }
 
+static vm_prot_t
+_i386_entry_to_prot(uint64_t pte)
+{
+       vm_prot_t prot = VM_PROT_READ;
+
+       /* Source: i386/pmap.c:pmap_protect() */
+       if (pte & I386_PG_RW)
+               prot |= VM_PROT_WRITE;
+       if ((pte & I386_PG_NX) == 0)
+               prot |= VM_PROT_EXECUTE;
+
+       return prot;
+}
+
+struct i386_iter {
+       kvm_t *kd;
+       u_long nptes;
+       u_long pteindex;
+};
+
+static void
+_i386_iterator_init(struct i386_iter *it, kvm_t *kd)
+{
+       struct vmstate *vm = kd->vmst;
+
+       it->kd = kd;
+       it->pteindex = 0;
+       if (vm->hdr.paemode) {
+               it->nptes = vm->hdr.ptesize / sizeof(i386_pte_pae_t);
+       } else {
+               it->nptes = vm->hdr.ptesize / sizeof(i386_pte_t);
+       }
+       return;
+}
+
+static int
+_i386_iterator_next(struct i386_iter *it, u_long *pa, u_long *va, u_long *dva,
+    vm_prot_t *prot)
+{
+       struct vmstate *vm = it->kd->vmst;
+       i386_pte_t pte32;
+       i386_pte_pae_t pte64;
+       int found = 0;
+
+       *dva = 0;
+       for (; it->pteindex < it->nptes && found == 0; it->pteindex++) {
+               if (vm->hdr.paemode) {
+                       pte64 = _i386_pte_pae_get(it->kd, it->pteindex);
+                       if ((pte64 & I386_PG_V) == 0)
+                               continue;
+                       *prot = _i386_entry_to_prot(pte64);
+                       *pa = pte64 & I386_PG_FRAME_PAE;
+               } else {
+                       pte32 = _i386_pte_get(it->kd, it->pteindex);
+                       if ((pte32 & I386_PG_V) == 0)
+                               continue;
+                       *prot = _i386_entry_to_prot(pte32);
+                       *pa = pte32 & I386_PG_FRAME;
+               }
+               *va = vm->hdr.kernbase + (it->pteindex << I386_PAGE_SHIFT);
+               found = 1;
+       }
+       return found;
+}
+
+static int
+_i386_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
+{
+       struct i386_iter it;
+       u_long dva, pa, va;
+       vm_prot_t prot;
+
+       _i386_iterator_init(&it, kd);
+       while (_i386_iterator_next(&it, &pa, &va, &dva, &prot)) {
+               if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
+                   prot, I386_PAGE_SIZE, 0)) {
+                       return (0);
+               }
+       }
+       return (1);
+}
+
 static struct kvm_arch kvm_i386_minidump = {
        .ka_probe = _i386_minidump_probe,
        .ka_initvtop = _i386_minidump_initvtop,
        .ka_freevtop = _i386_minidump_freevtop,
        .ka_kvatop = _i386_minidump_kvatop,
        .ka_native = _i386_native,
+       .ka_walk_pages = _i386_minidump_walk_pages,
 };
 
 KVM_ARCH(kvm_i386_minidump);

Modified: head/lib/libkvm/kvm_minidump_mips.c
==============================================================================
--- head/lib/libkvm/kvm_minidump_mips.c Sat Nov 11 22:50:14 2017        (r325727)
+++ head/lib/libkvm/kvm_minidump_mips.c Sat Nov 11 23:30:58 2017        (r325728)
@@ -52,7 +52,6 @@ __FBSDID("$FreeBSD$");
 
 struct vmstate {
        struct          minidumphdr hdr;
-       void            *ptemap;
        int             pte_size;
 };
 
@@ -73,7 +72,6 @@ _mips_minidump_freevtop(kvm_t *kd)
 {
        struct vmstate *vm = kd->vmst;
 
-       free(vm->ptemap);
        free(vm);
        kd->vmst = NULL;
 }
@@ -129,24 +127,13 @@ _mips_minidump_initvtop(kvm_t *kd)
            mips_round_page(vmst->hdr.ptesize);
        if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
            MIPS_PAGE_SIZE, sizeof(uint32_t)) == -1) {
-               _kvm_err(kd, kd->program, "cannot load core bitmap");
                return (-1);
        }
        off += mips_round_page(vmst->hdr.bitmapsize);
 
-       vmst->ptemap = _kvm_malloc(kd, vmst->hdr.ptesize);
-       if (vmst->ptemap == NULL) {
-               _kvm_err(kd, kd->program, "cannot allocate %d bytes for "
-                   "ptemap", vmst->hdr.ptesize);
+       if (_kvm_pmap_init(kd, vmst->hdr.ptesize, off) == -1) {
                return (-1);
        }
-
-       if (pread(kd->pmfd, vmst->ptemap, vmst->hdr.ptesize, off) !=
-           (ssize_t)vmst->hdr.ptesize) {
-               _kvm_err(kd, kd->program, "cannot read %d bytes for ptemap",
-                   vmst->hdr.ptesize);
-               return (-1);
-       }
        off += mips_round_page(vmst->hdr.ptesize);
 
        return (0);
@@ -156,12 +143,12 @@ static int
 _mips_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *pa)
 {
        struct vmstate *vm;
-       uint64_t pte;
        mips_physaddr_t offset, a;
        kvaddr_t pteindex;
+       u_long valid;
        off_t ofs;
-       uint32_t *ptemap32;
-       uint64_t *ptemap64;
+       mips32_pte_t pte32;
+       mips64_pte_t pte64;
 
        if (ISALIVE(kd)) {
                _kvm_err(kd, 0, "_mips_minidump_kvatop called in live kernel!");
@@ -173,9 +160,6 @@ _mips_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *p
        va &= ~MIPS_PAGE_MASK;
 
        vm = kd->vmst;
-       ptemap32 = vm->ptemap;
-       ptemap64 = vm->ptemap;
-
        if (kd->nlehdr.e_ident[EI_CLASS] == ELFCLASS64) {
                if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END) {
                        a = va & MIPS_XKPHYS_PHYS_MASK;
@@ -202,17 +186,22 @@ _mips_minidump_kvatop(kvm_t *kd, kvaddr_t va, off_t *p
        if (va >= vm->hdr.kernbase) {
                pteindex = (va - vm->hdr.kernbase) >> MIPS_PAGE_SHIFT;
                if (vm->pte_size == 64) {
-                       if (pteindex >= vm->hdr.ptesize / sizeof(*ptemap64))
+                       valid = pteindex < vm->hdr.ptesize / sizeof(pte64);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
_______________________________________________
svn-src-head@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscr...@freebsd.org"

Reply via email to