I went ahead and implemented the "ndmpdpphys" change I
had thought about.  Here's that patch by itself.

At this point it might be reasonable to use the patch
without the AMD64_HUGE option, just increasing the KVM
area, as the direct map no longer consumes extra pages.

(There's a glitch in the comment in the original patch,
I'll fix that if/when there's any agreement on whether
it gets applied in some form :-) )

Chris

diff --git a/amd64/amd64/pmap.c b/amd64/amd64/pmap.c
index acf5af2..f1ed8b6 100644
--- a/amd64/amd64/pmap.c
+++ b/amd64/amd64/pmap.c
@@ -232,6 +232,7 @@ u_int64_t           KPML4phys;      /* phys addr of kernel level 4 */
 
 static u_int64_t       DMPDphys;       /* phys addr of direct mapped level 2 */
 static u_int64_t       DMPDPphys;      /* phys addr of direct mapped level 3 */
+static int             ndmpdpphys;     /* number of DMPDPphys pages */
 
 static struct rwlock_padalign pvh_global_lock;
 
@@ -543,7 +544,18 @@ create_pagetables(vm_paddr_t *firstaddr)
        ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
        if (ndmpdp < 4)         /* Minimum 4GB of dirmap */
                ndmpdp = 4;
-       DMPDPphys = allocpages(firstaddr, NDMPML4E);
+       ndmpdpphys = howmany(ndmpdp, NPML4EPG);
+       if (ndmpdpphys > NDMPML4E) {
+               /*
+                * Each NDMPML4E allows 512 GB, so limit to that,
+                * and then readjust ndmpdp and ndmpdpphys.
+                */
+               printf("NDMPML4E limits system to %d GB\n", NDMPML4E * 512);
+               Maxmem = atop(NDMPML4E * NBPML4);
+               ndmpdpphys = NDMPML4E;
+               ndmpdp = NDMPML4E * NPDEPG;
+       }
+       DMPDPphys = allocpages(firstaddr, ndmpdpphys);
        ndm1g = 0;
        if ((amd_feature & AMDID_PAGE1GB) != 0)
                ndm1g = ptoa(Maxmem) >> PDPSHIFT;
@@ -626,7 +638,7 @@ create_pagetables(vm_paddr_t *firstaddr)
        p4_p[PML4PML4I] |= PG_RW | PG_V | PG_U;
 
        /* Connect the Direct Map slot(s) up to the PML4. */
-       for (i = 0; i < NDMPML4E; i++) {
+       for (i = 0; i < ndmpdpphys; i++) {
                p4_p[DMPML4I + i] = DMPDPphys + ptoa(i);
                p4_p[DMPML4I + i] |= PG_RW | PG_V | PG_U;
        }
@@ -1698,7 +1710,7 @@ pmap_pinit(pmap_t pmap)
                pmap->pm_pml4[KPML4BASE + i] = (KPDPphys + (i << PAGE_SHIFT)) |
                    PG_RW | PG_V | PG_U;
        }
-       for (i = 0; i < NDMPML4E; i++) {
+       for (i = 0; i < ndmpdpphys; i++) {
                pmap->pm_pml4[DMPML4I + i] = (DMPDPphys + (i << PAGE_SHIFT)) |
                    PG_RW | PG_V | PG_U;
        }
@@ -1955,7 +1967,7 @@ pmap_release(pmap_t pmap)
 
        for (i = 0; i < NKPML4E; i++)   /* KVA */
                pmap->pm_pml4[KPML4BASE + i] = 0;
-       for (i = 0; i < NDMPML4E; i++)  /* Direct Map */
+       for (i = 0; i < ndmpdpphys; i++)/* Direct Map */
                pmap->pm_pml4[DMPML4I + i] = 0;
        pmap->pm_pml4[PML4PML4I] = 0;   /* Recursive Mapping */
 
_______________________________________________
freebsd-hackers@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/freebsd-hackers
To unsubscribe, send any mail to "freebsd-hackers-unsubscribe@freebsd.org"

Reply via email to