Author: kib
Date: Sun Sep 20 22:16:24 2020
New Revision: 365931
URL: https://svnweb.freebsd.org/changeset/base/365931

Log:
  amd64 pmap: handle cases where the PML4 page table page is not allocated.
  
  This is possible in the LA57 pmap configuration; a minimal sketch of the
  resulting caller pattern follows the file list below.
  
  Noted by:     alc
  Reviewed by:  alc, markj
  Sponsored by: The FreeBSD Foundation
  Differential revision:        https://reviews.freebsd.org/D26492

Modified:
  head/sys/amd64/amd64/pmap.c
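
The change repeats one pattern: with the LA57 pmap configuration, pmap_pml4e()
can return NULL when the PML4 page table page has not been allocated, so each
caller in the hunks below gains a NULL test ahead of the existing PG_V check.
The stand-alone C program below is a sketch of that caller pattern under
simplified assumptions; toy_pmap, toy_pml4e, toy_walk, and the PG_V/NBPML4/
PML4MASK values here are illustrative stand-ins, not the kernel's definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pt_entry_t;
typedef uint64_t vm_offset_t;

#define PG_V     0x1ULL             /* "valid" bit, simplified stand-in */
#define NBPML4   (1ULL << 39)       /* bytes mapped by one PML4 entry */
#define PML4MASK (NBPML4 - 1)
#define NSLOTS   4                  /* toy table size; the real PML4 has 512 */

/* Toy pmap: a tiny top-level table; a NULL slot means "page not allocated". */
struct toy_pmap {
	pt_entry_t *pml4[NSLOTS];
};

/* Stand-in for pmap_pml4e(): may return NULL when the backing page is absent. */
static pt_entry_t *
toy_pml4e(struct toy_pmap *pmap, vm_offset_t va)
{
	return (pmap->pml4[(va / NBPML4) % NSLOTS]);
}

/* Mirrors the loop structure of the pmap_protect()/pmap_unwire() hunks. */
static void
toy_walk(struct toy_pmap *pmap, vm_offset_t sva, vm_offset_t eva)
{
	pt_entry_t *pml4e;
	vm_offset_t va_next;

	for (; sva < eva; sva = va_next) {
		/*
		 * Advance to the start of the next PML4 slot.  In pmap.c this
		 * computation sits inside the skip branch; hoisted for brevity.
		 */
		va_next = (sva + NBPML4) & ~PML4MASK;
		if (va_next < sva)	/* wrapped around: clamp to end */
			va_next = eva;

		pml4e = toy_pml4e(pmap, sva);
		if (pml4e == NULL || (*pml4e & PG_V) == 0) {
			/* No PML4 page, or entry not valid: skip the slot. */
			printf("skip  %#llx..%#llx\n",
			    (unsigned long long)sva,
			    (unsigned long long)va_next);
			continue;
		}
		printf("visit %#llx..%#llx\n",
		    (unsigned long long)sva,
		    (unsigned long long)va_next);
	}
}

int
main(void)
{
	pt_entry_t valid = PG_V;
	struct toy_pmap pm = { .pml4 = { &valid, NULL, &valid, NULL } };

	toy_walk(&pm, 0, (vm_offset_t)NSLOTS * NBPML4);
	return (0);
}

In pmap.c the walkers then continue down to the PDP, PD, and PT levels after
this check; the sketch stops at the top level to keep the new pattern visible.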

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Sun Sep 20 21:32:52 2020        (r365930)
+++ head/sys/amd64/amd64/pmap.c Sun Sep 20 22:16:24 2020        (r365931)
@@ -6219,7 +6219,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t
        PMAP_LOCK(pmap);
        for (; sva < eva; sva = va_next) {
                pml4e = pmap_pml4e(pmap, sva);
-               if ((*pml4e & PG_V) == 0) {
+               if (pml4e == NULL || (*pml4e & PG_V) == 0) {
                        va_next = (sva + NBPML4) & ~PML4MASK;
                        if (va_next < sva)
                                va_next = eva;
@@ -6502,7 +6502,7 @@ restart:
                if (!pmap_pkru_same(pmap, va, va + NBPDP))
                        return (KERN_PROTECTION_FAILURE);
                pml4e = pmap_pml4e(pmap, va);
-               if ((*pml4e & PG_V) == 0) {
+               if (pml4e == NULL || (*pml4e & PG_V) == 0) {
                        mp = _pmap_allocpte(pmap, pmap_pml4e_pindex(va),
                            NULL, va);
                        if (mp == NULL) {
@@ -7363,7 +7363,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t 
        PMAP_LOCK(pmap);
        for (; sva < eva; sva = va_next) {
                pml4e = pmap_pml4e(pmap, sva);
-               if ((*pml4e & PG_V) == 0) {
+               if (pml4e == NULL || (*pml4e & PG_V) == 0) {
                        va_next = (sva + NBPML4) & ~PML4MASK;
                        if (va_next < sva)
                                va_next = eva;
@@ -7488,7 +7488,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_
                    ("pmap_copy: invalid to pmap_copy page tables"));
 
                pml4e = pmap_pml4e(src_pmap, addr);
-               if ((*pml4e & PG_V) == 0) {
+               if (pml4e == NULL || (*pml4e & PG_V) == 0) {
                        va_next = (addr + NBPML4) & ~PML4MASK;
                        if (va_next < addr)
                                va_next = end_addr;
@@ -8571,7 +8571,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t 
        PMAP_LOCK(pmap);
        for (; sva < eva; sva = va_next) {
                pml4e = pmap_pml4e(pmap, sva);
-               if ((*pml4e & PG_V) == 0) {
+               if (pml4e == NULL || (*pml4e & PG_V) == 0) {
                        va_next = (sva + NBPML4) & ~PML4MASK;
                        if (va_next < sva)
                                va_next = eva;
@@ -9795,6 +9795,8 @@ pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t
        PMAP_LOCK(pmap);
 
        pml4 = pmap_pml4e(pmap, va);
+       if (pml4 == NULL)
+               goto done;
        ptr[idx++] = *pml4;
        if ((*pml4 & PG_V) == 0)
                goto done;
@@ -10893,7 +10895,7 @@ pmap_pkru_update_range(pmap_t pmap, vm_offset_t sva, v
 
        for (changed = false, va = sva; va < eva; va = va_next) {
                pml4e = pmap_pml4e(pmap, va);
-               if ((*pml4e & X86_PG_V) == 0) {
+               if (pml4e == NULL || (*pml4e & X86_PG_V) == 0) {
                        va_next = (va + NBPML4) & ~PML4MASK;
                        if (va_next < va)
                                va_next = eva;