Author: marius
Date: Tue Jun 18 21:24:07 2013
New Revision: 251963
URL: http://svnweb.freebsd.org/changeset/base/251963

Log:
  MFC: r238190
  
  Implement ia64_physmem_alloc() and use it consistently to get memory
  before VM has been initialized.

Modified:
  stable/9/sys/ia64/ia64/machdep.c
  stable/9/sys/ia64/ia64/physmem.c
  stable/9/sys/ia64/ia64/pmap.c
  stable/9/sys/ia64/include/md_var.h
Directory Properties:
  stable/9/sys/   (props changed)
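
For reference, a minimal, hypothetical caller of the reworked
allocator (not part of the commit): ia64_physmem_alloc() now returns
a zeroed, region 7 (direct-mapped) virtual pointer instead of a
physical address, so early-boot callers use the result directly and
check for NULL on failure.  "foo_buf" and FOO_SIZE below are
illustrative names only.

    void *foo_buf;

    foo_buf = ia64_physmem_alloc(FOO_SIZE, PAGE_SIZE);
    if (foo_buf == NULL)
            panic("foo: cannot allocate early boot memory");
    /* The memory is already zeroed and mapped via region 7. */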

Modified: stable/9/sys/ia64/ia64/machdep.c
==============================================================================
--- stable/9/sys/ia64/ia64/machdep.c    Tue Jun 18 21:14:05 2013        (r251962)
+++ stable/9/sys/ia64/ia64/machdep.c    Tue Jun 18 21:24:07 2013        (r251963)
@@ -691,7 +691,6 @@ ia64_init(void)
        struct efi_md *md;
        pt_entry_t *pbvm_pgtbl_ent, *pbvm_pgtbl_lim;
        char *p;
-       vm_offset_t kernend;
        vm_size_t mdlen;
        int metadata_missing;
 
@@ -789,20 +788,6 @@ ia64_init(void)
                bootverbose = 1;
 
        /*
-        * Find the end of the kernel.
-        */
-#ifdef DDB
-       ksym_start = bootinfo->bi_symtab;
-       ksym_end = bootinfo->bi_esymtab;
-       kernend = (vm_offset_t)round_page(ksym_end);
-#else
-       kernend = (vm_offset_t)round_page(_end);
-#endif
-       /* But if the bootstrap tells us otherwise, believe it! */
-       if (bootinfo->bi_kernend)
-               kernend = round_page(bootinfo->bi_kernend);
-
-       /*
         * Wire things up so we can call the firmware.
         */
        map_pal_code();
@@ -821,9 +806,8 @@ ia64_init(void)
        pcpup = &pcpu0;
        ia64_set_k4((u_int64_t)pcpup);
        pcpu_init(pcpup, 0, sizeof(pcpu0));
-       dpcpu_init((void *)kernend, 0);
+       dpcpu_init(ia64_physmem_alloc(DPCPU_SIZE, PAGE_SIZE), 0);
        PCPU_SET(md.lid, ia64_get_lid());
-       kernend += DPCPU_SIZE;
        PCPU_SET(curthread, &thread0);
 
        /*
@@ -854,14 +838,15 @@ ia64_init(void)
        /*
         * Initialize error message buffer (at end of core).
         */
-       msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
+       msgbufp = ia64_physmem_alloc(msgbufsize, PAGE_SIZE);
        msgbufinit(msgbufp, msgbufsize);
 
        proc_linkup0(&proc0, &thread0);
        /*
         * Init mapping for kernel stack for proc 0
         */
-       thread0.td_kstack = pmap_steal_memory(KSTACK_PAGES * PAGE_SIZE);
+       p = ia64_physmem_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
+       thread0.td_kstack = (uintptr_t)p;
        thread0.td_kstack_pages = KSTACK_PAGES;
 
        mutex_init();
@@ -887,6 +872,11 @@ ia64_init(void)
        /*
         * Initialize debuggers, and break into them if appropriate.
         */
+#ifdef DDB
+       ksym_start = bootinfo->bi_symtab;
+       ksym_end = bootinfo->bi_esymtab;
+#endif
+
        kdb_init();
 
 #ifdef KDB

Modified: stable/9/sys/ia64/ia64/physmem.c
==============================================================================
--- stable/9/sys/ia64/ia64/physmem.c    Tue Jun 18 21:14:05 2013        (r251962)
+++ stable/9/sys/ia64/ia64/physmem.c    Tue Jun 18 21:24:07 2013        (r251963)
@@ -187,9 +187,72 @@ ia64_physmem_track(vm_paddr_t base, vm_s
        return (0);
 }
 
-vm_paddr_t
+void *
 ia64_physmem_alloc(vm_size_t len, vm_size_t align)
 {
+       vm_paddr_t base, lim, pa;
+       void *ptr;
+       u_int idx;
 
-       return (0);
+       if (phys_avail_segs == 0)
+               return (NULL);
+
+       len = round_page(len);
+
+       /*
+        * Try and allocate with least effort.
+        */
+       idx = phys_avail_segs * 2;
+       while (idx > 0) {
+               idx -= 2;
+               base = phys_avail[idx];
+               lim = phys_avail[idx + 1];
+
+               if (lim - base < len)
+                       continue;
+
+               /* First try from the end. */
+               pa = lim - len;
+               if ((pa & (align - 1)) == 0) {
+                       if (pa == base)
+                               ia64_physmem_remove(idx);
+                       else
+                               phys_avail[idx + 1] = pa;
+                       goto gotit;
+               }
+
+               /* Try from the start next. */
+               pa = base;
+               if ((pa & (align - 1)) == 0) {
+                       if (pa + len == lim)
+                               ia64_physmem_remove(idx);
+                       else
+                               phys_avail[idx] += len;
+                       goto gotit;
+               }
+       }
+
+       /*
+        * Find a good segment and split it up.
+        */
+       idx = phys_avail_segs * 2;
+       while (idx > 0) {
+               idx -= 2;
+               base = phys_avail[idx];
+               lim = phys_avail[idx + 1];
+
+               pa = (base + align - 1) & ~(align - 1);
+               if (pa + len <= lim) {
+                       ia64_physmem_delete(pa, len);
+                       goto gotit;
+               }
+       }
+
+       /* Out of luck. */
+       return (NULL);
+
+ gotit:
+       ptr = (void *)IA64_PHYS_TO_RR7(pa);
+       bzero(ptr, len);
+       return (ptr);
 }
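
The strategy above first tries to shave the page-rounded request off
the end of an existing phys_avail segment, then off its start, and
only splits a segment when neither end is suitably aligned.  The
alignment tests assume "align" is a power of two; a small standalone
illustration of that arithmetic (example values only, not from the
commit):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint64_t pa = 0x1234567;    /* arbitrary example address */
            uint64_t align = 0x4000;    /* 16 KiB, a power of two */

            /* (pa & (align - 1)) == 0 iff pa is align-aligned. */
            printf("aligned: %d\n", (pa & (align - 1)) == 0);

            /* Round pa up to the next align boundary: 0x1238000. */
            printf("rounded: %#jx\n",
                (uintmax_t)((pa + align - 1) & ~(align - 1)));
            return (0);
    }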

Modified: stable/9/sys/ia64/ia64/pmap.c
==============================================================================
--- stable/9/sys/ia64/ia64/pmap.c       Tue Jun 18 21:14:05 2013        (r251962)
+++ stable/9/sys/ia64/ia64/pmap.c       Tue Jun 18 21:24:07 2013        (r251963)
@@ -244,36 +244,6 @@ static int pmap_remove_vhpt(vm_offset_t 
 static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
                    vm_page_t m);
 
-vm_offset_t
-pmap_steal_memory(vm_size_t size)
-{
-       vm_size_t bank_size;
-       vm_offset_t pa, va;
-
-       size = round_page(size);
-
-       bank_size = phys_avail[1] - phys_avail[0];
-       while (size > bank_size) {
-               int i;
-               for (i = 0; phys_avail[i+2]; i+= 2) {
-                       phys_avail[i] = phys_avail[i+2];
-                       phys_avail[i+1] = phys_avail[i+3];
-               }
-               phys_avail[i] = 0;
-               phys_avail[i+1] = 0;
-               if (!phys_avail[0])
-                       panic("pmap_steal_memory: out of memory");
-               bank_size = phys_avail[1] - phys_avail[0];
-       }
-
-       pa = phys_avail[0];
-       phys_avail[0] += size;
-
-       va = IA64_PHYS_TO_RR7(pa);
-       bzero((caddr_t) va, size);
-       return va;
-}
-
 static void
 pmap_initialize_vhpt(vm_offset_t vhpt)
 {
@@ -317,7 +287,7 @@ pmap_bootstrap()
        struct ia64_pal_result res;
        vm_offset_t base;
        size_t size;
-       int i, j, count, ridbits;
+       int i, ridbits;
 
        /*
         * Query the PAL Code to find the loop parameters for the
@@ -379,7 +349,7 @@ pmap_bootstrap()
 
        pmap_ridmax = (1 << ridbits);
        pmap_ridmapsz = pmap_ridmax / 64;
-       pmap_ridmap = (uint64_t *)pmap_steal_memory(pmap_ridmax / 8);
+       pmap_ridmap = ia64_physmem_alloc(pmap_ridmax / 8, PAGE_SIZE);
        pmap_ridmap[0] |= 0xff;
        pmap_rididx = 0;
        pmap_ridcount = 8;
@@ -388,14 +358,10 @@ pmap_bootstrap()
        /*
         * Allocate some memory for initial kernel 'page tables'.
         */
-       ia64_kptdir = (void *)pmap_steal_memory(PAGE_SIZE);
+       ia64_kptdir = ia64_physmem_alloc(PAGE_SIZE, PAGE_SIZE);
        nkpt = 0;
        kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
 
-       for (i = 0; phys_avail[i+2]; i+= 2)
-               ;
-       count = i+2;
-
        /*
         * Determine a valid (mappable) VHPT size.
         */
@@ -409,35 +375,18 @@ pmap_bootstrap()
        if (pmap_vhpt_log2size & 1)
                pmap_vhpt_log2size--;
 
-       base = 0;
        size = 1UL << pmap_vhpt_log2size;
-       for (i = 0; i < count; i += 2) {
-               base = (phys_avail[i] + size - 1) & ~(size - 1);
-               if (base + size <= phys_avail[i+1])
-                       break;
-       }
-       if (!phys_avail[i])
+       base = (uintptr_t)ia64_physmem_alloc(size, size);
+       if (base == 0)
                panic("Unable to allocate VHPT");
 
-       if (base != phys_avail[i]) {
-               /* Split this region. */
-               for (j = count; j > i; j -= 2) {
-                       phys_avail[j] = phys_avail[j-2];
-                       phys_avail[j+1] = phys_avail[j-2+1];
-               }
-               phys_avail[i+1] = base;
-               phys_avail[i+2] = base + size;
-       } else
-               phys_avail[i] = base + size;
-
-       base = IA64_PHYS_TO_RR7(base);
        PCPU_SET(md.vhpt, base);
        if (bootverbose)
                printf("VHPT: address=%#lx, size=%#lx\n", base, size);
 
        pmap_vhpt_nbuckets = size / sizeof(struct ia64_lpte);
-       pmap_vhpt_bucket = (void *)pmap_steal_memory(pmap_vhpt_nbuckets *
-           sizeof(struct ia64_bucket));
+       pmap_vhpt_bucket = ia64_physmem_alloc(pmap_vhpt_nbuckets *
+           sizeof(struct ia64_bucket), PAGE_SIZE);
        for (i = 0; i < pmap_vhpt_nbuckets; i++) {
                /* Stolen memory is zeroed. */
                mtx_init(&pmap_vhpt_bucket[i].mutex, "VHPT bucket lock", NULL,
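
In pmap_bootstrap() above, the VHPT (whose log2 size is forced to be
even) is now obtained naturally aligned by passing its size as the
alignment, which is what lets the hand-rolled phys_avail[] splitting
go away.  A hedged sketch of that pattern, with an illustrative
log2size rather than a value taken from the commit:

    size_t log2size = 16;                        /* 64 KiB; log2 is even */
    size_t size = (size_t)1 << log2size;
    void *vhpt = ia64_physmem_alloc(size, size); /* alignment == size */
    if (vhpt == NULL)
            panic("Unable to allocate VHPT");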

Modified: stable/9/sys/ia64/include/md_var.h
==============================================================================
--- stable/9/sys/ia64/include/md_var.h  Tue Jun 18 21:14:05 2013        (r251962)
+++ stable/9/sys/ia64/include/md_var.h  Tue Jun 18 21:24:07 2013        (r251963)
@@ -93,7 +93,7 @@ int   ia64_highfp_save_ipi(void);
 struct ia64_init_return ia64_init(void);
 u_int  ia64_itc_freq(void);
 int    ia64_physmem_add(vm_paddr_t, vm_size_t);
-vm_paddr_t ia64_physmem_alloc(vm_size_t, vm_size_t);
+void   *ia64_physmem_alloc(vm_size_t, vm_size_t);
 int    ia64_physmem_delete(vm_paddr_t, vm_size_t);
 int    ia64_physmem_fini(void);
 int    ia64_physmem_init(void);