On 4/15/10, Samuel Thibault <samuel.thiba...@gnu.org> wrote:
>> -            assert(init_alloc_aligned(round_page(len), &addr));
>> +            assert(init_alloc_aligned(kernel_cmdline_len, &addr));
>
> This is not the same: init_alloc_aligned used to require the size to
> be aligned to the page size.
>
I don't understand this one; the modified "init_alloc_aligned" does
the same thing.
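
To illustrate what I mean (a standalone sketch, not kernel code; PAGE_SIZE
and round_page are hard-coded here the way mach/vm_param.h defines them on
i386):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define round_page(x)	(((unsigned long)(x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long len = 73;	/* e.g. strlen(cmdline) + 1 */

	/* The new _init_alloc_aligned does size = round_page(size) itself,
	   so rounding at the call site is redundant rather than missing.  */
	assert(round_page(len) == round_page(round_page(len)));
	printf("%lu bytes requested -> %lu bytes reserved\n",
	       len, round_page(len));
	return 0;
}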

>> -                    vm_size_t size = m[i].mod_end - m[i].mod_start;
>> +                    vm_size_t size = m[i].mod_end - m[i].mod_start + 1;
>
> Mmm, the multiboot spec is not so clear on whether mod_end is the
> address of the last byte, or the address after the last byte. Your
> change seems to imply the former; is it really so?
>
Well, the spec says: "The first two fields contain the start and end
addresses of the boot module itself", so I took that to mean that
"mod_end" is the address of the last byte.
Anyway, I modified it so that it is now safe in both cases.
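
Just to spell out why I think it is safe either way now (a quick
standalone check with a made-up module placement, not the actual kernel
code):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define round_page(x)	(((unsigned long)(x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long mod_start = 0x200000;	/* hypothetical module */
	unsigned long last_byte = 0x200fff;	/* its real last byte */

	unsigned long end_a = last_byte;	/* A: mod_end = last byte */
	unsigned long end_b = last_byte + 1;	/* B: mod_end = byte after it */

	/* With size = mod_end - mod_start + 1 we copy the whole module
	   under A and one extra byte under B -- never less than needed.  */
	printf("size A = %lu, size B = %lu\n",
	       end_a - mod_start + 1, end_b - mod_start + 1);

	/* And round_page(mod_end + 1) covers the module's last page in
	   both interpretations.  */
	printf("reserved up to %#lx (A) / %#lx (B)\n",
	       round_page(end_a + 1), round_page(end_b + 1));
	return 0;
}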

All the other things have been fixed.
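
In case it helps the review, this is the idea behind the new allocator in
a nutshell: a bitmap with one bit per physical page, and a first-fit scan
for a run of clear bits starting at a goal address. A minimal user-space
sketch of that scan (page count and reserved pages are made up; the real
code below works on physical pages and handles MACH_HYP separately):

#include <stdio.h>

#define NPAGES 32

static unsigned long bitmap[(NPAGES + 31) / 32];	/* 1 bit per page */

static int test_bit(int nr, unsigned long *addr)
{
	return (addr[nr >> 5] >> (nr & 0x1f)) & 1;
}

static void set_bit(int nr, unsigned long *addr)
{
	addr[nr >> 5] |= 1UL << (nr & 0x1f);
}

/* First fit: find 'pages' consecutive clear bits starting at 'goal'.  */
static int alloc_pages(int goal, int pages)
{
	int sidx = goal, found = 0, idx;

	for (idx = goal; idx < NPAGES; idx++) {
		if (test_bit(idx, bitmap)) {	/* page taken, restart the run */
			sidx = idx + 1;
			found = 0;
			continue;
		}
		if (++found == pages) {		/* run is big enough, claim it */
			for (idx = sidx; idx < sidx + pages; idx++)
				set_bit(idx, bitmap);
			return sidx;
		}
	}
	return -1;				/* out of memory */
}

int main(void)
{
	set_bit(3, bitmap);			/* pretend page 3 is reserved */
	printf("2 pages at index %d\n", alloc_pages(0, 2));	/* 0 */
	printf("2 pages at index %d\n", alloc_pages(0, 2));	/* 4: page 2 alone is too small */
	return 0;
}
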
diff --git a/i386/i386/bitops.h b/i386/i386/bitops.h
new file mode 100644
index 0000000..50716ab
--- /dev/null
+++ b/i386/i386/bitops.h
@@ -0,0 +1,38 @@
+#ifndef _MACH_I386_BITOPS_H_
+#define _MACH_I386_BITOPS_H_
+
+#include <mach/vm_param.h>
+
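+/* Clear bit nr in the word array at addr; returns the updated word. */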
+static inline int clear_bit(int nr, unsigned long *addr)
+{
+	unsigned long mask;
+	addr += nr >> 5;
+	mask = 1UL << (nr & 0x1f);
+
+	return *addr &= ~mask;
+}
+
+
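+/* Set bit nr in the word array at addr; returns the updated word. */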
+static inline int set_bit(int nr, unsigned long *addr)
+{
+	unsigned long mask;
+	addr += nr >> 5;
+	mask = 1UL << (nr & 0x1f);
+
+	return *addr |= mask;
+}
+
+
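+/* Test bit nr in the word array at addr; returns non-zero if it is set. */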
+static inline int test_bit(int nr, unsigned long *addr)
+{
+	unsigned long mask;
+	addr += nr >> 5;
+	mask = 1UL << (nr & 0x1f);
+
+	return (*addr & mask) != 0;
+}
+
+#endif
diff --git a/i386/i386/model_dep.h b/i386/i386/model_dep.h
index a41c474..81d9d27 100644
--- a/i386/i386/model_dep.h
+++ b/i386/i386/model_dep.h
@@ -28,6 +28,8 @@
 
 #include <mach/std_types.h>
 
+#define DEREF(addr) (*((vm_offset_t *)(addr)))
+
 /*
  * Find devices.  The system is alive.
  */
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index ca00078..edbbe99 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -42,6 +42,7 @@
 #include <mach/machine/multiboot.h>
 #include <mach/xen.h>
 
+#include <i386/bitops.h>
 #include <i386/vm_param.h>
 #include <kern/assert.h>
 #include <kern/cpu_number.h>
@@ -104,18 +105,16 @@ unsigned long la_shift = VM_MIN_KERNEL_ADDRESS;
 struct multiboot_info boot_info;
 #endif	/* MACH_XEN */
 
+/*
+ * Set to 1 in init_boot_allocator.
+ * _init_alloc_aligned always checks it before any allocation,
+ * and invokes init_boot_allocator if it is still 0.
+ */
+int boot_allocator_initialized = 0;
+
 /* Command line supplied to kernel.  */
 char *kernel_cmdline = "";
-
-/* This is used for memory initialization:
-   it gets bumped up through physical memory
-   that exists and is not occupied by boot gunk.
-   It is not necessarily page-aligned.  */
-static vm_offset_t avail_next
-#ifndef MACH_HYP
-	= 0x1000 /* XX end of BIOS data area */
-#endif	/* MACH_HYP */
-	;
+int kernel_cmdline_len = 0;
 
 /* Possibly overestimated amount of available memory
    still remaining to be handed to the VM system.  */
@@ -334,6 +333,8 @@ i386at_init(void)
 	 */
 	mem_size_init();
 
+	init_boot_allocator();
+
 #ifdef MACH_XEN
 	kernel_cmdline = (char*) boot_info.cmd_line;
 #else	/* MACH_XEN */
@@ -341,33 +342,37 @@ i386at_init(void)
 	 * is too far in physical memory.  */
 	if (boot_info.flags & MULTIBOOT_CMDLINE) {
 		vm_offset_t addr;
-		int len = strlen ((char*)phystokv(boot_info.cmdline)) + 1;
-		assert(init_alloc_aligned(round_page(len), &addr));
+
+		assert(init_alloc_aligned(kernel_cmdline_len, &addr));
 		kernel_cmdline = (char*) phystokv(addr);
-		memcpy(kernel_cmdline, (char*)phystokv(boot_info.cmdline), len);
+		memcpy(kernel_cmdline, (char*)phystokv(boot_info.cmdline), kernel_cmdline_len);
+		bootmap_free(atop(round_page(boot_info.cmdline - phys_first_addr)), atop(trunc_page(kernel_cmdline_len)));
 		boot_info.cmdline = addr;
 	}
 
 	if (boot_info.flags & MULTIBOOT_MODS) {
 		struct multiboot_module *m;
 		vm_offset_t addr;
-		int i;
-
-		assert(init_alloc_aligned(round_page(boot_info.mods_count * sizeof(*m)), &addr));
-		m = (void*) phystokv(addr);
-		memcpy(m, (void*) phystokv(boot_info.mods_addr), boot_info.mods_count * sizeof(*m));
+		int i,size;
+		
+		size = boot_info.mods_count * sizeof(*m);
+		assert(init_alloc_aligned(size, &addr));
+		m = (struct multiboot_module *) phystokv(addr);
+		memcpy(m, (void*) phystokv(boot_info.mods_addr), size);
+		bootmap_free(atop(round_page(boot_info.mods_addr)), atop(trunc_page(size) ));
 		boot_info.mods_addr = addr;
 
 		for (i = 0; i < boot_info.mods_count; i++) {
-			vm_size_t size = m[i].mod_end - m[i].mod_start;
-			assert(init_alloc_aligned(round_page(size), &addr));
+			vm_size_t size = m[i].mod_end - m[i].mod_start + 1;
+			assert(init_alloc_aligned(size, &addr));
 			memcpy((void*) phystokv(addr), (void*) phystokv(m[i].mod_start), size);
+			bootmap_free(atop(round_page(m[i].mod_start)),atop(trunc_page(size)));
 			m[i].mod_start = addr;
-			m[i].mod_end = addr + size;
-
+			m[i].mod_end = addr + size - 1;
 			size = strlen((char*) phystokv(m[i].string)) + 1;
-			assert(init_alloc_aligned(round_page(size), &addr));
+			assert(init_alloc_aligned(size, &addr));
 			memcpy((void*) phystokv(addr), (void*) phystokv(m[i].string), size);
+			bootmap_free(atop(round_page(m[i].string-phys_first_addr)),atop(trunc_page(round_page(size))));
 			m[i].string = addr;
 		}
 	}
@@ -650,127 +655,248 @@ unsigned int pmap_free_pages(void)
 	return atop(avail_remaining);
 }
 
-/* Always returns page-aligned regions.  */
-boolean_t
-init_alloc_aligned(vm_size_t size, vm_offset_t *addrp)
+/* boot_bitmap initially lives at 16MB; init_boot_allocator moves it past any reserved region it overlaps. */
+static vm_offset_t boot_bitmap = 16*1024*1024;
+static vm_offset_t boot_bitmap_next_avail;
+
+static vm_offset_t tmp_map;
+static vm_offset_t tmp_map_ptr;
+static vm_size_t tmp_map_size = 0;
+
+void
+bootmap_free(unsigned long sidx, unsigned long pages)
 {
-	vm_offset_t addr;
+	unsigned long eidx = sidx + pages;
+	for (; sidx < eidx; sidx++)
+		clear_bit(sidx, (unsigned long *) boot_bitmap);
+}
+
+void
+bootmap_allocate(unsigned long sidx, unsigned long pages)
+{
+	unsigned long eidx = sidx + pages;
+	for (; sidx < eidx; sidx++)
+		set_bit(sidx, (unsigned long *) boot_bitmap);
+}
+
+void register_reserved_mem(vm_offset_t addr, vm_size_t size)
+{
+	DEREF(tmp_map_ptr) = addr;
+	DEREF(tmp_map_ptr + 4) = size;
+	tmp_map_ptr += 8;
+}
+
+void
+init_boot_allocator(void){
 
 #ifdef MACH_HYP
-	/* There is none */
-	if (!avail_next)
-		avail_next = _kvtophys(boot_info.pt_base) + (boot_info.nr_pt_frames + 3) * 0x1000;
-#else	/* MACH_HYP */
+	if (boot_bitmap_next_avail == 0)
+		boot_bitmap_next_avail = _kvtophys(boot_info.pt_base) + (boot_info.nr_pt_frames + 3) * 0x1000;
+#else
+	boot_bitmap_next_avail = (phys_last_addr > 16*1024*1024) ? 16*1024*1024 : 0x1000;
+
+	vm_offset_t s_addr,e_addr;
 	extern char start[], end[];
-	int i;
-	static int wrapped = 0;
-
-	/* Memory regions to skip.  */
-	vm_offset_t cmdline_start_pa = boot_info.flags & MULTIBOOT_CMDLINE
-		? boot_info.cmdline : 0;
-	vm_offset_t cmdline_end_pa = cmdline_start_pa
-		? cmdline_start_pa+strlen((char*)phystokv(cmdline_start_pa))+1
-		: 0;
-	vm_offset_t mods_start_pa = boot_info.flags & MULTIBOOT_MODS
-		? boot_info.mods_addr : 0;
-	vm_offset_t mods_end_pa = mods_start_pa
-		? mods_start_pa
-		  + boot_info.mods_count * sizeof(struct multiboot_module)
-		: 0;
-
-	retry:
-#endif	/* MACH_HYP */
 
-	/* Page-align the start address.  */
-	avail_next = round_page(avail_next);
+	vm_offset_t boot_bitmap_end;
+	vm_size_t map_size;
 
-#ifndef MACH_HYP
-	/* Start with memory above 16MB, reserving the low memory for later. */
-	/* Don't care on Xen */
-	if (!wrapped && phys_last_addr > 16 * 1024*1024)
-	  {
-	    if (avail_next < 16 * 1024*1024)
-	      avail_next = 16 * 1024*1024;
-	    else if (avail_next == phys_last_addr)
-	      {
-		/* We have used all the memory above 16MB, so now start on
-		   the low memory.  This will wind up at the end of the list
-		   of free pages, so it should not have been allocated to any
-		   other use in early initialization before the Linux driver
-		   glue initialization needs to allocate low memory.  */
-		avail_next = 0x1000;
-		wrapped = 1;
-	      }
-	  }
-#endif	/* MACH_HYP */
-
-	/* Check if we have reached the end of memory.  */
-        if (avail_next == 
-		(
-#ifndef MACH_HYP
-		wrapped ? 16 * 1024*1024 : 
-#endif	/* MACH_HYP */
-		phys_last_addr))
-		return FALSE;
+	vm_offset_t entry_addr;
+	vm_size_t entry_size;
 
-	/* Tentatively assign the current location to the caller.  */
-	addr = avail_next;
+	/* First, work out how large tmp_map needs to be.  */
 
-	/* Bump the pointer past the newly allocated region
-	   and see where that puts us.  */
-	avail_next += size;
+	/*
+	 * 8 bytes for the first page,
+	 * + 8 bytes for the kernel code, data, and bss,
+	 * + 8 bytes for the I/O and ROM area,
+	 * + 8 bytes for the kernel symbol table,
+	 * + 8 bytes for the boot bitmap.
+	 * = 40 bytes
+	 */
+	tmp_map_size += 40;
 
-#ifndef MACH_HYP
-	/* Skip past the I/O and ROM area.  */
-	if ((avail_next > (boot_info.mem_lower * 0x400)) && (addr < 0x100000))
+	if (boot_info.flags & MULTIBOOT_CMDLINE)
 	{
-		avail_next = 0x100000;
-		goto retry;
+		tmp_map_size +=8;
 	}
 
-	/* Skip our own kernel code, data, and bss.  */
-	if ((avail_next > (vm_offset_t)start) && (addr < (vm_offset_t)end))
+	if (boot_info.flags & MULTIBOOT_MODS)
 	{
-		avail_next = (vm_offset_t)end;
-		goto retry;
-	}
+		/* 8 bytes for the multiboot_module array. */
+		tmp_map_size +=8;
 
-	/* Skip any areas occupied by valuable boot_info data.  */
-	if ((avail_next > cmdline_start_pa) && (addr < cmdline_end_pa))
-	{
-		avail_next = cmdline_end_pa;
-		goto retry;
+		/* 8 bytes for each module,
+		 * + 8 bytes for each module's string.
+		 */
+		tmp_map_size += (boot_info.mods_count << 4);
 	}
-	if ((avail_next > mods_start_pa) && (addr < mods_end_pa))
-	{
-		avail_next = mods_end_pa;
-		goto retry;
-	}
-	if ((avail_next > kern_sym_start) && (addr < kern_sym_end))
+
+	/* Now we can allocate tmp_map itself.  */
+	assert(tmp_map = tmp_map_ptr = (vm_offset_t) alloca(tmp_map_size));
+
+	/* Calculate the required bitmap size */
+	map_size = ((atop(round_page(phys_last_addr - phys_first_addr))) + 7) >> 3;
+	map_size = (map_size + sizeof(long) - 1UL) & ~(sizeof(long) -1UL);
+	boot_bitmap_end = boot_bitmap + map_size - 1;
+
+	/* Register the first page */
+	register_reserved_mem(0,PAGE_SIZE);
+
+	/* Register the I/O and ROM area.  */
+	s_addr = trunc_page(boot_info.mem_lower * 0x400);
+	e_addr = 0x100000;
+	register_reserved_mem(s_addr, e_addr-s_addr);
+ 	
+	/* Register our own kernel code, data, and bss.  */
+	s_addr = trunc_page(start);
+	e_addr = round_page(end);
+	register_reserved_mem(s_addr, e_addr-s_addr);
+
+	/* Register the kernel symbol table.  */
+	s_addr = trunc_page(kern_sym_start);
+	e_addr = round_page(kern_sym_end);
+	register_reserved_mem(s_addr, e_addr-s_addr);
+
+	/* Register kernel command line */
+	if (boot_info.flags & MULTIBOOT_CMDLINE)
 	{
-		avail_next = kern_sym_end;
-		goto retry;
+		kernel_cmdline_len = strlen ((char*)phystokv(boot_info.cmdline)) + 1;
+		s_addr = trunc_page(boot_info.cmdline);
+		register_reserved_mem(s_addr, round_page(kernel_cmdline_len));
 	}
+
+	/* Register the areas occupied by the loaded modules and their strings.  */
 	if (boot_info.flags & MULTIBOOT_MODS)
 	{
 		struct multiboot_module *m = (struct multiboot_module *)
 			phystokv(boot_info.mods_addr);
+
+		s_addr = trunc_page(boot_info.mods_addr);
+		register_reserved_mem(s_addr, round_page(boot_info.mods_count * sizeof(*m)));
+
+		int i;
 		for (i = 0; i < boot_info.mods_count; i++)
 		{
-			if ((avail_next > m[i].mod_start)
-			    && (addr < m[i].mod_end))
-			{
-				avail_next = m[i].mod_end;
-				goto retry;
-			}
-			/* XXX string */
+			s_addr = trunc_page(m[i].mod_start);
+			/* The "+1" is here just to make sure that we're safe
+			 * in case m[i].mod_end points to the last byte of
+			 * the module, not to the byte following it.
+			 */
+			e_addr = round_page(m[i].mod_end + 1);
+			register_reserved_mem(s_addr, e_addr-s_addr);
+			s_addr = trunc_page(m[i].string);
+			register_reserved_mem(s_addr, round_page(strlen((char*)phystokv(m[i].string))+1));
+		}
+	}
+
+	/* Find a free spot for the allocator bitmap.  */
+retry:
+	for (entry_addr = tmp_map, entry_size = tmp_map + 4;
+	     entry_addr < tmp_map + tmp_map_size;
+	     entry_addr += 8, entry_size += 8)
+	{
+		if ((boot_bitmap_end >= DEREF(entry_addr)) &&
+		    (boot_bitmap <= DEREF(entry_addr) + DEREF(entry_size) - 1))
+		{
+			boot_bitmap = DEREF(entry_addr) + DEREF(entry_size);
+			boot_bitmap_end = boot_bitmap + map_size - 1;
+			goto retry;
+		}
 	}
+
+	memset((void *)boot_bitmap, 0x00, map_size); /* map is initially free */
+
+	/* Now we can register boot_bitmap itself.  */
+	register_reserved_mem(boot_bitmap, round_page(map_size));
+
+	/* Walk tmp_map and mark every registered region as allocated.  */
+	for (entry_addr = tmp_map, entry_size = tmp_map + 4;
+	     entry_addr < tmp_map + tmp_map_size;
+	     entry_addr += 8, entry_size += 8)
+	{
+		bootmap_allocate(atop(DEREF(entry_addr)), atop(DEREF(entry_size)));
+	}
+
 #endif	/* MACH_HYP */
 
-	avail_remaining -= size;
+	boot_allocator_initialized = 1;
+}
 
-	*addrp = addr;
+boolean_t
+init_alloc_aligned(vm_size_t size, vm_offset_t *addrp)
+{
+	boolean_t rt;
+#ifdef MACH_HYP
+	rt = _init_alloc_aligned(size, addrp, boot_bitmap_next_avail);
+#else /* MACH_HYP */
+	/* Try to allocate from boot_bitmap_next_avail first.  */
+	if (!_init_alloc_aligned(size, addrp, boot_bitmap_next_avail)) {
+		/* Failed; now try to allocate from the beginning of memory.  */
+		if (_init_alloc_aligned(size, addrp, phys_first_addr)) {
+			rt = TRUE;
+		} else {
+			rt = FALSE;
+		}
+	} else {
+		rt = TRUE;
+	}
+#endif /* MACH_HYP */
+	/* Save a hint for the next allocation, but only on success.  */
+	if (rt) boot_bitmap_next_avail = round_page(*addrp + size);
+	return rt;
+}
+
+/* Search the physical address space from "goal" to "phys_last_addr" for "size" bytes of free contiguous space */
+boolean_t
+_init_alloc_aligned(vm_size_t size, vm_offset_t *addrp, vm_offset_t goal)
+{
+	if (!boot_allocator_initialized){
+		init_boot_allocator();
+	}
+
+	size = round_page(size);
+	goal = round_page(goal);
+	
+	if (!(goal >= phys_first_addr && goal+size <= phys_last_addr)){
+		return FALSE;
+	}
+
+#ifdef MACH_HYP
+	*addrp = boot_bitmap_next_avail;
+#else /* MACH_HYP */
+	unsigned long last_page, first_page;
+	unsigned long *map = (unsigned long *) boot_bitmap;
+	unsigned long sidx, idx;
+	vm_size_t size_found = 0;
+
+	last_page = atop(phys_last_addr);
+	first_page = atop(phys_first_addr);
+	sidx = idx = atop(round_page(goal - phys_first_addr));
+
+	while (1) {
+		if (idx + first_page >= last_page)
+			return FALSE;
+
+		if (test_bit(idx, map)) {
+			sidx = ++idx;
+			size_found = 0;
+			continue;
+		}
+
+		size_found += PAGE_SIZE;
+		if (size_found >= size)
+			break;
+
+		idx++;
+	}
+
+
+	*addrp = ptoa(sidx) + phys_first_addr;
+	bootmap_allocate(sidx,atop(size));
+#endif /* MACH_HYP */
+
+	avail_remaining -= size;
 	return TRUE;
 }
 
