Hi!

> The (updated) patch follows.
Okay, a few comments...

> Signed-off-by: Rafael J. Wysocki <[EMAIL PROTECTED]>
>
> diff -Nru linux-2.6.11-rc3-mm1-orig/arch/i386/power/swsusp.S linux-2.6.11-rc3-mm1/arch/i386/power/swsusp.S
> --- linux-2.6.11-rc3-mm1-orig/arch/i386/power/swsusp.S	2004-12-24 22:34:31.000000000 +0100
> +++ linux-2.6.11-rc3-mm1/arch/i386/power/swsusp.S	2005-02-05 20:57:03.000000000 +0100
> @@ -28,28 +28,28 @@
>  	ret
>
>  ENTRY(swsusp_arch_resume)
> -	movl	$swsusp_pg_dir-__PAGE_OFFSET,%ecx
> -	movl	%ecx,%cr3
> +	movl	$swsusp_pg_dir-__PAGE_OFFSET,%ecx
> +	movl	%ecx,%cr3
>
> -	movl	pagedir_nosave, %ebx
> -	xorl	%eax, %eax
> -	xorl	%edx, %edx
> +	movl	pagedir_nosave, %edx

Move copy_loop: here.

> +	testl	%edx, %edx
> +	jz	done
>  	.p2align 4,,7
>
>  copy_loop:
> -	movl	4(%ebx,%edx),%edi
> -	movl	(%ebx,%edx),%esi
> +	movl	(%edx), %esi
> +	movl	4(%edx), %edi
>
>  	movl	$1024, %ecx
>  	rep
>  	movsl
>
> -	incl	%eax
> -	addl	$16, %edx
> -	cmpl	nr_copy_pages,%eax
> -	jb	copy_loop
> +	movl	12(%edx), %edx
> +	testl	%edx, %edx
> +	jnz	copy_loop

And do an unconditional jump here? Also, 12(%edx)... could it be handled
using asm-offsets, like on x86-64? (A sketch follows at the end of this
mail.)

> +static void __init free_eaten_memory(void) {

Please put the { on a new line.

> +	for_each_pbe(p, pblist)
> +		p->address = 0UL;
> +
> +	for_each_pbe(p, pblist) {
> +		p->address = get_usable_page(GFP_ATOMIC);
> +		if(!p->address)

I'd put a space between if and (. And probably do the same for
for_each_pbe... it behaves like a while.

> @@ -966,45 +1018,52 @@
>  					zone->zone_start_pfn));
>  	}
>
> -	/* Clear orig address */
> +	/* Clear orig addresses */
>
> -	for(i = 0, p = pagedir_nosave; i < nr_copy_pages; i++, p++) {
> +	for_each_pbe(p, pblist)
>  		ClearPageNosaveFree(virt_to_page(p->orig_address));
> -	}
>
> -	if (!does_collide_order((unsigned long)old_pagedir, pagedir_order)) {
> -		printk("not necessary\n");
> -		return check_pagedir();
> -	}
> +	tail = pblist + PB_PAGE_SKIP;
>
> -	while ((m = (void *) __get_free_pages(GFP_ATOMIC, pagedir_order)) != NULL) {
> -		if (!does_collide_order((unsigned long)m, pagedir_order))
> -			break;
> -		eaten_memory = m;
> -		printk( "." );
> -		*eaten_memory = c;
> -		c = eaten_memory;
> -	}
> +	/* Relocate colliding pages */
>
> -	if (!m) {
> -		printk("out of memory\n");
> -		ret = -ENOMEM;
> -	} else {
> -		pagedir_nosave =
> -			memcpy(m, old_pagedir, PAGE_SIZE << pagedir_order);
> +	for_each_pb_page(pbpage, pblist) {
> +		if (does_collide_order((unsigned long)pbpage, 0)) {
> +			m = (void *)get_usable_page(GFP_ATOMIC | __GFP_COLD);
> +			if (!m) {
> +				error = -ENOMEM;
> +				break;
> +			}
> +			memcpy(m, (void *)pbpage, PAGE_SIZE);
> +			if (pbpage == pblist)
> +				pblist = (struct pbe *)m;
> +			else
> +				tail->next = (struct pbe *)m;
> +
> +			free_page((unsigned long)pbpage);

Uh, you free it so that you can allocate it again, and again find out
that it is unusable? It will probably end up on the list of unusable
pages, so it is okay, but...

> +			pbpage = (struct pbe *)m;
> +
> +			/* We have to link the PBEs again */
> +
> +			for (p = pbpage ; p < pbpage + PB_PAGE_SKIP ; p++)

I'd avoid the " " before the ;.

> +	p = pbe;
> +	pbe += PB_PAGE_SKIP;
> +	do
> +		p->next = p + 1;
> +	while (p++ < pbe);

I've already seen this code somewhere around in a different variant...
Perhaps you want to make it an inline function? (See the sketch at the
end of this mail.)

> +	p->next = NULL;
> +	pr_debug("swsusp: Read %d pages, allocated %d PBEs\n", i, num);
> +	error = (i != swsusp_info.pagedir_pages); /* a sanity check */

If it is a sanity check, do BUG_ON().

> +	if(!(p = read_pagedir()))
> +		return -EFAULT;

Is the value used? By using pointers instead of normal ints, you kill
the possibility of meaningful error reporting...
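For reference, a minimal sketch of the usual kernel idiom for this,
ERR_PTR()/PTR_ERR() from <linux/err.h>, which keeps the error cause
while still returning a pointer. create_pbe_list() and fill_pbe_list()
are made-up helper names, and I'm guessing at the caller; only the
error-encoding pattern is the point:

#include <linux/err.h>

/* Sketch: return the PBE list on success, an ERR_PTR()-encoded errno
 * on failure, instead of collapsing every failure to NULL. */
static struct pbe *read_pagedir(void)
{
	struct pbe *pblist;

	pblist = create_pbe_list();		/* hypothetical helper */
	if (!pblist)
		return ERR_PTR(-ENOMEM);	/* out of memory */
	if (fill_pbe_list(pblist))		/* hypothetical helper */
		return ERR_PTR(-EIO);		/* a real I/O error, not -EFAULT */
	return pblist;
}

static int read_suspend_image(void)
{
	struct pbe *p = read_pagedir();

	if (IS_ERR(p))
		return PTR_ERR(p);		/* propagate the actual cause */
	pagedir_nosave = swsusp_pagedir_relocate(p);
	return pagedir_nosave ? 0 : -ENOMEM;
}

The caller can then report -ENOMEM, -EIO, etc. as appropriate instead
of a blanket -EFAULT.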
> +	if(!(pagedir_nosave = swsusp_pagedir_relocate(p)))
> +		return -ENOMEM;

Same here.
								Pavel
-- 
People were complaining that M$ turns users into beta-testers...
...jr ghea gurz vagb qrirybcref, naq gurl frrz gb yvxr vg gung jnl!
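On the 12(%edx)/asm-offsets question above: x86-64 generates its pbe_*
offsets in arch/x86_64/kernel/asm-offsets.c, and i386 could do the same.
A sketch of the entries, assuming struct pbe comes from
<linux/suspend.h> as in the patch; DEFINE() is the standard asm-offsets
macro:

#include <linux/stddef.h>
#include <linux/suspend.h>

/* Emits "->sym value" markers that the build turns into asm-offsets.h */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

void foo(void)
{
	DEFINE(pbe_address, offsetof(struct pbe, address));
	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
	DEFINE(pbe_next, offsetof(struct pbe, next));
}

swsusp.S could then say "movl pbe_next(%edx), %edx" instead of the
magic 12.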
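And for the duplicated PBE-linking loop, a minimal sketch of what a
shared inline helper could look like. The name link_pbe_page() is made
up, and I'm assuming the semantics of the quoted loops: link the PBEs
of one pagedir page in order and leave the last entry's ->next for the
caller to set (next page or NULL):

/* Chain the PBEs of one pagedir page together; the caller fixes up
 * the final ->next afterwards. */
static inline struct pbe *link_pbe_page(struct pbe *pbe)
{
	struct pbe *p;

	for (p = pbe; p < pbe + PB_PAGE_SKIP; p++)
		p->next = p + 1;
	return p;	/* last PBE of this page */
}

Both call sites could then use the same helper, and the do/while vs.
for variants would disappear.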