The patch seems to work and survives AIM7. However, after boot we only know about ~30% of the mlocked pages. With this additional patch, which opportunistically moves pages off the LRU immediately, the counter becomes accurate (for all practical purposes), like the non-lazy version:
Index: current/mm/memory.c
===================================================================
--- current.orig/mm/memory.c	2007-02-05 10:44:10.000000000 -0800
+++ current/mm/memory.c	2007-02-05 11:01:46.000000000 -0800
@@ -919,6 +919,30 @@ void anon_add(struct vm_area_struct *vma
 }

 /*
+ * Opportunistically move the page off the LRU
+ * if possible. If we do not succeed then the LRU
+ * scans will take the page off.
+ */
+void try_to_set_mlocked(struct page *page)
+{
+	struct zone *zone;
+	unsigned long flags;
+
+	if (!PageLRU(page) || PageMlocked(page))
+		return;
+
+	zone = page_zone(page);
+	if (spin_trylock_irqsave(&zone->lru_lock, flags)) {
+		if (PageLRU(page) && !PageMlocked(page)) {
+			ClearPageLRU(page);
+			list_del(&page->lru);
+			SetPageMlocked(page);
+			__inc_zone_page_state(page, NR_MLOCK);
+		}
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+}
+/*
  * Do a quick page-table lookup for a single page.
  */
 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
@@ -978,6 +1002,8 @@ struct page *follow_page(struct vm_area_
 		set_page_dirty(page);
 		mark_page_accessed(page);
 	}
+	if (vma->vm_flags & VM_LOCKED)
+		try_to_set_mlocked(page);
 unlock:
 	pte_unmap_unlock(ptep, ptl);
 out:
@@ -2271,6 +2297,8 @@ retry:
 	else {
 		inc_mm_counter(mm, file_rss);
 		page_add_file_rmap(new_page);
+		if (vma->vm_flags & VM_LOCKED)
+			try_to_set_mlocked(new_page);
 		if (write_access) {
 			dirty_page = new_page;
 			get_page(dirty_page);
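
For a quick sanity check from userspace, something like the sketch below shows the counter moving as soon as mlock() faults the pages in. This is not part of the patch; it assumes NR_MLOCK is exported as an "Mlocked:" line in /proc/meminfo, as proposed elsewhere in this series:

/* mlock-check.c: sketch of a userspace sanity check for NR_MLOCK.
 * Assumes the counter is visible as an "Mlocked:" line in
 * /proc/meminfo (the field name is an assumption of this sketch).
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

/* Return the Mlocked: value from /proc/meminfo in kB, or -1. */
static long mlocked_kb(void)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char line[128];
	long kb = -1;

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "Mlocked: %ld kB", &kb) == 1)
			break;
	fclose(f);
	return kb;
}

int main(void)
{
	size_t len = 16 * 1024 * 1024;	/* 16 MB */
	void *p;

	printf("Mlocked before: %ld kB\n", mlocked_kb());

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED || mlock(p, len)) {
		perror("mmap/mlock");
		return 1;
	}

	/* mlock() faults the pages in; with the patch above the fault
	 * path takes them off the LRU and bumps NR_MLOCK right away,
	 * so the counter should rise by ~16384 kB immediately rather
	 * than waiting for the LRU scan to find the pages. */
	printf("Mlocked after:  %ld kB\n", mlocked_kb());

	munlock(p, len);
	munmap(p, len);
	return 0;
}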