On Fri, 15 Sep 2000, James Lewis Nance wrote:
> On Fri, Sep 15, 2000 at 10:09:57PM -0300, Rik van Riel wrote:
> > Hi,
> > 
> > today I released a new VM patch with 4 small improvements:
> 
> Are these 4 improvements in the 2.4.0-test9-pre1 patch that Linus
> just released?

Oh well, I may as well give it now ;)

The patch below upgrades 2.4.0-test9-pre1 VM to a
VM with the 4 changes...

They /should/ be stable, but I'd really appreciate
a bit more testing before I give the patch to Linus.

(I know the VM patch included in 2.4.0-test9-pre1 is
stable, that one got heavier testing than any VM patch
I ever made. I was testing the system so heavily that I
had to upgrade my 8139too driver and other things to keep
the system from crashing ;))

regards,

Rik
--
"What you're running that piece of shit Gnome?!?!"
       -- Miguel de Icaza, UKUUG 2000

http://www.conectiva.com/               http://www.surriel.com/


--- linux-2.4.8-test9-pre1/fs/buffer.c.orig     Fri Sep 15 23:23:09 2000
+++ linux-2.4.8-test9-pre1/fs/buffer.c  Fri Sep 15 23:26:24 2000
@@ -705,7 +705,6 @@
 static void refill_freelist(int size)
 {
        if (!grow_buffers(size)) {
-               //wakeup_bdflush(1);
                balance_dirty(NODEV);
                wakeup_kswapd(1);
        }
@@ -863,15 +862,14 @@
 
        dirty = size_buffers_type[BUF_DIRTY] >> PAGE_SHIFT;
        tot = nr_free_buffer_pages();
-//     tot -= size_buffers_type[BUF_PROTECTED] >> PAGE_SHIFT;
 
        dirty *= 200;
        soft_dirty_limit = tot * bdf_prm.b_un.nfract;
        hard_dirty_limit = soft_dirty_limit * 2;
 
        /* First, check for the "real" dirty limit. */
-       if (dirty > soft_dirty_limit || inactive_shortage()) {
-               if (dirty > hard_dirty_limit)
+       if (dirty > soft_dirty_limit) {
+               if (dirty > hard_dirty_limit || inactive_shortage())
                        return 1;
                return 0;
        }
@@ -2279,7 +2277,9 @@
 {
        struct buffer_head * tmp, * bh = page->buffers;
        int index = BUFSIZE_INDEX(bh->b_size);
+       int loop = 0;
 
+cleaned_buffers_try_again:
        spin_lock(&lru_list_lock);
        write_lock(&hash_table_lock);
        spin_lock(&free_list[index].lock);
@@ -2325,8 +2325,14 @@
        spin_unlock(&free_list[index].lock);
        write_unlock(&hash_table_lock);
        spin_unlock(&lru_list_lock);
-       if (wait)
+       if (wait) {
                sync_page_buffers(bh, wait);
+               /* We waited synchronously, so we can free the buffers. */
+               if (wait > 1 && !loop) {
+                       loop = 1;
+                       goto cleaned_buffers_try_again;
+               }
+       }
        return 0;
 }
 
--- linux-2.4.8-test9-pre1/mm/swap.c.orig       Fri Sep 15 23:23:11 2000
+++ linux-2.4.8-test9-pre1/mm/swap.c    Fri Sep 15 23:24:23 2000
@@ -161,14 +161,19 @@
         * Don't touch it if it's not on the active list.
         * (some pages aren't on any list at all)
         */
-       if (PageActive(page) && (page_count(page) == 1 || page->buffers) &&
+       if (PageActive(page) && (page_count(page) <= 2 || page->buffers) &&
                        !page_ramdisk(page)) {
 
                /*
                 * We can move the page to the inactive_dirty list
                 * if we know there is backing store available.
+                *
+                * We also move pages here that we cannot free yet,
+                * but may be able to free later - because most likely
+                * we're holding an extra reference on the page which
+                * will be dropped right after deactivate_page().
                 */
-               if (page->buffers) {
+               if (page->buffers || page_count(page) == 2) {
                        del_page_from_active_list(page);
                        add_page_to_inactive_dirty_list(page);
                /*
@@ -181,8 +186,7 @@
                        add_page_to_inactive_clean_list(page);
                }
                /*
-                * ELSE: no backing store available, leave it on
-                * the active list.
+                * OK, we cannot free the page. Leave it alone.
                 */
        }
 }      
--- linux-2.4.8-test9-pre1/mm/vmscan.c.orig     Fri Sep 15 23:23:11 2000
+++ linux-2.4.8-test9-pre1/mm/vmscan.c  Fri Sep 15 23:32:10 2000
@@ -103,8 +103,8 @@
                UnlockPage(page);
                vma->vm_mm->rss--;
                flush_tlb_page(vma, address);
-               page_cache_release(page);
                deactivate_page(page);
+               page_cache_release(page);
                goto out_failed;
        }
 
@@ -681,19 +681,26 @@
                        if (freed_page && !free_shortage())
                                break;
                        continue;
+               } else if (page->mapping && !PageDirty(page)) {
+                       /*
+                        * If a page had an extra reference in
+                        * deactivate_page(), we will find it here.
+                        * Now the page is really freeable, so we
+                        * move it to the inactive_clean list.
+                        */
+                       UnlockPage(page);
+                       del_page_from_inactive_dirty_list(page);
+                       add_page_to_inactive_clean_list(page);
+                       cleaned_pages++;
                } else {
                        /*
-                        * Somebody else freed the bufferheads for us?
-                        * This really shouldn't happen, but we check
-                        * for it anyway.
+                        * OK, we don't know what to do with the page.
+                        * It's no use keeping it here, so we move it to
+                        * the active list.
                         */
-                       printk("VM: page_launder, found pre-cleaned page ?!\n");
                        UnlockPage(page);
-                       if (page->mapping && !PageDirty(page)) {
-                               del_page_from_inactive_dirty_list(page);
-                               add_page_to_inactive_clean_list(page);
-                               cleaned_pages++;
-                       }
+                       del_page_from_inactive_dirty_list(page);
+                       add_page_to_active_list(page);
                }
        }
        spin_unlock(&pagemap_lru_lock);
@@ -738,12 +745,13 @@
 {
        struct list_head * page_lru;
        struct page * page;
-       int maxscan;
+       int maxscan, page_active = 0;
        int ret = 0;
 
        /* Take the lock while messing with the list... */
        spin_lock(&pagemap_lru_lock);
        maxscan = nr_active_pages >> priority;
+       /* Then, look through the rest of the active pages. */
        while (maxscan-- > 0 && (page_lru = active_list.prev) != &active_list) {
                page = list_entry(page_lru, struct page, lru);
 
@@ -758,17 +766,17 @@
                /* Do aging on the pages. */
                if (PageTestandClearReferenced(page)) {
                        age_page_up_nolock(page);
-                       goto must_be_active;
+                       page_active = 1;
                } else {
                        age_page_down_nolock(page);
+                       page_active = 0;
                }
                /*
                 * If the page is still on the active list, move it
                 * to the other end of the list. Otherwise it was
                 * deactivated by age_page_down and we exit successfully.
                 */
-               if (PageActive(page)) {
-must_be_active:
+               if (page_active || PageActive(page)) {
                        list_del(page_lru);
                        list_add(page_lru, &active_list);
                } else {

-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
Please read the FAQ at http://www.tux.org/lkml/

Reply via email to