This needed highmem fix from Rik is still missing too, so please apply
it along with the other five (it's orthogonal, so you can apply this one
in any order you want).

From: Rik van Riel <[EMAIL PROTECTED]>
Subject: [PATCH][1/2] adjust dirty threshold for lowmem-only mappings

Simply running "dd if=/dev/zero of=/dev/hd<one you can miss>" will
result in OOM kills, with the dirty pagecache completely filling up
lowmem.  This patch is part 1 of the fix for that problem.

This patch effectively lowers the dirty limit for mappings that cannot
be cached in highmem, computing the dirty limit as a percentage of
lowmem instead.  This should prevent heavy block device writers from
pushing the VM over the edge and triggering OOM kills.
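
As a rough standalone sketch of the resulting arithmetic (plain C, with
the kernel globals total_pages, totalhigh_pages and vm_dirty_ratio
mocked up as parameters, and the kernel's further clamps omitted; this
is illustrative only, not the in-tree code):

	/*
	 * Illustration of the adjusted dirty-limit arithmetic.  The
	 * kernel applies additional clamps (minimum dirty ratio,
	 * PF_LESS_THROTTLE boost) that are omitted here for brevity.
	 */
	#include <stdio.h>

	static long dirty_limit(unsigned long total_pages,
				unsigned long totalhigh_pages,
				unsigned long nr_mapped,
				int vm_dirty_ratio,
				int mapping_can_use_highmem)
	{
		unsigned long available_memory = total_pages;
		int unmapped_ratio, dirty_ratio;

		/* Lowmem-only mappings must not count highmem. */
		if (!mapping_can_use_highmem)
			available_memory -= totalhigh_pages;

		unmapped_ratio = 100 - (nr_mapped * 100) / available_memory;

		/* Never let more than half the unmapped memory get dirty. */
		dirty_ratio = vm_dirty_ratio;
		if (dirty_ratio > unmapped_ratio / 2)
			dirty_ratio = unmapped_ratio / 2;

		return (dirty_ratio * (long)available_memory) / 100;
	}

	int main(void)
	{
		/* 4GB of 4k pages, ~3.1GB of it highmem, ~10% mapped. */
		unsigned long total = 1048576, high = 819200, mapped = 104857;

		printf("highmem-capable mapping: %ld pages\n",
		       dirty_limit(total, high, mapped, 40, 1));
		printf("lowmem-only mapping:     %ld pages\n",
		       dirty_limit(total, high, mapped, 40, 0));
		return 0;
	}

With these example numbers the limit for a lowmem-only mapping drops
from 419430 pages (~1.6GB) to 61931 pages (~242MB).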

Signed-off-by: Rik van Riel <[EMAIL PROTECTED]>
Acked-by: Andrea Arcangeli <[EMAIL PROTECTED]>

--- x/mm/page-writeback.c.orig  2005-01-04 01:13:30.000000000 +0100
+++ x/mm/page-writeback.c       2005-01-04 02:41:29.573177184 +0100
@@ -133,7 +133,8 @@ static void get_writeback_state(struct w
  * clamping level.
  */
 static void
-get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty)
+get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
+                struct address_space *mapping)
 {
        int background_ratio;           /* Percentages */
        int dirty_ratio;
@@ -141,10 +142,20 @@ get_dirty_limits(struct writeback_state 
        long background;
        long dirty;
        struct task_struct *tsk;
+       unsigned long available_memory = total_pages;
 
        get_writeback_state(wbs);
 
-       unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;
+#ifdef CONFIG_HIGHMEM
+       /*
+        * In some cases we can only allocate from low memory,
+        * so we exclude high memory from our count.
+        */
+       if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
+               available_memory -= totalhigh_pages;
+#endif
+
+       unmapped_ratio = 100 - (wbs->nr_mapped * 100) / available_memory;
 
        dirty_ratio = vm_dirty_ratio;
        if (dirty_ratio > unmapped_ratio / 2)
@@ -194,7 +205,7 @@ static void balance_dirty_pages(struct a
                        .nr_to_write    = write_chunk,
                };
 
-               get_dirty_limits(&wbs, &background_thresh, &dirty_thresh);
+               get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, mapping);
                nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
                if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                        break;
@@ -210,7 +221,7 @@ static void balance_dirty_pages(struct a
                if (nr_reclaimable) {
                        writeback_inodes(&wbc);
                        get_dirty_limits(&wbs, &background_thresh,
-                                       &dirty_thresh);
+                                       &dirty_thresh, mapping);
                        nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
                        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                                break;
@@ -296,7 +307,7 @@ static void background_writeout(unsigned
                long background_thresh;
                long dirty_thresh;
 
-               get_dirty_limits(&wbs, &background_thresh, &dirty_thresh);
+               get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
                if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
                                && min_pages <= 0)
                        break;
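
Note that background_writeout() passes NULL for the mapping: background
writeback is not done on behalf of any particular mapping, so its
thresholds remain computed against all of memory.  Only callers
throttling a specific lowmem-only mapping see the reduced limit.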