From: Paul Jackson <[EMAIL PROTECTED]>

Some minor code cleanups for mm/mempolicy.c.

This patch should have been sent out before,
to be applied first to 2.6.23-mm1, -before-
another patch I sent out an hour ago:

    [RFC] cpuset relative memory policies - second choice

Signed-off-by: Paul Jackson <[EMAIL PROTECTED]>

---
 include/linux/mempolicy.h |    5 +++--
 mm/mempolicy.c            |   20 ++++++++++----------
 2 files changed, 13 insertions(+), 12 deletions(-)

--- 2.6.23-mm1.orig/include/linux/mempolicy.h   2007-10-30 03:29:37.000000000 -0700
+++ 2.6.23-mm1/include/linux/mempolicy.h        2007-10-30 04:14:58.000000000 -0700
@@ -23,7 +23,8 @@
 
 /* Flags for mbind */
 #define MPOL_MF_STRICT (1<<0)  /* Verify existing pages in the mapping */
-#define MPOL_MF_MOVE   (1<<1)  /* Move pages owned by this process to conform to mapping */
+#define MPOL_MF_MOVE   (1<<1)  /* Move pages owned by this process to conform
+                                                               to mapping */
 #define MPOL_MF_MOVE_ALL (1<<2)        /* Move every page to conform to mapping */
 #define MPOL_MF_INTERNAL (1<<3)        /* Internal flags start here */
 
@@ -48,7 +49,7 @@ struct mm_struct;
  * the process policy is used. Interrupts ignore the memory policy
  * of the current process.
  *
- * Locking policy for interlave:
+ * Locking policy for interleave:
  * In process context there is no locking because only the process accesses
  * its own state. All vma manipulation is somewhat protected by a down_read on
  * mmap_sem.
--- 2.6.23-mm1.orig/mm/mempolicy.c      2007-10-30 03:29:37.000000000 -0700
+++ 2.6.23-mm1/mm/mempolicy.c   2007-10-30 04:15:05.000000000 -0700
@@ -25,7 +25,7 @@
  *                to the last. It would be better if bind would truly restrict
  *                the allocation to memory nodes instead
  *
- * preferred       Try a specific node first before normal fallback.
+ * preferred      Try a specific node first before normal fallback.
  *                As a special case node -1 here means do the allocation
  *                on the local CPU. This is normally identical to default,
  *                but useful to set in a VMA when you have a non default
@@ -541,7 +541,7 @@ static long do_get_mempolicy(int *policy
                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
                        return -EINVAL;
                *policy = 0;    /* just so it's initialized */
-               *nmask  = cpuset_current_mems_allowed;
+               *nmask = cpuset_current_mems_allowed;
                return 0;
        }
 
@@ -688,7 +688,7 @@ int do_migrate_pages(struct mm_struct *m
 
        tmp = *from_nodes;
        while (!nodes_empty(tmp)) {
-               int s,d;
+               int s, d;
                int source = -1;
                int dest = 0;
 
@@ -783,6 +783,8 @@ static long do_mbind(unsigned long start
        if (mpol_check_policy(mode, nmask))
                return -EINVAL;
 
+       cpuset_update_task_memory_state();
+
        new = mpol_new(mode, nmask);
        if (IS_ERR(new))
                return PTR_ERR(new);
@@ -794,7 +796,7 @@ static long do_mbind(unsigned long start
        if (!new)
                flags |= MPOL_MF_DISCONTIG_OK;
 
-       pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
+       pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n", start, start+len,
                 mode, nmask ? nodes_addr(*nmask)[0] : -1);
 
        down_write(&mm->mmap_sem);
@@ -898,10 +900,8 @@ asmlinkage long sys_mbind(unsigned long 
        err = get_nodes(&nodes, nmask, maxnode);
        if (err)
                return err;
-#ifdef CONFIG_CPUSETS
        /* Restrict the nodes to the allowed nodes in the cpuset */
-       nodes_and(nodes, nodes, current->mems_allowed);
-#endif
+       nodes_and(nodes, nodes, cpuset_current_mems_allowed);
        return do_mbind(start, len, mode, &nodes, flags);
 }
 
@@ -1384,7 +1384,7 @@ EXPORT_SYMBOL(alloc_pages_current);
 
 /*
  * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
- * rebinds the mempolicy its copying by calling mpol_rebind_policy()
+ * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
  * with the mems_allowed returned by cpuset_mems_allowed().  This
  * keeps mempolicies cpuset relative after its cpuset moves.  See
  * further kernel/cpuset.c update_nodemask().
@@ -1741,9 +1741,9 @@ static void mpol_rebind_policy(struct me
        case MPOL_INTERLEAVE:
                nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
                pol->v.nodes = tmp;
-               *mpolmask = *newmask;
                current->il_next = node_remap(current->il_next,
                                                *mpolmask, *newmask);
+               *mpolmask = *newmask;
                break;
        case MPOL_PREFERRED:
                pol->v.preferred_node = node_remap(pol->v.preferred_node,
@@ -1763,7 +1763,7 @@ static void mpol_rebind_policy(struct me
 
                zonelist = bind_zonelist(&nodes);
 
-               /* If no mem, then zonelist is NULL and we keep old zonelist.
+               /* If no mem, then zonelist is -ENOMEM and we keep old zonelist.
                 * If that old zonelist has no remaining mems_allowed nodes,
                 * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
                 */

-- 
                          I won't rest till it's the best ...
                          Programmer, Linux Scalability
                          Paul Jackson <[EMAIL PROTECTED]> 1.650.933.1373
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to