Fwiw:
- vmlist_modify_lock/unlock are turned into spin_lock/unlock;
- ditto for vmlist_access_lock/unlock;
- mm->rss becomes an atomic_t (gross); see the sketch below.
Excuse the incorrect In-Reply-To field (my mail archive for the day is
currently unavailable).
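
For reference, the two conversions as a minimal sketch (illustration only,
not part of the patch below):

	/* Before: vmlist wrapper macros and a plain counter. */
	vmlist_modify_lock(mm);		/* expands to spin_lock(&mm->page_table_lock) */
	insert_vm_struct(mm, vma);
	vmlist_modify_unlock(mm);
	mm->rss++;			/* unsigned long; racy if updated outside the lock */

	/* After: open-coded spinlock calls and atomic counter ops. */
	spin_lock(&mm->page_table_lock);
	insert_vm_struct(mm, vma);
	spin_unlock(&mm->page_table_lock);
	atomic_inc(&mm->rss);		/* no lock needed for the counter itself */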
diff -u --recursive linux-2.4.0-test10-pre2.orig/fs/binfmt_aout.c linux-2.4.0-test10-pre2/fs/binfmt_aout.c
--- linux-2.4.0-test10-pre2.orig/fs/binfmt_aout.c Fri Jul 7 22:54:01 2000
+++ linux-2.4.0-test10-pre2/fs/binfmt_aout.c Fri Oct 13 16:45:45 2000
@@ -301,7 +301,7 @@
current->mm->brk = ex.a_bss +
(current->mm->start_brk = N_BSSADDR(ex));
- current->mm->rss = 0;
+ atomic_set(&current->mm->rss, 0);
current->mm->mmap = NULL;
compute_creds(bprm);
current->flags &= ~PF_FORKNOEXEC;
diff -u --recursive linux-2.4.0-test10-pre2.orig/fs/binfmt_elf.c linux-2.4.0-test10-pre2/fs/binfmt_elf.c
--- linux-2.4.0-test10-pre2.orig/fs/binfmt_elf.c Tue Sep 26 10:41:47 2000
+++ linux-2.4.0-test10-pre2/fs/binfmt_elf.c Fri Oct 13 16:40:34 2000
@@ -586,7 +586,7 @@
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
- current->mm->rss = 0;
+ atomic_set(&current->mm->rss, 0);
setup_arg_pages(bprm); /* XXX: check error */
current->mm->start_stack = bprm->p;
diff -u --recursive linux-2.4.0-test10-pre2.orig/fs/exec.c linux-2.4.0-test10-pre2/fs/exec.c
--- linux-2.4.0-test10-pre2.orig/fs/exec.c Tue Sep 26 10:41:47 2000
+++ linux-2.4.0-test10-pre2/fs/exec.c Fri Oct 13 16:39:22 2000
@@ -314,9 +314,9 @@
mpnt->vm_pgoff = 0;
mpnt->vm_file = NULL;
mpnt->vm_private_data = (void *) 0;
- vmlist_modify_lock(current->mm);
+ spin_lock(&current->mm->page_table_lock);
insert_vm_struct(current->mm, mpnt);
- vmlist_modify_unlock(current->mm);
+ spin_unlock(&current->mm->page_table_lock);
current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
}
@@ -324,7 +324,7 @@
struct page *page = bprm->page[i];
if (page) {
bprm->page[i] = NULL;
- current->mm->rss++;
+ atomic_inc(&current->mm->rss);
put_dirty_page(current,page,stack_base);
}
stack_base += PAGE_SIZE;
diff -u --recursive linux-2.4.0-test10-pre2.orig/fs/proc/array.c linux-2.4.0-test10-pre2/fs/proc/array.c
--- linux-2.4.0-test10-pre2.orig/fs/proc/array.c Fri Oct 13 09:58:47 2000
+++ linux-2.4.0-test10-pre2/fs/proc/array.c Fri Oct 13 16:37:47 2000
@@ -202,14 +202,14 @@
buffer += sprintf(buffer,
"VmSize:\t%8lu kB\n"
"VmLck:\t%8lu kB\n"
- "VmRSS:\t%8lu kB\n"
+ "VmRSS:\t%8u kB\n"
"VmData:\t%8lu kB\n"
"VmStk:\t%8lu kB\n"
"VmExe:\t%8lu kB\n"
"VmLib:\t%8lu kB\n",
mm->total_vm << (PAGE_SHIFT-10),
mm->locked_vm << (PAGE_SHIFT-10),
- mm->rss << (PAGE_SHIFT-10),
+ atomic_read(&mm->rss) << (PAGE_SHIFT-10),
data - stack, stack,
exec - lib, lib);
up(&mm->mmap_sem);
@@ -371,7 +371,8 @@
task->it_real_value,
task->start_time,
vsize,
- mm ? mm->rss : 0, /* you might want to shift this left 3 */
+ /* you might want to shift this left 3 */
+ mm ? atomic_read(&mm->rss) : 0,
task->rlim ? task->rlim[RLIMIT_RSS].rlim_cur : 0,
mm ? mm->start_code : 0,
mm ? mm->end_code : 0,
diff -u --recursive linux-2.4.0-test10-pre2.orig/include/linux/mm.h linux-2.4.0-test10-pre2/include/linux/mm.h
--- linux-2.4.0-test10-pre2.orig/include/linux/mm.h Tue Oct 3 20:16:16 2000
+++ linux-2.4.0-test10-pre2/include/linux/mm.h Fri Oct 13 10:54:19 2000
@@ -527,11 +527,6 @@
#define pgcache_under_min() (atomic_read(&page_cache_size) * 100 < \
page_cache.min_percent * num_physpages)
-#define vmlist_access_lock(mm) spin_lock(&mm->page_table_lock)
-#define vmlist_access_unlock(mm) spin_unlock(&mm->page_table_lock)
-#define vmlist_modify_lock(mm) vmlist_access_lock(mm)
-#define vmlist_modify_unlock(mm) vmlist_access_unlock(mm)
-
#endif /* __KERNEL__ */
#endif
diff -u --recursive linux-2.4.0-test10-pre2.orig/include/linux/sched.h linux-2.4.0-test10-pre2/include/linux/sched.h
--- linux-2.4.0-test10-pre2.orig/include/linux/sched.h Fri Oct 13 09:58:47 2000
+++ linux-2.4.0-test10-pre2/include/linux/sched.h Fri Oct 13 10:19:37 2000
@@ -207,7 +207,8 @@
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
unsigned long arg_start, arg_end, env_start, env_end;
- unsigned long rss, total_vm, locked_vm;
+ atomic_t rss;
+ unsigned long total_vm, locked_vm;
unsigned long def_flags;
unsigned long cpu_vm_mask;
unsigned long swap_cnt; /* number of pages to swap on next pass */
diff -u --recursive linux-2.4.0-test10-pre2.orig/mm/filemap.c linux-2.4.0-test10-pre2/mm/filemap.c
--- linux-2.4.0-test10-pre2.orig/mm/filemap.c Tue Oct 3 20:16:16 2000
+++ linux-2.4.0-test10-pre2/mm/filemap.c Fri Oct 13 16:30:50 2000
@@ -1766,11 +1766,11 @@
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
- vmlist_modify_lock(vma->vm_mm);
+ spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
vma->vm_start = end;
insert_vm_struct(current->mm, n);
- vmlist_modify_unlock(vma->vm_mm);
+ spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
@@ -1790,10 +1790,10 @@
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
- vmlist_modify_lock(vma->vm_mm);
+ spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_end = start;
insert_vm_struct(current->mm, n);
- vmlist_modify_unlock(vma->vm_mm);
+ spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
@@ -1823,7 +1823,7 @@
vma->vm_ops->open(left);
vma->vm_ops->open(right);
}
- vmlist_modify_lock(vma->vm_mm);
+ spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
vma->vm_start = start;
vma->vm_end = end;
@@ -1831,7 +1831,7 @@
vma->vm_raend = 0;
insert_vm_struct(current->mm, left);
insert_vm_struct(current->mm, right);
- vmlist_modify_unlock(vma->vm_mm);
+ spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
@@ -1891,7 +1891,7 @@
error = -EIO;
rlim_rss = current->rlim ? current->rlim[RLIMIT_RSS].rlim_cur :
LONG_MAX; /* default: see resource.h */
- if ((vma->vm_mm->rss + (end - start)) > rlim_rss)
+ if ((atomic_read(&vma->vm_mm->rss) + (end - start)) > rlim_rss)
return error;
/* round to cluster boundaries if this isn't a "random" area. */
diff -u --recursive linux-2.4.0-test10-pre2.orig/mm/memory.c linux-2.4.0-test10-pre2/mm/memory.c
--- linux-2.4.0-test10-pre2.orig/mm/memory.c Tue Oct 3 20:16:16 2000
+++ linux-2.4.0-test10-pre2/mm/memory.c Fri Oct 13 16:26:26 2000
@@ -370,15 +370,13 @@
address = (address + PGDIR_SIZE) & PGDIR_MASK;
dir++;
} while (address && (address < end));
- spin_unlock(&mm->page_table_lock);
/*
* Update rss for the mm_struct (not necessarily current->mm)
- * Notice that rss is an unsigned long.
+ * Notice that rss is an atomic_t.
*/
- if (mm->rss > freed)
- mm->rss -= freed;
- else
- mm->rss = 0;
+ if (atomic_read(&mm->rss) > freed)
+ atomic_sub(freed, &mm->rss);
+ else
+ atomic_set(&mm->rss, 0);
+ spin_unlock(&mm->page_table_lock);
}
@@ -869,7 +867,7 @@
*/
if (pte_val(*page_table) == pte_val(pte)) {
if (PageReserved(old_page))
- ++mm->rss;
+ atomic_inc(&mm->rss);
break_cow(vma, old_page, new_page, address, page_table);
/* Free the old page.. */
@@ -1074,7 +1072,7 @@
flush_icache_page(vma, page);
}
- mm->rss++;
+ atomic_inc(&mm->rss);
pte = mk_pte(page, vma->vm_page_prot);
@@ -1113,7 +1111,7 @@
return -1;
clear_user_highpage(page, addr);
entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
- mm->rss++;
+ atomic_inc(&mm->rss);
flush_page_to_ram(page);
}
set_pte(page_table, entry);
@@ -1152,7 +1150,7 @@
return 0;
if (new_page == NOPAGE_OOM)
return -1;
- ++mm->rss;
+ atomic_inc(&mm->rss);
/*
* This silly early PAGE_DIRTY setting removes a race
* due to the bad i386 page protection. But it's valid
diff -u --recursive linux-2.4.0-test10-pre2.orig/mm/mlock.c linux-2.4.0-test10-pre2/mm/mlock.c
--- linux-2.4.0-test10-pre2.orig/mm/mlock.c Wed Mar 15 01:45:20 2000
+++ linux-2.4.0-test10-pre2/mm/mlock.c Fri Oct 13 11:06:06 2000
@@ -14,9 +14,9 @@
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
{
- vmlist_modify_lock(vma->vm_mm);
+ spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_flags = newflags;
- vmlist_modify_unlock(vma->vm_mm);
+ spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
@@ -36,11 +36,11 @@
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
- vmlist_modify_lock(vma->vm_mm);
+ spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
vma->vm_start = end;
insert_vm_struct(current->mm, n);
- vmlist_modify_unlock(vma->vm_mm);
+ spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
@@ -61,10 +61,10 @@
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
- vmlist_modify_lock(vma->vm_mm);
+ spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_end = start;
insert_vm_struct(current->mm, n);
- vmlist_modify_unlock(vma->vm_mm);
+ spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
@@ -96,7 +96,7 @@
vma->vm_ops->open(left);
vma->vm_ops->open(right);
}
- vmlist_modify_lock(vma->vm_mm);
+ spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
vma->vm_start = start;
vma->vm_end = end;
@@ -104,7 +104,7 @@
vma->vm_raend = 0;
insert_vm_struct(current->mm, left);
insert_vm_struct(current->mm, right);
- vmlist_modify_unlock(vma->vm_mm);
+ spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
@@ -183,9 +183,9 @@
break;
}
}
- vmlist_modify_lock(current->mm);
+ spin_lock(&current->mm->page_table_lock);
merge_segments(current->mm, start, end);
- vmlist_modify_unlock(current->mm);
+ spin_unlock(&current->mm->page_table_lock);
return error;
}
@@ -257,9 +257,9 @@
if (error)
break;
}
- vmlist_modify_lock(current->mm);
+ spin_lock(&current->mm->page_table_lock);
merge_segments(current->mm, 0, TASK_SIZE);
- vmlist_modify_unlock(current->mm);
+ spin_unlock(&current->mm->page_table_lock);
return error;
}
diff -u --recursive linux-2.4.0-test10-pre2.orig/mm/mmap.c linux-2.4.0-test10-pre2/mm/mmap.c
--- linux-2.4.0-test10-pre2.orig/mm/mmap.c Fri Oct 13 09:58:47 2000
+++ linux-2.4.0-test10-pre2/mm/mmap.c Fri Oct 13 16:27:27 2000
@@ -317,12 +317,12 @@
*/
flags = vma->vm_flags;
addr = vma->vm_start; /* can addr have changed?? */
- vmlist_modify_lock(mm);
+ spin_lock(&mm->page_table_lock);
insert_vm_struct(mm, vma);
if (correct_wcount)
atomic_inc(&file->f_dentry->d_inode->i_writecount);
merge_segments(mm, vma->vm_start, vma->vm_end);
- vmlist_modify_unlock(mm);
+ spin_unlock(&mm->page_table_lock);
mm->total_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED) {
@@ -534,11 +534,11 @@
/* Work out to one of the ends. */
if (end == area->vm_end) {
area->vm_end = addr;
- vmlist_modify_lock(mm);
+ spin_lock(&mm->page_table_lock);
} else if (addr == area->vm_start) {
area->vm_pgoff += (end - area->vm_start) >> PAGE_SHIFT;
area->vm_start = end;
- vmlist_modify_lock(mm);
+ spin_lock(&mm->page_table_lock);
} else {
/* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
/* Add end mapping -- leave beginning for below */
@@ -560,12 +560,12 @@
if (mpnt->vm_ops && mpnt->vm_ops->open)
mpnt->vm_ops->open(mpnt);
area->vm_end = addr; /* Truncate area */
- vmlist_modify_lock(mm);
+ spin_lock(&mm->page_table_lock);
insert_vm_struct(mm, mpnt);
}
insert_vm_struct(mm, area);
- vmlist_modify_unlock(mm);
+ spin_unlock(&mm->page_table_lock);
return extra;
}
@@ -670,7 +670,7 @@
npp = (prev ? &prev->vm_next : &mm->mmap);
free = NULL;
- vmlist_modify_lock(mm);
+ spin_lock(&mm->page_table_lock);
for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
*npp = mpnt->vm_next;
mpnt->vm_next = free;
@@ -679,7 +679,7 @@
avl_remove(mpnt, &mm->mmap_avl);
}
mm->mmap_cache = NULL; /* Kill the cache. */
- vmlist_modify_unlock(mm);
+ spin_unlock(&mm->page_table_lock);
/* Ok - we have the memory areas we should free on the 'free' list,
* so release them, and unmap the page range..
@@ -811,10 +811,10 @@
flags = vma->vm_flags;
addr = vma->vm_start;
- vmlist_modify_lock(mm);
+ spin_lock(&mm->page_table_lock);
insert_vm_struct(mm, vma);
merge_segments(mm, vma->vm_start, vma->vm_end);
- vmlist_modify_unlock(mm);
+ spin_unlock(&mm->page_table_lock);
mm->total_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED) {
@@ -840,11 +840,11 @@
struct vm_area_struct * mpnt;
release_segments(mm);
- vmlist_modify_lock(mm);
+ spin_lock(&mm->page_table_lock);
mpnt = mm->mmap;
mm->mmap = mm->mmap_avl = mm->mmap_cache = NULL;
- vmlist_modify_unlock(mm);
- mm->rss = 0;
+ spin_unlock(&mm->page_table_lock);
+ atomic_set(&mm->rss, 0);
mm->total_vm = 0;
mm->locked_vm = 0;
while (mpnt) {
@@ -985,9 +985,9 @@
if (mpnt->vm_ops && mpnt->vm_ops->close) {
mpnt->vm_pgoff += (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
mpnt->vm_start = mpnt->vm_end;
- vmlist_modify_unlock(mm);
+ spin_unlock(&mm->page_table_lock);
mpnt->vm_ops->close(mpnt);
- vmlist_modify_lock(mm);
+ spin_lock(&mm->page_table_lock);
}
mm->map_count--;
remove_shared_vm_struct(mpnt);
diff -u --recursive linux-2.4.0-test10-pre2.orig/mm/mprotect.c linux-2.4.0-test10-pre2/mm/mprotect.c
--- linux-2.4.0-test10-pre2.orig/mm/mprotect.c Wed Mar 15 01:45:21 2000
+++ linux-2.4.0-test10-pre2/mm/mprotect.c Fri Oct 13 11:06:06 2000
@@ -86,10 +86,10 @@
static inline int mprotect_fixup_all(struct vm_area_struct * vma,
int newflags, pgprot_t prot)
{
- vmlist_modify_lock(vma->vm_mm);
+ spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_flags = newflags;
vma->vm_page_prot = prot;
- vmlist_modify_unlock(vma->vm_mm);
+ spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
@@ -111,11 +111,11 @@
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
- vmlist_modify_lock(vma->vm_mm);
+ spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
vma->vm_start = end;
insert_vm_struct(current->mm, n);
- vmlist_modify_unlock(vma->vm_mm);
+ spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
@@ -138,10 +138,10 @@
get_file(n->vm_file);
if (n->vm_ops && n->vm_ops->open)
n->vm_ops->open(n);
- vmlist_modify_lock(vma->vm_mm);
+ spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_end = start;
insert_vm_struct(current->mm, n);
- vmlist_modify_unlock(vma->vm_mm);
+ spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
@@ -172,7 +172,7 @@
vma->vm_ops->open(left);
vma->vm_ops->open(right);
}
- vmlist_modify_lock(vma->vm_mm);
+ spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
vma->vm_start = start;
vma->vm_end = end;
@@ -181,7 +181,7 @@
vma->vm_page_prot = prot;
insert_vm_struct(current->mm, left);
insert_vm_struct(current->mm, right);
- vmlist_modify_unlock(vma->vm_mm);
+ spin_unlock(&vma->vm_mm->page_table_lock);
return 0;
}
@@ -263,9 +263,9 @@
break;
}
}
- vmlist_modify_lock(current->mm);
+ spin_lock(&current->mm->page_table_lock);
merge_segments(current->mm, start, end);
- vmlist_modify_unlock(current->mm);
+ spin_unlock(&current->mm->page_table_lock);
out:
up(&current->mm->mmap_sem);
return error;
diff -u --recursive linux-2.4.0-test10-pre2.orig/mm/mremap.c linux-2.4.0-test10-pre2/mm/mremap.c
--- linux-2.4.0-test10-pre2.orig/mm/mremap.c Tue Oct 3 20:16:16 2000
+++ linux-2.4.0-test10-pre2/mm/mremap.c Fri Oct 13 11:06:06 2000
@@ -141,10 +141,10 @@
get_file(new_vma->vm_file);
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
- vmlist_modify_lock(current->mm);
+ spin_lock(&current->mm->page_table_lock);
insert_vm_struct(current->mm, new_vma);
merge_segments(current->mm, new_vma->vm_start, new_vma->vm_end);
- vmlist_modify_unlock(current->mm);
+ spin_unlock(&current->mm->page_table_lock);
do_munmap(current->mm, addr, old_len);
current->mm->total_vm += new_len >> PAGE_SHIFT;
if (new_vma->vm_flags & VM_LOCKED) {
@@ -258,9 +258,9 @@
/* can we just expand the current mapping? */
if (max_addr - addr >= new_len) {
int pages = (new_len - old_len) >> PAGE_SHIFT;
- vmlist_modify_lock(vma->vm_mm);
+ spin_lock(&vma->vm_mm->page_table_lock);
vma->vm_end = addr + new_len;
- vmlist_modify_unlock(vma->vm_mm);
+ spin_unlock(&vma->vm_mm->page_table_lock);
current->mm->total_vm += pages;
if (vma->vm_flags & VM_LOCKED) {
current->mm->locked_vm += pages;
diff -u --recursive linux-2.4.0-test10-pre2.orig/mm/swapfile.c linux-2.4.0-test10-pre2/mm/swapfile.c
--- linux-2.4.0-test10-pre2.orig/mm/swapfile.c Tue Aug 8 04:01:36 2000
+++ linux-2.4.0-test10-pre2/mm/swapfile.c Fri Oct 13 16:34:38 2000
@@ -231,7 +231,7 @@
set_pte(dir, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
swap_free(entry);
get_page(page);
- ++vma->vm_mm->rss;
+ atomic_inc(&vma->vm_mm->rss);
}
static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
@@ -315,12 +315,12 @@
*/
if (!mm)
return;
- vmlist_access_lock(mm);
+ spin_lock(&mm->page_table_lock);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
pgd_t * pgd = pgd_offset(mm, vma->vm_start);
unuse_vma(vma, pgd, entry, page);
}
- vmlist_access_unlock(mm);
+ spin_unlock(&mm->page_table_lock);
return;
}
diff -u --recursive linux-2.4.0-test10-pre2.orig/mm/vmscan.c linux-2.4.0-test10-pre2/mm/vmscan.c
--- linux-2.4.0-test10-pre2.orig/mm/vmscan.c Fri Oct 13 09:58:47 2000
+++ linux-2.4.0-test10-pre2/mm/vmscan.c Fri Oct 13 16:33:17 2000
@@ -102,7 +102,7 @@
set_pte(page_table, swp_entry_to_pte(entry));
drop_pte:
UnlockPage(page);
- mm->rss--;
+ atomic_dec(&mm->rss);
flush_tlb_page(vma, address);
deactivate_page(page);
page_cache_release(page);
@@ -170,9 +170,9 @@
struct file *file = vma->vm_file;
if (file) get_file(file);
pte_clear(page_table);
- mm->rss--;
+ atomic_dec(&mm->rss);
flush_tlb_page(vma, address);
- vmlist_access_unlock(mm);
+ spin_unlock(&mm->page_table_lock);
error = swapout(page, file);
UnlockPage(page);
if (file) fput(file);
@@ -202,10 +202,10 @@
add_to_swap_cache(page, entry);
/* Put the swap entry into the pte after the page is in swapcache */
- mm->rss--;
+ atomic_dec(&mm->rss);
set_pte(page_table, swp_entry_to_pte(entry));
flush_tlb_page(vma, address);
- vmlist_access_unlock(mm);
+ spin_unlock(&mm->page_table_lock);
/* OK, do a physical asynchronous write to swap. */
rw_swap_page(WRITE, page, 0);
@@ -341,7 +341,7 @@
* Find the proper vm-area after freezing the vma chain
* and ptes.
*/
- vmlist_access_lock(mm);
+ spin_lock(&mm->page_table_lock);
vma = find_vma(mm, address);
if (vma) {
if (address < vma->vm_start)
@@ -364,7 +364,7 @@
mm->swap_cnt = 0;
out_unlock:
- vmlist_access_unlock(mm);
+ spin_unlock(&mm->page_table_lock);
/* We didn't find anything for the process */
return 0;
@@ -416,7 +416,7 @@
struct mm_struct *mm = p->mm;
if (!p->swappable || !mm)
continue;
- if (mm->rss <= 0)
+ if (atomic_read(&mm->rss) <= 0)
continue;
/* Skip tasks which haven't slept long enough yet when
idle-swapping. */
if (idle_time && !assign && (!(p->state & TASK_INTERRUPTIBLE) ||
@@ -425,7 +425,7 @@
found_task++;
/* Refresh swap_cnt? */
if (assign == 1) {
- mm->swap_cnt = (mm->rss >> SWAP_SHIFT);
+ mm->swap_cnt = atomic_read(&mm->rss) >> SWAP_SHIFT;
if (mm->swap_cnt < SWAP_MIN)
mm->swap_cnt = SWAP_MIN;
}
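
Note on the rss accounting in mm/memory.c above: atomic_t has no
subtract-and-clamp primitive, so the update reads, compares and clamps by
hand while mm->page_table_lock is still held. The same pattern as a
standalone sketch (mm_rss_sub_clamped is a hypothetical name, nothing the
patch itself adds):

	/* Subtract 'freed' pages from mm->rss without going below zero.
	 * Caller holds mm->page_table_lock, as in the mm/memory.c hunk. */
	static inline void mm_rss_sub_clamped(struct mm_struct *mm, int freed)
	{
		if (atomic_read(&mm->rss) > freed)
			atomic_sub(freed, &mm->rss);
		else
			atomic_set(&mm->rss, 0);
	}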
--
Ueimor