From: Suparna Bhattacharya <suparna@in.ibm.com>

Defines a new mmgrab() routine which increments mm_users only if the mm
isn't already going away.  Changes get_task_mm() and proc_pid_stat() to
call mmgrab() instead of a direct atomic_inc(&mm->mm_users).



 fs/proc/array.c       |    2 +-
 include/linux/sched.h |    4 +++-
 kernel/fork.c         |   17 +++++++++++++++++
 3 files changed, 21 insertions(+), 2 deletions(-)

diff -puN fs/proc/array.c~aio-mm-refcounting-fix fs/proc/array.c
--- 25/fs/proc/array.c~aio-mm-refcounting-fix	2003-08-16 13:54:16.000000000 -0700
+++ 25-akpm/fs/proc/array.c	2003-08-16 13:54:17.000000000 -0700
@@ -303,7 +303,7 @@ int proc_pid_stat(struct task_struct *ta
 	task_lock(task);
 	mm = task->mm;
 	if(mm)
-		atomic_inc(&mm->mm_users);
+		mm = mmgrab(mm);
 	if (task->tty) {
 		tty_pgrp = task->tty->pgrp;
 		tty_nr = task->tty->device;
diff -puN include/linux/sched.h~aio-mm-refcounting-fix include/linux/sched.h
--- 25/include/linux/sched.h~aio-mm-refcounting-fix	2003-08-16 13:54:16.000000000 -0700
+++ 25-akpm/include/linux/sched.h	2003-08-16 13:54:17.000000000 -0700
@@ -650,6 +650,8 @@ static inline void mmdrop(struct mm_stru
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
+/* Grab a reference to the mm if it's not already going away */
+extern struct mm_struct *mmgrab(struct mm_struct *);
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
 
@@ -757,7 +759,7 @@ static inline struct mm_struct * get_tas
 	task_lock(task);
 	mm = task->mm;
 	if (mm)
-		atomic_inc(&mm->mm_users);
+		mm = mmgrab(mm);
 	task_unlock(task);
 
 	return mm;
diff -puN kernel/fork.c~aio-mm-refcounting-fix kernel/fork.c
--- 25/kernel/fork.c~aio-mm-refcounting-fix	2003-08-16 13:54:16.000000000 -0700
+++ 25-akpm/kernel/fork.c	2003-08-16 13:54:17.000000000 -0700
@@ -398,6 +398,23 @@ void mmput(struct mm_struct *mm)
 	}
 }
 
+/*
+ * Checks if the use count of an mm is non-zero and if so
+ * returns a reference to it after bumping up the use count.
+ * If the use count is zero, it means this mm is going away,
+ * so return NULL.
+ */
+struct mm_struct *mmgrab(struct mm_struct *mm)
+{
+	spin_lock(&mmlist_lock);
+	if (!atomic_read(&mm->mm_users))
+		mm = NULL;
+	else
+		atomic_inc(&mm->mm_users);
+	spin_unlock(&mmlist_lock);
+	return mm;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.

_