From: David Howells <dhowells@redhat.com>

This patch updates the nommu procfs routines to match the nommu changes
made in patch 1/1.

This is mostly an exercise in structure renaming, plus handling the fact that
the system-wide set of VMAs is now kept in an rbtree (nommu_vma_tree) threaded
through vma->vm_rb rather than in a linked list.
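
For reference, the seq_file handlers below now use the standard rbtree
traversal idiom in place of the old list walk.  A minimal sketch of that
idiom, using only names that appear in the patch (kernel context with
linux/rbtree.h assumed):

	struct rb_node *_rb;
	struct vm_area_struct *vma;

	/* walk the global nommu VMA tree */
	for (_rb = rb_first(&nommu_vma_tree); _rb; _rb = rb_next(_rb)) {
		vma = rb_entry(_rb, struct vm_area_struct, vm_rb);
		/* ... emit one record per VMA ... */
	}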

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/fs/proc/nommu.c      |   29 ++++++++++--------------
 25-akpm/fs/proc/task_nommu.c |   50 ++++++++++++++++++++++---------------------
 2 files changed, 38 insertions(+), 41 deletions(-)

diff -puN fs/proc/nommu.c~further-nommu-proc-changes fs/proc/nommu.c
--- 25/fs/proc/nommu.c~further-nommu-proc-changes	2004-11-16 23:41:32.394986592 -0800
+++ 25-akpm/fs/proc/nommu.c	2004-11-16 23:41:32.400985680 -0800
@@ -38,33 +38,32 @@
  */
 static int nommu_vma_list_show(struct seq_file *m, void *v)
 {
-	struct vm_area_struct *map;
+	struct vm_area_struct *vma;
 	unsigned long ino = 0;
 	struct file *file;
 	dev_t dev = 0;
 	int flags, len;
 
-	map = list_entry((struct list_head *) v,
-			 struct vm_area_struct, vm_link);
+	vma = rb_entry((struct rb_node *) v, struct vm_area_struct, vm_rb);
 
-	flags = map->vm_flags;
-	file = map->vm_file;
+	flags = vma->vm_flags;
+	file = vma->vm_file;
 
 	if (file) {
-		struct inode *inode = map->vm_file->f_dentry->d_inode;
+		struct inode *inode = vma->vm_file->f_dentry->d_inode;
 		dev = inode->i_sb->s_dev;
 		ino = inode->i_ino;
 	}
 
 	seq_printf(m,
 		   "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
-		   map->vm_start,
-		   map->vm_end,
+		   vma->vm_start,
+		   vma->vm_end,
 		   flags & VM_READ ? 'r' : '-',
 		   flags & VM_WRITE ? 'w' : '-',
 		   flags & VM_EXEC ? 'x' : '-',
 		   flags & VM_MAYSHARE ? 's' : 'p',
-		   map->vm_pgoff << PAGE_SHIFT,
+		   vma->vm_pgoff << PAGE_SHIFT,
 		   MAJOR(dev), MINOR(dev), ino, &len);
 
 	if (file) {
@@ -81,15 +80,15 @@ static int nommu_vma_list_show(struct se
 
 static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos)
 {
-	struct list_head *_p;
+	struct rb_node *_rb;
 	loff_t pos = *_pos;
 	void *next = NULL;
 
 	down_read(&nommu_vma_sem);
 
-	list_for_each(_p, &nommu_vma_list) {
+	for (_rb = rb_first(&nommu_vma_tree); _rb; _rb = rb_next(_rb)) {
 		if (pos == 0) {
-			next = _p;
+			next = _rb;
 			break;
 		}
 	}
@@ -104,12 +103,8 @@ static void nommu_vma_list_stop(struct s
 
 static void *nommu_vma_list_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *_p = v;
-
 	(*pos)++;
-
-	_p = _p->next;
-	return (_p != &nommu_vma_list) ? _p : NULL;
+	return rb_next((struct rb_node *) v);
 }
 
 static struct seq_operations proc_nommu_vma_list_seqop = {
diff -puN fs/proc/task_nommu.c~further-nommu-proc-changes fs/proc/task_nommu.c
--- 25/fs/proc/task_nommu.c~further-nommu-proc-changes	2004-11-16 23:41:32.395986440 -0800
+++ 25-akpm/fs/proc/task_nommu.c	2004-11-16 23:41:32.401985528 -0800
@@ -13,23 +13,25 @@
  */
 char *task_mem(struct mm_struct *mm, char *buffer)
 {
+	struct vm_list_struct *vml;
 	unsigned long bytes = 0, sbytes = 0, slack = 0;
-	struct mm_tblock_struct *tblock;
         
 	down_read(&mm->mmap_sem);
-	for (tblock = mm->context.tblock; tblock; tblock = tblock->next) {
-		if (!tblock->vma)
+	for (vml = mm->context.vmlist; vml; vml = vml->next) {
+		if (!vml->vma)
 			continue;
-		bytes += kobjsize(tblock);
+
+		bytes += kobjsize(vml);
 		if (atomic_read(&mm->mm_count) > 1 ||
-		    atomic_read(&tblock->vma->vm_usage) > 1) {
-			sbytes += kobjsize((void *) tblock->vma->vm_start);
-			sbytes += kobjsize(tblock->vma);
+		    atomic_read(&vml->vma->vm_usage) > 1
+		    ) {
+			sbytes += kobjsize((void *) vml->vma->vm_start);
+			sbytes += kobjsize(vml->vma);
 		} else {
-			bytes += kobjsize((void *) tblock->vma->vm_start);
-			bytes += kobjsize(tblock->vma);
-			slack += kobjsize((void *) tblock->vma->vm_start) -
-				(tblock->vma->vm_end - tblock->vma->vm_start);
+			bytes += kobjsize((void *) vml->vma->vm_start);
+			bytes += kobjsize(vml->vma);
+			slack += kobjsize((void *) vml->vma->vm_start) -
+				(vml->vma->vm_end - vml->vma->vm_start);
 		}
 	}
 
@@ -67,11 +69,11 @@ char *task_mem(struct mm_struct *mm, cha
 
 unsigned long task_vsize(struct mm_struct *mm)
 {
-	struct mm_tblock_struct *tbp;
+	struct vm_list_struct *tbp;
 	unsigned long vsize = 0;
 
 	down_read(&mm->mmap_sem);
-	for (tbp = mm->context.tblock; tbp; tbp = tbp->next) {
+	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
 		if (tbp->vma)
 			vsize += kobjsize((void *) tbp->vma->vm_start);
 	}
@@ -82,11 +84,11 @@ unsigned long task_vsize(struct mm_struc
 int task_statm(struct mm_struct *mm, int *shared, int *text,
 	       int *data, int *resident)
 {
-	struct mm_tblock_struct *tbp;
+	struct vm_list_struct *tbp;
 	int size = kobjsize(mm);
 
 	down_read(&mm->mmap_sem);
-	for (tbp = mm->context.tblock; tbp; tbp = tbp->next) {
+	for (tbp = mm->context.vmlist; tbp; tbp = tbp->next) {
 		size += kobjsize(tbp);
 		if (tbp->vma) {
 			size += kobjsize(tbp->vma);
@@ -103,24 +105,24 @@ int task_statm(struct mm_struct *mm, int
 
 int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
 {
-	struct mm_tblock_struct *tblock;
-	struct vm_area_struct * vma;
-	int result = -ENOENT;
+	struct vm_list_struct *vml;
+	struct vm_area_struct *vma;
 	struct task_struct *task = proc_task(inode);
-	struct mm_struct * mm = get_task_mm(task);
+	struct mm_struct *mm = get_task_mm(task);
+	int result = -ENOENT;
 
 	if (!mm)
 		goto out;
 	down_read(&mm->mmap_sem);
 
-	tblock = mm->context.tblock;
+	vml = mm->context.vmlist;
 	vma = NULL;
-	while (tblock) {
-		if ((tblock->vma->vm_flags & VM_EXECUTABLE) && tblock->vma->vm_file) {
-			vma = tblock->vma;
+	while (vml) {
+		if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
+			vma = vml->vma;
 			break;
 		}
-		tblock = tblock->next;
+		vml = vml->next;
 	}
 
 	if (vma) {
_