From: Hugh Dickins <hugh@veritas.com>

Fix several points in objrmap's vma sorting:

1. It was adding all vmas, even private ones, to i_mmap_shared.
2. It was not quite sorting: the new vma must be inserted before the
   first vma with a vm_pgoff >= its own, so list_add_tail is needed in
   all cases (see the first sketch below).
3. If vm_pgoff is changed on a file vma (as in vma_merge and split_vma),
   we must unlink the vma from its list and relink it while holding
   i_shared_sem: add move_vma_start to do this (it holds page_table_lock
   too, as vma_merge did and split_vma did not; I think nothing needs
   that, so rip it out if you like, but my guess was that you'd prefer
   the extra safety).  The second sketch below shows the lock ordering.
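
For illustration, a minimal user-space sketch of the sorted insert that
__vma_link_file now does: the struct and list helpers are simplified
stand-ins for the kernel's <linux/list.h> and vm_area_struct, keeping
only vm_pgoff and the list linkage.  The point is that list_add_tail
inserts before the cursor, so one call covers both exits of the walk:
before the first vma with vm_pgoff >= the new one's if we broke out,
or before the head (i.e. appended at the tail) if the walk completed.

	#include <stdio.h>
	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	/* Insert new before pos: what the kernel's list_add_tail() does. */
	static void list_add_tail(struct list_head *new, struct list_head *pos)
	{
		new->prev = pos->prev;
		new->next = pos;
		pos->prev->next = new;
		pos->prev = new;
	}

	struct vma_stub {			/* stand-in for vm_area_struct */
		unsigned long vm_pgoff;
		struct list_head shared;
	};

	#define vma_of(p) \
		((struct vma_stub *)((char *)(p) - offsetof(struct vma_stub, shared)))

	/* Walk to the first entry with vm_pgoff >= ours and insert before
	 * it.  If the walk falls off the end, pos is the head itself and
	 * list_add_tail appends: one call covers both cases (fix 2 above). */
	static void insert_sorted(struct list_head *head, struct vma_stub *vma)
	{
		struct list_head *pos;

		for (pos = head->next; pos != head; pos = pos->next)
			if (vma_of(pos)->vm_pgoff >= vma->vm_pgoff)
				break;
		list_add_tail(&vma->shared, pos);
	}

	int main(void)
	{
		struct list_head head = { &head, &head };
		struct vma_stub a = { 4 }, b = { 1 }, c = { 2 };
		struct list_head *p;

		insert_sorted(&head, &a);
		insert_sorted(&head, &b);
		insert_sorted(&head, &c);
		for (p = head.next; p != &head; p = p->next)
			printf("%lu ", vma_of(p)->vm_pgoff);
		printf("\n");		/* prints: 1 2 4 */
		return 0;
	}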

Sorry, no, this doesn't magically make it all a hundred times faster.
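
Purely as illustration of the locking in point 3, a user-space sketch of
move_vma_start's lock ordering, with pthread mutexes standing in for
i_shared_sem (really a semaphore) and page_table_lock (really a
spinlock); the helper name is mine, not kernel API.  i_shared_sem can
sleep, so it is taken first; the list is only touched with both held.

	#include <pthread.h>

	static pthread_mutex_t i_shared_sem    = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Hypothetical stand-in for move_vma_start's lock choreography. */
	static void move_start_locking(int has_file)
	{
		if (has_file)
			pthread_mutex_lock(&i_shared_sem);  /* may sleep: take first */
		pthread_mutex_lock(&page_table_lock);       /* "spinlock": take last */

		/* unlink from i_mmap{,_shared}, adjust vm_pgoff and vm_start,
		 * relink in sorted position: all done under both locks */

		if (has_file)
			pthread_mutex_unlock(&i_shared_sem); /* released before the "spinlock",
							      * as the patch does */
		pthread_mutex_unlock(&page_table_lock);
	}

	int main(void)
	{
		move_start_locking(1);
		return 0;
	}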



 mm/mmap.c |   40 +++++++++++++++++++++++++++-------------
 1 files changed, 27 insertions(+), 13 deletions(-)

diff -puN mm/mmap.c~objrmap-vma-sorting-fix mm/mmap.c
--- 25/mm/mmap.c~objrmap-vma-sorting-fix	2003-04-13 16:55:36.000000000 -0700
+++ 25-akpm/mm/mmap.c	2003-04-13 16:55:36.000000000 -0700
@@ -321,16 +321,13 @@ static inline void __vma_link_file(struc
 		else
 			vmhead = &mapping->i_mmap;
 
-		list_for_each(vmlist, &mapping->i_mmap_shared) {
+		list_for_each(vmlist, vmhead) {
 			struct vm_area_struct *vmtemp;
 			vmtemp = list_entry(vmlist, struct vm_area_struct, shared);
 			if (vmtemp->vm_pgoff >= vma->vm_pgoff)
 				break;
 		}
-		if (vmlist == vmhead)
-			list_add_tail(&vma->shared, vmlist);
-		else
-			list_add(&vma->shared, vmlist);
+		list_add_tail(&vma->shared, vmlist);
 	}
 }
 
@@ -366,6 +363,28 @@ static void vma_link(struct mm_struct *m
 	validate_mm(mm);
 }
 
+static void move_vma_start(struct vm_area_struct *vma, unsigned long addr)
+{
+	spinlock_t *lock = &vma->vm_mm->page_table_lock;
+	struct inode *inode = NULL;
+	
+	if (vma->vm_file) {
+		inode = vma->vm_file->f_dentry->d_inode;
+		down(&inode->i_mapping->i_shared_sem);
+	}
+	spin_lock(lock);
+	if (inode)
+		__remove_shared_vm_struct(vma, inode);
+	/* If no vm_file, perhaps we should always keep vm_pgoff at 0?? */
+	vma->vm_pgoff += (long)(addr - vma->vm_start) >> PAGE_SHIFT;
+	vma->vm_start = addr;
+	if (inode) {
+		__vma_link_file(vma);
+		up(&inode->i_mapping->i_shared_sem);
+	}
+	spin_unlock(lock);
+}
+
 /*
  * Return true if we can merge this (vm_flags,file,vm_pgoff,size)
  * in front of (at a lower virtual address and file offset than) the vma.
@@ -422,8 +441,6 @@ static int vma_merge(struct mm_struct *m
 			unsigned long end, unsigned long vm_flags,
 			struct file *file, unsigned long pgoff)
 {
-	spinlock_t * lock = &mm->page_table_lock;
-
 	if (!prev) {
 		prev = rb_entry(rb_parent, struct vm_area_struct, vm_rb);
 		goto merge_next;
@@ -435,6 +452,7 @@ static int vma_merge(struct mm_struct *m
 	if (prev->vm_end == addr &&
 			can_vma_merge_after(prev, vm_flags, file, pgoff)) {
 		struct vm_area_struct *next;
+		spinlock_t *lock = &mm->page_table_lock;
 		struct inode *inode = file ? file->f_dentry->d_inode : NULL;
 		int need_up = 0;
 
@@ -480,10 +498,7 @@ static int vma_merge(struct mm_struct *m
 				pgoff, (end - addr) >> PAGE_SHIFT))
 			return 0;
 		if (end == prev->vm_start) {
-			spin_lock(lock);
-			prev->vm_start = addr;
-			prev->vm_pgoff -= (end - addr) >> PAGE_SHIFT;
-			spin_unlock(lock);
+			move_vma_start(prev, addr);
 			return 1;
 		}
 	}
@@ -1203,8 +1218,7 @@ int split_vma(struct mm_struct * mm, str
 
 	if (new_below) {
 		new->vm_end = addr;
-		vma->vm_start = addr;
-		vma->vm_pgoff += ((addr - new->vm_start) >> PAGE_SHIFT);
+		move_vma_start(vma, addr);
 	} else {
 		vma->vm_end = addr;
 		new->vm_start = addr;

_