From: "Chen, Kenneth W" <kenneth.w.chen@intel.com>

Patch avoiding-mmap-fragmentation.patch proposed in 2.6.12-rc4-mm2 tries to
solve a VM address space fragmentation issue for 32-bit apps, where the
free_area_cache algorithm leads to inefficient use of the vm address space. 
However, the issue is restricted to 32-bit apps.  This patch reverses
the unnecessary changes made in arch/ia64 and makes x86_64 use the
proposed cache algorithm only for 32-bit apps.

Signed-off-by: Ken Chen <kenneth.w.chen@intel.com>
Cc: Wolfgang Wander <wwc@rentec.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 arch/ia64/kernel/sys_ia64.c     |   13 ++-----------
 arch/x86_64/kernel/sys_x86_64.c |    2 +-
 2 files changed, 3 insertions(+), 12 deletions(-)

diff -puN arch/ia64/kernel/sys_ia64.c~avoiding-mmap-fragmentation-revert-unneeded-64-bit-changes arch/ia64/kernel/sys_ia64.c
--- 25/arch/ia64/kernel/sys_ia64.c~avoiding-mmap-fragmentation-revert-unneeded-64-bit-changes	2005-06-02 22:34:14.000000000 -0700
+++ 25-akpm/arch/ia64/kernel/sys_ia64.c	2005-06-02 22:34:14.000000000 -0700
@@ -38,14 +38,8 @@ arch_get_unmapped_area (struct file *fil
 	if (REGION_NUMBER(addr) == REGION_HPAGE)
 		addr = 0;
 #endif
-	if (!addr) {
-	        if (len > mm->cached_hole_size) {
-		        addr = mm->free_area_cache;
-		} else {
-		        addr = TASK_UNMAPPED_BASE;
-			mm->cached_hole_size = 0;
-		}
-	}
+	if (!addr)
+		addr = mm->free_area_cache;
 
 	if (map_shared && (TASK_SIZE > 0xfffffffful))
 		/*
@@ -65,7 +59,6 @@ arch_get_unmapped_area (struct file *fil
 			if (start_addr != TASK_UNMAPPED_BASE) {
 				/* Start a new search --- just in case we missed some holes.  */
 				addr = TASK_UNMAPPED_BASE;
-				mm->cached_hole_size = 0;
 				goto full_search;
 			}
 			return -ENOMEM;
@@ -75,8 +68,6 @@ arch_get_unmapped_area (struct file *fil
 			mm->free_area_cache = addr + len;
 			return addr;
 		}
-		if (addr + mm->cached_hole_size < vma->vm_start)
-		        mm->cached_hole_size = vma->vm_start - addr;
 		addr = (vma->vm_end + align_mask) & ~align_mask;
 	}
 }
diff -puN arch/x86_64/kernel/sys_x86_64.c~avoiding-mmap-fragmentation-revert-unneeded-64-bit-changes arch/x86_64/kernel/sys_x86_64.c
--- 25/arch/x86_64/kernel/sys_x86_64.c~avoiding-mmap-fragmentation-revert-unneeded-64-bit-changes	2005-06-02 22:34:14.000000000 -0700
+++ 25-akpm/arch/x86_64/kernel/sys_x86_64.c	2005-06-02 22:34:14.000000000 -0700
@@ -105,7 +105,7 @@ arch_get_unmapped_area(struct file *filp
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	if (len <= mm->cached_hole_size) {
+	if (begin != TASK_UNMAPPED_64 && len <= mm->cached_hole_size) {
 	        mm->cached_hole_size = 0;
 		mm->free_area_cache = begin;
 	}
_