unmap_vmas() will cause long scheduling latencies when tearing down really big
vmas on !CONFIG_PREEMPT kernels.  That's a bit unkind to the non-preempt case,
so cap ZAP_BLOCK_SIZE at 1024 pages, letting unmap_vmas() do a cond_resched()
after zapping each 1024-page block instead of only once per vma.
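For illustration, here is a minimal userspace C sketch of the chunked-zap
pattern (not kernel code): zap_one_block() and reschedule_point() are
hypothetical stand-ins for unmap_page_range() and cond_resched(), and
PAGE_SIZE is assumed to be 4096 for the example.

#include <stdio.h>

#define PAGE_SIZE	4096UL			/* assumed for the example */
#define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)	/* the new !CONFIG_PREEMPT value */

/* Hypothetical stand-in for unmap_page_range() on one block. */
static void zap_one_block(unsigned long start, unsigned long end)
{
	printf("zap %#010lx-%#010lx\n", start, end);
}

/* Hypothetical stand-in for cond_resched(): a chance to yield the CPU. */
static void reschedule_point(void)
{
}

/*
 * Tear down [start, end) in ZAP_BLOCK_SIZE chunks, offering a reschedule
 * point between chunks so a huge teardown cannot monopolise the CPU on a
 * non-preemptible kernel.  With the old ZAP_BLOCK_SIZE of ~0UL the loop
 * body ran exactly once, so the reschedule point never helped.
 */
static void unmap_range(unsigned long start, unsigned long end)
{
	while (start < end) {
		unsigned long block = end - start;

		if (block > ZAP_BLOCK_SIZE)
			block = ZAP_BLOCK_SIZE;
		zap_one_block(start, start + block);
		start += block;
		reschedule_point();
	}
}

int main(void)
{
	/* e.g. a mapping a bit over 40MB: ten full blocks plus a tail */
	unmap_range(0x40000000UL,
		    0x40000000UL + 10 * ZAP_BLOCK_SIZE + 123 * PAGE_SIZE);
	return 0;
}

The real unmap_vmas() loop is more involved (it also flushes and restarts the
mmu_gather state around its reschedule point), but the chunking idea is the
same.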

---

 25-akpm/mm/memory.c |    4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff -puN mm/memory.c~unmap_vmas-latency-improvement mm/memory.c
--- 25/mm/memory.c~unmap_vmas-latency-improvement	2004-04-03 17:43:34.206847088 -0800
+++ 25-akpm/mm/memory.c	2004-04-03 17:43:56.891398512 -0800
@@ -491,9 +491,9 @@ void unmap_page_range(struct mmu_gather 
 #define ZAP_BLOCK_SIZE	(256 * PAGE_SIZE)
 #endif
 
-/* No preempt: go for the best straight-line efficiency */
+/* No preempt: go for improved straight-line efficiency */
 #if !defined(CONFIG_PREEMPT)
-#define ZAP_BLOCK_SIZE	(~(0UL))
+#define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
 #endif
 
 /**

_