From: Martin Hicks <mort@sgi.com>

When early zone reclaim is turned on, the LRU is scanned more frequently
whenever a zone is low on memory.  This patch rate-limits zone reclaim by
skipping the scan if another thread (either kswapd or sync reclaim) is
already reclaiming from the zone.  The per-zone reclaim_in_progress counter
is initialised to -1 so that atomic_inc_and_test() succeeds only for the
first reclaimer to enter zone_reclaim(); kswapd and direct reclaim simply
bump the counter around their calls to shrink_zone().
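For illustration only (not part of the patch), here is a stand-alone
user-space sketch of the gating scheme using C11 atomics.  The names
fake_zone and try_zone_reclaim are made up, and atomic_fetch_add() plus one
stands in for the kernel's atomic_inc_and_test(), which increments and then
reports whether the result is zero:

/*
 * Sketch of the reclaim gate: the counter starts at -1, so only the
 * caller whose increment raises it to zero "wins" and may scan; any
 * concurrent caller sees a nonzero result, undoes its increment, and
 * backs off.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_zone {
	atomic_int reclaim_in_progress;	/* initialised to -1 */
};

static bool try_zone_reclaim(struct fake_zone *z)
{
	/* Increment; old value + 1 is the new value the kernel tests. */
	if (atomic_fetch_add(&z->reclaim_in_progress, 1) + 1 != 0) {
		/* Another reclaimer is active: back off. */
		atomic_fetch_sub(&z->reclaim_in_progress, 1);
		return false;
	}
	/* ... shrink_zone() would run here ... */
	atomic_fetch_sub(&z->reclaim_in_progress, 1);
	return true;
}

int main(void)
{
	struct fake_zone z = { .reclaim_in_progress = -1 };

	printf("first caller reclaims: %d\n", try_zone_reclaim(&z));
	return 0;
}

Starting the counter at -1 rather than 0 lets a single atomic operation
distinguish "idle" from "one or more reclaimers active" without a separate
flag or lock.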

Signed-off-by: Martin Hicks <mort@sgi.com> 
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 include/linux/mmzone.h |    2 ++
 mm/page_alloc.c        |    1 +
 mm/vmscan.c            |   10 ++++++++++
 3 files changed, 13 insertions(+)

diff -puN include/linux/mmzone.h~vm-rate-limit-early-reclaim include/linux/mmzone.h
--- 25/include/linux/mmzone.h~vm-rate-limit-early-reclaim	2005-06-05 22:07:21.000000000 -0700
+++ 25-akpm/include/linux/mmzone.h	2005-06-05 22:07:21.000000000 -0700
@@ -149,6 +149,8 @@ struct zone {
 	 * as it fails a watermark_ok() in __alloc_pages?
 	 */
 	int			reclaim_pages;
+	/* A count of how many reclaimers are scanning this zone */
+	atomic_t		reclaim_in_progress;
 
 	/*
 	 * prev_priority holds the scanning priority for this zone.  It is
diff -puN mm/page_alloc.c~vm-rate-limit-early-reclaim mm/page_alloc.c
--- 25/mm/page_alloc.c~vm-rate-limit-early-reclaim	2005-06-05 22:07:21.000000000 -0700
+++ 25-akpm/mm/page_alloc.c	2005-06-05 22:07:21.000000000 -0700
@@ -1757,6 +1757,7 @@ static void __init free_area_init_core(s
 		zone->nr_scan_inactive = 0;
 		zone->nr_active = 0;
 		zone->nr_inactive = 0;
+		atomic_set(&zone->reclaim_in_progress, -1);
 		if (!size)
 			continue;
 
diff -puN mm/vmscan.c~vm-rate-limit-early-reclaim mm/vmscan.c
--- 25/mm/vmscan.c~vm-rate-limit-early-reclaim	2005-06-05 22:07:21.000000000 -0700
+++ 25-akpm/mm/vmscan.c	2005-06-05 22:07:21.000000000 -0700
@@ -903,7 +903,9 @@ shrink_caches(struct zone **zones, struc
 		if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
 			continue;	/* Let kswapd poll it */
 
+		atomic_inc(&zone->reclaim_in_progress);
 		shrink_zone(zone, sc);
+		atomic_dec(&zone->reclaim_in_progress);
 	}
 }
  
@@ -1114,7 +1116,9 @@ scan:
 			sc.nr_reclaimed = 0;
 			sc.priority = priority;
 			sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
+			atomic_inc(&zone->reclaim_in_progress);
 			shrink_zone(zone, &sc);
+			atomic_dec(&zone->reclaim_in_progress);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
 						lru_pages);
@@ -1357,9 +1361,15 @@ int zone_reclaim(struct zone *zone, unsi
 	else
 		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
 
+	/* Don't reclaim the zone if there are other reclaimers active */
+	if (!atomic_inc_and_test(&zone->reclaim_in_progress))
+		goto out;
+
 	shrink_zone(zone, &sc);
 	total_reclaimed = sc.nr_reclaimed;
 
+ out:
+	atomic_dec(&zone->reclaim_in_progress);
 	return total_reclaimed;
 }
 
_