From: William Lee Irwin III <wli@holomorphy.com>

If one's goal is to free highmem pages, shrink_slab() is an ineffective
way of recovering them, as slab pages are all allocated from ZONE_NORMAL
or ZONE_DMA.  Hence the existing comment "FIXME: do not do for zone
highmem".  Arguably this is a question of policy, since highmem
allocations may still be satisfied indirectly by reaping slab pages and
handing the freed lowmem pages back; but this patch implements what the
FIXME asks for: skip shrink_slab() when the target zone is ZONE_HIGHMEM.



 mm/vmscan.c |   10 +++++-----
 1 files changed, 5 insertions(+), 5 deletions(-)

diff -puN mm/vmscan.c~dont-shrink-slab-for-highmem mm/vmscan.c
--- 25/mm/vmscan.c~dont-shrink-slab-for-highmem	2003-04-17 19:33:44.000000000 -0700
+++ 25-akpm/mm/vmscan.c	2003-04-17 19:33:44.000000000 -0700
@@ -135,11 +135,9 @@ void remove_shrinker(struct shrinker *sh
  * If the vm encounted mapped pages on the LRU it increase the pressure on
  * slab to avoid swapping.
  *
- * FIXME: do not do for zone highmem
- *
  * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
  */
-static int shrink_slab(long scanned,  unsigned int gfp_mask)
+static int shrink_slab(long scanned, unsigned int gfp_mask)
 {
 	struct shrinker *shrinker;
 	long pages;
@@ -835,7 +833,8 @@ int try_to_free_pages(struct zone *class
 
 		/* Take a nap, wait for some writeback to complete */
 		blk_congestion_wait(WRITE, HZ/10);
-		shrink_slab(total_scanned, gfp_mask);
+		if (classzone - classzone->zone_pgdat->node_zones < ZONE_HIGHMEM)
+			shrink_slab(total_scanned, gfp_mask);
 	}
 	if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY))
 		out_of_memory();
@@ -895,7 +894,8 @@ static int balance_pgdat(pg_data_t *pgda
 				max_scan = SWAP_CLUSTER_MAX;
 			to_free -= shrink_zone(zone, max_scan, GFP_KERNEL,
 					to_reclaim, &nr_mapped, ps, priority);
-			shrink_slab(max_scan + nr_mapped, GFP_KERNEL);
+			if (i < ZONE_HIGHMEM)
+				shrink_slab(max_scan + nr_mapped, GFP_KERNEL);
 			if (zone->all_unreclaimable)
 				continue;
 			if (zone->pages_scanned > zone->present_pages * 2)

_