To check on zone balancing, split the /proc/vmstat pgsteal statistic into
pgsteal_high, pgsteal_normal and pgsteal_dma: one counter for each zone.

The accounting moves from shrink_list() into shrink_cache(), where the zone
is known; a new is_normal() helper in mmzone.h tells ZONE_NORMAL apart from
ZONE_DMA.  Also teach show_free_areas() to print each zone's present_pages.
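
For reference, a minimal userspace sketch of how the new per-zone counters
could be read back out of /proc/vmstat (the field names match the
vmstat_text[] entries below; the program itself is only illustrative, not
part of the patch):

	/* Illustrative only: dump the per-zone pgsteal counters. */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char name[64];
		unsigned long val;
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f) {
			perror("/proc/vmstat");
			return 1;
		}
		/* /proc/vmstat is "name value" pairs, one per line */
		while (fscanf(f, "%63s %lu", name, &val) == 2) {
			/* pgsteal_high, pgsteal_normal, pgsteal_dma */
			if (strncmp(name, "pgsteal_", 8) == 0)
				printf("%s %lu\n", name, val);
		}
		fclose(f);
		return 0;
	}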



---

 include/linux/mmzone.h     |    5 +++++
 include/linux/page-flags.h |    7 +++++--
 mm/page_alloc.c            |   11 ++++++++---
 mm/vmscan.c                |   11 ++++++++---
 4 files changed, 26 insertions(+), 8 deletions(-)

diff -puN mm/page_alloc.c~instrument-highmem-page-reclaim mm/page_alloc.c
--- 25/mm/page_alloc.c~instrument-highmem-page-reclaim	2004-02-25 03:24:06.000000000 -0800
+++ 25-akpm/mm/page_alloc.c	2004-02-25 03:24:35.000000000 -0800
@@ -1021,6 +1021,7 @@ void show_free_areas(void)
 			" high:%lukB"
 			" active:%lukB"
 			" inactive:%lukB"
+			" present:%lukB"
 			"\n",
 			zone->name,
 			K(zone->free_pages),
@@ -1028,7 +1029,8 @@ void show_free_areas(void)
 			K(zone->pages_low),
 			K(zone->pages_high),
 			K(zone->nr_active),
-			K(zone->nr_inactive)
+			K(zone->nr_inactive),
+			K(zone->present_pages)
 			);
 	}
 
@@ -1491,13 +1493,16 @@ static char *vmstat_text[] = {
 
 	"pgscan",
 	"pgrefill",
-	"pgsteal",
+	"pgsteal_high",
+	"pgsteal_normal",
+	"pgsteal_dma",
+
 	"pginodesteal",
 	"kswapd_steal",
-
 	"kswapd_inodesteal",
 	"pageoutrun",
 	"allocstall",
+
 	"pgrotated",
 };
 
diff -puN include/linux/page-flags.h~instrument-highmem-page-reclaim include/linux/page-flags.h
--- 25/include/linux/page-flags.h~instrument-highmem-page-reclaim	2004-02-25 03:24:06.000000000 -0800
+++ 25-akpm/include/linux/page-flags.h	2004-02-25 03:24:06.000000000 -0800
@@ -108,13 +108,16 @@ struct page_state {
 
 	unsigned long pgscan;		/* pages scanned by page reclaim */
 	unsigned long pgrefill;		/* inspected in refill_inactive_zone */
-	unsigned long pgsteal;		/* total pages reclaimed */
+	unsigned long pgsteal_high;	/* total highmem pages reclaimed */
+	unsigned long pgsteal_normal;	/* total ZONE_NORMAL pages reclaimed */
+	unsigned long pgsteal_dma;	/* total ZONE_DMA pages reclaimed */
+
 	unsigned long pginodesteal;	/* pages reclaimed via inode freeing */
 	unsigned long kswapd_steal;	/* pages reclaimed by kswapd */
-
 	unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
 	unsigned long pageoutrun;	/* kswapd's calls to page reclaim */
 	unsigned long allocstall;	/* direct reclaim calls */
+
 	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
 } ____cacheline_aligned;
 
diff -puN mm/vmscan.c~instrument-highmem-page-reclaim mm/vmscan.c
--- 25/mm/vmscan.c~instrument-highmem-page-reclaim	2004-02-25 03:24:06.000000000 -0800
+++ 25-akpm/mm/vmscan.c	2004-02-25 03:24:06.000000000 -0800
@@ -461,9 +461,6 @@ keep:
 	list_splice(&ret_pages, page_list);
 	if (pagevec_count(&freed_pvec))
 		__pagevec_release_nonlru(&freed_pvec);
-	mod_page_state(pgsteal, ret);
-	if (current_is_kswapd())
-		mod_page_state(kswapd_steal, ret);
 	mod_page_state(pgactivate, pgactivate);
 	return ret;
 }
@@ -537,6 +534,14 @@ shrink_cache(const int nr_pages, struct 
 		mod_page_state(pgscan, nr_scan);
 		nr_freed = shrink_list(&page_list, gfp_mask,
 					&max_scan, nr_mapped);
+		if (current_is_kswapd())
+			mod_page_state(kswapd_steal, nr_freed);
+		if (is_highmem(zone))
+			mod_page_state(pgsteal_high, nr_freed);
+		else if (is_normal(zone))
+			mod_page_state(pgsteal_normal, nr_freed);
+		else
+			mod_page_state(pgsteal_dma, nr_freed);
 		ret += nr_freed;
 		if (nr_freed <= 0 && list_empty(&page_list))
 			goto done;
diff -puN include/linux/mmzone.h~instrument-highmem-page-reclaim include/linux/mmzone.h
--- 25/include/linux/mmzone.h~instrument-highmem-page-reclaim	2004-02-25 03:24:06.000000000 -0800
+++ 25-akpm/include/linux/mmzone.h	2004-02-25 03:24:06.000000000 -0800
@@ -289,6 +289,11 @@ static inline int is_highmem(struct zone
 	return (zone - zone->zone_pgdat->node_zones == ZONE_HIGHMEM);
 }
 
+static inline int is_normal(struct zone *zone)
+{
+	return (zone - zone->zone_pgdat->node_zones == ZONE_NORMAL);
+}
+
 /* These two functions are used to setup the per zone pages min values */
 struct ctl_table;
 struct file;

_