From: Nick Piggin <piggin@cyberone.com.au>

Pad the per-node dummy scan pages to a cache line boundary (____cacheline_aligned,
one group per node) so that scan pages belonging to different NUMA nodes never
share a cache line, preventing accidental inter-node cache-line traffic.


---

 mm/page_alloc.c |   12 +++++++++---
 1 files changed, 9 insertions(+), 3 deletions(-)

diff -puN mm/page_alloc.c~vm-dont-rotate-active-list-padding mm/page_alloc.c
--- 25/mm/page_alloc.c~vm-dont-rotate-active-list-padding	2004-02-18 20:54:55.000000000 -0800
+++ 25-akpm/mm/page_alloc.c	2004-02-18 20:54:55.000000000 -0800
@@ -1225,8 +1225,14 @@ void __init memmap_init_zone(struct page
 	memmap_init_zone((start), (size), (nid), (zone), (start_pfn))
 #endif
 
-/* dummy pages used to scan active lists */
-static struct page scan_pages[MAX_NUMNODES][MAX_NR_ZONES];
+/*
+ * Dummy pages used to scan active lists. It would be cleaner if these
+ * could be part of struct zone directly, but include dependencies currently
+ * prevent that.
+ */
+static struct {
+	struct page zone[MAX_NR_ZONES];
+} ____cacheline_aligned scan_pages[MAX_NUMNODES];
 
 /*
  * Set up the zone data structures:
@@ -1307,7 +1313,7 @@ static void __init free_area_init_core(s
 		zone->nr_inactive = 0;
 
 		/* initialize dummy page used for scanning */
-		scan_page = &scan_pages[nid][j];
+		scan_page = &(scan_pages[nid].zone[j]);
 		zone->scan_page = scan_page;
 		memset(scan_page, 0, sizeof *scan_page);
 		scan_page->flags =

_