From: "Chen, Kenneth W" <kenneth.w.chen@intel.com>

Add per-node huge page statistics to sysfs, so the total and free huge
page counts can be read per NUMA node rather than only system-wide.

The weak hugetlb_report_node_meminfo() stub is removed from
drivers/base/node.c; the function is now declared in linux/hugetlb.h
(with a no-op stub for !CONFIG_HUGETLB_PAGE) and implemented in
mm/hugetlb.c on top of new nr_huge_pages_node[] and
free_huge_pages_node[] counters.  The free_huge_pages accounting moves
into enqueue_huge_page()/dequeue_huge_page() so that the global and
per-node counters are always updated together.
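
With the patch applied, each node's sysfs meminfo gains two lines.
For example (assuming sysfs is mounted at /sys; the counts below are
illustrative, not from a real run):

	$ cat /sys/devices/system/node/node0/meminfo
	...
	Node 0 HugePages_Total:    10
	Node 0 HugePages_Free:      8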

Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/drivers/base/node.c     |    6 ------
 25-akpm/include/linux/hugetlb.h |    2 ++
 25-akpm/mm/hugetlb.c            |   36 +++++++++++++++++++++++++++---------
 3 files changed, 29 insertions(+), 15 deletions(-)

diff -puN drivers/base/node.c~per-node-huge-page-stats-in-sysfs drivers/base/node.c
--- 25/drivers/base/node.c~per-node-huge-page-stats-in-sysfs	Wed Jun 23 16:19:36 2004
+++ 25-akpm/drivers/base/node.c	Wed Jun 23 16:19:36 2004
@@ -31,12 +31,6 @@ static ssize_t node_read_cpumap(struct s
 
 static SYSDEV_ATTR(cpumap, S_IRUGO, node_read_cpumap, NULL);
 
-/* Can be overwritten by architecture specific code. */
-int __attribute__((weak)) hugetlb_report_node_meminfo(int node, char *buf)
-{
-	return 0;
-}
-
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 {
diff -puN include/linux/hugetlb.h~per-node-huge-page-stats-in-sysfs include/linux/hugetlb.h
--- 25/include/linux/hugetlb.h~per-node-huge-page-stats-in-sysfs	Wed Jun 23 16:19:36 2004
+++ 25-akpm/include/linux/hugetlb.h	Wed Jun 23 16:19:36 2004
@@ -19,6 +19,7 @@ void zap_hugepage_range(struct vm_area_s
 void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 int hugetlb_report_meminfo(char *);
+int hugetlb_report_node_meminfo(int, char *);
 int is_hugepage_mem_enough(size_t);
 unsigned long hugetlb_total_pages(void);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -72,6 +73,7 @@ static inline unsigned long hugetlb_tota
 #define unmap_hugepage_range(vma, start, end)	BUG()
 #define is_hugepage_mem_enough(size)		0
 #define hugetlb_report_meminfo(buf)		0
+#define hugetlb_report_node_meminfo(n, buf)	0
 #define mark_mm_hugetlb(mm, vma)		do { } while (0)
 #define follow_huge_pmd(mm, addr, pmd, write)	0
 #define is_aligned_hugepage_range(addr, len)	0
diff -puN mm/hugetlb.c~per-node-huge-page-stats-in-sysfs mm/hugetlb.c
--- 25/mm/hugetlb.c~per-node-huge-page-stats-in-sysfs	Wed Jun 23 16:19:36 2004
+++ 25-akpm/mm/hugetlb.c	Wed Jun 23 16:19:36 2004
@@ -15,12 +15,16 @@ const unsigned long hugetlb_zero = 0, hu
 static unsigned long nr_huge_pages, free_huge_pages;
 unsigned long max_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
+static unsigned int nr_huge_pages_node[MAX_NUMNODES];
+static unsigned int free_huge_pages_node[MAX_NUMNODES];
 static spinlock_t hugetlb_lock = SPIN_LOCK_UNLOCKED;
 
 static void enqueue_huge_page(struct page *page)
 {
-	list_add(&page->lru,
-		 &hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
+	int nid = page_zone(page)->zone_pgdat->node_id;
+	list_add(&page->lru, &hugepage_freelists[nid]);
+	free_huge_pages++;
+	free_huge_pages_node[nid]++;
 }
 
 static struct page *dequeue_huge_page(void)
@@ -38,6 +42,8 @@ static struct page *dequeue_huge_page(vo
 		page = list_entry(hugepage_freelists[nid].next,
 				  struct page, lru);
 		list_del(&page->lru);
+		free_huge_pages--;
+		free_huge_pages_node[nid]--;
 	}
 	return page;
 }
@@ -49,6 +55,10 @@ static struct page *alloc_fresh_huge_pag
 	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP,
 					HUGETLB_PAGE_ORDER);
 	nid = (nid + 1) % numnodes;
+	if (page) {
+		nr_huge_pages++;
+		nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]++;
+	}
 	return page;
 }
 
@@ -61,7 +71,6 @@ void free_huge_page(struct page *page)
 
 	spin_lock(&hugetlb_lock);
 	enqueue_huge_page(page);
-	free_huge_pages++;
 	spin_unlock(&hugetlb_lock);
 }
 
@@ -76,7 +85,6 @@ struct page *alloc_huge_page(void)
 		spin_unlock(&hugetlb_lock);
 		return NULL;
 	}
-	free_huge_pages--;
 	spin_unlock(&hugetlb_lock);
 	set_page_count(page, 1);
 	page[1].mapping = (void *)free_huge_page;
@@ -119,6 +127,7 @@ static void update_and_free_page(struct 
 {
 	int i;
 	nr_huge_pages--;
+	nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]--;
 	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
 				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
@@ -132,7 +141,7 @@ static void update_and_free_page(struct 
 #ifdef CONFIG_HIGHMEM
 static void try_to_free_low(unsigned long count)
 {
-	int i;
+	int i, nid;
 	for (i = 0; i < MAX_NUMNODES; ++i) {
 		struct page *page, *next;
 		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
@@ -140,7 +149,9 @@ static void try_to_free_low(unsigned lon
 				continue;
 			list_del(&page->lru);
 			update_and_free_page(page);
-			--free_huge_pages;
+			nid = page_zone(page)->zone_pgdat->node_id;
+			free_huge_pages--;
+			free_huge_pages_node[nid]--;
 			if (count >= nr_huge_pages)
 				return;
 		}
@@ -160,8 +171,6 @@ static unsigned long set_max_huge_pages(
 			return nr_huge_pages;
 		spin_lock(&hugetlb_lock);
 		enqueue_huge_page(page);
-		free_huge_pages++;
-		nr_huge_pages++;
 		spin_unlock(&hugetlb_lock);
 	}
 	if (count >= nr_huge_pages)
@@ -169,7 +178,7 @@ static unsigned long set_max_huge_pages(
 
 	spin_lock(&hugetlb_lock);
 	try_to_free_low(count);
-	for (; count < nr_huge_pages; --free_huge_pages) {
+	while (count < nr_huge_pages) {
 		struct page *page = dequeue_huge_page();
 		if (!page)
 			break;
@@ -201,6 +210,15 @@ int hugetlb_report_meminfo(char *buf)
 			HPAGE_SIZE/1024);
 }
 
+int hugetlb_report_node_meminfo(int nid, char *buf)
+{
+	return sprintf(buf,
+		"Node %d HugePages_Total: %5u\n"
+		"Node %d HugePages_Free:  %5u\n",
+		nid, nr_huge_pages_node[nid],
+		nid, free_huge_pages_node[nid]);
+}
+
 int is_hugepage_mem_enough(size_t size)
 {
 	return (size + ~HPAGE_MASK)/HPAGE_SIZE <= free_huge_pages;
_