From: "Seth, Rohit" <rohit.seth@intel.com>

We recently uncovered a bug in mm/mmap.c on IA-64.  While unmapping an
address space, unmap_region() calls free_pgtables() to free the pages that
may be in use for page tables.  Currently no distinction is made between
freeing a region that is mapped by normal pages and one that is mapped by
hugepages.  Architecture-specific code needs a hook so that the PTEs
covering a hugepage-mapped region are unmapped properly.  The patch below
makes the required changes to the generic part of the kernel; a separate
IA-64 patch that uses the new semantics will follow.  For now, so as not to
disturb PPC64 (the only architecture that defines
ARCH_HAS_HUGEPAGE_ONLY_RANGE), the new function hugetlb_free_pgtables is
simply defined back to free_pgtables there.
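
For illustration only (this is not the actual IA-64 patch, which will be
posted separately): an architecture could supply the hook as a macro in its
page.h, just as PPC64 does below, for example one that rounds the region out
to huge-page boundaries before falling through to the normal path.  The
rounding shown here is an assumption about what an arch might need;
HPAGE_MASK and HPAGE_SIZE are the arch's usual hugepage constants.

	/* Hypothetical sketch of an arch-specific hook in <asm/page.h>.
	 * Expanded inside mm/mmap.c, so it may call free_pgtables()
	 * directly even though that function is static there. */
	#define hugetlb_free_pgtables(tlb, prev, start, end)		\
	do {								\
		/* widen the range to cover whole huge pages */		\
		unsigned long __s = (start) & HPAGE_MASK;		\
		unsigned long __e = ((end) + HPAGE_SIZE - 1) & HPAGE_MASK; \
		free_pgtables(tlb, prev, __s, __e);			\
	} while (0)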


 include/asm-ppc64/page.h |    1 +
 include/linux/hugetlb.h  |    2 ++
 mm/mmap.c                |    6 +++++-
 3 files changed, 8 insertions(+), 1 deletion(-)

diff -puN include/asm-ppc64/page.h~free_pgt_generic1 include/asm-ppc64/page.h
--- 25/include/asm-ppc64/page.h~free_pgt_generic1	2003-12-22 18:07:53.000000000 -0800
+++ 25-akpm/include/asm-ppc64/page.h	2003-12-22 18:07:53.000000000 -0800
@@ -41,6 +41,7 @@
 	( ((addr > (TASK_HPAGE_BASE-len)) && (addr < TASK_HPAGE_END)) || \
 	  ((current->mm->context & CONTEXT_LOW_HPAGES) && \
 	   (addr > (TASK_HPAGE_BASE_32-len)) && (addr < TASK_HPAGE_END_32)) )
+#define hugetlb_free_pgtables free_pgtables
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 
 #define in_hugepage_area(context, addr) \
diff -puN include/linux/hugetlb.h~free_pgt_generic1 include/linux/hugetlb.h
--- 25/include/linux/hugetlb.h~free_pgt_generic1	2003-12-22 18:07:53.000000000 -0800
+++ 25-akpm/include/linux/hugetlb.h	2003-12-22 18:07:53.000000000 -0800
@@ -39,6 +39,7 @@ mark_mm_hugetlb(struct mm_struct *mm, st
 
 #ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
 #define is_hugepage_only_range(addr, len)	0
+#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
 #endif
 
 #else /* !CONFIG_HUGETLB_PAGE */
@@ -63,6 +64,7 @@ static inline int is_vm_hugetlb_page(str
 #define is_aligned_hugepage_range(addr, len)	0
 #define pmd_huge(x)	0
 #define is_hugepage_only_range(addr, len)	0
+#define hugetlb_free_pgtables(tlb, prev, start, end) do { } while (0)
 
 #ifndef HPAGE_MASK
 #define HPAGE_MASK	0		/* Keep the compiler happy */
diff -puN mm/mmap.c~free_pgt_generic1 mm/mmap.c
--- 25/mm/mmap.c~free_pgt_generic1	2003-12-22 18:07:53.000000000 -0800
+++ 25-akpm/mm/mmap.c	2003-12-22 18:07:53.000000000 -0800
@@ -1136,7 +1136,11 @@ static void unmap_region(struct mm_struc
 	tlb = tlb_gather_mmu(mm, 0);
 	unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted);
 	vm_unacct_memory(nr_accounted);
-	free_pgtables(tlb, prev, start, end);
+
+	if (is_hugepage_only_range(start, end - start))
+		hugetlb_free_pgtables(tlb, prev, start, end);
+	else
+		free_pgtables(tlb, prev, start, end);
 	tlb_finish_mmu(tlb, start, end);
 }
 

_