From: Benjamin Herrenschmidt <benh@kernel.crashing.org>

The patch below fixes a memory-management (mm) problem in the 2.6 kernel.
Without this patch, zeromapped pages are not flushed properly when they are
swapped out.

What happens is that the page->index field is zero for page table pages
corresponding to the zeromapped range.  This causes ptep_to_address() to
return an incorrect virtual address with the result that PTEs are never
invalidated at swap-out...

The fix below mirrors the remap_pmd_range() case.



 mm/memory.c |    7 ++++---
 1 files changed, 4 insertions(+), 3 deletions(-)

diff -puN mm/memory.c~zeromap_pmd_range-fix mm/memory.c
--- 25/mm/memory.c~zeromap_pmd_range-fix	2003-08-23 14:01:59.000000000 -0700
+++ 25-akpm/mm/memory.c	2003-08-23 14:01:59.000000000 -0700
@@ -810,17 +810,18 @@ static void zeromap_pte_range(pte_t * pt
 static inline int zeromap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address,
                                     unsigned long size, pgprot_t prot)
 {
-	unsigned long end;
+	unsigned long base, end;
 
+	base = address & PGDIR_MASK;
 	address &= ~PGDIR_MASK;
 	end = address + size;
 	if (end > PGDIR_SIZE)
 		end = PGDIR_SIZE;
 	do {
-		pte_t * pte = pte_alloc_map(mm, pmd, address);
+		pte_t * pte = pte_alloc_map(mm, pmd, base + address);
 		if (!pte)
 			return -ENOMEM;
-		zeromap_pte_range(pte, address, end - address, prot);
+		zeromap_pte_range(pte, base + address, end - address, prot);
 		pte_unmap(pte);
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;

_