From: Hugh Dickins <hugh@veritas.com>

Some arches refer to page->mapping for their dcache flushing: use
page_mapping(page) for safety, to avoid confusion on anon pages, which will
store a different pointer there - though in most cases flush_dcache_page is
being applied to pagecache pages.
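
For context, page_mapping() is introduced by an earlier patch in this
series; a minimal sketch of the idea (the PageAnon test is an assumption
about how that patch marks anon pages, not a quote of it):

	/*
	 * Sketch only: return the pagecache address_space of a pagecache
	 * page, or NULL for an anon page, whose page->mapping has been
	 * reused to hold a different (non-address_space) pointer.
	 */
	static inline struct address_space *page_mapping(struct page *page)
	{
		return PageAnon(page) ? NULL : page->mapping;
	}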

arm has a useful mapping_mapped macro: move that to generic code
(include/linux/fs.h), and add mapping_writably_mapped, to avoid explicit
list_empty checks on i_mmap and i_mmap_shared in several places.
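
With those helpers the open-coded checks in the arch hunks below collapse
to a single test; the pattern, taking the mips flush_dcache_page hunk as
the model, is:

	/* before: defer the flush only if no vma has the file mapped */
	if (page->mapping &&
	    list_empty(&page->mapping->i_mmap) &&
	    list_empty(&page->mapping->i_mmap_shared))
		SetPageDcacheDirty(page);

	/* after: same test, via the generic helpers */
	if (page_mapping(page) && !mapping_mapped(page->mapping))
		SetPageDcacheDirty(page);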

Very tempted to add page_mapped(page) tests, perhaps along with the
mapping_writably_mapped tests in do_generic_mapping_read and
do_shmem_file_read, to cut down on wasted flush_dcache effort; but the
serialization is not obvious, so that's too unsafe to do in a hurry.


---

 25-akpm/arch/arm/mm/fault-armv.c        |    4 ++--
 25-akpm/arch/mips/mm/cache.c            |    9 +++------
 25-akpm/arch/parisc/kernel/cache.c      |    4 ++--
 25-akpm/arch/sparc64/kernel/smp.c       |    8 ++++----
 25-akpm/arch/sparc64/mm/init.c          |   14 ++++++--------
 25-akpm/fs/locks.c                      |   22 ++++++++--------------
 25-akpm/fs/xfs/linux/xfs_vnode.h        |    4 +---
 25-akpm/include/asm-arm/cacheflush.h    |   12 ++++--------
 25-akpm/include/asm-parisc/cacheflush.h |    3 +--
 25-akpm/include/asm-sh/pgalloc.h        |    4 ++--
 25-akpm/include/linux/fs.h              |   20 ++++++++++++++++++++
 25-akpm/mm/filemap.c                    |    2 +-
 25-akpm/mm/shmem.c                      |    2 +-
 25-akpm/mm/vmscan.c                     |    9 ++-------
 14 files changed, 57 insertions(+), 60 deletions(-)

diff -puN arch/arm/mm/fault-armv.c~rmap-3-arches--mapping_mapped arch/arm/mm/fault-armv.c
--- 25/arch/arm/mm/fault-armv.c~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/arch/arm/mm/fault-armv.c	Thu Apr  8 16:38:12 2004
@@ -191,7 +191,7 @@ void __flush_dcache_page(struct page *pa
 
 	__cpuc_flush_dcache_page(page_address(page));
 
-	if (!page->mapping)
+	if (!page_mapping(page))
 		return;
 
 	/*
@@ -292,7 +292,7 @@ void update_mmu_cache(struct vm_area_str
 	if (!pfn_valid(pfn))
 		return;
 	page = pfn_to_page(pfn);
-	if (page->mapping) {
+	if (page_mapping(page)) {
 		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
 
 		if (dirty)
diff -puN arch/mips/mm/cache.c~rmap-3-arches--mapping_mapped arch/mips/mm/cache.c
--- 25/arch/mips/mm/cache.c~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/arch/mips/mm/cache.c	Thu Apr  8 16:38:12 2004
@@ -57,16 +57,13 @@ void flush_dcache_page(struct page *page
 {
 	unsigned long addr;
 
-	if (page->mapping &&
-	    list_empty(&page->mapping->i_mmap) &&
-	    list_empty(&page->mapping->i_mmap_shared)) {
+	if (page_mapping(page) && !mapping_mapped(page->mapping)) {
 		SetPageDcacheDirty(page);
-
 		return;
 	}
 
 	/*
-	 * We could delay the flush for the !page->mapping case too.  But that
+	 * We could delay the flush for the !page_mapping case too.  But that
 	 * case is for exec env/arg pages and those are %99 certainly going to
 	 * get faulted into the tlb (and thus flushed) anyways.
 	 */
@@ -81,7 +78,7 @@ void __update_cache(struct vm_area_struc
 	unsigned long pfn, addr;
 
 	pfn = pte_pfn(pte);
-	if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page->mapping) &&
+	if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page_mapping(page)) &&
 	    Page_dcache_dirty(page)) {
 		if (pages_do_alias((unsigned long)page_address(page),
 		                   address & PAGE_MASK)) {
diff -puN arch/parisc/kernel/cache.c~rmap-3-arches--mapping_mapped arch/parisc/kernel/cache.c
--- 25/arch/parisc/kernel/cache.c~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/arch/parisc/kernel/cache.c	Thu Apr  8 16:38:12 2004
@@ -68,7 +68,7 @@ update_mmu_cache(struct vm_area_struct *
 {
 	struct page *page = pte_page(pte);
 
-	if (VALID_PAGE(page) && page->mapping &&
+	if (VALID_PAGE(page) && page_mapping(page) &&
 	    test_bit(PG_dcache_dirty, &page->flags)) {
 
 		flush_kernel_dcache_page(page_address(page));
@@ -234,7 +234,7 @@ void __flush_dcache_page(struct page *pa
 
 	flush_kernel_dcache_page(page_address(page));
 
-	if (!page->mapping)
+	if (!page_mapping(page))
 		return;
 	/* check shared list first if it's not empty...it's usually
 	 * the shortest */
diff -puN arch/sparc64/kernel/smp.c~rmap-3-arches--mapping_mapped arch/sparc64/kernel/smp.c
--- 25/arch/sparc64/kernel/smp.c~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/arch/sparc64/kernel/smp.c	Thu Apr  8 16:38:12 2004
@@ -671,9 +671,9 @@ static __inline__ void __local_flush_dca
 #if (L1DCACHE_SIZE > PAGE_SIZE)
 	__flush_dcache_page(page->virtual,
 			    ((tlb_type == spitfire) &&
-			     page->mapping != NULL));
+			     page_mapping(page) != NULL));
 #else
-	if (page->mapping != NULL &&
+	if (page_mapping(page) != NULL &&
 	    tlb_type == spitfire)
 		__flush_icache_page(__pa(page->virtual));
 #endif
@@ -694,7 +694,7 @@ void smp_flush_dcache_page_impl(struct p
 		if (tlb_type == spitfire) {
 			data0 =
 				((u64)&xcall_flush_dcache_page_spitfire);
-			if (page->mapping != NULL)
+			if (page_mapping(page) != NULL)
 				data0 |= ((u64)1 << 32);
 			spitfire_xcall_deliver(data0,
 					       __pa(page->virtual),
@@ -727,7 +727,7 @@ void flush_dcache_page_all(struct mm_str
 		goto flush_self;
 	if (tlb_type == spitfire) {
 		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
-		if (page->mapping != NULL)
+		if (page_mapping(page) != NULL)
 			data0 |= ((u64)1 << 32);
 		spitfire_xcall_deliver(data0,
 				       __pa(page->virtual),
diff -puN arch/sparc64/mm/init.c~rmap-3-arches--mapping_mapped arch/sparc64/mm/init.c
--- 25/arch/sparc64/mm/init.c~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/arch/sparc64/mm/init.c	Thu Apr  8 16:38:12 2004
@@ -139,9 +139,9 @@ __inline__ void flush_dcache_page_impl(s
 #if (L1DCACHE_SIZE > PAGE_SIZE)
 	__flush_dcache_page(page->virtual,
 			    ((tlb_type == spitfire) &&
-			     page->mapping != NULL));
+			     page_mapping(page) != NULL));
 #else
-	if (page->mapping != NULL &&
+	if (page_mapping(page) != NULL &&
 	    tlb_type == spitfire)
 		__flush_icache_page(__pa(page->virtual));
 #endif
@@ -203,7 +203,7 @@ void update_mmu_cache(struct vm_area_str
 
 	pfn = pte_pfn(pte);
 	if (pfn_valid(pfn) &&
-	    (page = pfn_to_page(pfn), page->mapping) &&
+	    (page = pfn_to_page(pfn), page_mapping(page)) &&
 	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
 		int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
 
@@ -227,9 +227,7 @@ void flush_dcache_page(struct page *page
 	int dirty = test_bit(PG_dcache_dirty, &page->flags);
 	int dirty_cpu = dcache_dirty_cpu(page);
 
-	if (page->mapping &&
-	    list_empty(&page->mapping->i_mmap) &&
-	    list_empty(&page->mapping->i_mmap_shared)) {
+	if (page_mapping(page) && !mapping_mapped(page->mapping)) {
 		if (dirty) {
 			if (dirty_cpu == smp_processor_id())
 				return;
@@ -237,7 +235,7 @@ void flush_dcache_page(struct page *page
 		}
 		set_dcache_dirty(page);
 	} else {
-		/* We could delay the flush for the !page->mapping
+		/* We could delay the flush for the !page_mapping
 		 * case too.  But that case is for exec env/arg
 		 * pages and those are %99 certainly going to get
 		 * faulted into the tlb (and thus flushed) anyways.
@@ -279,7 +277,7 @@ static inline void flush_cache_pte_range
 			if (!pfn_valid(pfn))
 				continue;
 			page = pfn_to_page(pfn);
-			if (PageReserved(page) || !page->mapping)
+			if (PageReserved(page) || !page_mapping(page))
 				continue;
 			pgaddr = (unsigned long) page_address(page);
 			uaddr = address + offset;
diff -puN fs/locks.c~rmap-3-arches--mapping_mapped fs/locks.c
--- 25/fs/locks.c~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/fs/locks.c	Thu Apr  8 16:38:12 2004
@@ -1453,13 +1453,10 @@ int fcntl_setlk(struct file *filp, unsig
 	 * and shared.
 	 */
 	if (IS_MANDLOCK(inode) &&
-	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
-		struct address_space *mapping = filp->f_mapping;
-
-		if (!list_empty(&mapping->i_mmap_shared)) {
-			error = -EAGAIN;
-			goto out;
-		}
+	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
+	    mapping_writably_mapped(filp->f_mapping)) {
+		error = -EAGAIN;
+		goto out;
 	}
 
 	error = flock_to_posix_lock(filp, file_lock, &flock);
@@ -1591,13 +1588,10 @@ int fcntl_setlk64(struct file *filp, uns
 	 * and shared.
 	 */
 	if (IS_MANDLOCK(inode) &&
-	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID) {
-		struct address_space *mapping = filp->f_mapping;
-
-		if (!list_empty(&mapping->i_mmap_shared)) {
-			error = -EAGAIN;
-			goto out;
-		}
+	    (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID &&
+	    mapping_writably_mapped(filp->f_mapping)) {
+		error = -EAGAIN;
+		goto out;
 	}
 
 	error = flock64_to_posix_lock(filp, file_lock, &flock);
diff -puN fs/xfs/linux/xfs_vnode.h~rmap-3-arches--mapping_mapped fs/xfs/linux/xfs_vnode.h
--- 25/fs/xfs/linux/xfs_vnode.h~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/fs/xfs/linux/xfs_vnode.h	Thu Apr  8 16:38:12 2004
@@ -596,9 +596,7 @@ static __inline__ void vn_flagclr(struct
 /*
  * Some useful predicates.
  */
-#define VN_MAPPED(vp)	\
-	(!list_empty(&(LINVFS_GET_IP(vp)->i_mapping->i_mmap)) || \
-	(!list_empty(&(LINVFS_GET_IP(vp)->i_mapping->i_mmap_shared))))
+#define VN_MAPPED(vp)	mapping_mapped(LINVFS_GET_IP(vp)->i_mapping)
 #define VN_CACHED(vp)	(LINVFS_GET_IP(vp)->i_mapping->nrpages)
 #define VN_DIRTY(vp)	mapping_tagged(LINVFS_GET_IP(vp)->i_mapping, \
 					PAGECACHE_TAG_DIRTY)
diff -puN include/asm-arm/cacheflush.h~rmap-3-arches--mapping_mapped include/asm-arm/cacheflush.h
--- 25/include/asm-arm/cacheflush.h~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/include/asm-arm/cacheflush.h	Thu Apr  8 16:38:12 2004
@@ -283,23 +283,19 @@ flush_cache_page(struct vm_area_struct *
  * flush_dcache_page is used when the kernel has written to the page
  * cache page at virtual address page->virtual.
  *
- * If this page isn't mapped (ie, page->mapping = NULL), or it has
- * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
- * then we _must_ always clean + invalidate the dcache entries associated
- * with the kernel mapping.
+ * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * have userspace mappings, then we _must_ always clean + invalidate
+ * the dcache entries associated with the kernel mapping.
  *
  * Otherwise we can defer the operation, and clean the cache when we are
  * about to change to user space.  This is the same method as used on SPARC64.
  * See update_mmu_cache for the user space part.
  */
-#define mapping_mapped(map)	(!list_empty(&(map)->i_mmap) || \
-				 !list_empty(&(map)->i_mmap_shared))
-
 extern void __flush_dcache_page(struct page *);
 
 static inline void flush_dcache_page(struct page *page)
 {
-	if (page->mapping && !mapping_mapped(page->mapping))
+	if (page_mapping(page) && !mapping_mapped(page->mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
 	else
 		__flush_dcache_page(page);
diff -puN include/asm-parisc/cacheflush.h~rmap-3-arches--mapping_mapped include/asm-parisc/cacheflush.h
--- 25/include/asm-parisc/cacheflush.h~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/include/asm-parisc/cacheflush.h	Thu Apr  8 16:38:12 2004
@@ -69,8 +69,7 @@ extern void __flush_dcache_page(struct p
 
 static inline void flush_dcache_page(struct page *page)
 {
-	if (page->mapping && list_empty(&page->mapping->i_mmap) &&
-			list_empty(&page->mapping->i_mmap_shared)) {
+	if (page_mapping(page) && !mapping_mapped(page->mapping)) {
 		set_bit(PG_dcache_dirty, &page->flags);
 	} else {
 		__flush_dcache_page(page);
diff -puN include/asm-sh/pgalloc.h~rmap-3-arches--mapping_mapped include/asm-sh/pgalloc.h
--- 25/include/asm-sh/pgalloc.h~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/include/asm-sh/pgalloc.h	Thu Apr  8 16:38:12 2004
@@ -101,8 +101,8 @@ static inline pte_t ptep_get_and_clear(p
 		unsigned long pfn = pte_pfn(pte);
 		if (pfn_valid(pfn)) {
 			page = pfn_to_page(pfn);
-			if (!page->mapping
-			    || list_empty(&page->mapping->i_mmap_shared))
+			if (!page_mapping(page) ||
+			    !mapping_writably_mapped(page->mapping))
 				__clear_bit(PG_mapped, &page->flags);
 		}
 	}
diff -puN include/linux/fs.h~rmap-3-arches--mapping_mapped include/linux/fs.h
--- 25/include/linux/fs.h~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/include/linux/fs.h	Thu Apr  8 16:38:12 2004
@@ -374,6 +374,26 @@ struct block_device {
 int mapping_tagged(struct address_space *mapping, int tag);
 
 /*
+ * Might pages of this file be mapped into userspace?
+ */
+static inline int mapping_mapped(struct address_space *mapping)
+{
+	return	!list_empty(&mapping->i_mmap) ||
+		!list_empty(&mapping->i_mmap_shared);
+}
+
+/*
+ * Might pages of this file have been modified in userspace?
+ * Note that i_mmap_shared holds all the VM_SHARED vmas: do_mmap_pgoff
+ * marks vma as VM_SHARED if it is shared, and the file was opened for
+ * writing i.e. vma may be mprotected writable even if now readonly.
+ */
+static inline int mapping_writably_mapped(struct address_space *mapping)
+{
+	return	!list_empty(&mapping->i_mmap_shared);
+}
+
+/*
  * Use sequence counter to get consistent i_size on 32-bit processors.
  */
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
diff -puN mm/filemap.c~rmap-3-arches--mapping_mapped mm/filemap.c
--- 25/mm/filemap.c~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/mm/filemap.c	Thu Apr  8 16:38:12 2004
@@ -660,7 +660,7 @@ page_ok:
 		 * virtual addresses, take care about potential aliasing
 		 * before reading the page on the kernel side.
 		 */
-		if (!list_empty(&mapping->i_mmap_shared))
+		if (mapping_writably_mapped(mapping))
 			flush_dcache_page(page);
 
 		/*
diff -puN mm/shmem.c~rmap-3-arches--mapping_mapped mm/shmem.c
--- 25/mm/shmem.c~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/mm/shmem.c	Thu Apr  8 16:38:12 2004
@@ -1340,7 +1340,7 @@ static void do_shmem_file_read(struct fi
 			 * virtual addresses, take care about potential aliasing
 			 * before reading the page on the kernel side.
 			 */
-			if (!list_empty(&mapping->i_mmap_shared))
+			if (mapping_writably_mapped(mapping))
 				flush_dcache_page(page);
 			/*
 			 * Mark the page accessed if we read the beginning.
diff -puN mm/vmscan.c~rmap-3-arches--mapping_mapped mm/vmscan.c
--- 25/mm/vmscan.c~rmap-3-arches--mapping_mapped	Thu Apr  8 16:38:12 2004
+++ 25-akpm/mm/vmscan.c	Thu Apr  8 16:38:12 2004
@@ -190,13 +190,8 @@ static inline int page_mapping_inuse(str
 	if (!mapping)
 		return 0;
 
-	/* File is mmap'd by somebody. */
-	if (!list_empty(&mapping->i_mmap))
-		return 1;
-	if (!list_empty(&mapping->i_mmap_shared))
-		return 1;
-
-	return 0;
+	/* File is mmap'd by somebody? */
+	return mapping_mapped(mapping);
 }
 
 static inline int is_page_cache_freeable(struct page *page)

_