From: Hugh Dickins <hugh@veritas.com>

One of the callers of flush_dcache_page is do_generic_mapping_read, where
the file is read without i_sem and without the page lock: concurrent
truncation may at any moment remove the page from the page cache, NULLing
->mapping, making flush_dcache_page liable to oops.  Put the result of
page_mapping in a local variable and apply mapping_mapped to that (if we
were to check for NULL within mapping_mapped itself, it's unclear whether
it should answer yes or no for an unmapped page).

parisc and arm do have other locking unsafety in their i_mmap(_shared)
searching, but that's a larger issue to be dealt with down the line.


---

 25-akpm/arch/arm/mm/fault-armv.c        |   11 ++++++++---
 25-akpm/arch/mips/mm/cache.c            |    3 ++-
 25-akpm/arch/parisc/kernel/cache.c      |    7 ++++---
 25-akpm/arch/sparc64/mm/init.c          |    3 ++-
 25-akpm/include/asm-arm/cacheflush.h    |    4 +++-
 25-akpm/include/asm-parisc/cacheflush.h |    4 +++-
 25-akpm/include/asm-sh/pgalloc.h        |    7 +++----
 7 files changed, 25 insertions(+), 14 deletions(-)

diff -puN arch/arm/mm/fault-armv.c~rmap-4-flush_dcache-revisited arch/arm/mm/fault-armv.c
--- 25/arch/arm/mm/fault-armv.c~rmap-4-flush_dcache-revisited	2004-04-12 21:08:28.830894920 -0700
+++ 25-akpm/arch/arm/mm/fault-armv.c	2004-04-12 21:09:34.326938008 -0700
@@ -186,19 +186,20 @@ no_pmd:
 
 void __flush_dcache_page(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	struct mm_struct *mm = current->active_mm;
 	struct list_head *l;
 
 	__cpuc_flush_dcache_page(page_address(page));
 
-	if (!page_mapping(page))
+	if (!mapping)
 		return;
 
 	/*
 	 * With a VIVT cache, we need to also write back
 	 * and invalidate any user data.
 	 */
-	list_for_each(l, &page->mapping->i_mmap_shared) {
+	list_for_each(l, &mapping->i_mmap_shared) {
 		struct vm_area_struct *mpnt;
 		unsigned long off;
 
@@ -224,11 +225,15 @@ void __flush_dcache_page(struct page *pa
 static void
 make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
 {
+	struct address_space *mapping = page_mapping(page);
 	struct list_head *l;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long pgoff;
 	int aliases = 0;
 
+	if (!mapping)
+		return;
+
 	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
 
 	/*
@@ -236,7 +241,7 @@ make_coherent(struct vm_area_struct *vma
 	 * space, then we need to handle them specially to maintain
 	 * cache coherency.
 	 */
-	list_for_each(l, &page->mapping->i_mmap_shared) {
+	list_for_each(l, &mapping->i_mmap_shared) {
 		struct vm_area_struct *mpnt;
 		unsigned long off;
 
diff -puN arch/mips/mm/cache.c~rmap-4-flush_dcache-revisited arch/mips/mm/cache.c
--- 25/arch/mips/mm/cache.c~rmap-4-flush_dcache-revisited	2004-04-12 21:08:28.831894768 -0700
+++ 25-akpm/arch/mips/mm/cache.c	2004-04-12 21:08:28.851891728 -0700
@@ -55,9 +55,10 @@ asmlinkage int sys_cacheflush(void *addr
 
 void flush_dcache_page(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	unsigned long addr;
 
-	if (page_mapping(page) && !mapping_mapped(page->mapping)) {
+	if (mapping && !mapping_mapped(mapping)) {
 		SetPageDcacheDirty(page);
 		return;
 	}
diff -puN arch/parisc/kernel/cache.c~rmap-4-flush_dcache-revisited arch/parisc/kernel/cache.c
--- 25/arch/parisc/kernel/cache.c~rmap-4-flush_dcache-revisited	2004-04-12 21:08:28.832894616 -0700
+++ 25-akpm/arch/parisc/kernel/cache.c	2004-04-12 21:08:28.851891728 -0700
@@ -229,16 +229,17 @@ void disable_sr_hashing(void)
 
 void __flush_dcache_page(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	struct mm_struct *mm = current->active_mm;
 	struct list_head *l;
 
 	flush_kernel_dcache_page(page_address(page));
 
-	if (!page_mapping(page))
+	if (!mapping)
 		return;
 	/* check shared list first if it's not empty...it's usually
 	 * the shortest */
-	list_for_each(l, &page->mapping->i_mmap_shared) {
+	list_for_each(l, &mapping->i_mmap_shared) {
 		struct vm_area_struct *mpnt;
 		unsigned long off;
 
@@ -267,7 +268,7 @@ void __flush_dcache_page(struct page *pa
 
 	/* then check private mapping list for read only shared mappings
 	 * which are flagged by VM_MAYSHARE */
-	list_for_each(l, &page->mapping->i_mmap) {
+	list_for_each(l, &mapping->i_mmap) {
 		struct vm_area_struct *mpnt;
 		unsigned long off;
 
diff -puN arch/sparc64/mm/init.c~rmap-4-flush_dcache-revisited arch/sparc64/mm/init.c
--- 25/arch/sparc64/mm/init.c~rmap-4-flush_dcache-revisited	2004-04-12 21:08:28.834894312 -0700
+++ 25-akpm/arch/sparc64/mm/init.c	2004-04-12 21:08:28.853891424 -0700
@@ -224,10 +224,11 @@ void update_mmu_cache(struct vm_area_str
 
 void flush_dcache_page(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	int dirty = test_bit(PG_dcache_dirty, &page->flags);
 	int dirty_cpu = dcache_dirty_cpu(page);
 
-	if (page_mapping(page) && !mapping_mapped(page->mapping)) {
+	if (mapping && !mapping_mapped(mapping)) {
 		if (dirty) {
 			if (dirty_cpu == smp_processor_id())
 				return;
diff -puN include/asm-arm/cacheflush.h~rmap-4-flush_dcache-revisited include/asm-arm/cacheflush.h
--- 25/include/asm-arm/cacheflush.h~rmap-4-flush_dcache-revisited	2004-04-12 21:08:28.836894008 -0700
+++ 25-akpm/include/asm-arm/cacheflush.h	2004-04-12 21:08:28.854891272 -0700
@@ -295,7 +295,9 @@ extern void __flush_dcache_page(struct p
 
 static inline void flush_dcache_page(struct page *page)
 {
-	if (page_mapping(page) && !mapping_mapped(page->mapping))
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
 	else
 		__flush_dcache_page(page);
diff -puN include/asm-parisc/cacheflush.h~rmap-4-flush_dcache-revisited include/asm-parisc/cacheflush.h
--- 25/include/asm-parisc/cacheflush.h~rmap-4-flush_dcache-revisited	2004-04-12 21:08:28.837893856 -0700
+++ 25-akpm/include/asm-parisc/cacheflush.h	2004-04-12 21:08:28.854891272 -0700
@@ -69,7 +69,9 @@ extern void __flush_dcache_page(struct p
 
 static inline void flush_dcache_page(struct page *page)
 {
-	if (page_mapping(page) && !mapping_mapped(page->mapping)) {
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping)) {
 		set_bit(PG_dcache_dirty, &page->flags);
 	} else {
 		__flush_dcache_page(page);
diff -puN include/asm-sh/pgalloc.h~rmap-4-flush_dcache-revisited include/asm-sh/pgalloc.h
--- 25/include/asm-sh/pgalloc.h~rmap-4-flush_dcache-revisited	2004-04-12 21:08:28.839893552 -0700
+++ 25-akpm/include/asm-sh/pgalloc.h	2004-04-12 21:08:28.854891272 -0700
@@ -97,12 +97,11 @@ static inline pte_t ptep_get_and_clear(p
 
 	pte_clear(ptep);
 	if (!pte_not_present(pte)) {
-		struct page *page;
 		unsigned long pfn = pte_pfn(pte);
 		if (pfn_valid(pfn)) {
-			page = pfn_to_page(pfn);
-			if (!page_mapping(page) ||
-			    !mapping_writably_mapped(page->mapping))
+			struct page *page = pfn_to_page(pfn);
+			struct address_space *mapping = page_mapping(page);
+			if (!mapping || !mapping_writably_mapped(mapping))
 				__clear_bit(PG_mapped, &page->flags);
 		}
 	}

_