From: Hugh Dickins <hugh@veritas.com>

The get_user_pages force write case (from ptrace) expects that a single call
to handle_mm_fault is enough to give it a page it can safely write to.  This
implies that when handling a write access to a page_mkwrite area, do_file_page
must now call do_wp_page itself, so that page_mkwrite is called and the pte is
(probably) made writable: that cannot safely be left to a subsequent fault.

Clarify today's flow of control in do_file_page: it is only called for a
pte_file entry, which only appears in a non-linear vma, which is always shared
and must provide a ->populate method: so the do_no_page path is never taken.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 mm/memory.c |   39 +++++++++++++++++++++++++--------------
 1 files changed, 25 insertions(+), 14 deletions(-)

diff -puN mm/memory.c~fix-page-becoming-writable-in-do_file_page mm/memory.c
--- devel/mm/memory.c~fix-page-becoming-writable-in-do_file_page	2005-07-27 16:03:23.000000000 -0700
+++ devel-akpm/mm/memory.c	2005-07-27 16:05:07.000000000 -0700
@@ -1984,33 +1984,44 @@ oom:
  * from the encoded file_pte if possible. This enables swappable
  * nonlinear vmas.
  */
-static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma,
-	unsigned long address, int write_access, pte_t *pte, pmd_t *pmd, pte_t entry)
+static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
+		unsigned long address, int write_access, pte_t *pte,
+		pmd_t *pmd, pte_t entry)
 {
 	unsigned long pgoff;
 	int err;
 
-	BUG_ON(!vma->vm_ops || !vma->vm_ops->nopage);
-	/*
-	 * Fall back to the linear mapping if the fs does not support
-	 * ->populate:
-	 */
-	if (!vma->vm_ops || !vma->vm_ops->populate || 
-			(write_access && !(vma->vm_flags & VM_SHARED))) {
-		pte_clear(mm, address, pte);
-		return do_no_page(mm, vma, address, write_access, pte, pmd, entry);
-	}
+	BUG_ON(!vma->vm_ops || !vma->vm_ops->populate);
+	BUG_ON(!(vma->vm_flags & VM_SHARED));
 
 	pgoff = pte_to_pgoff(entry);
-
+again:
 	pte_unmap(pte);
 	page_table_atomic_stop(mm);
 
-	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, vma->vm_page_prot, pgoff, 0);
+	err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
+					vma->vm_page_prot, pgoff, 0);
 	if (err == -ENOMEM)
 		return VM_FAULT_OOM;
 	if (err)
 		return VM_FAULT_SIGBUS;
+	/*
+	 * For the get_user_pages force write case, we must make sure that
+	 * page_mkwrite is called by this invocation of handle_mm_fault.
+	 */
+	if (write_access && vma->vm_ops->page_mkwrite) {
+		pte_t entry;
+		int ret;
+
+		page_table_atomic_start(mm);
+		pte = pte_offset_map(pmd, address);
+		entry = *pte;
+		if (!pte_present(entry))
+			goto again;
+		ret = do_wp_page(mm, vma, address, pte, pmd, entry);
+		if (ret != VM_FAULT_MINOR)
+			return ret;
+	}
 	return VM_FAULT_MAJOR;
 }
 
_