From: Torsten Foertsch <torsten.foertsch@gmx.net>

show_smap first calls show_map and then prints its additional information to
the seq_file.  show_map checks whether everything it has to print fits into
the buffer and, if so, marks the current vma as written.  While that is
correct for show_map itself, it is not for show_smap: there the vma should be
marked as written only after the additional information has been written as
well.
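
The consequence is worse than a short read: on overflow seq_file throws away
the partially filled buffer, allocates a larger one and restarts from the vma
recorded in m->version, and because m_start treats the recorded address as
already done and resumes at the next vma, the half-printed entry is skipped
entirely.  Below is a minimal userspace sketch of the buffer accounting (a
simplified model for illustration only; fake_seq, emit and show_buggy are
made-up names, not kernel API).  Like seq_printf, emit saturates ->count at
->size on overflow:

	#include <stdio.h>
	#include <string.h>

	struct fake_seq {
		char buf[32];		/* deliberately tiny buffer */
		size_t size, count;
		unsigned long version;	/* resume hint, as in struct seq_file */
	};

	/* append to the buffer; on overflow saturate count, like seq_printf */
	static void emit(struct fake_seq *m, const char *s)
	{
		size_t len = strlen(s);

		if (m->count + len > m->size) {
			m->count = m->size;	/* mark overflow */
			return;
		}
		memcpy(m->buf + m->count, s, len);
		m->count += len;
	}

	/* buggy ordering: the resume hint is advanced mid-entry */
	static void show_buggy(struct fake_seq *m, unsigned long start)
	{
		emit(m, "08048000-08050000 r-xp\n");	/* the maps line fits */
		if (m->count < m->size)
			m->version = start;	/* marked done too early */
		emit(m, "Size: 32 kB\nRss: 16 kB\n");	/* stats overflow */
	}

	int main(void)
	{
		struct fake_seq m = { .size = sizeof(m.buf) };

		show_buggy(&m, 0x08048000);
		if (m.count == m.size)		/* overflow: retry needed */
			printf("overflow, yet version=%#lx already marks the "
			       "entry done; the retry skips it\n", m.version);
		return 0;
	}

Run, it reports the overflow while version already points at the entry, which
is exactly the state the retry path cannot recover from.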

This patch fixes the problem.  It moves the body of show_map into a new
function, show_map_internal, that is called with an additional struct
mem_size_stats * argument.  show_map then calls show_map_internal with a
NULL struct mem_size_stats *, whereas show_smap calls it with a pointer to
its accumulated statistics.  Now the final

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;

is done only if the whole entry, statistics included, fits into the buffer.
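
With this ordering an overflow while printing the statistics leaves
m->version untouched, so on the retry with an enlarged buffer m_start resumes
at the same vma and the entry is emitted in one piece.  Passing a NULL
mem_size_stats pointer to mark the plain maps case keeps both output paths in
one function instead of duplicating the formatting.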

Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 fs/proc/task_mmu.c |   57 ++++++++++++++++++++++++++++-------------------------
 1 files changed, 31 insertions(+), 26 deletions(-)

diff -puN fs/proc/task_mmu.c~smaps-reading-fix fs/proc/task_mmu.c
--- devel/fs/proc/task_mmu.c~smaps-reading-fix	2005-08-06 15:07:43.000000000 -0700
+++ devel-akpm/fs/proc/task_mmu.c	2005-08-06 15:07:43.000000000 -0700
@@ -92,7 +92,16 @@ static void pad_len_spaces(struct seq_fi
 	seq_printf(m, "%*c", len, ' ');
 }
 
-static int show_map(struct seq_file *m, void *v)
+struct mem_size_stats
+{
+	unsigned long resident;
+	unsigned long shared_clean;
+	unsigned long shared_dirty;
+	unsigned long private_clean;
+	unsigned long private_dirty;
+};
+
+static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
 {
 	struct task_struct *task = m->private;
 	struct vm_area_struct *vma = v;
@@ -146,19 +155,31 @@ static int show_map(struct seq_file *m, 
 		}
 	}
 	seq_putc(m, '\n');
+
+	if (mss)
+		seq_printf(m,
+			   "Size:          %8lu kB\n"
+			   "Rss:           %8lu kB\n"
+			   "Shared_Clean:  %8lu kB\n"
+			   "Shared_Dirty:  %8lu kB\n"
+			   "Private_Clean: %8lu kB\n"
+			   "Private_Dirty: %8lu kB\n",
+			   (vma->vm_end - vma->vm_start) >> 10,
+			   mss->resident >> 10,
+			   mss->shared_clean  >> 10,
+			   mss->shared_dirty  >> 10,
+			   mss->private_clean >> 10,
+			   mss->private_dirty >> 10);
+
 	if (m->count < m->size)  /* vma is copied successfully */
 		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
 	return 0;
 }
 
-struct mem_size_stats
+static int show_map(struct seq_file *m, void *v)
 {
-	unsigned long resident;
-	unsigned long shared_clean;
-	unsigned long shared_dirty;
-	unsigned long private_clean;
-	unsigned long private_dirty;
-};
+	return show_map_internal(m, v, NULL);
+}
 
 static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end,
@@ -248,33 +269,17 @@ static int show_smap(struct seq_file *m,
 {
 	struct vm_area_struct *vma = v;
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long vma_len = (vma->vm_end - vma->vm_start);
 	struct mem_size_stats mss;
 
 	memset(&mss, 0, sizeof mss);
 
-	show_map(m, v);
-
 	if (mm) {
 		spin_lock(&mm->page_table_lock);
 		smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
 		spin_unlock(&mm->page_table_lock);
 	}
 
-	seq_printf(m,
-		   "Size:          %8lu kB\n"
-		   "Rss:           %8lu kB\n"
-		   "Shared_Clean:  %8lu kB\n"
-		   "Shared_Dirty:  %8lu kB\n"
-		   "Private_Clean: %8lu kB\n"
-		   "Private_Dirty: %8lu kB\n",
-		   vma_len >> 10,
-		   mss.resident >> 10,
-		   mss.shared_clean  >> 10,
-		   mss.shared_dirty  >> 10,
-		   mss.private_clean >> 10,
-		   mss.private_dirty >> 10);
-	return 0;
+	return show_map_internal(m, v, &mss);
 }
 
 static void *m_start(struct seq_file *m, loff_t *pos)
@@ -288,7 +293,7 @@ static void *m_start(struct seq_file *m,
 	/*
 	 * We remember last_addr rather than next_addr to hit with
 	 * mmap_cache most of the time. We have zero last_addr at
-	 * the begining and also after lseek. We will have -1 last_addr
+	 * the beginning and also after lseek. We will have -1 last_addr
 	 * after the end of the vmas.
 	 */
 
_