From: Manfred Spraul <manfred@colorfullife.com>

Maintenance work from Alexander Nyberg <alexn@telia.com>

With the patch applied,

	echo "size-4096 0 0 0" > /proc/slabinfo

walks the objects in the full slabs of the size-4096 cache, printing
the address of the caller that allocated each object.
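
The caller's address is recorded at allocation time in the object's
kmem_bufctl_t entry, which is unused while the object is allocated.
This is why kmem_bufctl_t grows from unsigned int to unsigned long (a
code address has to fit in it), and why the double-free check in
free_block(), which expects to find BUFCTL_FREE in that entry, has to
be disabled.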

It is for leak detection.
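
The dump is triggered through the existing slabinfo_write() tuning
path: a tuning string that fails validation (limit < 1, as with the
all-zeroes string above) now additionally dumps the cache's full
slabs; the bogus tuning values are still ignored and success is
returned as before.  Output appears only when slab debugging (DEBUG
in mm/slab.c) is enabled, since only the debug allocation path
records the caller.  With made-up addresses and symbols, each object
produces one line in the kernel log along the lines of:

	obj c13f2000/0: c0148a30 <alloc_skb+0x30/0x110>
	obj c13f2000/1: c0162a4f <seq_open+0x1f/0x60>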

Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 mm/slab.c |   46 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 43 insertions(+), 3 deletions(-)

diff -puN mm/slab.c~slab-leak-detector mm/slab.c
--- devel/mm/slab.c~slab-leak-detector	2005-08-29 22:54:48.000000000 -0700
+++ devel-akpm/mm/slab.c	2005-08-29 22:54:48.000000000 -0700
@@ -199,7 +199,7 @@
  * is less than 512 (PAGE_SIZE<<3), but greater than 256.
  */
 
-typedef unsigned int kmem_bufctl_t;
+typedef unsigned long kmem_bufctl_t;
 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-2)
@@ -2341,7 +2341,7 @@ bad:
 				cachep->name, cachep->num, slabp, slabp->inuse);
 		for (i=0;i<sizeof(slabp)+cachep->num*sizeof(kmem_bufctl_t);i++) {
 			if ((i%16)==0)
-				printk("\n%03x:", i);
+				printk("\n%04x:", i);
 			printk(" %02x", ((unsigned char*)slabp)[i]);
 		}
 		printk("\n");
@@ -2493,6 +2493,15 @@ cache_alloc_debugcheck_after(kmem_cache_
 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
 	}
+	{
+		int objnr;
+		struct slab *slabp;
+
+		slabp = GET_PAGE_SLAB(virt_to_page(objp));
+
+		objnr = (objp - slabp->s_mem) / cachep->objsize;
+		slab_bufctl(slabp)[objnr] = (unsigned long)caller;
+	}
 	objp += obj_dbghead(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON) {
 		unsigned long	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -2628,7 +2637,7 @@ static void free_block(kmem_cache_t *cac
 		check_slabp(cachep, slabp);
 
 
-#if DEBUG
+#if 0 /* disabled, not compatible with leak detection */
 		if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
 			printk(KERN_ERR "slab: double free detected in cache "
 					"'%s', objp %p\n", cachep->name, objp);
@@ -3507,6 +3516,36 @@ struct seq_operations slabinfo_op = {
 	.show	= s_show,
 };
 
+static void do_dump_slabp(kmem_cache_t *cachep)
+{
+#if DEBUG
+	struct list_head *q;
+	int node;
+
+	check_irq_on();
+	spin_lock_irq(&cachep->spinlock);
+	for_each_online_node(node) {
+		struct kmem_list3 *rl3 = cachep->nodelists[node];
+		spin_lock(&rl3->list_lock);
+
+		list_for_each(q, &rl3->slabs_full) {
+			int i;
+			struct slab *slabp = list_entry(q, struct slab, list);
+
+			for (i = 0; i < cachep->num; i++) {
+				unsigned long sym = slab_bufctl(slabp)[i];
+
+				printk("obj %p/%d: %p", slabp, i, (void *)sym);
+				print_symbol(" <%s>", sym);
+				printk("\n");
+			}
+		}
+		spin_unlock(&rl3->list_lock);
+	}
+	spin_unlock_irq(&cachep->spinlock);
+#endif
+}
+
 #define MAX_SLABINFO_WRITE 128
 /**
  * slabinfo_write - Tuning for the slab allocator
@@ -3547,6 +3586,7 @@ ssize_t slabinfo_write(struct file *file
 			    batchcount < 1 ||
 			    batchcount > limit ||
 			    shared < 0) {
+				do_dump_slabp(cachep);
 				res = 0;
 			} else {
 				res = do_tune_cpucache(cachep, limit,
_