patch-2.1.127 linux/mm/slab.c

diff -u --recursive --new-file v2.1.126/linux/mm/slab.c linux/mm/slab.c
@@ -650,9 +650,9 @@
 	}
 
 	slabp->s_magic = SLAB_MAGIC_DESTROYED;
-	kmem_freepages(cachep, slabp->s_mem-slabp->s_offset);
 	if (slabp->s_index)
 		kmem_cache_free(cachep->c_index_cachep, slabp->s_index);
+	kmem_freepages(cachep, slabp->s_mem-slabp->s_offset);
 	if (SLAB_OFF_SLAB(cachep->c_flags))
 		kmem_cache_free(cache_slabp, slabp);
 }
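
The first hunk reorders the slab-destruction path: the off-slab bufctl index is now released before the slab's backing pages are handed back via kmem_freepages(). Reconstructed from the context lines above (the function is kmem_slab_destroy() in the 2.1 slab allocator, assumed from context), the resulting code reads roughly:

	slabp->s_magic = SLAB_MAGIC_DESTROYED;
	/* Free the off-slab index (if any) before returning the pages. */
	if (slabp->s_index)
		kmem_cache_free(cachep->c_index_cachep, slabp->s_index);
	kmem_freepages(cachep, slabp->s_mem-slabp->s_offset);
	if (SLAB_OFF_SLAB(cachep->c_flags))
		kmem_cache_free(cache_slabp, slabp);
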
@@ -1190,7 +1190,6 @@
 	cachep->c_dflags = SLAB_CFLGS_GROWN;
 
 	cachep->c_growing++;
-re_try:
 	spin_unlock_irqrestore(&cachep->c_spinlock, save_flags);
 
 	/* A series of memory allocations for a new slab.
@@ -1257,15 +1256,6 @@
 	kmem_freepages(cachep, objp); 
 failed:
 	spin_lock_irq(&cachep->c_spinlock);
-	if (local_flags != SLAB_ATOMIC && cachep->c_gfporder) {
-		/* For large order (>0) slabs, we try again.
-		 * Needed because the gfp() functions are not good at giving
-		 * out contiguous pages unless pushed (but do not push too hard).
-		 */
-		if (cachep->c_failures++ < 4 && cachep->c_freep == kmem_slab_end(cachep))
-			goto re_try;
-		cachep->c_failures = 1;	/* Memory is low, don't try as hard next time. */
-	}
 	cachep->c_growing--;
 	spin_unlock_irqrestore(&cachep->c_spinlock, save_flags);
 	return 0;
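
The remaining hunks drop the re_try label and the retry block from the grow path's failure handling: a failed multi-page allocation for a high-order (c_gfporder > 0) cache is no longer retried up to four times. Reconstructed from the context lines (the function is kmem_cache_grow() in this kernel, assumed from context), the failure path now simply undoes the growing marker and reports failure:

	kmem_freepages(cachep, objp);
failed:
	spin_lock_irq(&cachep->c_spinlock);
	/* No retry loop any more: drop the growing count and fail. */
	cachep->c_growing--;
	spin_unlock_irqrestore(&cachep->c_spinlock, save_flags);
	return 0;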
