patch-2.4.17 linux/mm/slab.c
- Lines: 61
- Date: Fri Dec 21 16:40:33 2001
- Orig file: linux-2.4.16/mm/slab.c
- Orig date: Tue Sep 18 21:16:26 2001
diff -Naur -X /home/marcelo/lib/dontdiff linux-2.4.16/mm/slab.c linux/mm/slab.c
@@ -109,9 +109,11 @@
 #if DEBUG
 # define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
-			 SLAB_NO_REAP | SLAB_CACHE_DMA)
+			 SLAB_NO_REAP | SLAB_CACHE_DMA | \
+			 SLAB_MUST_HWCACHE_ALIGN)
 #else
-# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | SLAB_CACHE_DMA)
+# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
+			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN)
 #endif
 
 /*
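With the flag accepted by CREATE_MASK, a cache creator can request strict hardware-cache alignment directly. A minimal sketch, assuming the 2.4 kmem_cache_create() signature; the "foo" cache and structure are illustrative, not from the patch:

	#include <linux/init.h>
	#include <linux/slab.h>

	struct foo {
		unsigned long state;
	};

	static kmem_cache_t *foo_cachep;

	static int __init foo_init(void)
	{
		/* SLAB_MUST_HWCACHE_ALIGN is now part of CREATE_MASK, so
		 * the creation-flag sanity check in kmem_cache_create()
		 * accepts it in both DEBUG and non-DEBUG builds. */
		foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
					       SLAB_MUST_HWCACHE_ALIGN,
					       NULL, NULL);
		return foo_cachep ? 0 : -ENOMEM;
	}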
@@ -649,7 +651,7 @@
 		flags &= ~SLAB_POISON;
 	}
 #if FORCED_DEBUG
-	if (size < (PAGE_SIZE>>3))
+	if ((size < (PAGE_SIZE>>3)) && !(flags & SLAB_MUST_HWCACHE_ALIGN))
 		/*
 		 * do not red zone large object, causes severe
 		 * fragmentation.
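Red zoning places a guard word before and after every object, which would push objects off the boundary a SLAB_MUST_HWCACHE_ALIGN creator demanded, so forced debugging now leaves such caches alone. The resulting policy as a sketch (the flag update following the comment is paraphrased from the surrounding code, not quoted):

	/* FORCED_DEBUG sketch: red zone only small objects, and never
	 * those from caches that insist on exact hardware-cache
	 * alignment, since the guard words would misalign them. */
	if ((size < (PAGE_SIZE >> 3)) && !(flags & SLAB_MUST_HWCACHE_ALIGN))
		flags |= SLAB_RED_ZONE;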
@@ -1280,10 +1282,9 @@
 })
 
 #ifdef CONFIG_SMP
-void* kmem_cache_alloc_batch(kmem_cache_t* cachep, int flags)
+void* kmem_cache_alloc_batch(kmem_cache_t* cachep, cpucache_t* cc, int flags)
 {
 	int batchcount = cachep->batchcount;
-	cpucache_t* cc = cc_data(cachep);
 
 	spin_lock(&cachep->spinlock);
 	while (batchcount--) {
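kmem_cache_alloc_batch() previously repeated the per-CPU lookup its caller had just performed. In 2.4 that lookup is a macro over the cache's per-CPU cpucache array (shown as in mm/slab.c):

	#define cc_data(cachep) \
		((cachep)->cpudata[smp_processor_id()])

Passing the already-fetched cpucache_t* down instead guarantees the batch refill goes into the very per-CPU pool the caller observed as empty, rather than whatever a second lookup would return.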
@@ -1332,7 +1333,7 @@
 		objp = cc_entry(cc)[--cc->avail];
 	} else {
 		STATS_INC_ALLOCMISS(cachep);
-		objp = kmem_cache_alloc_batch(cachep,flags);
+		objp = kmem_cache_alloc_batch(cachep,cc,flags);
 		if (!objp)
 			goto alloc_new_slab_nolock;
 	}
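This is the caller-side half of the change. Condensed, the SMP fast path now reads as follows (a sketch: the real function's locking, NULL-cc handling and slow path are omitted):

	cpucache_t *cc = cc_data(cachep);	/* single per-CPU lookup */
	void *objp;

	if (cc->avail) {
		/* hit: pop an object from the per-CPU array */
		STATS_INC_ALLOCHIT(cachep);
		objp = cc_entry(cc)[--cc->avail];
	} else {
		/* miss: refill and allocate from the same cc */
		STATS_INC_ALLOCMISS(cachep);
		objp = kmem_cache_alloc_batch(cachep, cc, flags);
	}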
@@ -1920,12 +1921,13 @@
 #endif
 #ifdef CONFIG_SMP
 	{
+		cpucache_t *cc = cc_data(cachep);
 		unsigned int batchcount = cachep->batchcount;
 		unsigned int limit;
 
-		if (cc_data(cachep))
-			limit = cc_data(cachep)->limit;
-		else
+		if (cc)
+			limit = cc->limit;
+		else
 			limit = 0;
 		len += sprintf(page+len, " : %4u %4u",
 			limit, batchcount);
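The /proc/slabinfo path gets the same treatment: fetch cc_data(cachep) once into a local and test that, instead of expanding the macro twice. A condensed equivalent (sketch):

	cpucache_t *cc = cc_data(cachep);
	unsigned int limit = cc ? cc->limit : 0;	/* 0 until the cpucache is set up */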