From: Hugh Dickins <hugh@veritas.com>

Anton recently removed SLAB_HWCACHE_ALIGN from the fs inode caches; now do the
same for the tmpfs inode cache: it fits 9 inodes per page where only 7 fit before.

The cache creation was also passing SLAB_RECLAIM_ACCOUNT, but that's wrong: tmpfs
inodes are not reclaimed under pressure; and hugetlbfs had copied that flag too.

Rearrange shmem_inode_info fields so those most likely to be needed are most
likely to be in the same cacheline as the spinlock guarding them.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/fs/hugetlbfs/inode.c     |    3 +--
 25-akpm/include/linux/shmem_fs.h |   14 +++++++-------
 25-akpm/mm/shmem.c               |    3 +--
 3 files changed, 9 insertions(+), 11 deletions(-)

diff -puN fs/hugetlbfs/inode.c~shmem-dont-slab_hwcache_align fs/hugetlbfs/inode.c
--- 25/fs/hugetlbfs/inode.c~shmem-dont-slab_hwcache_align	2004-09-05 21:24:32.948496176 -0700
+++ 25-akpm/fs/hugetlbfs/inode.c	2004-09-05 21:24:32.956494960 -0700
@@ -802,8 +802,7 @@ static int __init init_hugetlbfs_fs(void
 
 	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
 					sizeof(struct hugetlbfs_inode_info),
-					0, SLAB_RECLAIM_ACCOUNT,
-					init_once, NULL);
+					0, 0, init_once, NULL);
 	if (hugetlbfs_inode_cachep == NULL)
 		return -ENOMEM;
 
diff -puN include/linux/shmem_fs.h~shmem-dont-slab_hwcache_align include/linux/shmem_fs.h
--- 25/include/linux/shmem_fs.h~shmem-dont-slab_hwcache_align	2004-09-05 21:24:32.950495872 -0700
+++ 25-akpm/include/linux/shmem_fs.h	2004-09-05 21:24:32.956494960 -0700
@@ -10,14 +10,14 @@
 
 struct shmem_inode_info {
 	spinlock_t		lock;
-	unsigned long		next_index;
-	swp_entry_t		i_direct[SHMEM_NR_DIRECT]; /* for the first blocks */
-	struct page	       *i_indirect; /* indirect blocks */
-	unsigned long		alloced;    /* data pages allocated to file */
-	unsigned long		swapped;    /* subtotal assigned to swap */
 	unsigned long		flags;
-	struct shared_policy     policy;
-	struct list_head	list;
+	unsigned long		alloced;	/* data pages alloced to file */
+	unsigned long		swapped;	/* subtotal assigned to swap */
+	unsigned long		next_index;	/* highest alloced index + 1 */
+	struct shared_policy	policy;		/* NUMA memory alloc policy */
+	struct page		*i_indirect;	/* top indirect blocks page */
+	swp_entry_t		i_direct[SHMEM_NR_DIRECT]; /* first blocks */
+	struct list_head	list;		/* chain of all shmem inodes */
 	struct inode		vfs_inode;
 };
 
diff -puN mm/shmem.c~shmem-dont-slab_hwcache_align mm/shmem.c
--- 25/mm/shmem.c~shmem-dont-slab_hwcache_align	2004-09-05 21:24:32.952495568 -0700
+++ 25-akpm/mm/shmem.c	2004-09-05 21:24:32.958494656 -0700
@@ -1897,8 +1897,7 @@ static int init_inodecache(void)
 {
 	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
 				sizeof(struct shmem_inode_info),
-				0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
-				init_once, NULL);
+				0, 0, init_once, NULL);
 	if (shmem_inode_cachep == NULL)
 		return -ENOMEM;
 	return 0;
_