diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/fs/buffer.c 300-page_lock/fs/buffer.c
--- 290-gfp_node_strict/fs/buffer.c	2003-10-14 15:50:28.000000000 -0700
+++ 300-page_lock/fs/buffer.c	2003-11-05 13:46:27.000000000 -0800
@@ -865,14 +865,14 @@ int __set_page_dirty_buffers(struct page
 	spin_unlock(&mapping->private_lock);
 
 	if (!TestSetPageDirty(page)) {
-		spin_lock(&mapping->page_lock);
+		mapping_wrlock(&mapping->page_lock);
 		if (page->mapping) {	/* Race with truncate? */
 			if (!mapping->backing_dev_info->memory_backed)
 				inc_page_state(nr_dirty);
 			list_del(&page->list);
 			list_add(&page->list, &mapping->dirty_pages);
 		}
-		spin_unlock(&mapping->page_lock);
+		mapping_wrunlock(&mapping->page_lock);
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	}
 	
diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/fs/fs-writeback.c 300-page_lock/fs/fs-writeback.c
--- 290-gfp_node_strict/fs/fs-writeback.c	2003-10-14 15:50:28.000000000 -0700
+++ 300-page_lock/fs/fs-writeback.c	2003-11-05 13:46:28.000000000 -0800
@@ -152,10 +152,10 @@ __sync_single_inode(struct inode *inode,
 	 * read speculatively by this cpu before &= ~I_DIRTY  -- mikulas
 	 */
 
-	spin_lock(&mapping->page_lock);
+	mapping_wrlock(&mapping->page_lock);
 	if (wait || !wbc->for_kupdate || list_empty(&mapping->io_pages))
 		list_splice_init(&mapping->dirty_pages, &mapping->io_pages);
-	spin_unlock(&mapping->page_lock);
+	mapping_wrunlock(&mapping->page_lock);
 	spin_unlock(&inode_lock);
 
 	do_writepages(mapping, wbc);
diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/fs/inode.c 300-page_lock/fs/inode.c
--- 290-gfp_node_strict/fs/inode.c	2003-11-05 13:45:44.000000000 -0800
+++ 300-page_lock/fs/inode.c	2003-11-05 13:46:28.000000000 -0800
@@ -187,7 +187,7 @@ void inode_init_once(struct inode *inode
 	INIT_LIST_HEAD(&inode->i_devices);
 	sema_init(&inode->i_sem, 1);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-	spin_lock_init(&inode->i_data.page_lock);
+	mapping_rwlock_init(&inode->i_data.page_lock);
 	init_MUTEX(&inode->i_data.i_shared_sem);
 	atomic_set(&inode->i_data.truncate_count, 0);
 	INIT_LIST_HEAD(&inode->i_data.private_list);
diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/fs/mpage.c 300-page_lock/fs/mpage.c
--- 290-gfp_node_strict/fs/mpage.c	2003-10-01 11:41:13.000000000 -0700
+++ 300-page_lock/fs/mpage.c	2003-11-05 13:46:28.000000000 -0800
@@ -635,7 +635,7 @@ mpage_writepages(struct address_space *m
 	if (get_block == NULL)
 		writepage = mapping->a_ops->writepage;
 
-	spin_lock(&mapping->page_lock);
+	mapping_wrlock(&mapping->page_lock);
 	while (!list_empty(&mapping->io_pages) && !done) {
 		struct page *page = list_entry(mapping->io_pages.prev,
 					struct page, list);
@@ -655,7 +655,7 @@ mpage_writepages(struct address_space *m
 		list_add(&page->list, &mapping->locked_pages);
 
 		page_cache_get(page);
-		spin_unlock(&mapping->page_lock);
+		mapping_wrunlock(&mapping->page_lock);
 
 		/*
 		 * At this point we hold neither mapping->page_lock nor
@@ -695,12 +695,12 @@ mpage_writepages(struct address_space *m
 			unlock_page(page);
 		}
 		page_cache_release(page);
-		spin_lock(&mapping->page_lock);
+		mapping_wrlock(&mapping->page_lock);
 	}
 	/*
 	 * Leave any remaining dirty pages on ->io_pages
 	 */
-	spin_unlock(&mapping->page_lock);
+	mapping_wrunlock(&mapping->page_lock);
 	if (bio)
 		mpage_bio_submit(WRITE, bio);
 	return ret;
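
The mpage_writepages() hunks above preserve the standard writeback pattern this patch has to keep intact everywhere: a page is taken off ->io_pages and parked on ->locked_pages while the lock is held for writing, pinned with page_cache_get(), and only then is the lock dropped for the ->writepage() call, which may sleep; the lock is retaken for the next page. A minimal userspace sketch of that loop, using pthreads in place of the kernel locks -- every name in it is illustrative, not from the patch:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct wpage {
	struct wpage *next;
	atomic_int refcount;
};

static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct wpage *io_list;		/* stand-in for ->io_pages */

static void do_io(struct wpage *p)	/* stands in for ->writepage() */
{
	(void)p;			/* ...write it out; may sleep... */
}

static void put_wpage(struct wpage *p)	/* page_cache_release() analogue */
{
	if (atomic_fetch_sub(&p->refcount, 1) == 1)
		free(p);
}

static void write_pages(void)
{
	pthread_rwlock_wrlock(&list_lock);
	while (io_list) {
		struct wpage *p = io_list;

		io_list = p->next;			/* list_del() */
		atomic_fetch_add(&p->refcount, 1);	/* pin: page_cache_get() */
		pthread_rwlock_unlock(&list_lock);	/* I/O may sleep */

		do_io(p);
		put_wpage(p);

		pthread_rwlock_wrlock(&list_lock);	/* back for the next one */
	}
	pthread_rwlock_unlock(&list_lock);
}

The pin is what makes the unlocked window safe: once the reference is held, the page cannot be freed even though the list lock is gone.
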
diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/include/linux/fs.h 300-page_lock/include/linux/fs.h
--- 290-gfp_node_strict/include/linux/fs.h	2003-11-05 13:45:44.000000000 -0800
+++ 300-page_lock/include/linux/fs.h	2003-11-05 13:46:28.000000000 -0800
@@ -19,6 +19,8 @@
 #include <linux/cache.h>
 #include <linux/radix-tree.h>
 #include <linux/kobject.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
 #include <asm/atomic.h>
 
 struct iovec;
@@ -315,11 +317,29 @@ struct address_space_operations {
 			loff_t offset, unsigned long nr_segs);
 };
 
+#if NR_CPUS > 8
+typedef rwlock_t mapping_rwlock_t;
+#define mapping_rdlock(lock)		read_lock(lock)
+#define mapping_rdunlock(lock)		read_unlock(lock)
+#define mapping_wrlock(lock)		write_lock(lock)
+#define mapping_wrunlock(lock)		write_unlock(lock)
+#define mapping_rwlock_init(lock)	rwlock_init(lock)
+#define MAPPING_RW_LOCK_UNLOCKED	RW_LOCK_UNLOCKED
+#else
+typedef spinlock_t mapping_rwlock_t;
+#define mapping_rdlock(lock)		spin_lock(lock)
+#define mapping_rdunlock(lock)		spin_unlock(lock)
+#define mapping_wrlock(lock)		spin_lock(lock)
+#define mapping_wrunlock(lock)		spin_unlock(lock)
+#define mapping_rwlock_init(lock)	spin_lock_init(lock)
+#define MAPPING_RW_LOCK_UNLOCKED	SPIN_LOCK_UNLOCKED
+#endif
+
 struct backing_dev_info;
 struct address_space {
 	struct inode		*host;		/* owner: inode, block_device */
 	struct radix_tree_root	page_tree;	/* radix tree of all pages */
-	spinlock_t		page_lock;	/* and spinlock protecting it */
+	mapping_rwlock_t	page_lock;	/* and spinlock protecting it */
 	struct list_head	clean_pages;	/* list of clean pages */
 	struct list_head	dirty_pages;	/* list of dirty pages */
 	struct list_head	locked_pages;	/* list of locked pages */
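
This fs.h hunk is the heart of the patch: mapping->page_lock changes type to mapping_rwlock_t, which is a real rwlock_t only when NR_CPUS > 8, where letting page-cache lookups run concurrently is worth the extra cost of a reader-writer lock. On smaller configurations every mapping_rd/wr operation collapses back to the plain spinlock, so uniprocessor and small-SMP kernels behave exactly as before. The same compile-time dispatch can be sketched in userspace with pthreads -- NCPUS and all of the map_* names below are made up for the illustration:

#include <pthread.h>

#define NCPUS 16	/* assumed build-time CPU count, like NR_CPUS */

#if NCPUS > 8
/* Big SMP: allow many lookups to hold the lock for reading at once. */
typedef pthread_rwlock_t map_lock_t;
#define map_rdlock(l)		pthread_rwlock_rdlock(l)
#define map_rdunlock(l)		pthread_rwlock_unlock(l)
#define map_wrlock(l)		pthread_rwlock_wrlock(l)
#define map_wrunlock(l)		pthread_rwlock_unlock(l)
#define map_lock_init(l)	pthread_rwlock_init((l), NULL)
#else
/* Small machine: a plain mutex is cheaper; readers just serialize. */
typedef pthread_mutex_t map_lock_t;
#define map_rdlock(l)		pthread_mutex_lock(l)
#define map_rdunlock(l)		pthread_mutex_unlock(l)
#define map_wrlock(l)		pthread_mutex_lock(l)
#define map_wrunlock(l)		pthread_mutex_unlock(l)
#define map_lock_init(l)	pthread_mutex_init((l), NULL)
#endif

Callers always state whether they need read or write access; which primitive that maps to is decided once, at compile time, so the abstraction costs nothing at runtime.
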
diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/ipc/shm.c 300-page_lock/ipc/shm.c
--- 290-gfp_node_strict/ipc/shm.c	2003-10-01 11:47:15.000000000 -0700
+++ 300-page_lock/ipc/shm.c	2003-11-05 13:46:28.000000000 -0800
@@ -380,9 +380,9 @@ static void shm_get_stat(unsigned long *
 
 		if (is_file_hugepages(shp->shm_file)) {
 			struct address_space *mapping = inode->i_mapping;
-			spin_lock(&mapping->page_lock);
+			mapping_wrlock(&mapping->page_lock);
 			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
-			spin_unlock(&mapping->page_lock);
+			mapping_wrunlock(&mapping->page_lock);
 		} else {
 			struct shmem_inode_info *info = SHMEM_I(inode);
 			spin_lock(&info->lock);
diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/mm/filemap.c 300-page_lock/mm/filemap.c
--- 290-gfp_node_strict/mm/filemap.c	2003-11-05 13:44:38.000000000 -0800
+++ 300-page_lock/mm/filemap.c	2003-11-05 13:46:28.000000000 -0800
@@ -111,9 +111,9 @@ void remove_from_page_cache(struct page 
 	if (unlikely(!PageLocked(page)))
 		PAGE_BUG(page);
 
-	spin_lock(&mapping->page_lock);
+	mapping_wrlock(&mapping->page_lock);
 	__remove_from_page_cache(page);
-	spin_unlock(&mapping->page_lock);
+	mapping_wrunlock(&mapping->page_lock);
 }
 
 static inline int sync_page(struct page *page)
@@ -145,9 +145,9 @@ static int __filemap_fdatawrite(struct a
 	if (mapping->backing_dev_info->memory_backed)
 		return 0;
 
-	spin_lock(&mapping->page_lock);
+	mapping_wrlock(&mapping->page_lock);
 	list_splice_init(&mapping->dirty_pages, &mapping->io_pages);
-	spin_unlock(&mapping->page_lock);
+	mapping_wrunlock(&mapping->page_lock);
 	ret = do_writepages(mapping, &wbc);
 	return ret;
 }
@@ -180,7 +180,7 @@ int filemap_fdatawait(struct address_spa
 
 restart:
 	progress = 0;
-	spin_lock(&mapping->page_lock);
+	mapping_wrlock(&mapping->page_lock);
         while (!list_empty(&mapping->locked_pages)) {
 		struct page *page;
 
@@ -194,7 +194,7 @@ restart:
 		if (!PageWriteback(page)) {
 			if (++progress > 32) {
 				if (need_resched()) {
-					spin_unlock(&mapping->page_lock);
+					mapping_wrunlock(&mapping->page_lock);
 					__cond_resched();
 					goto restart;
 				}
@@ -204,16 +204,16 @@ restart:
 
 		progress = 0;
 		page_cache_get(page);
-		spin_unlock(&mapping->page_lock);
+		mapping_wrunlock(&mapping->page_lock);
 
 		wait_on_page_writeback(page);
 		if (PageError(page))
 			ret = -EIO;
 
 		page_cache_release(page);
-		spin_lock(&mapping->page_lock);
+		mapping_wrlock(&mapping->page_lock);
 	}
-	spin_unlock(&mapping->page_lock);
+	mapping_wrunlock(&mapping->page_lock);
 
 	/* Check for outstanding write errors */
 	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
@@ -250,7 +250,7 @@ int add_to_page_cache(struct page *page,
 
 	if (error == 0) {
 		page_cache_get(page);
-		spin_lock(&mapping->page_lock);
+		mapping_wrlock(&mapping->page_lock);
 		error = radix_tree_insert(&mapping->page_tree, offset, page);
 		if (!error) {
 			SetPageLocked(page);
@@ -258,7 +258,7 @@ int add_to_page_cache(struct page *page,
 		} else {
 			page_cache_release(page);
 		}
-		spin_unlock(&mapping->page_lock);
+		mapping_wrunlock(&mapping->page_lock);
 		radix_tree_preload_end();
 	}
 	return error;
@@ -394,11 +394,11 @@ struct page * find_get_page(struct addre
 	 * We scan the hash list read-only. Addition to and removal from
 	 * the hash-list needs a held write-lock.
 	 */
-	spin_lock(&mapping->page_lock);
+	mapping_rdlock(&mapping->page_lock);
 	page = radix_tree_lookup(&mapping->page_tree, offset);
 	if (page)
 		page_cache_get(page);
-	spin_unlock(&mapping->page_lock);
+	mapping_rdunlock(&mapping->page_lock);
 	return page;
 }
 
@@ -411,11 +411,11 @@ struct page *find_trylock_page(struct ad
 {
 	struct page *page;
 
-	spin_lock(&mapping->page_lock);
+	mapping_rdlock(&mapping->page_lock);
 	page = radix_tree_lookup(&mapping->page_tree, offset);
 	if (page && TestSetPageLocked(page))
 		page = NULL;
-	spin_unlock(&mapping->page_lock);
+	mapping_rdunlock(&mapping->page_lock);
 	return page;
 }
 
@@ -437,15 +437,15 @@ struct page *find_lock_page(struct addre
 {
 	struct page *page;
 
-	spin_lock(&mapping->page_lock);
+	mapping_rdlock(&mapping->page_lock);
 repeat:
 	page = radix_tree_lookup(&mapping->page_tree, offset);
 	if (page) {
 		page_cache_get(page);
 		if (TestSetPageLocked(page)) {
-			spin_unlock(&mapping->page_lock);
+			mapping_rdunlock(&mapping->page_lock);
 			lock_page(page);
-			spin_lock(&mapping->page_lock);
+			mapping_rdlock(&mapping->page_lock);
 
 			/* Has the page been truncated while we slept? */
 			if (page->mapping != mapping || page->index != offset) {
@@ -455,7 +455,7 @@ repeat:
 			}
 		}
 	}
-	spin_unlock(&mapping->page_lock);
+	mapping_rdunlock(&mapping->page_lock);
 	return page;
 }
 
@@ -529,12 +529,12 @@ unsigned int find_get_pages(struct addre
 	unsigned int i;
 	unsigned int ret;
 
-	spin_lock(&mapping->page_lock);
+	mapping_rdlock(&mapping->page_lock);
 	ret = radix_tree_gang_lookup(&mapping->page_tree,
 				(void **)pages, start, nr_pages);
 	for (i = 0; i < ret; i++)
 		page_cache_get(pages[i]);
-	spin_unlock(&mapping->page_lock);
+	mapping_rdunlock(&mapping->page_lock);
 	return ret;
 }
 
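
The filemap.c hunks show the read/write split the new type enables: pure lookups (find_get_page(), find_trylock_page(), find_lock_page(), find_get_pages()) now take the lock for reading and can run in parallel, while anything that modifies the radix tree or the page lists (add_to_page_cache(), remove_from_page_cache(), the dirty-list splice, filemap_fdatawait()) still takes it for writing. The crucial detail on the read side is that a page reference is taken before the lock is dropped, so the page cannot be freed out from under the caller. A userspace sketch of the find_get_page() pattern, with an array standing in for the radix tree and all names invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct cpage {
	atomic_int refcount;
	/* ... payload ... */
};

#define TABLE_SIZE 1024
static struct cpage *table[TABLE_SIZE];	/* stand-in for the radix tree */
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct cpage *find_get(size_t idx)
{
	struct cpage *p;

	pthread_rwlock_rdlock(&table_lock);	/* mapping_rdlock() */
	p = table[idx % TABLE_SIZE];
	if (p)					/* pin before unlocking: */
		atomic_fetch_add(&p->refcount, 1);	/* page_cache_get() */
	pthread_rwlock_unlock(&table_lock);	/* mapping_rdunlock() */
	return p;			/* caller now owns a reference */
}
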
diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/mm/page-writeback.c 300-page_lock/mm/page-writeback.c
--- 290-gfp_node_strict/mm/page-writeback.c	2003-10-21 11:16:13.000000000 -0700
+++ 300-page_lock/mm/page-writeback.c	2003-11-05 13:46:28.000000000 -0800
@@ -469,12 +469,12 @@ int write_one_page(struct page *page, in
 	if (wait)
 		wait_on_page_writeback(page);
 
-	spin_lock(&mapping->page_lock);
+	mapping_wrlock(&mapping->page_lock);
 	list_del(&page->list);
 	if (test_clear_page_dirty(page)) {
 		list_add(&page->list, &mapping->locked_pages);
 		page_cache_get(page);
-		spin_unlock(&mapping->page_lock);
+		mapping_wrunlock(&mapping->page_lock);
 		ret = mapping->a_ops->writepage(page, &wbc);
 		if (ret == 0 && wait) {
 			wait_on_page_writeback(page);
@@ -484,7 +484,7 @@ int write_one_page(struct page *page, in
 		page_cache_release(page);
 	} else {
 		list_add(&page->list, &mapping->clean_pages);
-		spin_unlock(&mapping->page_lock);
+		mapping_wrunlock(&mapping->page_lock);
 		unlock_page(page);
 	}
 	return ret;
@@ -512,7 +512,7 @@ int __set_page_dirty_nobuffers(struct pa
 		struct address_space *mapping = page->mapping;
 
 		if (mapping) {
-			spin_lock(&mapping->page_lock);
+			mapping_wrlock(&mapping->page_lock);
 			if (page->mapping) {	/* Race with truncate? */
 				BUG_ON(page->mapping != mapping);
 				if (!mapping->backing_dev_info->memory_backed)
@@ -520,7 +520,7 @@ int __set_page_dirty_nobuffers(struct pa
 				list_del(&page->list);
 				list_add(&page->list, &mapping->dirty_pages);
 			}
-			spin_unlock(&mapping->page_lock);
+			mapping_wrunlock(&mapping->page_lock);
 			if (!PageSwapCache(page))
 				__mark_inode_dirty(mapping->host,
 							I_DIRTY_PAGES);
diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/mm/readahead.c 300-page_lock/mm/readahead.c
--- 290-gfp_node_strict/mm/readahead.c	2003-10-14 15:50:36.000000000 -0700
+++ 300-page_lock/mm/readahead.c	2003-11-05 13:46:28.000000000 -0800
@@ -229,7 +229,7 @@ __do_page_cache_readahead(struct address
 	/*
 	 * Preallocate as many pages as we will need.
 	 */
-	spin_lock(&mapping->page_lock);
+	mapping_rdlock(&mapping->page_lock);
 	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
 		unsigned long page_offset = offset + page_idx;
 		
@@ -240,16 +240,16 @@ __do_page_cache_readahead(struct address
 		if (page)
 			continue;
 
-		spin_unlock(&mapping->page_lock);
+		mapping_rdunlock(&mapping->page_lock);
 		page = page_cache_alloc_cold(mapping);
-		spin_lock(&mapping->page_lock);
+		mapping_rdlock(&mapping->page_lock);
 		if (!page)
 			break;
 		page->index = page_offset;
 		list_add(&page->list, &page_pool);
 		ret++;
 	}
-	spin_unlock(&mapping->page_lock);
+	mapping_rdunlock(&mapping->page_lock);
 
 	/*
 	 * Now start the IO.  We ignore I/O errors - if the page is not
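
__do_page_cache_readahead() only ever looks pages up -- the freshly allocated ones go onto a private page_pool list -- so it can use the read lock throughout. But page_cache_alloc_cold() can sleep, and neither flavour of the lock may be held across a blocking allocation, hence the unlock/alloc/relock dance in the loop above. A simplified userspace sketch of that dance, every name illustrative:

#include <pthread.h>
#include <stdlib.h>

#define NSLOTS 64

static pthread_rwlock_t pool_lock = PTHREAD_RWLOCK_INITIALIZER;
static void *slots[NSLOTS];		/* stand-in for the page cache */

/* Preallocate objects for the missing slots into a caller-supplied
 * array (the analogue of the local page_pool list); returns how many
 * were allocated. */
static int prealloc(void *pool[], unsigned long start, unsigned long n)
{
	unsigned long i;
	int ret = 0;

	pthread_rwlock_rdlock(&pool_lock);
	for (i = 0; i < n; i++) {
		void *obj;

		if (slots[(start + i) % NSLOTS])	/* already cached */
			continue;

		pthread_rwlock_unlock(&pool_lock);	/* may not block locked */
		obj = malloc(128);			/* blocking allocation */
		pthread_rwlock_rdlock(&pool_lock);
		if (!obj)
			break;
		pool[ret++] = obj;
	}
	pthread_rwlock_unlock(&pool_lock);
	return ret;
}

After every relock the loop makes no assumptions about what it saw earlier; a slot found empty before the drop may have been filled in the meantime, which is why the real code rechecks with radix_tree_lookup() each iteration.
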
diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/mm/swap_state.c 300-page_lock/mm/swap_state.c
--- 290-gfp_node_strict/mm/swap_state.c	2003-10-01 11:35:37.000000000 -0700
+++ 300-page_lock/mm/swap_state.c	2003-11-05 13:46:28.000000000 -0800
@@ -25,7 +25,7 @@ extern struct address_space_operations s
 
 struct address_space swapper_space = {
 	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC),
-	.page_lock	= SPIN_LOCK_UNLOCKED,
+	.page_lock	= MAPPING_RW_LOCK_UNLOCKED,
 	.clean_pages	= LIST_HEAD_INIT(swapper_space.clean_pages),
 	.dirty_pages	= LIST_HEAD_INIT(swapper_space.dirty_pages),
 	.io_pages	= LIST_HEAD_INIT(swapper_space.io_pages),
@@ -182,9 +182,9 @@ void delete_from_swap_cache(struct page 
   
 	entry.val = page->index;
 
-	spin_lock(&swapper_space.page_lock);
+	mapping_wrlock(&swapper_space.page_lock);
 	__delete_from_swap_cache(page);
-	spin_unlock(&swapper_space.page_lock);
+	mapping_wrunlock(&swapper_space.page_lock);
 
 	swap_free(entry);
 	page_cache_release(page);
@@ -195,8 +195,8 @@ int move_to_swap_cache(struct page *page
 	struct address_space *mapping = page->mapping;
 	int err;
 
-	spin_lock(&swapper_space.page_lock);
-	spin_lock(&mapping->page_lock);
+	mapping_wrlock(&swapper_space.page_lock);
+	mapping_wrlock(&mapping->page_lock);
 
 	err = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
 	if (!err) {
@@ -204,8 +204,8 @@ int move_to_swap_cache(struct page *page
 		___add_to_page_cache(page, &swapper_space, entry.val);
 	}
 
-	spin_unlock(&mapping->page_lock);
-	spin_unlock(&swapper_space.page_lock);
+	mapping_wrunlock(&mapping->page_lock);
+	mapping_wrunlock(&swapper_space.page_lock);
 
 	if (!err) {
 		if (!swap_duplicate(entry))
@@ -231,8 +231,8 @@ int move_from_swap_cache(struct page *pa
 
 	entry.val = page->index;
 
-	spin_lock(&swapper_space.page_lock);
-	spin_lock(&mapping->page_lock);
+	mapping_wrlock(&swapper_space.page_lock);
+	mapping_wrlock(&mapping->page_lock);
 
 	err = radix_tree_insert(&mapping->page_tree, index, page);
 	if (!err) {
@@ -240,8 +240,8 @@ int move_from_swap_cache(struct page *pa
 		___add_to_page_cache(page, mapping, index);
 	}
 
-	spin_unlock(&mapping->page_lock);
-	spin_unlock(&swapper_space.page_lock);
+	mapping_wrunlock(&mapping->page_lock);
+	mapping_wrunlock(&swapper_space.page_lock);
 
 	if (!err) {
 		swap_free(entry);
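
move_to_swap_cache() and move_from_swap_cache() are the only places that hold two mapping locks at once, and both take them in the same order: swapper_space.page_lock first, then the page's own mapping lock. Keeping that order fixed on every path is what rules out an AB-BA deadlock between the two write locks. The shape of it, in an illustrative userspace sketch:

#include <pthread.h>

/* The globally-first lock, like swapper_space.page_lock. */
static pthread_rwlock_t swapper_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Move an entry between the swap cache and some other cache; both
 * directions of the move take the locks in the same global order. */
static void move_entry(pthread_rwlock_t *mapping_lock, void (*transfer)(void))
{
	pthread_rwlock_wrlock(&swapper_lock);	/* always first */
	pthread_rwlock_wrlock(mapping_lock);	/* always second */

	transfer();		/* delete from one tree, insert in the other */

	pthread_rwlock_unlock(mapping_lock);
	pthread_rwlock_unlock(&swapper_lock);
}

A caller passes the second mapping's lock and the actual transfer; what matters is that no code path ever takes the pair in the opposite order.
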
diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/mm/swapfile.c 300-page_lock/mm/swapfile.c
--- 290-gfp_node_strict/mm/swapfile.c	2003-11-05 13:45:56.000000000 -0800
+++ 300-page_lock/mm/swapfile.c	2003-11-05 13:46:28.000000000 -0800
@@ -253,10 +253,10 @@ static int exclusive_swap_page(struct pa
 		/* Is the only swap cache user the cache itself? */
 		if (p->swap_map[swp_offset(entry)] == 1) {
 			/* Recheck the page count with the pagecache lock held.. */
-			spin_lock(&swapper_space.page_lock);
+			mapping_rdlock(&swapper_space.page_lock);
 			if (page_count(page) - !!PagePrivate(page) == 2)
 				retval = 1;
-			spin_unlock(&swapper_space.page_lock);
+			mapping_rdunlock(&swapper_space.page_lock);
 		}
 		swap_info_put(p);
 	}
@@ -324,13 +324,13 @@ int remove_exclusive_swap_page(struct pa
 	retval = 0;
 	if (p->swap_map[swp_offset(entry)] == 1) {
 		/* Recheck the page count with the pagecache lock held.. */
-		spin_lock(&swapper_space.page_lock);
+		mapping_wrlock(&swapper_space.page_lock);
 		if ((page_count(page) == 2) && !PageWriteback(page)) {
 			__delete_from_swap_cache(page);
 			SetPageDirty(page);
 			retval = 1;
 		}
-		spin_unlock(&swapper_space.page_lock);
+		mapping_wrunlock(&swapper_space.page_lock);
 	}
 	swap_info_put(p);
 
diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/mm/truncate.c 300-page_lock/mm/truncate.c
--- 290-gfp_node_strict/mm/truncate.c	2003-10-14 15:50:36.000000000 -0700
+++ 300-page_lock/mm/truncate.c	2003-11-05 13:46:28.000000000 -0800
@@ -74,13 +74,13 @@ invalidate_complete_page(struct address_
 	if (PagePrivate(page) && !try_to_release_page(page, 0))
 		return 0;
 
-	spin_lock(&mapping->page_lock);
+	mapping_wrlock(&mapping->page_lock);
 	if (PageDirty(page)) {
-		spin_unlock(&mapping->page_lock);
+		mapping_wrunlock(&mapping->page_lock);
 		return 0;
 	}
 	__remove_from_page_cache(page);
-	spin_unlock(&mapping->page_lock);
+	mapping_wrunlock(&mapping->page_lock);
 	ClearPageUptodate(page);
 	page_cache_release(page);	/* pagecache ref */
 	return 1;
diff -purN -X /home/mbligh/.diff.exclude 290-gfp_node_strict/mm/vmscan.c 300-page_lock/mm/vmscan.c
--- 290-gfp_node_strict/mm/vmscan.c	2003-10-14 15:50:36.000000000 -0700
+++ 300-page_lock/mm/vmscan.c	2003-11-05 13:46:28.000000000 -0800
@@ -358,7 +358,7 @@ shrink_list(struct list_head *page_list,
 				goto keep_locked;
 			if (!may_write_to_queue(mapping->backing_dev_info))
 				goto keep_locked;
-			spin_lock(&mapping->page_lock);
+			mapping_wrlock(&mapping->page_lock);
 			if (test_clear_page_dirty(page)) {
 				int res;
 				struct writeback_control wbc = {
@@ -369,7 +369,7 @@ shrink_list(struct list_head *page_list,
 				};
 
 				list_move(&page->list, &mapping->locked_pages);
-				spin_unlock(&mapping->page_lock);
+				mapping_wrunlock(&mapping->page_lock);
 
 				SetPageReclaim(page);
 				res = mapping->a_ops->writepage(page, &wbc);
@@ -385,7 +385,7 @@ shrink_list(struct list_head *page_list,
 				}
 				goto keep;
 			}
-			spin_unlock(&mapping->page_lock);
+			mapping_wrunlock(&mapping->page_lock);
 		}
 
 		/*
@@ -419,7 +419,7 @@ shrink_list(struct list_head *page_list,
 		if (!mapping)
 			goto keep_locked;	/* truncate got there first */
 
-		spin_lock(&mapping->page_lock);
+		mapping_wrlock(&mapping->page_lock);
 
 		/*
 		 * The non-racy check for busy page.  It is critical to check
@@ -427,7 +427,7 @@ shrink_list(struct list_head *page_list,
 		 * not in use by anybody. 	(pagecache + us == 2)
 		 */
 		if (page_count(page) != 2 || PageDirty(page)) {
-			spin_unlock(&mapping->page_lock);
+			mapping_wrunlock(&mapping->page_lock);
 			goto keep_locked;
 		}
 
@@ -435,7 +435,7 @@ shrink_list(struct list_head *page_list,
 		if (PageSwapCache(page)) {
 			swp_entry_t swap = { .val = page->index };
 			__delete_from_swap_cache(page);
-			spin_unlock(&mapping->page_lock);
+			mapping_wrunlock(&mapping->page_lock);
 			swap_free(swap);
 			__put_page(page);	/* The pagecache ref */
 			goto free_it;
@@ -443,7 +443,7 @@ shrink_list(struct list_head *page_list,
 #endif /* CONFIG_SWAP */
 
 		__remove_from_page_cache(page);
-		spin_unlock(&mapping->page_lock);
+		mapping_wrunlock(&mapping->page_lock);
 		__put_page(page);
 
 free_it:
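
Finally, the shrink_list() hunks show why the freeing side must hold the lock for writing even though it begins with a read-only check: page_count() is only stable against new lookups while the mapping lock is held exclusively, because every lookup path above bumps the count under the read lock. The "non-racy check for busy page" therefore has to be redone with the write lock held before the page may be removed. A userspace analogue, with all names invented and a refcount of 1 meaning "only the cache holds it":

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct cobj {
	atomic_int refcount;	/* 1 == referenced by the cache alone */
};

static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Free *slot's object if nobody but the cache references it.
 * Returns 1 on success, 0 if the object was busy. */
static int reclaim(struct cobj **slot)
{
	struct cobj *obj = *slot;

	pthread_rwlock_wrlock(&cache_lock);
	/* Non-racy check: with the lock held for writing, no lookup
	 * can take a new reference, so the count cannot change. */
	if (!obj || atomic_load(&obj->refcount) != 1) {
		pthread_rwlock_unlock(&cache_lock);
		return 0;
	}
	*slot = NULL;			/* __remove_from_page_cache() */
	pthread_rwlock_unlock(&cache_lock);
	free(obj);
	return 1;
}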