In a further attempt to prevent dirty pages from being written out from the
LRU, don't write them out if they were referenced.  This gives those pages
another trip around the inactive list, so more of them end up being written
via balance_dirty_pages() instead.

It speeds up an untar of five kernel trees by 5% on a 256M box.
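
Below is a minimal standalone sketch of the decision logic this patch adds to
shrink_list(): a dirty page that was recently referenced is now kept (deferred
to balance_dirty_pages()) rather than written out by the scanner.  The struct
and helper names are simplified stand-ins for the real kernel code, not the
actual implementation.

	#include <stdio.h>

	struct page_state {
		int referenced;		/* models page_referenced() */
		int dirty;		/* models PageDirty() */
		int mapping_inuse;	/* models page_mapping_inuse() */
	};

	static const char *shrink_decision(const struct page_state *p)
	{
		if (p->referenced && p->mapping_inuse)
			return "activate";	/* still in active use */
		if (p->dirty) {
			if (p->referenced)
				return "keep";	/* new: defer, another lap around the list */
			return "writepage";	/* old behaviour: write it out from the scanner */
		}
		return "reclaim";
	}

	int main(void)
	{
		struct page_state dirty_ref  = { .referenced = 1, .dirty = 1, .mapping_inuse = 0 };
		struct page_state dirty_cold = { .referenced = 0, .dirty = 1, .mapping_inuse = 0 };

		printf("dirty, referenced: %s\n", shrink_decision(&dirty_ref));	/* keep */
		printf("dirty, cold:       %s\n", shrink_decision(&dirty_cold));	/* writepage */
		return 0;
	}
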


 mm/vmscan.c |    6 +++++-
 1 files changed, 5 insertions(+), 1 deletion(-)

diff -puN mm/vmscan.c~vmscan-defer-writepage mm/vmscan.c
--- 25/mm/vmscan.c~vmscan-defer-writepage	2003-07-27 12:24:52.000000000 -0700
+++ 25-akpm/mm/vmscan.c	2003-07-28 00:07:43.000000000 -0700
@@ -254,6 +254,7 @@ shrink_list(struct list_head *page_list,
 	while (!list_empty(page_list)) {
 		struct page *page;
 		int may_enter_fs;
+		int referenced;
 
 		page = list_entry(page_list->prev, struct page, lru);
 		list_del(&page->lru);
@@ -273,7 +274,8 @@ shrink_list(struct list_head *page_list,
 			goto keep_locked;
 
 		pte_chain_lock(page);
-		if (page_referenced(page) && page_mapping_inuse(page)) {
+		referenced = page_referenced(page);
+		if (referenced && page_mapping_inuse(page)) {
 			/* In active use or really unfreeable.  Activate it. */
 			pte_chain_unlock(page);
 			goto activate_locked;
@@ -333,6 +335,8 @@ shrink_list(struct list_head *page_list,
 		 * See swapfile.c:page_queue_congested().
 		 */
 		if (PageDirty(page)) {
+			if (referenced)
+				goto keep_locked;
 			if (!is_page_cache_freeable(page))
 				goto keep_locked;
 			if (!mapping)

_