From: Suparna Bhattacharya <suparna@in.ibm.com>

From: Andrew Morton <akpm@osdl.org>

Modify mpage_writepages to optionally write back only the dirty pages that lie
within a specified byte range of a file (as in the case of O_SYNC).  Cheat a
little to avoid changing the prototypes of the aops - just put the <start, end>
hint into the writeback_control struct instead.  If <start, end> are not set,
default to writing back all of the mapping's dirty pages.

Signed-off-by: Suparna Bhattacharya <suparna@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/fs/mpage.c                |   27 ++++++++++++++++++++++++---
 25-akpm/include/linux/writeback.h |   21 ++++++++++++++++-----
 2 files changed, 40 insertions(+), 8 deletions(-)
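
A usage note, not part of the patch: the sketch below shows how a caller might
drive the new <start, end> hint for an O_SYNC-style range writeout.  The helper
name sync_page_range_sketch is hypothetical, and going through do_writepages()
is only one possible way to reach ->writepages(); fields left out of the
initialiser are zero, which is what the whole-file default relies on.

#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/kernel.h>	/* LONG_MAX */

/*
 * Illustrative helper (not added by this patch): write back the dirty
 * pages covering bytes [pos, pos + count - 1] of a mapping, as an
 * O_SYNC write would want.
 */
static int sync_page_range_sketch(struct address_space *mapping,
				  loff_t pos, size_t count)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= LONG_MAX,		/* no page-count limit */
		.start		= pos,			/* byte offsets; ... */
		.end		= pos + count - 1,	/* ... end is inclusive */
	};

	/* mpage_writepages() converts the byte range to page indices itself */
	return do_writepages(mapping, &wbc);
}

Note that the range path is keyed off (wbc->start || wbc->end), so a range
starting at byte 0 is still recognised as long as end is non-zero.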

diff -puN fs/mpage.c~writeback-page-range-hint fs/mpage.c
--- 25/fs/mpage.c~writeback-page-range-hint	2004-08-01 17:23:55.316030856 -0700
+++ 25-akpm/fs/mpage.c	2004-08-01 17:23:55.322029944 -0700
@@ -627,7 +627,9 @@ mpage_writepages(struct address_space *m
 	struct pagevec pvec;
 	int nr_pages;
 	pgoff_t index;
+	pgoff_t end = -1;		/* Inclusive */
 	int scanned = 0;
+	int is_range = 0;
 
 	if (wbc->nonblocking && bdi_write_congested(bdi)) {
 		wbc->encountered_congestion = 1;
@@ -645,9 +647,16 @@ mpage_writepages(struct address_space *m
 		index = 0;			  /* whole-file sweep */
 		scanned = 1;
 	}
+	if (wbc->start || wbc->end) {
+		index = wbc->start >> PAGE_CACHE_SHIFT;
+		end = wbc->end >> PAGE_CACHE_SHIFT;
+		is_range = 1;
+		scanned = 1;
+	}
 retry:
 	while (!done && (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-					PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
+			PAGECACHE_TAG_DIRTY,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
 		unsigned i;
 
 		scanned = 1;
@@ -664,10 +673,21 @@ retry:
 
 			lock_page(page);
 
+			if (unlikely(page->mapping != mapping)) {
+				unlock_page(page);
+				continue;
+			}
+
+			if (unlikely(is_range) && page->index > end) {
+				done = 1;
+				unlock_page(page);
+				continue;
+			}
+
 			if (wbc->sync_mode != WB_SYNC_NONE)
 				wait_on_page_writeback(page);
 
-			if (page->mapping != mapping || PageWriteback(page) ||
+			if (PageWriteback(page) ||
 					!clear_page_dirty_for_io(page)) {
 				unlock_page(page);
 				continue;
@@ -706,7 +726,8 @@ retry:
 		index = 0;
 		goto retry;
 	}
-	mapping->writeback_index = index;
+	if (!is_range)
+		mapping->writeback_index = index;
 	if (bio)
 		mpage_bio_submit(WRITE, bio);
 	return ret;
diff -puN include/linux/writeback.h~writeback-page-range-hint include/linux/writeback.h
--- 25/include/linux/writeback.h~writeback-page-range-hint	2004-08-01 17:23:55.317030704 -0700
+++ 25-akpm/include/linux/writeback.h	2004-08-01 17:23:55.323029792 -0700
@@ -29,7 +29,9 @@ enum writeback_sync_modes {
 };
 
 /*
- * A control structure which tells the writeback code what to do
+ * A control structure which tells the writeback code what to do.  These are
+ * always on the stack, and hence need no locking.  They are always initialised
+ * in a manner such that unspecified fields are set to zero.
  */
 struct writeback_control {
 	struct backing_dev_info *bdi;	/* If !NULL, only write back this
@@ -40,10 +42,19 @@ struct writeback_control {
 	long nr_to_write;		/* Write this many pages, and decrement
 					   this for each page written */
 	long pages_skipped;		/* Pages which were not written */
-	int nonblocking;		/* Don't get stuck on request queues */
-	int encountered_congestion;	/* An output: a queue is full */
-	int for_kupdate;		/* A kupdate writeback */
-	int for_reclaim;		/* Invoked from the page allocator */
+
+	/*
+	 * For a_ops->writepages(): if start or end are non-zero then this is
+	 * a hint that the filesystem need only write out the pages inside that
+	 * byte range.  The byte at `end' is included in the writeout request.
+	 */
+	loff_t start;
+	loff_t end;
+
+	unsigned nonblocking:1;		/* Don't get stuck on request queues */
+	unsigned encountered_congestion:1; /* An output: a queue is full */
+	unsigned for_kupdate:1;		/* A kupdate writeback */
+	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 };
 
 /*
_