Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 25-akpm/mm/filemap.c   |    9 +++-
 25-akpm/mm/readahead.c |  102 ++++++++++++++++++++++---------------------------
 2 files changed, 54 insertions(+), 57 deletions(-)

diff -puN mm/filemap.c~simplified-readahead-cleanups mm/filemap.c
--- 25/mm/filemap.c~simplified-readahead-cleanups	2004-12-04 00:07:07.466440304 -0800
+++ 25-akpm/mm/filemap.c	2004-12-04 00:07:07.473439240 -0800
@@ -689,14 +689,19 @@ void do_generic_mapping_read(struct addr
 			     read_actor_t actor)
 {
 	struct inode *inode = mapping->host;
-	unsigned long index, end_index, offset, req_size, next_index;
+	unsigned long index;
+	unsigned long end_index;
+	unsigned long offset;
+	unsigned long req_size;
+	unsigned long next_index;
 	loff_t isize;
 	struct page *cached_page;
 	int error;
 	struct file_ra_state ra = *_ra;
 
 	cached_page = NULL;
-	next_index = index = *ppos >> PAGE_CACHE_SHIFT;
+	index = *ppos >> PAGE_CACHE_SHIFT;
+	next_index = index;
 	req_size = (desc->count + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 
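A tiny worked example of the page arithmetic above, assuming 4k pages (so
PAGE_CACHE_SHIFT is 12 and ~PAGE_CACHE_MASK is 0xfff); the read position and
length are made up, and this is an illustration rather than part of the patch:

/* Illustration only: index/offset/req_size for a 5000-byte read at *ppos == 10000 */
#include <stdio.h>

int main(void)
{
	unsigned long ppos = 10000, count = 5000;		/* made-up read */
	unsigned long index    = ppos >> 12;			/* page 2 */
	unsigned long offset   = ppos & ((1UL << 12) - 1);	/* 1808 bytes into that page */
	unsigned long req_size = (count + 4096 - 1) >> 12;	/* 2 pages, rounded up */

	printf("index=%lu offset=%lu req_size=%lu\n", index, offset, req_size);
	return 0;
}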
diff -puN mm/readahead.c~simplified-readahead-cleanups mm/readahead.c
--- 25/mm/readahead.c~simplified-readahead-cleanups	2004-12-04 00:07:07.468440000 -0800
+++ 25-akpm/mm/readahead.c	2004-12-04 00:07:20.022531488 -0800
@@ -54,7 +54,7 @@ static inline void ra_off(struct file_ra
 {
 	ra->start = 0;
 	ra->flags = 0;
-	ra->size = -1UL;
+	ra->size = -1;
 	ra->ahead_start = 0;
 	ra->ahead_size = 0;
 	return;
@@ -66,47 +66,40 @@ static inline void ra_off(struct file_ra
  * for 128k (32 page) max ra
  * 1-8 page = 32k initial, > 8 page = 128k initial
  */
-unsigned long get_init_ra_size(unsigned long size, unsigned long max)
+static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
 {
-	unsigned long s_size=1, newsize;
+	unsigned long newsize = roundup_pow_of_two(size);
 
-	do {
-		s_size = s_size << 1;
-	} while ((size = size >> 1));
-	if (s_size <= max / 64) {
-		newsize = s_size * s_size;
-	} else if (s_size <= max/4) {
+	if (newsize <= max / 64)
+		newsize = newsize * newsize;
+	else if (newsize <= max / 4)
 		newsize = max / 4;
-	} else {
+	else
 		newsize = max;
-	}
 	return newsize;
 }
 
 /*
- * Set the new window size, this is called only when
- * I/O is to be submitted, not for each call to readahead
- * If cache miss occered, reduce next I/O size, else
- * increase depending on how close to max we are.
+ * Set the new window size.  This is called only when I/O is to be submitted,
+ * not for each call to readahead.  If a cache miss occurred, reduce the next
+ * I/O size, else increase it depending on how close to max we are.
  */
-unsigned long get_next_ra_size(unsigned long cur, unsigned long max,
+static unsigned long get_next_ra_size(unsigned long cur, unsigned long max,
 				unsigned long min, unsigned long * flags)
 {
 	unsigned long newsize;
 
 	if (*flags & RA_FLAG_MISS) {
-		newsize = max((cur - 2),min);
+		newsize = max((cur - 2), min);
 		*flags &= ~RA_FLAG_MISS;
-	} else if ( cur < max/16 ) {
+	} else if (cur < max / 16) {
 		newsize = 4 * cur;
 	} else {
 		newsize = 2 * cur;
 	}
-	newsize = min(newsize, max);
-	return newsize;
+	return min(newsize, max);
 }
 
-
 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
 
 /**
@@ -207,9 +200,8 @@ out:
  * ahead_size:  Together, these form the "ahead window".
  * ra_pages:	The externally controlled max readahead for this fd.
  *
- * When readahead is in the off state (size == -1UL),
- * readahead is disabled.  In this state, prev_page is used
- * to detect the resumption of sequential I/O.
+ * When readahead is in the off state (size == -1), readahead is disabled.
+ * In this state, prev_page is used to detect the resumption of sequential I/O.
  *
  * The readahead code manages two windows - the "current" and the "ahead"
  * windows.  The intent is that while the application is walking the pages
@@ -376,9 +368,6 @@ int do_page_cache_readahead(struct addre
 	return  __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
 }
 
-
-
-
 /*
- * page_cache_readahead is the main function.  If performs the adaptive
+ * page_cache_readahead is the main function.  It performs the adaptive
  * readahead window size management and submits the readahead I/O.
@@ -388,7 +377,8 @@ page_cache_readahead(struct address_spac
 		     struct file *filp, unsigned long offset,
 		     unsigned long req_size)
 {
-	unsigned long max, min, newsize=req_size;
+	unsigned long max, min;
+	unsigned long newsize = req_size;
 	unsigned long actual=0;
 
 	/*
@@ -397,37 +387,37 @@ page_cache_readahead(struct address_spac
 	 * perturbing the readahead window expansion logic.
 	 * If size is zero, there is no read ahead window so we need one
 	 */
-	if (offset == ra->prev_page && req_size == 1 && ra->size != 0) {
+	if (offset == ra->prev_page && req_size == 1 && ra->size != 0)
 		goto out;
-	}
 
 	max = get_max_readahead(ra);
 	min = get_min_readahead(ra);
 //	maxsane = max_sane_readahead(max);
 	newsize = min(req_size, max);
 
-	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE)){
+	if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE)) {
 		newsize = 1;
-		goto out;	/* No readahead or file already in cache*/
+		goto out;	/* No readahead or file already in cache */
 	}
 	/*
-	 * Special case - first read.
-	 * We'll assume it's a whole-file read if at start of file, and
-	 * grow the window fast.
-	 * or detect first sequential access
+	 * Special case - first read.  We'll assume it's a whole-file read if
+	 * at start of file, and grow the window fast.  Or detect the first
+	 * sequential access.
 	 */
-	if ((ra->size == 0 && offset == 0)	 // first io and start of file
-		|| (ra->size == -1UL && ra->prev_page == offset-1)) { //1st seq
+	if ((ra->size == 0 && offset == 0)	/* first io and start of file */
+	    || (ra->size == -1 && ra->prev_page == offset - 1)) {
+		/* First sequential */
 		ra->prev_page  = offset + newsize-1;
 		ra->size = get_init_ra_size(newsize, max);
 		ra->start = offset;
 		actual = do_page_cache_readahead(mapping, filp, offset,
-						 ra->size);
-		if (!check_ra_success(ra, ra->size, actual)) {
+						ra->size);
+		if (!check_ra_success(ra, ra->size, actual))
 			goto out;
-		}
-		/* if the request size is larger than our max readahead, we
-		 * at least want to be sure that we get 2 IOs if flight and
+
+		/*
+		 * If the request size is larger than our max readahead, we
+		 * at least want to be sure that we get 2 IOs in flight and
-		 * we know that we will definitly need the new I/O.
-		 * once we do this, subsequent calls should be able to overlap
-		 * IOs,* thus preventing stalls. so issue the ahead window
+		 * we know that we will definitely need the new I/O.
+		 * Once we do this, subsequent calls should be able to overlap
+		 * IOs, thus preventing stalls.  So issue the ahead window
@@ -444,7 +434,8 @@ page_cache_readahead(struct address_spac
 		goto out;
 	}
 
-	/* now handle the random case:
+	/*
+	 * Now handle the random case:
 	 * partial page reads and first access were handled above,
 	 * so this must be the next page otherwise it is random
 	 */
@@ -457,8 +448,9 @@ page_cache_readahead(struct address_spac
 		goto out;
 	}
 
-	/* If we get here we are doing sequential IO and this was
-	 * not the first occurence (ie we have an existing window)
+	/*
+	 * If we get here we are doing sequential IO and this was not the first
+	 * occurrence (i.e. we have an existing window)
 	 */
 
 	if (ra->ahead_start == 0) {	 /* no ahead window yet */
@@ -468,16 +460,16 @@ page_cache_readahead(struct address_spac
 		newsize = min (newsize, ra->ahead_start - offset);
 		actual = do_page_cache_readahead(mapping, filp,
 					 ra->ahead_start, ra->ahead_size);
-		if (!check_ra_success(ra, ra->ahead_size, actual)) {
+		if (!check_ra_success(ra, ra->ahead_size, actual))
 			goto out;
-		}
 	}
-	/* already have an ahead window, check if we crossed into it
-	   if so, shift windows and issue a new ahead window.
-	   Only return the #pages that are in the current window, so that
-	   we get called back on the first page of the ahead window which
-	   will allow us to submit more IO.
-	*/
+	/*
+	 * We already have an ahead window.  Check whether we crossed into it;
+	 * if so, shift the windows and issue a new ahead window.
+	 * Only return the #pages that are in the current window, so that we
+	 * get called back on the first page of the ahead window, which will
+	 * allow us to submit more IO.
+	 */
 	if ((offset + newsize -1) >= ra->ahead_start) {
 		ra->start = ra->ahead_start;
 		ra->size = ra->ahead_size;
@@ -500,7 +492,7 @@ out:
  * not found.  This will happen if it was evicted by the VM (readahead
  * thrashing)
  *
- * turn on the cache miss flag in the RA struct, this will cause the RA code
+ * Turn on the cache miss flag in the RA struct; this will cause the RA code
  * to reduce the RA size on the next read.
  */
 void handle_ra_miss(struct address_space *mapping,
_
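For reference, a rough userspace sketch of the window sizing policy as it reads
with this patch applied.  The two helpers are copied by hand,
roundup_pow_of_two() is a stand-in for the kernel macro, and the RA_FLAG_MISS
value and the min/max numbers in main() are assumptions for the illustration;
it models the growth behaviour only and is not kernel code.

/* Illustration only: userspace model of the readahead window sizing */
#include <stdio.h>

#define RA_FLAG_MISS	0x01	/* flag value assumed for this sketch */

/* stand-in for the kernel's roundup_pow_of_two() */
static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 64)
		newsize = newsize * newsize;
	else if (newsize <= max / 4)
		newsize = max / 4;
	else
		newsize = max;
	return newsize;
}

static unsigned long get_next_ra_size(unsigned long cur, unsigned long max,
				unsigned long min, unsigned long *flags)
{
	unsigned long newsize;

	if (*flags & RA_FLAG_MISS) {
		/* same as max(cur - 2, min) in the kernel version */
		newsize = (cur - 2 > min) ? cur - 2 : min;
		*flags &= ~RA_FLAG_MISS;
	} else if (cur < max / 16) {
		newsize = 4 * cur;
	} else {
		newsize = 2 * cur;
	}
	return (newsize < max) ? newsize : max;
}

int main(void)
{
	unsigned long max = 32;		/* 128k of readahead with 4k pages */
	unsigned long min = 4;		/* made-up minimum */
	unsigned long flags = 0;
	unsigned long size = get_init_ra_size(3, max);
	int i;

	/* a 3-page first read: 8-page initial window, doubling up to max */
	for (i = 0; i < 4; i++) {
		printf("window = %lu pages\n", size);
		size = get_next_ra_size(size, max, min, &flags);
	}
	return 0;
}

With a 32-page (128k) max, a 3-page first read gets an 8-page initial window,
which then doubles to 16 and caps at 32 pages on later sequential reads; a
cache miss would instead shrink the next window by two pages, floored at min.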