diff -purN -X /home/mbligh/.diff.exclude 525-aio-fs_read/mm/filemap.c 530-aio-upfront-readahead/mm/filemap.c
--- 525-aio-fs_read/mm/filemap.c	2004-02-28 11:21:29.000000000 -0800
+++ 530-aio-upfront-readahead/mm/filemap.c	2004-02-28 11:21:31.000000000 -0800
@@ -645,6 +645,34 @@ void do_generic_mapping_read(struct addr
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 
+	if (unlikely(in_aio())) {
+		unsigned long i, last, nr;
+		/*
+		 * Let the readahead logic know upfront about all
+		 * the pages we'll need to satisfy this request, while
+		 * taking care to avoid repeat readaheads during retries.
+		 * Required for reasonable IO ordering with multipage
+		 * streaming AIO requests.
+		 */
+		if (!is_retried_kiocb(io_wait_to_kiocb(current->io_wait)) ||
+		    (ra->prev_page + 1 == index)) {
+
+			last = (*ppos + desc->count - 1) >> PAGE_CACHE_SHIFT;
+			nr = max_sane_readahead(last - index + 1);
+
+			for (i = 0; (i < nr) && ((i == 0) || (i < ra->ra_pages));
+			     i++) {
+				page_cache_readahead(mapping, ra, filp,
+						     index + i);
+				if (bdi_read_congested(
+					mapping->backing_dev_info)) {
+					printk("AIO readahead congestion\n");
+					break;
+				}
+			}
+		}
+	}
+
 	for (;;) {
 		struct page *page;
 		unsigned long end_index, nr, ret;
@@ -662,7 +690,13 @@ void do_generic_mapping_read(struct addr
 		}
 
 		cond_resched();
-		page_cache_readahead(mapping, ra, filp, index);
+		/*
+		 * Take care to avoid disturbing the existing readahead
+		 * window (in the AIO case, concurrent reads may be
+		 * active on the same fd).
+		 */
+		if (!in_aio() || (ra->prev_page + 1 == index))
+			page_cache_readahead(mapping, ra, filp, index);
 
 		nr = nr - offset;
 find_page:
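
For reference, a minimal standalone sketch of the page-range arithmetic the
new upfront-readahead block depends on (the same index/last/nr computation
used in the first hunk above). This is illustrative only: PAGE_CACHE_SHIFT
is assumed to be 12 (4K pages), plain userspace types are used, and the
max_sane_readahead() clamp and the congestion check are omitted.

	#include <stdio.h>

	#define PAGE_CACHE_SHIFT	12	/* assumed 4K pages; per-arch in the kernel */
	#define PAGE_CACHE_SIZE		(1UL << PAGE_CACHE_SHIFT)
	#define PAGE_CACHE_MASK		(~(PAGE_CACHE_SIZE - 1))

	int main(void)
	{
		unsigned long long ppos = 5000;	/* starting file position (*ppos) */
		unsigned long count = 10000;	/* bytes requested (desc->count) */

		/* First page, offset within it, last page, and page count. */
		unsigned long index = ppos >> PAGE_CACHE_SHIFT;
		unsigned long offset = ppos & ~PAGE_CACHE_MASK;
		unsigned long last = (ppos + count - 1) >> PAGE_CACHE_SHIFT;
		unsigned long nr = last - index + 1;

		/* 10000 bytes starting at byte 5000 span pages 1..3, so nr == 3. */
		printf("index=%lu offset=%lu last=%lu nr=%lu\n",
		       index, offset, last, nr);
		return 0;
	}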