Async page wait

Add wait-queue-entry aware variants of the page wait and lock primitives:
wait_on_page_bit_wq(), wait_on_page_locked_wq(), lock_page_wq() and
__lock_page_wq().  A NULL wait queue entry gives the old synchronous
behaviour; when an async wait queue entry is passed instead, the functions
queue it on the page's waitqueue and return -EIOCBRETRY rather than
blocking, so that aio can retry the operation when the wakeup fires.

 include/linux/pagemap.h |   19 ++++++++++++
 mm/filemap.c            |   74 +++++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 83 insertions(+), 10 deletions(-)
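
For illustration, a retry-based caller might use the new primitives along
these lines.  This is only a sketch: the iocb's ki_wait entry, is_sync_kiocb()
and the -EIOCBRETRY retry convention are assumed from the rest of the aio
patch series, and example_aio_op() is a made-up function.

	ssize_t example_aio_op(struct kiocb *iocb, struct page *page)
	{
		int ret;

		/* async callers pass their wait queue entry, sync callers NULL */
		ret = lock_page_wq(page,
				is_sync_kiocb(iocb) ? NULL : &iocb->ki_wait);
		if (ret == -EIOCBRETRY)
			/*
			 * The wakeup callback is now queued on the page's
			 * waitqueue: return to the aio core, which retries
			 * the operation once unlock_page() fires it.
			 */
			return ret;

		/* ... operate on the locked page ... */
		unlock_page(page);
		return 0;
	}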

diff -puN include/linux/pagemap.h~aio-02-lockpage_wq include/linux/pagemap.h
--- 25/include/linux/pagemap.h~aio-02-lockpage_wq	2003-08-30 15:42:24.000000000 -0700
+++ 25-akpm/include/linux/pagemap.h	2003-08-30 15:42:24.000000000 -0700
@@ -158,6 +158,16 @@ static inline void lock_page(struct page
 	if (TestSetPageLocked(page))
 		__lock_page(page);
 }
+
+extern int FASTCALL(__lock_page_wq(struct page *page, wait_queue_t *wait));
+static inline int lock_page_wq(struct page *page, wait_queue_t *wait)
+{
+	if (TestSetPageLocked(page))
+		return __lock_page_wq(page, wait);
+	else
+		return 0;
+}
+
 	
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
@@ -178,6 +188,15 @@ static inline void wait_on_page_locked(s
 		wait_on_page_bit(page, PG_locked);
 }
 
+extern int FASTCALL(wait_on_page_bit_wq(struct page *page, int bit_nr,
+	wait_queue_t *wait));
+static inline int wait_on_page_locked_wq(struct page *page, wait_queue_t *wait)
+{
+	if (PageLocked(page))
+		return wait_on_page_bit_wq(page, PG_locked, wait);
+	return 0;
+}
+
 /* 
  * Wait for a page to complete writeback
  */
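
The slow paths in mm/filemap.c distinguish the two kinds of waiters with
is_sync_wait().  Roughly, and as an assumption based on the rest of the
series rather than anything in this patch: an entry set up by DEFINE_WAIT()
carries the current task and can be put to sleep as usual, while an async
entry is initialised with only a wakeup callback and no task, so the waiter
must not block.  A sketch of that test:

	/*
	 * Assumed definition from elsewhere in the series: an entry with
	 * a task sleeps synchronously; a callback-only entry is async.
	 */
	#define is_sync_wait(wait)	(!(wait) || ((wait)->task))
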
diff -puN mm/filemap.c~aio-02-lockpage_wq mm/filemap.c
--- 25/mm/filemap.c~aio-02-lockpage_wq	2003-08-30 15:42:24.000000000 -0700
+++ 25-akpm/mm/filemap.c	2003-08-30 15:42:24.000000000 -0700
@@ -281,19 +281,46 @@ static wait_queue_head_t *page_waitqueue
 	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
 }
 
-void wait_on_page_bit(struct page *page, int bit_nr)
+/*
+ * Wait for the specified page bit to be cleared.  This either waits
+ * synchronously or just queues an async notification callback, depending
+ * on the wait queue entry passed in.
+ *
+ * A NULL wait queue entry selects synchronous behaviour.
+ */
+int wait_on_page_bit_wq(struct page *page, int bit_nr, wait_queue_t *wait)
 {
 	wait_queue_head_t *waitqueue = page_waitqueue(page);
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT(local_wait);
+
+	if (!wait)
+		wait = &local_wait; /* default to a sync wait entry */
 
 	do {
-		prepare_to_wait(waitqueue, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(waitqueue, wait, TASK_UNINTERRUPTIBLE);
 		if (test_bit(bit_nr, &page->flags)) {
 			sync_page(page);
+			if (!is_sync_wait(wait)) {
+				/*
+				 * If we've queued an async wait queue
+				 * callback, do not block; just tell the
+				 * caller to return and retry later when
+				 * the callback fires.
+				 */
+				return -EIOCBRETRY;
+			}
 			io_schedule();
 		}
 	} while (test_bit(bit_nr, &page->flags));
-	finish_wait(waitqueue, &wait);
+	finish_wait(waitqueue, wait);
+
+	return 0;
+}
+EXPORT_SYMBOL(wait_on_page_bit_wq);
+
+void wait_on_page_bit(struct page *page, int bit_nr)
+{
+	wait_on_page_bit_wq(page, bit_nr, NULL);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
 
@@ -305,7 +332,9 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
  * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
- * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
+ * But that's OK - sleepers in wait_on_page_writeback() just go back to
+ * sleep; and where the wakeup notifies async wait queue entries, as in
+ * the case of aio, a retry is triggered which may re-queue the callbacks.
  *
  * The first mb is necessary to safely close the critical section opened by the
  * TestSetPageLocked(), the second mb is necessary to enforce ordering between
@@ -342,26 +371,51 @@ void end_page_writeback(struct page *pag
 EXPORT_SYMBOL(end_page_writeback);
 
 /*
- * Get a lock on the page, assuming we need to sleep to get it.
+ * Get a lock on the page, assuming we need to either sleep to get it
+ * or to queue an async notification callback to try again when it's
+ * available.
+ *
+ * A NULL wait queue entry selects synchronous behaviour; otherwise the
+ * entry passed in is queued for async notification.
  *
  * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
  * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void __lock_page(struct page *page)
+int __lock_page_wq(struct page *page, wait_queue_t *wait)
 {
 	wait_queue_head_t *wqh = page_waitqueue(page);
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT(local_wait);
+
+	if (!wait)
+		wait = &local_wait;
 
 	while (TestSetPageLocked(page)) {
-		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(wqh, wait, TASK_UNINTERRUPTIBLE);
 		if (PageLocked(page)) {
 			sync_page(page);
+			if (!is_sync_wait(wait)) {
+				/*
+				 * if we've queued an async wait queue
+				 * callback do not block; just tell the
+				 * caller to return and retry later when
+				 * the callback is notified
+				 */
+				return -EIOCBRETRY;
+			}
 			io_schedule();
 		}
 	}
-	finish_wait(wqh, &wait);
+	finish_wait(wqh, wait);
+	return 0;
+}
+EXPORT_SYMBOL(__lock_page_wq);
+
+void __lock_page(struct page *page)
+{
+	__lock_page_wq(page, NULL);
 }
 EXPORT_SYMBOL(__lock_page);
 
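On the async side, the wait queue entry's wakeup callback takes the place
of the task wakeup.  Something along these lines (a hypothetical sketch;
aio_wake_function() and kick_iocb() belong to other patches in the aio
series, not to this one):

	/*
	 * Hypothetical async wakeup callback: instead of waking a task,
	 * re-kick the iocb so its operation is retried once unlock_page()
	 * or end_page_writeback() fires the wakeup.
	 */
	static int aio_wake_function(wait_queue_t *wait, unsigned mode,
				     int sync, void *key)
	{
		struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);

		list_del_init(&wait->task_list);	/* one-shot callback */
		kick_iocb(iocb);			/* schedule the retry */
		return 1;
	}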

_