patch-2.4.20 linux-2.4.20/mm/filemap.c
- Lines: 152
- Date: Thu Nov 28 15:53:15 2002
- Orig file: linux-2.4.19/mm/filemap.c
- Orig date: Fri Aug 2 17:39:46 2002
diff -urN linux-2.4.19/mm/filemap.c linux-2.4.20/mm/filemap.c
@@ -154,11 +154,14 @@
if (mapping) {
spin_lock(&pagecache_lock);
- list_del(&page->list);
- list_add(&page->list, &mapping->dirty_pages);
+ mapping = page->mapping;
+ if (mapping) { /* may have been truncated */
+ list_del(&page->list);
+ list_add(&page->list, &mapping->dirty_pages);
+ }
spin_unlock(&pagecache_lock);
- if (mapping->host)
+ if (mapping && mapping->host)
mark_inode_dirty_pages(mapping->host);
}
}
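For context, this first hunk closes a truncation race in set_page_dirty(): page->mapping is re-read after pagecache_lock is taken, because the page may have been truncated (and its mapping cleared) between the unlocked check and acquiring the lock, and the final mark_inode_dirty_pages() call is guarded the same way. A sketch of how the whole function reads after the hunk, with everything outside the visible context lines assumed from the surrounding 2.4.x source:

/*
 * Sketch of set_page_dirty() after the hunk above; code outside the
 * visible context lines is assumed from 2.4.x and may differ in detail.
 */
void set_page_dirty(struct page *page)
{
        if (!test_and_set_bit(PG_dirty, &page->flags)) {
                struct address_space *mapping = page->mapping;

                if (mapping) {
                        spin_lock(&pagecache_lock);
                        mapping = page->mapping;
                        if (mapping) {  /* may have been truncated */
                                list_del(&page->list);
                                list_add(&page->list, &mapping->dirty_pages);
                        }
                        spin_unlock(&pagecache_lock);

                        if (mapping && mapping->host)
                                mark_inode_dirty_pages(mapping->host);
                }
        }
}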
@@ -800,7 +803,6 @@
/* On some cpus multiply is faster, on others gcc will do shifts */
hash *= GOLDEN_RATIO_PRIME;
#endif
-
hash >>= zone->wait_table_shift;
return &wait[hash];
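This hunk only drops a blank line, but it sits inside the helper that hashes a page to its wait queue. A hedged reconstruction of that helper, for orientation: only the GOLDEN_RATIO_PRIME multiply, the shift and the return are shown by the hunk; the zone fields and the page_zone() lookup are assumptions from the 2.4.x tree, and the 64-bit shift-and-add variant behind the #if is elided.

/*
 * Approximate reconstruction of the page-to-waitqueue hash helper;
 * everything beyond the lines visible in the hunk is assumed from
 * 2.4.x and may differ in detail.
 */
static inline wait_queue_head_t *page_waitqueue(struct page *page)
{
        const zone_t *zone = page_zone(page);
        wait_queue_head_t *wait = zone->wait_table;
        unsigned long hash = (unsigned long)page;

        /* On some cpus multiply is faster, on others gcc will do shifts */
        hash *= GOLDEN_RATIO_PRIME;
        hash >>= zone->wait_table_shift;

        return &wait[hash];
}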
@@ -812,6 +814,21 @@
* This must be called with the caller "holding" the page,
* ie with increased "page->count" so that the page won't
* go away during the wait..
+ *
+ * The waiting strategy is to get on a waitqueue determined
+ * by hashing. Waiters will then collide, and the newly woken
+ * task must then determine whether it was woken for the page
+ * it really wanted, and go back to sleep on the waitqueue if
+ * that wasn't it. With the waitqueue semantics, it never leaves
+ * the waitqueue unless it calls, so the loop moves forward one
+ * iteration every time there is
+ * (1) a collision
+ * and
+ * (2) one of the colliding pages is woken
+ *
+ * This is the thundering herd problem, but it is expected to
+ * be very rare due to the few pages that are actually being
+ * waited on at any given time and the quality of the hash function.
*/
void ___wait_on_page(struct page *page)
{
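The new comment above documents the collide-and-recheck strategy. The function body itself is not part of this hunk; a hedged sketch of the loop the comment describes, assumed from the 2.4.x source, looks like this:

/*
 * Illustrative sketch of the wait loop the new comment describes;
 * the body is outside this hunk and is assumed from 2.4.x.
 */
void ___wait_on_page(struct page *page)
{
        wait_queue_head_t *waitqueue = page_waitqueue(page);
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);

        add_wait_queue(waitqueue, &wait);
        do {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (!PageLocked(page))
                        break;
                sync_page(page);
                schedule();             /* woken on any unlock hashed here */
        } while (PageLocked(page));     /* not our page? sleep again */
        __set_task_state(tsk, TASK_RUNNING);
        remove_wait_queue(waitqueue, &wait);
}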
@@ -832,7 +849,11 @@
}
/*
- * Unlock the page and wake up sleepers in ___wait_on_page.
+ * unlock_page() is the other half of the story just above
+ * __wait_on_page(). Here a couple of quick checks are done
+ * and a couple of flags are set on the page, and then all
+ * of the waiters for all of the pages in the appropriate
+ * wait queue are woken.
*/
void unlock_page(struct page *page)
{
@@ -842,6 +863,13 @@
if (!test_and_clear_bit(PG_locked, &(page)->flags))
BUG();
smp_mb__after_clear_bit();
+
+ /*
+ * Although the default semantics of wake_up() are
+ * to wake all, here the specific function is used
+ * to make it even more explicit that a number of
+ * pages are being waited on here.
+ */
if (waitqueue_active(waitqueue))
wake_up_all(waitqueue);
}
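As the added comment explains, unlock_page() uses wake_up_all() because waiters for several different pages can be hashed onto the same queue, and every one of them must get a chance to re-check its own page. A minimal, purely illustrative sketch of the pairing from a caller's point of view (example_with_locked_page() is not a function in this patch):

/* Purely illustrative caller, not part of the patch. */
static void example_with_locked_page(struct page *page)
{
        lock_page(page);        /* may sleep in ___wait_on_page() */
        /* ... operate on the page while it is locked ... */
        unlock_page(page);      /* wakes every waiter hashed onto this
                                 * queue; each re-checks its own page */
}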
@@ -1001,15 +1029,6 @@
}
/*
- * Returns locked page at given index in given cache, creating it if needed.
- */
-struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
-{
- return find_or_create_page(mapping, index, mapping->gfp_mask);
-}
-
-
-/*
* Same as grab_cache_page, but do not wait if the page is unavailable.
* This is intended for speculative data generators, where the data can
* be regenerated if the page couldn't be grabbed. This routine should
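The out-of-line grab_cache_page() definition is removed here; its deleted body shows it was only a thin wrapper around find_or_create_page() using the mapping's own gfp_mask. Where the replacement lives is not shown by this patch (presumably an inline elsewhere); a sketch of the equivalent, with the hypothetical name grab_cache_page_equiv() used purely for illustration:

/*
 * Equivalent of the removed wrapper, reconstructed from the deleted
 * lines above; the name is hypothetical, and the location of the real
 * replacement is not visible in this patch.
 */
static inline struct page *grab_cache_page_equiv(struct address_space *mapping,
                                                 unsigned long index)
{
        return find_or_create_page(mapping, index, mapping->gfp_mask);
}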
@@ -1291,21 +1310,16 @@
/*
* Mark a page as having seen activity.
*
- * If it was already so marked, move it
- * to the active queue and drop the referenced
- * bit. Otherwise, just mark it for future
- * action..
+ * If it was already so marked, move it to the active queue and drop
+ * the referenced bit. Otherwise, just mark it for future action..
*/
void mark_page_accessed(struct page *page)
{
if (!PageActive(page) && PageReferenced(page)) {
activate_page(page);
ClearPageReferenced(page);
- return;
- }
-
- /* Mark the page referenced, AFTER checking for previous usage.. */
- SetPageReferenced(page);
+ } else
+ SetPageReferenced(page);
}
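Assembled from the hunk above, the patched mark_page_accessed() reads as follows; only the comment wording and the early-return shape change, while the two-touch behaviour (mark referenced on the first touch, promote to the active list and clear the bit on the second) is unchanged:

/* mark_page_accessed() as it reads after the hunk above. */
void mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && PageReferenced(page)) {
                /* second touch: promote and drop the referenced bit */
                activate_page(page);
                ClearPageReferenced(page);
        } else
                /* first touch: just mark it for future action */
                SetPageReferenced(page);
}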
/*
@@ -1534,6 +1548,7 @@
struct kiobuf * iobuf;
struct address_space * mapping = filp->f_dentry->d_inode->i_mapping;
struct inode * inode = mapping->host;
+ loff_t size = inode->i_size;
new_iobuf = 0;
iobuf = filp->f_iobuf;
@@ -1559,6 +1574,9 @@
if (!mapping->a_ops->direct_IO)
goto out_free;
+ if ((rw == READ) && (offset + count > size))
+ count = size - offset;
+
/*
* Flush to disk exclusively the _data_, metadata must remain
* completly asynchronous or performance will go to /dev/null.
@@ -1589,6 +1607,7 @@
if (retval >= 0) {
count -= retval;
buf += retval;
+ /* warning: weird semantics here, we're reporting a read behind the end of the file */
progress += retval;
}
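Taken together, the three hunks above sample inode->i_size once at entry to generic_file_direct_IO() and clamp O_DIRECT reads so the request never extends past end of file; the new warning comment flags that the progress accounting afterwards can still report a read behind EOF. The clamping rule on its own, written as a stand-alone helper purely for illustration (clamp_direct_read_count() is not a function in the patch; the caller has already checked pos < size, see the next hunk, so the subtraction cannot go negative):

/*
 * Illustrative helper, not part of the patch: the EOF clamp that the
 * second hunk above adds inside generic_file_direct_IO().
 */
static size_t clamp_direct_read_count(int rw, loff_t offset, size_t count,
                                      loff_t size)
{
        if ((rw == READ) && (offset + count > size))
                count = size - offset;
        return count;
}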
@@ -1678,8 +1697,6 @@
goto out; /* skip atime */
size = inode->i_size;
if (pos < size) {
- if (pos + count > size)
- count = size - pos;
retval = generic_file_direct_IO(READ, filp, buf, count, pos);
if (retval > 0)
*ppos = pos + retval;
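The final hunk removes the same EOF clamp from the O_DIRECT branch of the generic read path, since generic_file_direct_IO() now applies it itself. After the patch, the visible branch reduces to the following (lines outside the hunk's context are assumed from 2.4.x):

        /* O_DIRECT read branch after this patch; the EOF clamp now
         * happens inside generic_file_direct_IO(). */
        size = inode->i_size;
        if (pos < size) {
                retval = generic_file_direct_IO(READ, filp, buf, count, pos);
                if (retval > 0)
                        *ppos = pos + retval;
        }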