patch-2.4.15 linux/fs/buffer.c
Next file: linux/fs/dquot.c
Previous file: linux/fs/block_dev.c
Back to the patch index
Back to the overall index
- Lines: 245
- Date: Wed Nov 21 14:40:17 2001
- Orig file: v2.4.14/linux/fs/buffer.c
- Orig date: Mon Nov 5 15:55:33 2001
diff -u --recursive --new-file v2.4.14/linux/fs/buffer.c linux/fs/buffer.c
@@ -45,6 +45,7 @@
#include <linux/quotaops.h>
#include <linux/iobuf.h>
#include <linux/highmem.h>
+#include <linux/module.h>
#include <linux/completion.h>
#include <asm/uaccess.h>
@@ -538,6 +539,15 @@
__remove_from_lru_list(bh);
}
+static void remove_from_queues(struct buffer_head *bh)
+{
+ spin_lock(&lru_list_lock);
+ write_lock(&hash_table_lock);
+ __remove_from_queues(bh);
+ write_unlock(&hash_table_lock);
+ spin_unlock(&lru_list_lock);
+}
+
struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
{
struct buffer_head *bh, **p = &hash(dev, block);
@@ -613,8 +623,12 @@
information that was supposed to be just stored on the physical layer
by the user.
- Thus invalidate_buffers in general usage is not allwowed to trash dirty
- buffers. For example ioctl(FLSBLKBUF) expects dirty data to be preserved.
+ Thus invalidate_buffers in general usage is not allowed to trash
+ dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
+ be preserved. These buffers are simply skipped.
+
+ We also skip buffers which are still in use. For example this can
+ happen if a userspace program is reading the block device.
NOTE: In the case where the user removed a removable-media-disk even if
there's still dirty data not synced on disk (due a bug in the device driver
@@ -1090,6 +1104,12 @@
}
}
+void set_buffer_flushtime(struct buffer_head *bh)
+{
+ bh->b_flushtime = jiffies + bdf_prm.b_un.age_buffer;
+}
+EXPORT_SYMBOL(set_buffer_flushtime);
+
/*
* A buffer may need to be moved from one buffer list to another
* (e.g. in case it is not shared any more). Handle this.
@@ -1152,6 +1172,7 @@
struct buffer_head * bh;
bh = getblk(dev, block, size);
+ touch_buffer(bh);
if (buffer_uptodate(bh))
return bh;
ll_rw_block(READ, 1, &bh);
@@ -1165,7 +1186,7 @@
/*
* Note: the caller should wake up the buffer_wait list if needed.
*/
-static __inline__ void __put_unused_buffer_head(struct buffer_head * bh)
+static void __put_unused_buffer_head(struct buffer_head * bh)
{
if (bh->b_inode)
BUG();
@@ -1182,12 +1203,20 @@
}
}
+void put_unused_buffer_head(struct buffer_head *bh)
+{
+ spin_lock(&unused_list_lock);
+ __put_unused_buffer_head(bh);
+ spin_unlock(&unused_list_lock);
+}
+EXPORT_SYMBOL(put_unused_buffer_head);
+
/*
* Reserve NR_RESERVED buffer heads for async IO requests to avoid
* no-buffer-head deadlock. Return NULL on failure; waiting for
* buffer heads is now handled in create_buffers().
*/
-static struct buffer_head * get_unused_buffer_head(int async)
+struct buffer_head * get_unused_buffer_head(int async)
{
struct buffer_head * bh;
@@ -1228,6 +1257,7 @@
return NULL;
}
+EXPORT_SYMBOL(get_unused_buffer_head);
void set_bh_page (struct buffer_head *bh, struct page *page, unsigned long offset)
{
@@ -1242,6 +1272,7 @@
else
bh->b_data = page_address(page) + offset;
}
+EXPORT_SYMBOL(set_bh_page);
/*
* Create the appropriate buffers when given a page for data area and
@@ -1331,10 +1362,36 @@
clear_bit(BH_Mapped, &bh->b_state);
clear_bit(BH_Req, &bh->b_state);
clear_bit(BH_New, &bh->b_state);
+ remove_from_queues(bh);
unlock_buffer(bh);
}
}
+/**
+ * try_to_release_page - release old fs-specific metadata on a page
+ *
+ */
+
+int try_to_release_page(struct page * page, int gfp_mask)
+{
+ if (!PageLocked(page))
+ BUG();
+
+ if (!page->mapping)
+ goto try_to_free;
+ if (!page->mapping->a_ops->releasepage)
+ goto try_to_free;
+ if (page->mapping->a_ops->releasepage(page, gfp_mask))
+ goto try_to_free;
+ /*
+ * We couldn't release buffer metadata; don't even bother trying
+ * to release buffers.
+ */
+ return 0;
+try_to_free:
+ return try_to_free_buffers(page, gfp_mask);
+}
+
/*
* We don't have to release all buffers here, but
* we have to be sure that no dirty buffer is left
@@ -1378,7 +1435,7 @@
* instead.
*/
if (!offset) {
- if (!try_to_free_buffers(page, 0))
+ if (!try_to_release_page(page, 0))
return 0;
}
@@ -1406,6 +1463,7 @@
page->buffers = head;
page_cache_get(page);
}
+EXPORT_SYMBOL(create_empty_buffers);
/*
* We are taking a block for data and we don't want any output from any
@@ -1446,8 +1504,7 @@
*/
/*
- * block_write_full_page() is SMP-safe - currently it's still
- * being called with the kernel lock held, but the code is ready.
+ * block_write_full_page() is SMP threaded - the kernel lock is not held.
*/
static int __block_write_full_page(struct inode *inode, struct page *page, get_block_t *get_block)
{
@@ -1941,6 +1998,47 @@
return tmp.b_blocknr;
}
+int generic_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsigned long blocknr, int blocksize, get_block_t * get_block)
+{
+ int i, nr_blocks, retval;
+ unsigned long * blocks = iobuf->blocks;
+
+ nr_blocks = iobuf->length / blocksize;
+ /* build the blocklist */
+ for (i = 0; i < nr_blocks; i++, blocknr++) {
+ struct buffer_head bh;
+
+ bh.b_state = 0;
+ bh.b_dev = inode->i_dev;
+ bh.b_size = blocksize;
+
+ retval = get_block(inode, blocknr, &bh, rw == READ ? 0 : 1);
+ if (retval)
+ goto out;
+
+ if (rw == READ) {
+ if (buffer_new(&bh))
+ BUG();
+ if (!buffer_mapped(&bh)) {
+ /* there was a hole in the filesystem */
+ blocks[i] = -1UL;
+ continue;
+ }
+ } else {
+ if (buffer_new(&bh))
+ unmap_underlying_metadata(&bh);
+ if (!buffer_mapped(&bh))
+ BUG();
+ }
+ blocks[i] = bh.b_blocknr;
+ }
+
+ retval = brw_kiovec(rw, 1, &iobuf, inode->i_dev, iobuf->blocks, blocksize);
+
+ out:
+ return retval;
+}
+
/*
* IO completion routine for a buffer_head being used for kiobuf IO: we
* can't dispatch the kiobuf callback until io_count reaches 0.
@@ -2294,10 +2392,13 @@
unsigned long index;
int sizebits;
- if ((size & 511) || (size > PAGE_SIZE)) {
- printk(KERN_ERR "VFS: grow_buffers: size = %d\n",size);
- return 0;
- }
+ /* Size must be multiple of hard sectorsize */
+ if (size & (get_hardsect_size(dev)-1))
+ BUG();
+ /* Size must be within 512 bytes and PAGE_SIZE */
+ if (size < 512 || size > PAGE_SIZE)
+ BUG();
+
sizebits = -1;
do {
sizebits++;
@@ -2444,6 +2545,7 @@
wakeup_bdflush();
return 0;
}
+EXPORT_SYMBOL(try_to_free_buffers);
/* ================== Debugging =================== */
FUNET's LINUX-ADM group, linux-adm@nic.funet.fi
TCL-scripts by Sam Shen (who was at: slshen@lbl.gov)