patch-2.1.73 linux/fs/ext2/fsync.c

diff -u --recursive --new-file v2.1.72/linux/fs/ext2/fsync.c linux/fs/ext2/fsync.c
@@ -57,6 +57,72 @@
 	return 0;
 }
 
+#ifndef __LITTLE_ENDIAN
+static int sync_block_swab32 (struct inode * inode, u32 * block, int wait)
+{
+	struct buffer_head * bh;
+	
+	if (!le32_to_cpu(*block))
+		return 0;
+	bh = get_hash_table (inode->i_dev, le32_to_cpu(*block), blocksize);
+	if (!bh)
+		return 0;
+	if (wait && buffer_req(bh) && !buffer_uptodate(bh)) {
+		brelse (bh);
+		return -1;
+	}
+	if (wait || !buffer_uptodate(bh) || !buffer_dirty(bh)) {
+		brelse (bh);
+		return 0;
+	}
+	ll_rw_block (WRITE, 1, &bh);
+	bh->b_count--;
+	return 0;
+}
+#else
+#define sync_block_swab32 sync_block
+#endif
+
+
+static int sync_iblock (struct inode * inode, u32 * iblock, 
+			struct buffer_head ** bh, int wait) 
+{
+	int rc, tmp;
+	
+	*bh = NULL;
+	tmp = *iblock;
+	if (!tmp)
+		return 0;
+	rc = sync_block (inode, iblock, wait);
+	if (rc)
+		return rc;
+	*bh = bread (inode->i_dev, tmp, blocksize);
+	if (!*bh)
+		return -1;
+	return 0;
+}
+
+#ifndef __LITTLE_ENDIAN
+static int sync_iblock_swab32 (struct inode * inode, u32 * iblock, 
+			       struct buffer_head ** bh, int wait) 
+{
+	int rc, tmp;
+	
+	*bh = NULL;
+	tmp = le32_to_cpu(*iblock);
+	if (!tmp)
+		return 0;
+	rc = sync_block_swab32 (inode, iblock, wait);
+	if (rc)
+		return rc;
+	*bh = bread (inode->i_dev, tmp, blocksize);
+	if (!*bh)
+		return -1;
+	return 0;
+}
+#else
+#define sync_iblock_swab32 sync_iblock
+#endif
 
 static int sync_direct (struct inode * inode, int wait)
 {
@@ -71,15 +137,122 @@
 	return err;
 }
 
+static int sync_indirect (struct inode * inode, u32 * iblock, int wait)
+{
+	int i;
+	struct buffer_head * ind_bh;
+	int rc, err = 0;
+
+	rc = sync_iblock (inode, iblock, &ind_bh, wait);
+	if (rc || !ind_bh)
+		return rc;
+	
+	for (i = 0; i < addr_per_block; i++) {
+		rc = sync_block_swab32 (inode, 
+					((u32 *) ind_bh->b_data) + i,
+					wait);
+		if (rc)
+			err = rc;
+	}
+	brelse (ind_bh);
+	return err;
+}
+
+#ifndef __LITTLE_ENDIAN
+static __inline__ int sync_indirect_swab32 (struct inode * inode, u32 * iblock, int wait)
+{
+	int i;
+	struct buffer_head * ind_bh;
+	int rc, err = 0;
+
+	rc = sync_iblock_swab32 (inode, iblock, &ind_bh, wait);
+	if (rc || !ind_bh)
+		return rc;
+	
+	for (i = 0; i < addr_per_block; i++) {
+		rc = sync_block_swab32 (inode, 
+					((u32 *) ind_bh->b_data) + i,
+					wait);
+		if (rc)
+			err = rc;
+	}
+	brelse (ind_bh);
+	return err;
+}
+#else
+#define sync_indirect_swab32 sync_indirect
+#endif
+
+static int sync_dindirect (struct inode * inode, u32 * diblock, int wait)
+{
+	int i;
+	struct buffer_head * dind_bh;
+	int rc, err = 0;
+
+	rc = sync_iblock (inode, diblock, &dind_bh, wait);
+	if (rc || !dind_bh)
+		return rc;
+	
+	for (i = 0; i < addr_per_block; i++) {
+		rc = sync_indirect_swab32 (inode,
+					   ((u32 *) dind_bh->b_data) + i,
+					   wait);
+		if (rc)
+			err = rc;
+	}
+	brelse (dind_bh);
+	return err;
+}
+
+#ifndef __LITTLE_ENDIAN
+static __inline__ int sync_dindirect_swab32 (struct inode * inode, u32 * diblock, int wait)
+{
+	int i;
+	struct buffer_head * dind_bh;
+	int rc, err = 0;
+
+	rc = sync_iblock_swab32 (inode, diblock, &dind_bh, wait);
+	if (rc || !dind_bh)
+		return rc;
+	
+	for (i = 0; i < addr_per_block; i++) {
+		rc = sync_indirect_swab32 (inode,
+					   ((u32 *) dind_bh->b_data) + i,
+					   wait);
+		if (rc)
+			err = rc;
+	}
+	brelse (dind_bh);
+	return err;
+}
+#else
+#define sync_dindirect_swab32 sync_dindirect
+#endif
+
+static int sync_tindirect (struct inode * inode, u32 * tiblock, int wait)
+{
+	int i;
+	struct buffer_head * tind_bh;
+	int rc, err = 0;
+
+	rc = sync_iblock (inode, tiblock, &tind_bh, wait);
+	if (rc || !tind_bh)
+		return rc;
+	
+	for (i = 0; i < addr_per_block; i++) {
+		rc = sync_dindirect_swab32 (inode,
+					    ((u32 *) tind_bh->b_data) + i,
+					    wait);
+		if (rc)
+			err = rc;
+	}
+	brelse (tind_bh);
+	return err;
+}
+
 /*
  *	File may be NULL when we are called. Perhaps we shouldn't
  *	even pass file to fsync ?
- *
- *	This currently falls back to synching the whole device when
- *	the file is larger than can fit directly in the inode. This
- *	is because dirty-buffer handling is indexed by the device
- *	of the buffer, which makes it much faster to sync the whole
- *	device than to sync just one large file.
  */
 
 int ext2_sync_file(struct file * file, struct dentry *dentry)
@@ -96,12 +269,18 @@
 		 */
 		goto skip;
 
-	if (inode->i_size > EXT2_NDIR_BLOCKS*blocksize)
-		return file_fsync(file, dentry);
-
 	for (wait=0; wait<=1; wait++)
 	{
 		err |= sync_direct (inode, wait);
+		err |= sync_indirect (inode,
+				      inode->u.ext2_i.i_data+EXT2_IND_BLOCK,
+				      wait);
+		err |= sync_dindirect (inode,
+				       inode->u.ext2_i.i_data+EXT2_DIND_BLOCK, 
+				       wait);
+		err |= sync_tindirect (inode, 
+				       inode->u.ext2_i.i_data+EXT2_TIND_BLOCK, 
+				       wait);
 	}
 skip:
 	err |= ext2_sync_inode (inode);
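
The *_swab32 helpers in this patch exist because ext2 stores block numbers on disk as little-endian 32-bit values: when an indirect block is read on a big-endian machine, every pointer in it has to go through le32_to_cpu() before it can be used as a block number, while on little-endian machines the helpers simply alias their plain counterparts (the #ifndef __LITTLE_ENDIAN blocks above). A minimal user-space sketch of that conversion, with my_le32_to_cpu() and the sample bytes as illustrative stand-ins rather than kernel code:

/*
 * Illustrative only, not kernel code: my_le32_to_cpu() stands in for
 * the kernel's le32_to_cpu(), and the sample block number is made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Interpret the stored bytes as a little-endian 32-bit value,
 * independent of the host CPU's byte order. */
static uint32_t my_le32_to_cpu(uint32_t v)
{
	const uint8_t *b = (const uint8_t *) &v;

	return (uint32_t) b[0]
	     | ((uint32_t) b[1] << 8)
	     | ((uint32_t) b[2] << 16)
	     | ((uint32_t) b[3] << 24);
}

int main(void)
{
	/* A block pointer as it sits inside an on-disk indirect block:
	 * least-significant byte first, here the value 1337. */
	uint8_t on_disk[4] = { 0x39, 0x05, 0x00, 0x00 };
	uint32_t raw;

	memcpy(&raw, on_disk, sizeof raw);

	/* On a little-endian host raw is already 1337 and the conversion
	 * is a no-op; on a big-endian host it swaps the bytes.  Either
	 * way the printed block number is 1337. */
	printf("block number = %u\n", (unsigned) my_le32_to_cpu(raw));
	return 0;
}

With the sync_indirect/sync_dindirect/sync_tindirect walkers in place, ext2_sync_file() can flush just the file's own block tree on each pass of the wait loop, instead of falling back to file_fsync() and syncing the whole device whenever the file extends beyond its direct blocks.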
