author     Linus Torvalds <torvalds@g5.osdl.org>   2005-07-12 20:21:28 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>   2005-07-12 20:21:28 -0700
commit     bd4c625c061c2a38568d0add3478f59172455159 (patch)
tree       1c44a17c55bce2ee7ad5ea3d15a208ecc0955f74 /fs/reiserfs/file.c
parent     7fa94c8868edfef8cb6a201fcc9a5078b7b961da (diff)
reiserfs: run scripts/Lindent on reiserfs code
This was a pure indentation change, using:

    scripts/Lindent fs/reiserfs/*.c include/linux/reiserfs_*.h

to make reiserfs match the regular Linux indentation style.

As Jeff Mahoney <jeffm@suse.com> writes:

  The ReiserFS code is a mix of a number of different coding styles, sometimes
  different even from line-to-line. Since the code has been relatively stable
  for quite some time and there are few outstanding patches to be applied, it
  is time to reformat the code to conform to the Linux style standard outlined
  in Documentation/CodingStyle.

  This patch contains the result of running scripts/Lindent against
  fs/reiserfs/*.c and include/linux/reiserfs_*.h. There are places where the
  code can be made to look better, but I'd rather keep those patches separate
  so that there isn't a subtle by-hand hand accident in the middle of a huge
  patch. To be clear: This patch is reformatting *only*.

  A number of patches may follow that continue to make the code more
  consistent with the Linux coding style.

  Hans wasn't particularly enthusiastic about these patches, but said he
  wouldn't really oppose them either.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
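For readers who want to reproduce or sanity-check the reformatting, here is a minimal sketch. It assumes a kernel tree of this vintage, where scripts/Lindent is a small wrapper around indent(1); the Lindent invocation is the one quoted in the commit message, and git's -w option is used only to confirm that almost nothing but whitespace changed (Lindent also re-wraps long lines, so a few re-flowed hunks will remain):

    # Re-run the indenter exactly as described above
    scripts/Lindent fs/reiserfs/*.c include/linux/reiserfs_*.h

    # Inspect this commit with whitespace differences ignored; indentation-only
    # hunks disappear, leaving only the lines that Lindent re-wrapped
    git show -w bd4c625c061c2a38568d0add3478f59172455159 -- fs/reiserfs/file.c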
Diffstat (limited to 'fs/reiserfs/file.c')
-rw-r--r--   fs/reiserfs/file.c   2564
1 files changed, 1362 insertions, 1202 deletions
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 12e91209544..c9f178fb494 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -2,7 +2,6 @@
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
-
#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/reiserfs_acl.h>
@@ -31,82 +30,84 @@
** We use reiserfs_truncate_file to pack the tail, since it already has
** all the conditions coded.
*/
-static int reiserfs_file_release (struct inode * inode, struct file * filp)
+static int reiserfs_file_release(struct inode *inode, struct file *filp)
{
- struct reiserfs_transaction_handle th ;
- int err;
- int jbegin_failure = 0;
+ struct reiserfs_transaction_handle th;
+ int err;
+ int jbegin_failure = 0;
- if (!S_ISREG (inode->i_mode))
- BUG ();
+ if (!S_ISREG(inode->i_mode))
+ BUG();
- /* fast out for when nothing needs to be done */
- if ((atomic_read(&inode->i_count) > 1 ||
- !(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
- !tail_has_to_be_packed(inode)) &&
- REISERFS_I(inode)->i_prealloc_count <= 0) {
- return 0;
- }
-
- reiserfs_write_lock(inode->i_sb);
- down (&inode->i_sem);
- /* freeing preallocation only involves relogging blocks that
- * are already in the current transaction. preallocation gets
- * freed at the end of each transaction, so it is impossible for
- * us to log any additional blocks (including quota blocks)
- */
- err = journal_begin(&th, inode->i_sb, 1);
- if (err) {
- /* uh oh, we can't allow the inode to go away while there
- * is still preallocation blocks pending. Try to join the
- * aborted transaction
- */
- jbegin_failure = err;
- err = journal_join_abort(&th, inode->i_sb, 1);
+ /* fast out for when nothing needs to be done */
+ if ((atomic_read(&inode->i_count) > 1 ||
+ !(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
+ !tail_has_to_be_packed(inode)) &&
+ REISERFS_I(inode)->i_prealloc_count <= 0) {
+ return 0;
+ }
+ reiserfs_write_lock(inode->i_sb);
+ down(&inode->i_sem);
+ /* freeing preallocation only involves relogging blocks that
+ * are already in the current transaction. preallocation gets
+ * freed at the end of each transaction, so it is impossible for
+ * us to log any additional blocks (including quota blocks)
+ */
+ err = journal_begin(&th, inode->i_sb, 1);
if (err) {
- /* hmpf, our choices here aren't good. We can pin the inode
- * which will disallow unmount from every happening, we can
- * do nothing, which will corrupt random memory on unmount,
- * or we can forcibly remove the file from the preallocation
- * list, which will leak blocks on disk. Lets pin the inode
- * and let the admin know what is going on.
- */
- igrab(inode);
- reiserfs_warning(inode->i_sb, "pinning inode %lu because the "
- "preallocation can't be freed");
- goto out;
+ /* uh oh, we can't allow the inode to go away while there
+ * is still preallocation blocks pending. Try to join the
+ * aborted transaction
+ */
+ jbegin_failure = err;
+ err = journal_join_abort(&th, inode->i_sb, 1);
+
+ if (err) {
+ /* hmpf, our choices here aren't good. We can pin the inode
+ * which will disallow unmount from every happening, we can
+ * do nothing, which will corrupt random memory on unmount,
+ * or we can forcibly remove the file from the preallocation
+ * list, which will leak blocks on disk. Lets pin the inode
+ * and let the admin know what is going on.
+ */
+ igrab(inode);
+ reiserfs_warning(inode->i_sb,
+ "pinning inode %lu because the "
+ "preallocation can't be freed");
+ goto out;
+ }
}
- }
- reiserfs_update_inode_transaction(inode) ;
+ reiserfs_update_inode_transaction(inode);
#ifdef REISERFS_PREALLOCATE
- reiserfs_discard_prealloc (&th, inode);
+ reiserfs_discard_prealloc(&th, inode);
#endif
- err = journal_end(&th, inode->i_sb, 1);
-
- /* copy back the error code from journal_begin */
- if (!err)
- err = jbegin_failure;
-
- if (!err && atomic_read(&inode->i_count) <= 1 &&
- (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
- tail_has_to_be_packed (inode)) {
- /* if regular file is released by last holder and it has been
- appended (we append by unformatted node only) or its direct
- item(s) had to be converted, then it may have to be
- indirect2direct converted */
- err = reiserfs_truncate_file(inode, 0) ;
- }
-out:
- up (&inode->i_sem);
- reiserfs_write_unlock(inode->i_sb);
- return err;
+ err = journal_end(&th, inode->i_sb, 1);
+
+ /* copy back the error code from journal_begin */
+ if (!err)
+ err = jbegin_failure;
+
+ if (!err && atomic_read(&inode->i_count) <= 1 &&
+ (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
+ tail_has_to_be_packed(inode)) {
+ /* if regular file is released by last holder and it has been
+ appended (we append by unformatted node only) or its direct
+ item(s) had to be converted, then it may have to be
+ indirect2direct converted */
+ err = reiserfs_truncate_file(inode, 0);
+ }
+ out:
+ up(&inode->i_sem);
+ reiserfs_write_unlock(inode->i_sb);
+ return err;
}
-static void reiserfs_vfs_truncate_file(struct inode *inode) {
- reiserfs_truncate_file(inode, 1) ;
+static void reiserfs_vfs_truncate_file(struct inode *inode)
+{
+ reiserfs_truncate_file(inode, 1);
}
/* Sync a reiserfs file. */
@@ -116,26 +117,24 @@ static void reiserfs_vfs_truncate_file(struct inode *inode) {
* be removed...
*/
-static int reiserfs_sync_file(
- struct file * p_s_filp,
- struct dentry * p_s_dentry,
- int datasync
- ) {
- struct inode * p_s_inode = p_s_dentry->d_inode;
- int n_err;
- int barrier_done;
-
- if (!S_ISREG(p_s_inode->i_mode))
- BUG ();
- n_err = sync_mapping_buffers(p_s_inode->i_mapping) ;
- reiserfs_write_lock(p_s_inode->i_sb);
- barrier_done = reiserfs_commit_for_inode(p_s_inode);
- reiserfs_write_unlock(p_s_inode->i_sb);
- if (barrier_done != 1)
- blkdev_issue_flush(p_s_inode->i_sb->s_bdev, NULL);
- if (barrier_done < 0)
- return barrier_done;
- return ( n_err < 0 ) ? -EIO : 0;
+static int reiserfs_sync_file(struct file *p_s_filp,
+ struct dentry *p_s_dentry, int datasync)
+{
+ struct inode *p_s_inode = p_s_dentry->d_inode;
+ int n_err;
+ int barrier_done;
+
+ if (!S_ISREG(p_s_inode->i_mode))
+ BUG();
+ n_err = sync_mapping_buffers(p_s_inode->i_mapping);
+ reiserfs_write_lock(p_s_inode->i_sb);
+ barrier_done = reiserfs_commit_for_inode(p_s_inode);
+ reiserfs_write_unlock(p_s_inode->i_sb);
+ if (barrier_done != 1)
+ blkdev_issue_flush(p_s_inode->i_sb->s_bdev, NULL);
+ if (barrier_done < 0)
+ return barrier_done;
+ return (n_err < 0) ? -EIO : 0;
}
/* I really do not want to play with memory shortage right now, so
@@ -147,700 +146,797 @@ static int reiserfs_sync_file(
/* Allocates blocks for a file to fulfil write request.
Maps all unmapped but prepared pages from the list.
Updates metadata with newly allocated blocknumbers as needed */
-static int reiserfs_allocate_blocks_for_region(
- struct reiserfs_transaction_handle *th,
- struct inode *inode, /* Inode we work with */
- loff_t pos, /* Writing position */
- int num_pages, /* number of pages write going
- to touch */
- int write_bytes, /* amount of bytes to write */
- struct page **prepared_pages, /* array of
- prepared pages
- */
- int blocks_to_allocate /* Amount of blocks we
- need to allocate to
- fit the data into file
- */
- )
+static int reiserfs_allocate_blocks_for_region(struct reiserfs_transaction_handle *th, struct inode *inode, /* Inode we work with */
+ loff_t pos, /* Writing position */
+ int num_pages, /* number of pages write going
+ to touch */
+ int write_bytes, /* amount of bytes to write */
+ struct page **prepared_pages, /* array of
+ prepared pages
+ */
+ int blocks_to_allocate /* Amount of blocks we
+ need to allocate to
+ fit the data into file
+ */
+ )
{
- struct cpu_key key; // cpu key of item that we are going to deal with
- struct item_head *ih; // pointer to item head that we are going to deal with
- struct buffer_head *bh; // Buffer head that contains items that we are going to deal with
- __le32 * item; // pointer to item we are going to deal with
- INITIALIZE_PATH(path); // path to item, that we are going to deal with.
- b_blocknr_t *allocated_blocks; // Pointer to a place where allocated blocknumbers would be stored.
- reiserfs_blocknr_hint_t hint; // hint structure for block allocator.
- size_t res; // return value of various functions that we call.
- int curr_block; // current block used to keep track of unmapped blocks.
- int i; // loop counter
- int itempos; // position in item
- unsigned int from = (pos & (PAGE_CACHE_SIZE - 1)); // writing position in
- // first page
- unsigned int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1; /* last modified byte offset in last page */
- __u64 hole_size ; // amount of blocks for a file hole, if it needed to be created.
- int modifying_this_item = 0; // Flag for items traversal code to keep track
- // of the fact that we already prepared
- // current block for journal
- int will_prealloc = 0;
- RFALSE(!blocks_to_allocate, "green-9004: tried to allocate zero blocks?");
-
- /* only preallocate if this is a small write */
- if (REISERFS_I(inode)->i_prealloc_count ||
- (!(write_bytes & (inode->i_sb->s_blocksize -1)) &&
- blocks_to_allocate <
- REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize))
- will_prealloc = REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize;
-
- allocated_blocks = kmalloc((blocks_to_allocate + will_prealloc) *
- sizeof(b_blocknr_t), GFP_NOFS);
-
- /* First we compose a key to point at the writing position, we want to do
- that outside of any locking region. */
- make_cpu_key (&key, inode, pos+1, TYPE_ANY, 3/*key length*/);
-
- /* If we came here, it means we absolutely need to open a transaction,
- since we need to allocate some blocks */
- reiserfs_write_lock(inode->i_sb); // Journaling stuff and we need that.
- res = journal_begin(th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3 + 1 + 2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb)); // Wish I know if this number enough
- if (res)
- goto error_exit;
- reiserfs_update_inode_transaction(inode) ;
-
- /* Look for the in-tree position of our write, need path for block allocator */
- res = search_for_position_by_key(inode->i_sb, &key, &path);
- if ( res == IO_ERROR ) {
- res = -EIO;
- goto error_exit;
- }
-
- /* Allocate blocks */
- /* First fill in "hint" structure for block allocator */
- hint.th = th; // transaction handle.
- hint.path = &path; // Path, so that block allocator can determine packing locality or whatever it needs to determine.
- hint.inode = inode; // Inode is needed by block allocator too.
- hint.search_start = 0; // We have no hint on where to search free blocks for block allocator.
- hint.key = key.on_disk_key; // on disk key of file.
- hint.block = inode->i_blocks>>(inode->i_sb->s_blocksize_bits-9); // Number of disk blocks this file occupies already.
- hint.formatted_node = 0; // We are allocating blocks for unformatted node.
- hint.preallocate = will_prealloc;
-
- /* Call block allocator to allocate blocks */
- res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate);
- if ( res != CARRY_ON ) {
- if ( res == NO_DISK_SPACE ) {
- /* We flush the transaction in case of no space. This way some
- blocks might become free */
- SB_JOURNAL(inode->i_sb)->j_must_wait = 1;
- res = restart_transaction(th, inode, &path);
- if (res)
- goto error_exit;
-
- /* We might have scheduled, so search again */
- res = search_for_position_by_key(inode->i_sb, &key, &path);
- if ( res == IO_ERROR ) {
- res = -EIO;
+ struct cpu_key key; // cpu key of item that we are going to deal with
+ struct item_head *ih; // pointer to item head that we are going to deal with
+ struct buffer_head *bh; // Buffer head that contains items that we are going to deal with
+ __le32 *item; // pointer to item we are going to deal with
+ INITIALIZE_PATH(path); // path to item, that we are going to deal with.
+ b_blocknr_t *allocated_blocks; // Pointer to a place where allocated blocknumbers would be stored.
+ reiserfs_blocknr_hint_t hint; // hint structure for block allocator.
+ size_t res; // return value of various functions that we call.
+ int curr_block; // current block used to keep track of unmapped blocks.
+ int i; // loop counter
+ int itempos; // position in item
+ unsigned int from = (pos & (PAGE_CACHE_SIZE - 1)); // writing position in
+ // first page
+ unsigned int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1; /* last modified byte offset in last page */
+ __u64 hole_size; // amount of blocks for a file hole, if it needed to be created.
+ int modifying_this_item = 0; // Flag for items traversal code to keep track
+ // of the fact that we already prepared
+ // current block for journal
+ int will_prealloc = 0;
+ RFALSE(!blocks_to_allocate,
+ "green-9004: tried to allocate zero blocks?");
+
+ /* only preallocate if this is a small write */
+ if (REISERFS_I(inode)->i_prealloc_count ||
+ (!(write_bytes & (inode->i_sb->s_blocksize - 1)) &&
+ blocks_to_allocate <
+ REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize))
+ will_prealloc =
+ REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize;
+
+ allocated_blocks = kmalloc((blocks_to_allocate + will_prealloc) *
+ sizeof(b_blocknr_t), GFP_NOFS);
+
+ /* First we compose a key to point at the writing position, we want to do
+ that outside of any locking region. */
+ make_cpu_key(&key, inode, pos + 1, TYPE_ANY, 3 /*key length */ );
+
+ /* If we came here, it means we absolutely need to open a transaction,
+ since we need to allocate some blocks */
+ reiserfs_write_lock(inode->i_sb); // Journaling stuff and we need that.
+ res = journal_begin(th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3 + 1 + 2 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb)); // Wish I know if this number enough
+ if (res)
goto error_exit;
- }
+ reiserfs_update_inode_transaction(inode);
- /* update changed info for hint structure. */
- res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate);
- if ( res != CARRY_ON ) {
- res = -ENOSPC;
- pathrelse(&path);
+ /* Look for the in-tree position of our write, need path for block allocator */
+ res = search_for_position_by_key(inode->i_sb, &key, &path);
+ if (res == IO_ERROR) {
+ res = -EIO;
goto error_exit;
- }
- } else {
- res = -ENOSPC;
- pathrelse(&path);
- goto error_exit;
}
- }
-#ifdef __BIG_ENDIAN
- // Too bad, I have not found any way to convert a given region from
- // cpu format to little endian format
- {
- int i;
- for ( i = 0; i < blocks_to_allocate ; i++)
- allocated_blocks[i]=cpu_to_le32(allocated_blocks[i]);
- }
-#endif
-
- /* Blocks allocating well might have scheduled and tree might have changed,
- let's search the tree again */
- /* find where in the tree our write should go */
- res = search_for_position_by_key(inode->i_sb, &key, &path);
- if ( res == IO_ERROR ) {
- res = -EIO;
- goto error_exit_free_blocks;
- }
-
- bh = get_last_bh( &path ); // Get a bufferhead for last element in path.
- ih = get_ih( &path ); // Get a pointer to last item head in path.
- item = get_item( &path ); // Get a pointer to last item in path
-
- /* Let's see what we have found */
- if ( res != POSITION_FOUND ) { /* position not found, this means that we
- might need to append file with holes
- first */
- // Since we are writing past the file's end, we need to find out if
- // there is a hole that needs to be inserted before our writing
- // position, and how many blocks it is going to cover (we need to
- // populate pointers to file blocks representing the hole with zeros)
+ /* Allocate blocks */
+ /* First fill in "hint" structure for block allocator */
+ hint.th = th; // transaction handle.
+ hint.path = &path; // Path, so that block allocator can determine packing locality or whatever it needs to determine.
+ hint.inode = inode; // Inode is needed by block allocator too.
+ hint.search_start = 0; // We have no hint on where to search free blocks for block allocator.
+ hint.key = key.on_disk_key; // on disk key of file.
+ hint.block = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9); // Number of disk blocks this file occupies already.
+ hint.formatted_node = 0; // We are allocating blocks for unformatted node.
+ hint.preallocate = will_prealloc;
+
+ /* Call block allocator to allocate blocks */
+ res =
+ reiserfs_allocate_blocknrs(&hint, allocated_blocks,
+ blocks_to_allocate, blocks_to_allocate);
+ if (res != CARRY_ON) {
+ if (res == NO_DISK_SPACE) {
+ /* We flush the transaction in case of no space. This way some
+ blocks might become free */
+ SB_JOURNAL(inode->i_sb)->j_must_wait = 1;
+ res = restart_transaction(th, inode, &path);
+ if (res)
+ goto error_exit;
+
+ /* We might have scheduled, so search again */
+ res =
+ search_for_position_by_key(inode->i_sb, &key,
+ &path);
+ if (res == IO_ERROR) {
+ res = -EIO;
+ goto error_exit;
+ }
+ /* update changed info for hint structure. */
+ res =
+ reiserfs_allocate_blocknrs(&hint, allocated_blocks,
+ blocks_to_allocate,
+ blocks_to_allocate);
+ if (res != CARRY_ON) {
+ res = -ENOSPC;
+ pathrelse(&path);
+ goto error_exit;
+ }
+ } else {
+ res = -ENOSPC;
+ pathrelse(&path);
+ goto error_exit;
+ }
+ }
+#ifdef __BIG_ENDIAN
+ // Too bad, I have not found any way to convert a given region from
+ // cpu format to little endian format
{
- int item_offset = 1;
- /*
- * if ih is stat data, its offset is 0 and we don't want to
- * add 1 to pos in the hole_size calculation
- */
- if (is_statdata_le_ih(ih))
- item_offset = 0;
- hole_size = (pos + item_offset -
- (le_key_k_offset( get_inode_item_key_version(inode),
- &(ih->ih_key)) +
- op_bytes_number(ih, inode->i_sb->s_blocksize))) >>
- inode->i_sb->s_blocksize_bits;
+ int i;
+ for (i = 0; i < blocks_to_allocate; i++)
+ allocated_blocks[i] = cpu_to_le32(allocated_blocks[i]);
}
+#endif
- if ( hole_size > 0 ) {
- int to_paste = min_t(__u64, hole_size, MAX_ITEM_LEN(inode->i_sb->s_blocksize)/UNFM_P_SIZE ); // How much data to insert first time.
- /* area filled with zeroes, to supply as list of zero blocknumbers
- We allocate it outside of loop just in case loop would spin for
- several iterations. */
- char *zeros = kmalloc(to_paste*UNFM_P_SIZE, GFP_ATOMIC); // We cannot insert more than MAX_ITEM_LEN bytes anyway.
- if ( !zeros ) {
- res = -ENOMEM;
+ /* Blocks allocating well might have scheduled and tree might have changed,
+ let's search the tree again */
+ /* find where in the tree our write should go */
+ res = search_for_position_by_key(inode->i_sb, &key, &path);
+ if (res == IO_ERROR) {
+ res = -EIO;
goto error_exit_free_blocks;
- }
- memset ( zeros, 0, to_paste*UNFM_P_SIZE);
- do {
- to_paste = min_t(__u64, hole_size, MAX_ITEM_LEN(inode->i_sb->s_blocksize)/UNFM_P_SIZE );
- if ( is_indirect_le_ih(ih) ) {
- /* Ok, there is existing indirect item already. Need to append it */
- /* Calculate position past inserted item */
- make_cpu_key( &key, inode, le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize), TYPE_INDIRECT, 3);
- res = reiserfs_paste_into_item( th, &path, &key, inode, (char *)zeros, UNFM_P_SIZE*to_paste);
- if ( res ) {
- kfree(zeros);
- goto error_exit_free_blocks;
- }
- } else if ( is_statdata_le_ih(ih) ) {
- /* No existing item, create it */
- /* item head for new item */
- struct item_head ins_ih;
-
- /* create a key for our new item */
- make_cpu_key( &key, inode, 1, TYPE_INDIRECT, 3);
-
- /* Create new item head for our new item */
- make_le_item_head (&ins_ih, &key, key.version, 1,
- TYPE_INDIRECT, to_paste*UNFM_P_SIZE,
- 0 /* free space */);
-
- /* Find where such item should live in the tree */
- res = search_item (inode->i_sb, &key, &path);
- if ( res != ITEM_NOT_FOUND ) {
- /* item should not exist, otherwise we have error */
- if ( res != -ENOSPC ) {
- reiserfs_warning (inode->i_sb,
- "green-9008: search_by_key (%K) returned %d",
- &key, res);
+ }
+
+ bh = get_last_bh(&path); // Get a bufferhead for last element in path.
+ ih = get_ih(&path); // Get a pointer to last item head in path.
+ item = get_item(&path); // Get a pointer to last item in path
+
+ /* Let's see what we have found */
+ if (res != POSITION_FOUND) { /* position not found, this means that we
+ might need to append file with holes
+ first */
+ // Since we are writing past the file's end, we need to find out if
+ // there is a hole that needs to be inserted before our writing
+ // position, and how many blocks it is going to cover (we need to
+ // populate pointers to file blocks representing the hole with zeros)
+
+ {
+ int item_offset = 1;
+ /*
+ * if ih is stat data, its offset is 0 and we don't want to
+ * add 1 to pos in the hole_size calculation
+ */
+ if (is_statdata_le_ih(ih))
+ item_offset = 0;
+ hole_size = (pos + item_offset -
+ (le_key_k_offset
+ (get_inode_item_key_version(inode),
+ &(ih->ih_key)) + op_bytes_number(ih,
+ inode->
+ i_sb->
+ s_blocksize)))
+ >> inode->i_sb->s_blocksize_bits;
+ }
+
+ if (hole_size > 0) {
+ int to_paste = min_t(__u64, hole_size, MAX_ITEM_LEN(inode->i_sb->s_blocksize) / UNFM_P_SIZE); // How much data to insert first time.
+ /* area filled with zeroes, to supply as list of zero blocknumbers
+ We allocate it outside of loop just in case loop would spin for
+ several iterations. */
+ char *zeros = kmalloc(to_paste * UNFM_P_SIZE, GFP_ATOMIC); // We cannot insert more than MAX_ITEM_LEN bytes anyway.
+ if (!zeros) {
+ res = -ENOMEM;
+ goto error_exit_free_blocks;
}
- res = -EIO;
- kfree(zeros);
- goto error_exit_free_blocks;
- }
- res = reiserfs_insert_item( th, &path, &key, &ins_ih, inode, (char *)zeros);
- } else {
- reiserfs_panic(inode->i_sb, "green-9011: Unexpected key type %K\n", &key);
+ memset(zeros, 0, to_paste * UNFM_P_SIZE);
+ do {
+ to_paste =
+ min_t(__u64, hole_size,
+ MAX_ITEM_LEN(inode->i_sb->
+ s_blocksize) /
+ UNFM_P_SIZE);
+ if (is_indirect_le_ih(ih)) {
+ /* Ok, there is existing indirect item already. Need to append it */
+ /* Calculate position past inserted item */
+ make_cpu_key(&key, inode,
+ le_key_k_offset
+ (get_inode_item_key_version
+ (inode),
+ &(ih->ih_key)) +
+ op_bytes_number(ih,
+ inode->
+ i_sb->
+ s_blocksize),
+ TYPE_INDIRECT, 3);
+ res =
+ reiserfs_paste_into_item(th, &path,
+ &key,
+ inode,
+ (char *)
+ zeros,
+ UNFM_P_SIZE
+ *
+ to_paste);
+ if (res) {
+ kfree(zeros);
+ goto error_exit_free_blocks;
+ }
+ } else if (is_statdata_le_ih(ih)) {
+ /* No existing item, create it */
+ /* item head for new item */
+ struct item_head ins_ih;
+
+ /* create a key for our new item */
+ make_cpu_key(&key, inode, 1,
+ TYPE_INDIRECT, 3);
+
+ /* Create new item head for our new item */
+ make_le_item_head(&ins_ih, &key,
+ key.version, 1,
+ TYPE_INDIRECT,
+ to_paste *
+ UNFM_P_SIZE,
+ 0 /* free space */ );
+
+ /* Find where such item should live in the tree */
+ res =
+ search_item(inode->i_sb, &key,
+ &path);
+ if (res != ITEM_NOT_FOUND) {
+ /* item should not exist, otherwise we have error */
+ if (res != -ENOSPC) {
+ reiserfs_warning(inode->
+ i_sb,
+ "green-9008: search_by_key (%K) returned %d",
+ &key,
+ res);
+ }
+ res = -EIO;
+ kfree(zeros);
+ goto error_exit_free_blocks;
+ }
+ res =
+ reiserfs_insert_item(th, &path,
+ &key, &ins_ih,
+ inode,
+ (char *)zeros);
+ } else {
+ reiserfs_panic(inode->i_sb,
+ "green-9011: Unexpected key type %K\n",
+ &key);
+ }
+ if (res) {
+ kfree(zeros);
+ goto error_exit_free_blocks;
+ }
+ /* Now we want to check if transaction is too full, and if it is
+ we restart it. This will also free the path. */
+ if (journal_transaction_should_end
+ (th, th->t_blocks_allocated)) {
+ res =
+ restart_transaction(th, inode,
+ &path);
+ if (res) {
+ pathrelse(&path);
+ kfree(zeros);
+ goto error_exit;
+ }
+ }
+
+ /* Well, need to recalculate path and stuff */
+ set_cpu_key_k_offset(&key,
+ cpu_key_k_offset(&key) +
+ (to_paste << inode->
+ i_blkbits));
+ res =
+ search_for_position_by_key(inode->i_sb,
+ &key, &path);
+ if (res == IO_ERROR) {
+ res = -EIO;
+ kfree(zeros);
+ goto error_exit_free_blocks;
+ }
+ bh = get_last_bh(&path);
+ ih = get_ih(&path);
+ item = get_item(&path);
+ hole_size -= to_paste;
+ } while (hole_size);
+ kfree(zeros);
}
- if ( res ) {
- kfree(zeros);
- goto error_exit_free_blocks;
+ }
+ // Go through existing indirect items first
+ // replace all zeroes with blocknumbers from list
+ // Note that if no corresponding item was found, by previous search,
+ // it means there are no existing in-tree representation for file area
+ // we are going to overwrite, so there is nothing to scan through for holes.
+ for (curr_block = 0, itempos = path.pos_in_item;
+ curr_block < blocks_to_allocate && res == POSITION_FOUND;) {
+ retry:
+
+ if (itempos >= ih_item_len(ih) / UNFM_P_SIZE) {
+ /* We run out of data in this indirect item, let's look for another
+ one. */
+ /* First if we are already modifying current item, log it */
+ if (modifying_this_item) {
+ journal_mark_dirty(th, inode->i_sb, bh);
+ modifying_this_item = 0;
+ }
+ /* Then set the key to look for a new indirect item (offset of old
+ item is added to old item length */
+ set_cpu_key_k_offset(&key,
+ le_key_k_offset
+ (get_inode_item_key_version(inode),
+ &(ih->ih_key)) +
+ op_bytes_number(ih,
+ inode->i_sb->
+ s_blocksize));
+ /* Search ofor position of new key in the tree. */
+ res =
+ search_for_position_by_key(inode->i_sb, &key,
+ &path);
+ if (res == IO_ERROR) {
+ res = -EIO;
+ goto error_exit_free_blocks;
+ }
+ bh = get_last_bh(&path);
+ ih = get_ih(&path);
+ item = get_item(&path);
+ itempos = path.pos_in_item;
+ continue; // loop to check all kinds of conditions and so on.
}
- /* Now we want to check if transaction is too full, and if it is
- we restart it. This will also free the path. */
- if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
- res = restart_transaction(th, inode, &path);
- if (res) {
- pathrelse (&path);
- kfree(zeros);
- goto error_exit;
- }
- }
-
- /* Well, need to recalculate path and stuff */
- set_cpu_key_k_offset( &key, cpu_key_k_offset(&key) + (to_paste << inode->i_blkbits));
- res = search_for_position_by_key(inode->i_sb, &key, &path);
- if ( res == IO_ERROR ) {
- res = -EIO;
- kfree(zeros);
- goto error_exit_free_blocks;
+ /* Ok, we have correct position in item now, so let's see if it is
+ representing file hole (blocknumber is zero) and fill it if needed */
+ if (!item[itempos]) {
+ /* Ok, a hole. Now we need to check if we already prepared this
+ block to be journaled */
+ while (!modifying_this_item) { // loop until succeed
+ /* Well, this item is not journaled yet, so we must prepare
+ it for journal first, before we can change it */
+ struct item_head tmp_ih; // We copy item head of found item,
+ // here to detect if fs changed under
+ // us while we were preparing for
+ // journal.
+ int fs_gen; // We store fs generation here to find if someone
+ // changes fs under our feet
+
+ copy_item_head(&tmp_ih, ih); // Remember itemhead
+ fs_gen = get_generation(inode->i_sb); // remember fs generation
+ reiserfs_prepare_for_journal(inode->i_sb, bh, 1); // Prepare a buffer within which indirect item is stored for changing.
+ if (fs_changed(fs_gen, inode->i_sb)
+ && item_moved(&tmp_ih, &path)) {
+ // Sigh, fs was changed under us, we need to look for new
+ // location of item we are working with
+
+ /* unmark prepaerd area as journaled and search for it's
+ new position */
+ reiserfs_restore_prepared_buffer(inode->
+ i_sb,
+ bh);
+ res =
+ search_for_position_by_key(inode->
+ i_sb,
+ &key,
+ &path);
+ if (res == IO_ERROR) {
+ res = -EIO;
+ goto error_exit_free_blocks;
+ }
+ bh = get_last_bh(&path);
+ ih = get_ih(&path);
+ item = get_item(&path);
+ itempos = path.pos_in_item;
+ goto retry;
+ }
+ modifying_this_item = 1;
+ }
+ item[itempos] = allocated_blocks[curr_block]; // Assign new block
+ curr_block++;
}
- bh=get_last_bh(&path);
- ih=get_ih(&path);
- item = get_item(&path);
- hole_size -= to_paste;
- } while ( hole_size );
- kfree(zeros);
+ itempos++;
}
- }
-
- // Go through existing indirect items first
- // replace all zeroes with blocknumbers from list
- // Note that if no corresponding item was found, by previous search,
- // it means there are no existing in-tree representation for file area
- // we are going to overwrite, so there is nothing to scan through for holes.
- for ( curr_block = 0, itempos = path.pos_in_item ; curr_block < blocks_to_allocate && res == POSITION_FOUND ; ) {
-retry:
-
- if ( itempos >= ih_item_len(ih)/UNFM_P_SIZE ) {
- /* We run out of data in this indirect item, let's look for another
- one. */
- /* First if we are already modifying current item, log it */
- if ( modifying_this_item ) {
- journal_mark_dirty (th, inode->i_sb, bh);
- modifying_this_item = 0;
- }
- /* Then set the key to look for a new indirect item (offset of old
- item is added to old item length */
- set_cpu_key_k_offset( &key, le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize));
- /* Search ofor position of new key in the tree. */
- res = search_for_position_by_key(inode->i_sb, &key, &path);
- if ( res == IO_ERROR) {
- res = -EIO;
- goto error_exit_free_blocks;
- }
- bh=get_last_bh(&path);
- ih=get_ih(&path);
- item = get_item(&path);
- itempos = path.pos_in_item;
- continue; // loop to check all kinds of conditions and so on.
+
+ if (modifying_this_item) { // We need to log last-accessed block, if it
+ // was modified, but not logged yet.
+ journal_mark_dirty(th, inode->i_sb, bh);
}
- /* Ok, we have correct position in item now, so let's see if it is
- representing file hole (blocknumber is zero) and fill it if needed */
- if ( !item[itempos] ) {
- /* Ok, a hole. Now we need to check if we already prepared this
- block to be journaled */
- while ( !modifying_this_item ) { // loop until succeed
- /* Well, this item is not journaled yet, so we must prepare
- it for journal first, before we can change it */
- struct item_head tmp_ih; // We copy item head of found item,
- // here to detect if fs changed under
- // us while we were preparing for
- // journal.
- int fs_gen; // We store fs generation here to find if someone
- // changes fs under our feet
-
- copy_item_head (&tmp_ih, ih); // Remember itemhead
- fs_gen = get_generation (inode->i_sb); // remember fs generation
- reiserfs_prepare_for_journal(inode->i_sb, bh, 1); // Prepare a buffer within which indirect item is stored for changing.
- if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
- // Sigh, fs was changed under us, we need to look for new
- // location of item we are working with
-
- /* unmark prepaerd area as journaled and search for it's
- new position */
- reiserfs_restore_prepared_buffer(inode->i_sb, bh);
- res = search_for_position_by_key(inode->i_sb, &key, &path);
- if ( res == IO_ERROR) {
- res = -EIO;
- goto error_exit_free_blocks;
- }
- bh=get_last_bh(&path);
- ih=get_ih(&path);
- item = get_item(&path);
- itempos = path.pos_in_item;
- goto retry;
+
+ if (curr_block < blocks_to_allocate) {
+ // Oh, well need to append to indirect item, or to create indirect item
+ // if there weren't any
+ if (is_indirect_le_ih(ih)) {
+ // Existing indirect item - append. First calculate key for append
+ // position. We do not need to recalculate path as it should
+ // already point to correct place.
+ make_cpu_key(&key, inode,
+ le_key_k_offset(get_inode_item_key_version
+ (inode),
+ &(ih->ih_key)) +
+ op_bytes_number(ih,
+ inode->i_sb->s_blocksize),
+ TYPE_INDIRECT, 3);
+ res =
+ reiserfs_paste_into_item(th, &path, &key, inode,
+ (char *)(allocated_blocks +
+ curr_block),
+ UNFM_P_SIZE *
+ (blocks_to_allocate -
+ curr_block));
+ if (res) {
+ goto error_exit_free_blocks;
+ }
+ } else if (is_statdata_le_ih(ih)) {
+ // Last found item was statdata. That means we need to create indirect item.
+ struct item_head ins_ih; /* itemhead for new item */
+
+ /* create a key for our new item */
+ make_cpu_key(&key, inode, 1, TYPE_INDIRECT, 3); // Position one,
+ // because that's
+ // where first
+ // indirect item
+ // begins
+ /* Create new item head for our new item */
+ make_le_item_head(&ins_ih, &key, key.version, 1,
+ TYPE_INDIRECT,
+ (blocks_to_allocate -
+ curr_block) * UNFM_P_SIZE,
+ 0 /* free space */ );
+ /* Find where such item should live in the tree */
+ res = search_item(inode->i_sb, &key, &path);
+ if (res != ITEM_NOT_FOUND) {
+ /* Well, if we have found such item already, or some error
+ occured, we need to warn user and return error */
+ if (res != -ENOSPC) {
+ reiserfs_warning(inode->i_sb,
+ "green-9009: search_by_key (%K) "
+ "returned %d", &key,
+ res);
+ }
+ res = -EIO;
+				goto error_exit_free_blocks;