2 * linux/fs/ext3/inode.c
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
11 * linux/fs/minix/inode.c
13 * Copyright (C) 1991, 1992 Linus Torvalds
15 * Goal-directed block allocation by Stephen Tweedie
16 * (sct@redhat.com), 1993, 1998
17 * Big-endian to little-endian byte-swapping/bitmaps by
18 * David S. Miller (davem@caip.rutgers.edu), 1995
19 * 64-bit file support on 64-bit platforms by Jakub Jelinek
20 * (jj@sunsite.ms.mff.cuni.cz)
22 * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
26 #include <linux/sched.h>
27 #include <linux/ext3_jbd.h>
28 #include <linux/jbd.h>
29 #include <linux/locks.h>
30 #include <linux/smp_lock.h>
31 #include <linux/highuid.h>
32 #include <linux/quotaops.h>
33 #include <linux/module.h>
36 * SEARCH_FROM_ZERO forces each block allocation to search from the start
37 * of the filesystem. This is to force rapid reallocation of recently-freed
38 * blocks. The file fragmentation is horrendous.
40 #undef SEARCH_FROM_ZERO
42 /* The ext3 forget function must perform a revoke if we are freeing data
43 * which has been journaled. Metadata (eg. indirect blocks) must be
44 * revoked in all cases.
46 * "bh" may be NULL: a metadata block may have been freed from memory
47 * but there may still be a record of it in the journal, and that record
48 * still needs to be revoked.
51 static int ext3_forget(handle_t *handle, int is_metadata,
52 struct inode *inode, struct buffer_head *bh,
57 BUFFER_TRACE(bh, "enter");
59 jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
61 bh, is_metadata, inode->i_mode,
62 test_opt(inode->i_sb, DATA_FLAGS));
64 /* Never use the revoke function if we are doing full data
65 * journaling: there is no need to, and a V1 superblock won't
66 * support it. Otherwise, only skip the revoke on un-journaled
69 if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
70 (!is_metadata && !ext3_should_journal_data(inode))) {
72 BUFFER_TRACE(bh, "call journal_forget");
73 ext3_journal_forget(handle, bh);
79 * data!=journal && (is_metadata || should_journal_data(inode))
81 BUFFER_TRACE(bh, "call ext3_journal_revoke");
82 err = ext3_journal_revoke(handle, blocknr, bh);
84 ext3_abort(inode->i_sb, __FUNCTION__,
85 "error %d when attempting revoke", err);
86 BUFFER_TRACE(bh, "exit");
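/*
 * Editorial note (not part of the original source): the logic above boils
 * down to a two-way decision.  If the filesystem is mounted data=journal,
 * or if we are freeing plain file data on an inode that is not journaling
 * its data, a journal_forget() is sufficient.  Otherwise - metadata, or
 * data belonging to an inode that journals its data on a non-data=journal
 * mount - the log may still hold a copy of the block, so
 * ext3_journal_revoke() must be issued before the block can be reused.
 */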
91 * Truncate transactions can be complex and absolutely huge. So we need to
92 * be able to restart the transaction at a convenient checkpoint to make
93 * sure we don't overflow the journal.
95 * start_transaction gets us a new handle for a truncate transaction,
96 * and extend_transaction tries to extend the existing one a bit. If
97 * extend fails, we need to propagate the failure up and restart the
98 * transaction in the top-level truncate loop. --sct
101 static handle_t *start_transaction(struct inode *inode)
106 needed = inode->i_blocks;
107 if (needed > EXT3_MAX_TRANS_DATA)
108 needed = EXT3_MAX_TRANS_DATA;
110 result = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS + needed);
114 ext3_std_error(inode->i_sb, PTR_ERR(result));
119 * Try to extend this transaction for the purposes of truncation.
121 * Returns 0 if we managed to create more room. If we can't create more
122 * room, and the transaction must be restarted we return 1.
124 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
128 if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
130 needed = inode->i_blocks;
131 if (needed > EXT3_MAX_TRANS_DATA)
132 needed = EXT3_MAX_TRANS_DATA;
133 if (!ext3_journal_extend(handle, EXT3_RESERVE_TRANS_BLOCKS + needed))
139 * Restart the transaction associated with *handle. This does a commit,
140 * so before we call here everything must be consistently dirtied against
143 static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
145 long needed = inode->i_blocks;
146 if (needed > EXT3_MAX_TRANS_DATA)
147 needed = EXT3_MAX_TRANS_DATA;
148 jbd_debug(2, "restarting handle %p\n", handle);
149 return ext3_journal_restart(handle, EXT3_DATA_TRANS_BLOCKS + needed);
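/*
 * Illustration (editorial): the three truncate helpers above all derive
 * their credit request from the same bounded quantity,
 * min(inode->i_blocks, EXT3_MAX_TRANS_DATA).  start_transaction() and
 * ext3_journal_test_restart() ask the journal for EXT3_DATA_TRANS_BLOCKS
 * plus that amount, while try_to_extend_transaction() tries to add
 * EXT3_RESERVE_TRANS_BLOCKS plus that amount to the handle it already has.
 * The cap is what lets an arbitrarily large truncate proceed as a chain of
 * bounded transactions instead of one unboundedly large one.
 */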
153 * Called at each iput()
155 void ext3_put_inode (struct inode * inode)
157 ext3_discard_prealloc (inode);
161 * Called at the last iput() if i_nlink is zero.
163 void ext3_delete_inode (struct inode * inode)
167 if (is_bad_inode(inode) ||
168 inode->i_ino == EXT3_ACL_IDX_INO ||
169 inode->i_ino == EXT3_ACL_DATA_INO)
173 handle = start_transaction(inode);
174 if (IS_ERR(handle)) {
175 /* If we're going to skip the normal cleanup, we still
176 * need to make sure that the in-core orphan linked list
177 * is properly cleaned up. */
178 ext3_orphan_del(NULL, inode);
180 ext3_std_error(inode->i_sb, PTR_ERR(handle));
189 ext3_truncate(inode);
191 * Kill off the orphan record which ext3_truncate created.
192 * AKPM: I think this can be inside the above `if'.
193 * Note that ext3_orphan_del() has to be able to cope with the
194 * deletion of a non-existent orphan - this is because we don't
195 * know if ext3_truncate() actually created an orphan record.
196 * (Well, we could do this if we need to, but heck - it works)
198 ext3_orphan_del(handle, inode);
199 inode->u.ext3_i.i_dtime = CURRENT_TIME;
202 * One subtle ordering requirement: if anything has gone wrong
203 * (transaction abort, IO errors, whatever), then we can still
204 * do these next steps (the fs will already have been marked as
205 * having errors), but we can't free the inode if the mark_dirty
208 if (ext3_mark_inode_dirty(handle, inode))
209 /* If that failed, just do the required in-core inode clear. */
212 ext3_free_inode(handle, inode);
213 ext3_journal_stop(handle, inode);
217 clear_inode(inode); /* We must guarantee clearing of inode... */
220 void ext3_discard_prealloc (struct inode * inode)
222 #ifdef EXT3_PREALLOCATE
224 /* Writer: ->i_prealloc* */
225 if (inode->u.ext3_i.i_prealloc_count) {
226 unsigned short total = inode->u.ext3_i.i_prealloc_count;
227 unsigned long block = inode->u.ext3_i.i_prealloc_block;
228 inode->u.ext3_i.i_prealloc_count = 0;
229 inode->u.ext3_i.i_prealloc_block = 0;
231 ext3_free_blocks (inode, block, total);
237 static int ext3_alloc_block (handle_t *handle,
238 struct inode * inode, unsigned long goal, int *err)
241 static unsigned long alloc_hits = 0, alloc_attempts = 0;
243 unsigned long result;
245 #ifdef EXT3_PREALLOCATE
246 /* Writer: ->i_prealloc* */
247 if (inode->u.ext3_i.i_prealloc_count &&
248 (goal == inode->u.ext3_i.i_prealloc_block ||
249 goal + 1 == inode->u.ext3_i.i_prealloc_block))
251 result = inode->u.ext3_i.i_prealloc_block++;
252 inode->u.ext3_i.i_prealloc_count--;
254 ext3_debug ("preallocation hit (%lu/%lu).\n",
255 ++alloc_hits, ++alloc_attempts);
257 ext3_discard_prealloc (inode);
258 ext3_debug ("preallocation miss (%lu/%lu).\n",
259 alloc_hits, ++alloc_attempts);
260 if (S_ISREG(inode->i_mode))
261 result = ext3_new_block (inode, goal,
262 &inode->u.ext3_i.i_prealloc_count,
263 &inode->u.ext3_i.i_prealloc_block, err);
265 result = ext3_new_block (inode, goal, 0, 0, err);
267 * AKPM: this is somewhat sticky. I'm not surprised it was
268 * disabled in 2.2's ext3. Need to integrate b_committed_data
269 * guarding with preallocation, if indeed preallocation is
274 result = ext3_new_block (handle, inode, goal, 0, 0, err);
283 struct buffer_head *bh;
286 static inline void add_chain(Indirect *p, struct buffer_head *bh, u32 *v)
288 p->key = *(p->p = v);
292 static inline int verify_chain(Indirect *from, Indirect *to)
294 while (from <= to && from->key == *from->p)
300 * ext3_block_to_path - parse the block number into array of offsets
301 * @inode: inode in question (we are only interested in its superblock)
302 * @i_block: block number to be parsed
303 * @offsets: array to store the offsets in
305 * To store the locations of file's data ext3 uses a data structure common
306 * for UNIX filesystems - tree of pointers anchored in the inode, with
307 * data blocks at leaves and indirect blocks in intermediate nodes.
308 * This function translates the block number into path in that tree -
309 * return value is the path length and @offsets[n] is the offset of
310 * pointer to (n+1)th node in the nth one. If @block is out of range
311 * (negative or too large) a warning is printed and zero is returned.
313 * Note: function doesn't find node addresses, so no IO is needed. All
314 * we need to know is the capacity of indirect blocks (taken from the
319 * Portability note: the last comparison (check that we fit into triple
320 * indirect block) is spelled differently, because otherwise on an
321 * architecture with 32-bit longs and 8Kb pages we might get into trouble
322 * if our filesystem had 8Kb blocks. We might use long long, but that would
323 * kill us on x86. Oh, well, at least the sign propagation does not matter -
324 * i_block would have to be negative in the very beginning, so we would not
328 static int ext3_block_to_path(struct inode *inode, long i_block, int offsets[4])
330 int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
331 int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
332 const long direct_blocks = EXT3_NDIR_BLOCKS,
333 indirect_blocks = ptrs,
334 double_blocks = (1 << (ptrs_bits * 2));
338 ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
339 } else if (i_block < direct_blocks) {
340 offsets[n++] = i_block;
341 } else if ( (i_block -= direct_blocks) < indirect_blocks) {
342 offsets[n++] = EXT3_IND_BLOCK;
343 offsets[n++] = i_block;
344 } else if ((i_block -= indirect_blocks) < double_blocks) {
345 offsets[n++] = EXT3_DIND_BLOCK;
346 offsets[n++] = i_block >> ptrs_bits;
347 offsets[n++] = i_block & (ptrs - 1);
348 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
349 offsets[n++] = EXT3_TIND_BLOCK;
350 offsets[n++] = i_block >> (ptrs_bits * 2);
351 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
352 offsets[n++] = i_block & (ptrs - 1);
354 ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
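/*
 * Worked example (editorial, assuming 4KB blocks, i.e. ptrs = 1024):
 *   i_block 11   -> offsets = { 11 },                    depth 1 (direct)
 *   i_block 12   -> offsets = { EXT3_IND_BLOCK, 0 },     depth 2
 *   i_block 1036 -> offsets = { EXT3_DIND_BLOCK, 0, 0 }, depth 3
 * The doubly-indirect tree is first needed once the 12 direct slots and
 * the 1024 singly-indirect slots (blocks 12..1035) are exhausted.
 */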
360 * ext3_get_branch - read the chain of indirect blocks leading to data
361 * @inode: inode in question
362 * @depth: depth of the chain (1 - direct pointer, etc.)
363 * @offsets: offsets of pointers in inode/indirect blocks
364 * @chain: place to store the result
365 * @err: here we store the error value
367 * Function fills the array of triples <key, p, bh> and returns %NULL
368 * if everything went OK or the pointer to the last filled triple
369 * (incomplete one) otherwise. Upon the return chain[i].key contains
370 * the number of (i+1)-th block in the chain (as it is stored in memory,
371 * i.e. little-endian 32-bit), chain[i].p contains the address of that
372 * number (it points into struct inode for i==0 and into the bh->b_data
373 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
374 * block for i>0 and NULL for i==0. In other words, it holds the block
375 * numbers of the chain, addresses they were taken from (and where we can
376 * verify that chain did not change) and buffer_heads hosting these
379 * Function stops when it stumbles upon zero pointer (absent block)
380 * (pointer to last triple returned, *@err == 0)
381 * or when it gets an IO error reading an indirect block
382 * (ditto, *@err == -EIO)
383 * or when it notices that chain had been changed while it was reading
384 * (ditto, *@err == -EAGAIN)
385 * or when it reads all @depth-1 indirect blocks successfully and finds
386 * the whole chain, all the way to the data (returns %NULL, *err == 0).
388 static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
389 Indirect chain[4], int *err)
391 struct super_block *sb = inode->i_sb;
393 struct buffer_head *bh;
396 /* i_data is not going away, no lock needed */
397 add_chain (chain, NULL, inode->u.ext3_i.i_data + *offsets);
401 bh = sb_bread(sb, le32_to_cpu(p->key));
404 /* Reader: pointers */
405 if (!verify_chain(chain, p))
407 add_chain(++p, bh, (u32*)bh->b_data + *++offsets);
425 * ext3_find_near - find a place for allocation with sufficient locality
427 * @ind: descriptor of indirect block.
429 * This function returns the preferred place for block allocation.
430 * It is used when the heuristic for sequential allocation fails.
432 * + if there is a block to the left of our position - allocate near it.
433 * + if the pointer will live in an indirect block - allocate near that block.
434 * + if the pointer will live in the inode - allocate in the same
436 * Caller must make sure that @ind is valid and will stay that way.
439 static inline unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
441 u32 *start = ind->bh ? (u32*) ind->bh->b_data : inode->u.ext3_i.i_data;
444 /* Try to find previous block */
445 for (p = ind->p - 1; p >= start; p--)
447 return le32_to_cpu(*p);
449 /* No such thing, so let's try location of indirect block */
451 return ind->bh->b_blocknr;
454 * Is it going to be referred to from the inode itself? OK, just put it into
455 * the same cylinder group then.
457 return (inode->u.ext3_i.i_block_group *
458 EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
459 le32_to_cpu(inode->i_sb->u.ext3_sb.s_es->s_first_data_block);
463 * ext3_find_goal - find a preferred place for allocation.
465 * @block: block we want
466 * @chain: chain of indirect blocks
467 * @partial: pointer to the last triple within a chain
468 * @goal: place to store the result.
470 * Normally this function finds the preferred place for block allocation,
471 * stores it in *@goal and returns zero. If the branch had been changed
472 * under us we return -EAGAIN.
475 static int ext3_find_goal(struct inode *inode, long block, Indirect chain[4],
476 Indirect *partial, unsigned long *goal)
478 /* Writer: ->i_next_alloc* */
479 if (block == inode->u.ext3_i.i_next_alloc_block + 1) {
480 inode->u.ext3_i.i_next_alloc_block++;
481 inode->u.ext3_i.i_next_alloc_goal++;
483 #ifdef SEARCH_FROM_ZERO
484 inode->u.ext3_i.i_next_alloc_block = 0;
485 inode->u.ext3_i.i_next_alloc_goal = 0;
488 /* Reader: pointers, ->i_next_alloc* */
489 if (verify_chain(chain, partial)) {
491 * try the heuristic for sequential allocation,
492 * failing that at least try to get decent locality.
494 if (block == inode->u.ext3_i.i_next_alloc_block)
495 *goal = inode->u.ext3_i.i_next_alloc_goal;
497 *goal = ext3_find_near(inode, partial);
498 #ifdef SEARCH_FROM_ZERO
508 * ext3_alloc_branch - allocate and set up a chain of blocks.
510 * @num: depth of the chain (number of blocks to allocate)
511 * @offsets: offsets (in the blocks) to store the pointers to next.
512 * @branch: place to store the chain in.
514 * This function allocates @num blocks, zeroes out all but the last one,
515 * links them into chain and (if we are synchronous) writes them to disk.
516 * In other words, it prepares a branch that can be spliced onto the
517 * inode. It stores the information about that chain in the branch[], in
518 * the same format as ext3_get_branch() would do. We are calling it after
519 * we had read the existing part of chain and partial points to the last
520 * triple of that (one with zero ->key). Upon the exit we have the same
521 * picture as after the successful ext3_get_block(), except that in one
522 * place chain is disconnected - *branch->p is still zero (we did not
523 * set the last link), but branch->key contains the number that should
524 * be placed into *branch->p to fill that gap.
526 * If allocation fails we free all blocks we've allocated (and forget
527 * their buffer_heads) and return the error value from the failed
528 * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
529 * as described above and return 0.
532 static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
538 int blocksize = inode->i_sb->s_blocksize;
542 int parent = ext3_alloc_block(handle, inode, goal, &err);
544 branch[0].key = cpu_to_le32(parent);
546 for (n = 1; n < num; n++) {
547 struct buffer_head *bh;
548 /* Allocate the next block */
549 int nr = ext3_alloc_block(handle, inode, parent, &err);
552 branch[n].key = cpu_to_le32(nr);
556 * Get buffer_head for parent block, zero it out
557 * and set the pointer to new one, then send
560 bh = sb_getblk(inode->i_sb, parent);
563 BUFFER_TRACE(bh, "call get_create_access");
564 err = ext3_journal_get_create_access(handle, bh);
571 memset(bh->b_data, 0, blocksize);
572 branch[n].p = (u32*) bh->b_data + offsets[n];
573 *branch[n].p = branch[n].key;
574 BUFFER_TRACE(bh, "marking uptodate");
575 mark_buffer_uptodate(bh, 1);
578 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
579 err = ext3_journal_dirty_metadata(handle, bh);
589 /* Allocation failed, free what we already allocated */
590 for (i = 1; i < keys; i++) {
591 BUFFER_TRACE(branch[i].bh, "call journal_forget");
592 ext3_journal_forget(handle, branch[i].bh);
594 for (i = 0; i < keys; i++)
595 ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
600 * ext3_splice_branch - splice the allocated branch onto inode.
602 * @block: (logical) number of block we are adding
603 * @chain: chain of indirect blocks (with a missing link - see
605 * @where: location of missing link
606 * @num: number of blocks we are adding
608 * This function verifies that chain (up to the missing link) had not
609 * changed, fills the missing link and does all housekeeping needed in
610 * inode (->i_blocks, etc.). In case of success we end up with the full
611 * chain to new block and return 0. Otherwise (== chain had been changed)
612 * we free the new blocks (forgetting their buffer_heads, indeed) and
616 static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
617 Indirect chain[4], Indirect *where, int num)
623 * If we're splicing into a [td]indirect block (as opposed to the
624 * inode) then we need to get write access to the [td]indirect block
628 BUFFER_TRACE(where->bh, "get_write_access");
629 err = ext3_journal_get_write_access(handle, where->bh);
633 /* Verify that place we are splicing to is still there and vacant */
635 /* Writer: pointers, ->i_next_alloc* */
636 if (!verify_chain(chain, where-1) || *where->p)
642 *where->p = where->key;
643 inode->u.ext3_i.i_next_alloc_block = block;
644 inode->u.ext3_i.i_next_alloc_goal = le32_to_cpu(where[num-1].key);
645 #ifdef SEARCH_FROM_ZERO
646 inode->u.ext3_i.i_next_alloc_block = 0;
647 inode->u.ext3_i.i_next_alloc_goal = 0;
651 /* We are done with atomic stuff, now do the rest of housekeeping */
653 inode->i_ctime = CURRENT_TIME;
654 ext3_mark_inode_dirty(handle, inode);
656 /* had we spliced it onto indirect block? */
659 * akpm: If we spliced it onto an indirect block, we haven't
660 * altered the inode. Note however that if it is being spliced
661 * onto an indirect block at the very end of the file (the
662 * file is growing) then we *will* alter the inode to reflect
663 * the new i_size. But that is not done here - it is done in
664 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
666 jbd_debug(5, "splicing indirect only\n");
667 BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
668 err = ext3_journal_dirty_metadata(handle, where->bh);
673 * OK, we spliced it into the inode itself on a direct block.
674 * Inode was dirtied above.
676 jbd_debug(5, "splicing direct\n");
682 * AKPM: if where[i].bh isn't part of the current updating
683 * transaction then we explode nastily. Test this code path.
685 jbd_debug(1, "the chain changed: try again\n");
689 for (i = 1; i < num; i++) {
690 BUFFER_TRACE(where[i].bh, "call journal_forget");
691 ext3_journal_forget(handle, where[i].bh);
693 /* For the normal collision cleanup case, we free up the blocks.
694 * On genuine filesystem errors we don't even think about doing
697 for (i = 0; i < num; i++)
698 ext3_free_blocks(handle, inode,
699 le32_to_cpu(where[i].key), 1);
704 * Allocation strategy is simple: if we have to allocate something, we will
705 * have to go the whole way to leaf. So let's do it before attaching anything
706 * to tree, set linkage between the newborn blocks, write them if sync is
707 * required, recheck the path, free and repeat if check fails, otherwise
708 * set the last missing link (that will protect us from any truncate-generated
709 * removals - all blocks on the path are immune now) and possibly force the
710 * write on the parent block.
711 * That has a nice additional property: no special recovery from the failed
712 * allocations is needed - we simply release blocks and do not touch anything
713 * reachable from inode.
715 * akpm: `handle' can be NULL if create == 0.
717 * The BKL may not be held on entry here. Be sure to take it early.
720 static int ext3_get_block_handle(handle_t *handle, struct inode *inode,
722 struct buffer_head *bh_result, int create)
730 int depth = ext3_block_to_path(inode, iblock, offsets);
733 J_ASSERT(handle != NULL || create == 0);
740 partial = ext3_get_branch(inode, depth, offsets, chain, &err);
742 /* Simplest case - block found, no allocation needed */
744 bh_result->b_state &= ~(1UL << BH_New);
746 bh_result->b_dev = inode->i_dev;
747 bh_result->b_blocknr = le32_to_cpu(chain[depth-1].key);
748 bh_result->b_state |= (1UL << BH_Mapped);
749 /* Clean up and exit */
750 partial = chain+depth-1; /* the whole chain */
754 /* Next simple case - plain lookup or failed read of indirect block */
755 if (!create || err == -EIO) {
757 while (partial > chain) {
758 BUFFER_TRACE(partial->bh, "call brelse");
762 BUFFER_TRACE(bh_result, "returned");
769 * Indirect block might be removed by truncate while we were
770 * reading it. Handling of that case (forget what we've got and
771 * reread) is taken out of the main path.
776 if (ext3_find_goal(inode, iblock, chain, partial, &goal) < 0)
779 left = (chain + depth) - partial;
782 * Block out ext3_truncate while we alter the tree
784 down_read(&inode->u.ext3_i.truncate_sem);
785 err = ext3_alloc_branch(handle, inode, left, goal,
786 offsets+(partial-chain), partial);
788 /* The ext3_splice_branch call will free and forget any buffers
789 * on the new chain if there is a failure, but that risks using
790 * up transaction credits, especially for bitmaps where the
791 * credits cannot be returned. Can we handle this somehow? We
792 * may need to return -EAGAIN upwards in the worst case. --sct */
794 err = ext3_splice_branch(handle, inode, iblock, chain,
796 up_read(&inode->u.ext3_i.truncate_sem);
802 new_size = inode->i_size;
804 * This is not racy against ext3_truncate's modification of i_disksize
805 * because VM/VFS ensures that the file cannot be extended while
806 * truncate is in progress. It is racy between multiple parallel
807 * instances of get_block, but we have the BKL.
809 if (new_size > inode->u.ext3_i.i_disksize)
810 inode->u.ext3_i.i_disksize = new_size;
812 bh_result->b_state |= (1UL << BH_New);
816 while (partial > chain) {
817 jbd_debug(1, "buffer chain changed, retrying\n");
818 BUFFER_TRACE(partial->bh, "brelsing");
826 * The BKL is not held on entry here.
828 static int ext3_get_block(struct inode *inode, long iblock,
829 struct buffer_head *bh_result, int create)
831 handle_t *handle = 0;
835 handle = ext3_journal_current_handle();
836 J_ASSERT(handle != 0);
838 ret = ext3_get_block_handle(handle, inode, iblock, bh_result, create);
843 * `handle' can be NULL if create is zero
845 struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
846 long block, int create, int * errp)
848 struct buffer_head dummy;
851 J_ASSERT(handle != NULL || create == 0);
854 dummy.b_blocknr = -1000;
855 buffer_trace_init(&dummy.b_history);
856 *errp = ext3_get_block_handle(handle, inode, block, &dummy, create);
857 if (!*errp && buffer_mapped(&dummy)) {
858 struct buffer_head *bh;
859 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
860 if (buffer_new(&dummy)) {
861 J_ASSERT(create != 0);
862 J_ASSERT(handle != 0);
864 /* Now that we do not always journal data, we
865 should keep in mind whether this should
866 always journal the new buffer as metadata.
867 For now, regular file writes use
868 ext3_get_block instead, so it's not a
872 BUFFER_TRACE(bh, "call get_create_access");
873 fatal = ext3_journal_get_create_access(handle, bh);
875 memset(bh->b_data, 0,
876 inode->i_sb->s_blocksize);
877 mark_buffer_uptodate(bh, 1);
880 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
881 err = ext3_journal_dirty_metadata(handle, bh);
882 if (!fatal) fatal = err;
885 BUFFER_TRACE(bh, "not a new buffer");
897 struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
898 int block, int create, int *err)
900 struct buffer_head * bh;
903 prev_blocks = inode->i_blocks;
905 bh = ext3_getblk (handle, inode, block, create, err);
908 #ifdef EXT3_PREALLOCATE
910 * If the inode has grown, and this is a directory, then use a few
911 * more of the preallocated blocks to keep directory fragmentation
912 * down. The preallocated blocks are guaranteed to be contiguous.
915 S_ISDIR(inode->i_mode) &&
916 inode->i_blocks > prev_blocks &&
917 EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
918 EXT3_FEATURE_COMPAT_DIR_PREALLOC)) {
920 struct buffer_head *tmp_bh;
923 inode->u.ext3_i.i_prealloc_count &&
924 i < EXT3_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
927 * ext3_getblk will zero out the contents of the
930 tmp_bh = ext3_getblk(handle, inode,
931 block+i, create, err);
940 if (buffer_uptodate(bh))
942 ll_rw_block (READ, 1, &bh);
944 if (buffer_uptodate(bh))
951 static int walk_page_buffers( handle_t *handle,
953 struct buffer_head *head,
957 int (*fn)( handle_t *handle,
959 struct buffer_head *bh))
961 struct buffer_head *bh;
962 unsigned block_start, block_end;
963 unsigned blocksize = head->b_size;
966 for ( bh = head, block_start = 0;
967 ret == 0 && (bh != head || !block_start);
968 block_start = block_end, bh = bh->b_this_page)
970 block_end = block_start + blocksize;
971 if (block_end <= from || block_start >= to) {
972 if (partial && !buffer_uptodate(bh))
976 err = (*fn)(handle, inode, bh);
984 * To preserve ordering, it is essential that the hole instantiation and
985 * the data write be encapsulated in a single transaction. We cannot
986 * close off a transaction and start a new one between the ext3_get_block()
987 * and the commit_write(). So doing the journal_start at the start of
988 * prepare_write() is the right place.
990 * Also, this function can nest inside ext3_writepage() ->
991 * block_write_full_page(). In that case, we *know* that ext3_writepage()
992 * has generated enough buffer credits to do the whole page. So we won't
993 * block on the journal in that case, which is good, because the caller may
996 * By accident, ext3 can be reentered when a transaction is open via
997 * quota file writes. If we were to commit the transaction while thus
998 * reentered, there can be a deadlock - we would be holding a quota
999 * lock, and the commit would never complete if another thread had a
1000 * transaction open and was blocking on the quota lock - a ranking
1003 * So what we do is to rely on the fact that journal_stop/journal_start
1004 * will _not_ run commit under these circumstances because handle->h_ref
1005 * is elevated. We'll still have enough credits for the tiny quotafile
1009 static int do_journal_get_write_access(handle_t *handle, struct inode *inode,
1010 struct buffer_head *bh)
1012 return ext3_journal_get_write_access(handle, bh);
1015 static int ext3_prepare_write(struct file *file, struct page *page,
1016 unsigned from, unsigned to)
1018 struct inode *inode = page->mapping->host;
1019 int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
1023 handle = ext3_journal_start(inode, needed_blocks);
1024 if (IS_ERR(handle)) {
1025 ret = PTR_ERR(handle);
1029 ret = block_prepare_write(page, from, to, ext3_get_block);
1032 goto prepare_write_failed;
1034 if (ext3_should_journal_data(inode)) {
1035 ret = walk_page_buffers(handle, inode, page->buffers,
1036 from, to, NULL, do_journal_get_write_access);
1039 * We're going to fail this prepare_write(),
1040 * so commit_write() will not be called.
1041 * We need to undo block_prepare_write()'s kmap().
1042 * AKPM: Do we need to clear PageUptodate? I don't
1048 prepare_write_failed:
1050 ext3_journal_stop(handle, inode);
1056 static int journal_dirty_sync_data(handle_t *handle, struct inode *inode,
1057 struct buffer_head *bh)
1059 int ret = ext3_journal_dirty_data(handle, bh, 0);
1060 buffer_insert_inode_data_queue(bh, inode);
1065 * For ext3_writepage(). We also brelse() the buffer to account for
1066 * the bget() which ext3_writepage() performs.
1068 static int journal_dirty_async_data(handle_t *handle, struct inode *inode,
1069 struct buffer_head *bh)
1071 int ret = ext3_journal_dirty_data(handle, bh, 1);
1072 buffer_insert_inode_data_queue(bh, inode);
1077 /* For commit_write() in data=journal mode */
1078 static int commit_write_fn(handle_t *handle, struct inode *inode,
1079 struct buffer_head *bh)
1081 set_bit(BH_Uptodate, &bh->b_state);
1082 return ext3_journal_dirty_metadata(handle, bh);
1086 * We need to pick up the new inode size which generic_commit_write gave us
1087 * `file' can be NULL - eg, when called from block_symlink().
1089 * ext3 inode->i_dirty_buffers policy: If we're journalling data we
1090 * definitely don't want them to appear on the inode at all - instead
1091 * we need to manage them at the JBD layer and we need to intercept
1092 * the relevant sync operations and translate them into journal operations.
1094 * If we're not journalling data then we can just leave the buffers
1095 * on ->i_dirty_buffers. If someone writes them out for us then thanks.
1096 * Otherwise we'll do it in commit, if we're using ordered data.
1099 static int ext3_commit_write(struct file *file, struct page *page,
1100 unsigned from, unsigned to)
1102 handle_t *handle = ext3_journal_current_handle();
1103 struct inode *inode = page->mapping->host;
1107 if (ext3_should_journal_data(inode)) {
1109 * Here we duplicate the generic_commit_write() functionality
1112 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1114 ret = walk_page_buffers(handle, inode, page->buffers,
1115 from, to, &partial, commit_write_fn);
1117 SetPageUptodate(page);
1119 if (pos > inode->i_size)
1120 inode->i_size = pos;
1121 EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
1123 if (ext3_should_order_data(inode)) {
1124 ret = walk_page_buffers(handle, inode, page->buffers,
1125 from, to, NULL, journal_dirty_sync_data);
1127 /* Be careful here if generic_commit_write becomes a
1128 * required invocation after block_prepare_write. */
1130 ret = generic_commit_write(file, page, from, to);
1133 * block_prepare_write() was called, but we're not
1134 * going to call generic_commit_write(). So we
1135 * need to perform generic_commit_write()'s kunmap
1141 if (inode->i_size > inode->u.ext3_i.i_disksize) {
1142 inode->u.ext3_i.i_disksize = inode->i_size;
1143 ret2 = ext3_mark_inode_dirty(handle, inode);
1147 ret2 = ext3_journal_stop(handle, inode);
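/*
 * Summary (editorial): ext3_commit_write() handles the three data modes
 * differently.  data=journal walks the page's buffers with
 * commit_write_fn(), journaling them as metadata, and updates i_size by
 * hand; data=ordered first queues the buffers with journal_dirty_sync_data()
 * and then uses generic_commit_write(); data=writeback relies on
 * generic_commit_write() alone.  In every mode i_disksize is only advanced
 * while the handle is still open, before ext3_journal_stop().
 */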
1155 * bmap() is special. It gets used by applications such as lilo and by
1156 * the swapper to find the on-disk block of a specific piece of data.
1158 * Naturally, this is dangerous if the block concerned is still in the
1159 * journal. If somebody makes a swapfile on an ext3 data-journaling
1160 * filesystem and enables swap, then they may get a nasty shock when the
1161 * data getting swapped to that swapfile suddenly gets overwritten by
1162 * the original zeros written out previously to the journal and
1163 * awaiting writeback in the kernel's buffer cache.
1165 * So, if we see any bmap calls here on a modified, data-journaled file,
1166 * take extra steps to flush any blocks which might be in the cache.
1168 static int ext3_bmap(struct address_space *mapping, long block)
1170 struct inode *inode = mapping->host;
1174 if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
1176 * This is a REALLY heavyweight approach, but the use of
1177 * bmap on dirty files is expected to be extremely rare:
1178 * only if we run lilo or swapon on a freshly made file
1179 * do we expect this to happen.
1181 * (bmap requires CAP_SYS_RAWIO so this does not
1182 * represent an unprivileged user DOS attack --- we'd be
1183 * in trouble if mortal users could trigger this path at
1186 * NB. EXT3_STATE_JDATA is not set on files other than
1187 * regular files. If somebody wants to bmap a directory
1188 * or symlink and gets confused because the buffer
1189 * hasn't yet been flushed to disk, they deserve
1190 * everything they get.
1193 EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
1194 journal = EXT3_JOURNAL(inode);
1195 journal_lock_updates(journal);
1196 err = journal_flush(journal);
1197 journal_unlock_updates(journal);
1203 return generic_block_bmap(mapping,block,ext3_get_block);
1206 static int bget_one(handle_t *handle, struct inode *inode,
1207 struct buffer_head *bh)
1209 atomic_inc(&bh->b_count);
1214 * Note that we always start a transaction even if we're not journalling
1215 * data. This is to preserve ordering: any hole instantiation within
1216 * __block_write_full_page -> ext3_get_block() should be journalled
1217 * along with the data so we don't crash and then get metadata which
1218 * refers to old data.
1220 * In all journalling modes block_write_full_page() will start the I/O.
1224 * ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1229 * ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1231 * Same applies to ext3_get_block(). We will deadlock on various things like
1232 * lock_journal and i_truncate_sem.
1234 * Setting PF_MEMALLOC here doesn't work - too many internal memory
1237 * 16May01: If we're reentered then journal_current_handle() will be
1238 * non-zero. We simply *return*.
1240 * 1 July 2001: @@@ FIXME:
1241 * In journalled data mode, a data buffer may be metadata against the
1242 * current transaction. But the same file is part of a shared mapping
1243 * and someone does a writepage() on it.
1245 * We will move the buffer onto the async_data list, but *after* it has
1246 * been dirtied. So there's a small window where we have dirty data on
1249 * Note that this only applies to the last partial page in the file. The
1250 * bit which block_write_full_page() uses prepare/commit for. (That's
1251 * broken code anyway: it's wrong for msync()).
1253 * It's a rare case: affects the final partial page, for journalled data
1254 * where the file is subject to both write() and writepage() in the same
1255 * transaction. To fix it we'll need a custom block_write_full_page().
1256 * We'll probably need that anyway for journalling writepage() output.
1258 * We don't honour synchronous mounts for writepage(). That would be
1259 * disastrous. Any write() or metadata operation will sync the fs for
1262 static int ext3_writepage(struct page *page)
1264 struct inode *inode = page->mapping->host;
1265 struct buffer_head *page_buffers;
1266 handle_t *handle = NULL;
1271 J_ASSERT(PageLocked(page));
1274 * We give up here if we're reentered, because it might be
1275 * for a different filesystem. One *could* look for a
1276 * nested transaction opportunity.
1279 if (ext3_journal_current_handle())
1282 needed = ext3_writepage_trans_blocks(inode);
1283 if (current->flags & PF_MEMALLOC)
1284 handle = ext3_journal_try_start(inode, needed);
1286 handle = ext3_journal_start(inode, needed);
1288 if (IS_ERR(handle)) {
1289 ret = PTR_ERR(handle);
1293 order_data = ext3_should_order_data(inode) ||
1294 ext3_should_journal_data(inode);
1298 page_buffers = NULL; /* Purely to prevent compiler warning */
1300 /* bget() all the buffers */
1303 create_empty_buffers(page,
1304 inode->i_dev, inode->i_sb->s_blocksize);
1305 page_buffers = page->buffers;
1306 walk_page_buffers(handle, inode, page_buffers, 0,
1307 PAGE_CACHE_SIZE, NULL, bget_one);
1310 ret = block_write_full_page(page, ext3_get_block);
1313 * The page can become unlocked at any point now, and
1314 * truncate can then come in and change things. So we
1315 * can't touch *page from now on. But *page_buffers is
1316 * safe due to elevated refcount.
1319 handle = ext3_journal_current_handle();
1322 /* And attach them to the current transaction */
1324 err = walk_page_buffers(handle, inode, page_buffers,
1325 0, PAGE_CACHE_SIZE, NULL, journal_dirty_async_data);
1330 err = ext3_journal_stop(handle, inode);
1344 static int ext3_readpage(struct file *file, struct page *page)
1346 return block_read_full_page(page,ext3_get_block);
1350 static int ext3_flushpage(struct page *page, unsigned long offset)
1352 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1353 return journal_flushpage(journal, page, offset);
1356 static int ext3_releasepage(struct page *page, int wait)
1358 journal_t *journal = EXT3_JOURNAL(page->mapping->host);
1359 return journal_try_to_free_buffers(journal, page, wait);
1363 struct address_space_operations ext3_aops = {
1364 readpage: ext3_readpage, /* BKL not held. Don't need */
1365 writepage: ext3_writepage, /* BKL not held. We take it */
1366 sync_page: block_sync_page,
1367 prepare_write: ext3_prepare_write, /* BKL not held. We take it */
1368 commit_write: ext3_commit_write, /* BKL not held. We take it */
1369 bmap: ext3_bmap, /* BKL held */
1370 flushpage: ext3_flushpage, /* BKL not held. Don't need */
1371 releasepage: ext3_releasepage, /* BKL not held. Don't need */
1375 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
1376 * up to the end of the block which corresponds to `from'.
1377 * This is required during truncate. We need to physically zero the tail end
1378 * of that block so it doesn't yield old data if the file is later grown.
1380 static int ext3_block_truncate_page(handle_t *handle,
1381 struct address_space *mapping, loff_t from)
1383 unsigned long index = from >> PAGE_CACHE_SHIFT;
1384 unsigned offset = from & (PAGE_CACHE_SIZE-1);
1385 unsigned blocksize, iblock, length, pos;
1386 struct inode *inode = mapping->host;
1388 struct buffer_head *bh;
1391 blocksize = inode->i_sb->s_blocksize;
1392 length = offset & (blocksize - 1);
1394 /* Block boundary? Nothing to do */
1398 length = blocksize - length;
1399 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1401 page = grab_cache_page(mapping, index);
1407 create_empty_buffers(page, inode->i_dev, blocksize);
1409 /* Find the buffer that contains "offset" */
1412 while (offset >= pos) {
1413 bh = bh->b_this_page;
1419 if (!buffer_mapped(bh)) {
1420 /* Hole? Nothing to do */
1421 if (buffer_uptodate(bh))
1423 ext3_get_block(inode, iblock, bh, 0);
1424 /* Still unmapped? Nothing to do */
1425 if (!buffer_mapped(bh))
1429 /* Ok, it's mapped. Make sure it's up-to-date */
1430 if (Page_Uptodate(page))
1431 set_bit(BH_Uptodate, &bh->b_state);
1433 if (!buffer_uptodate(bh)) {
1435 ll_rw_block(READ, 1, &bh);
1437 /* Uhhuh. Read error. Complain and punt. */
1438 if (!buffer_uptodate(bh))
1442 if (ext3_should_journal_data(inode)) {
1443 BUFFER_TRACE(bh, "get write access");
1444 err = ext3_journal_get_write_access(handle, bh);
1449 memset(kmap(page) + offset, 0, length);
1450 flush_dcache_page(page);
1453 BUFFER_TRACE(bh, "zeroed end of block");
1456 if (ext3_should_journal_data(inode)) {
1457 err = ext3_journal_dirty_metadata(handle, bh);
1459 if (ext3_should_order_data(inode))
1460 err = ext3_journal_dirty_data(handle, bh, 0);
1461 __mark_buffer_dirty(bh);
1466 page_cache_release(page);
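/*
 * Worked example (editorial, assuming 1KB blocks and 4KB pages): truncating
 * a file to 5000 bytes gives index = 1, an in-page offset of 904, iblock 4
 * and length = 1024 - (904 & 1023) = 120, so the routine above zeroes the
 * last 120 bytes of logical block 4 (file bytes 5000..5119).  A later
 * extension of the file therefore cannot expose stale data from that block.
 */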
1472 * Probably it should be a library function... search for first non-zero word
1473 * or memcmp with zero_page, whatever is better for particular architecture.
1476 static inline int all_zeroes(u32 *p, u32 *q)
1485 * ext3_find_shared - find the indirect blocks for partial truncation.
1486 * @inode: inode in question
1487 * @depth: depth of the affected branch
1488 * @offsets: offsets of pointers in that branch (see ext3_block_to_path)
1489 * @chain: place to store the pointers to partial indirect blocks
1490 * @top: place to the (detached) top of branch
1492 * This is a helper function used by ext3_truncate().
1494 * When we do truncate() we may have to clean the ends of several
1495 * indirect blocks but leave the blocks themselves alive. Block is
1496 * partially truncated if some data below the new i_size is referred to
1497 * from it (and it is on the path to the first completely truncated
1498 * data block, indeed). We have to free the top of that path along
1499 * with everything to the right of the path. Since no allocation
1500 * past the truncation point is possible until ext3_truncate()
1501 * finishes, we may safely do the latter, but top of branch may
1502 * require special attention - pageout below the truncation point
1503 * might try to populate it.
1505 * We atomically detach the top of branch from the tree, store the
1506 * block number of its root in *@top, pointers to buffer_heads of
1507 * partially truncated blocks - in @chain[].bh and pointers to
1508 * their last elements that should not be removed - in
1509 * @chain[].p. Return value is the pointer to last filled element
1512 * The work left to the caller is to do the actual freeing of subtrees:
1513 * a) free the subtree starting from *@top
1514 * b) free the subtrees whose roots are stored in
1515 * (@chain[i].p+1 .. end of @chain[i].bh->b_data)
1516 * c) free the subtrees growing from the inode past the @chain[0].
1517 * (no partially truncated stuff there). */
1519 static Indirect *ext3_find_shared(struct inode *inode,
1525 Indirect *partial, *p;
1529 /* Make k index the deepest non-null offset + 1 */
1530 for (k = depth; k > 1 && !offsets[k-1]; k--)
1532 partial = ext3_get_branch(inode, k, offsets, chain, &err);
1533 /* Writer: pointers */
1535 partial = chain + k-1;
1537 * If the branch has acquired a continuation since we looked at it -
1538 * fine, it should all survive and the (new) top doesn't belong to us.
1540 if (!partial->key && *partial->p)
1543 for (p=partial; p>chain && all_zeroes((u32*)p->bh->b_data,p->p); p--)
1546 * OK, we've found the last block that must survive. The rest of our
1547 * branch should be detached before unlocking. However, if that rest
1548 * of branch is all ours and does not grow immediately from the inode
1549 * it's easier to cheat and just decrement partial->p.
1551 if (p == chain + k - 1 && p > chain) {
1555 /* Nope, don't do this in ext3. Must leave the tree intact */
1564 brelse(partial->bh);
1572 * Zero a number of block pointers in either an inode or an indirect block.
1573 * If we restart the transaction we must again get write access to the
1574 * indirect block for further modification.
1576 * We release `count' blocks on disk, but (last - first) may be greater
1577 * than `count' because there can be holes in there.
1580 ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
1581 unsigned long block_to_free, unsigned long count,
1582 u32 *first, u32 *last)
1585 if (try_to_extend_transaction(handle, inode)) {
1587 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
1588 ext3_journal_dirty_metadata(handle, bh);
1590 ext3_mark_inode_dirty(handle, inode);
1591 ext3_journal_test_restart(handle, inode);
1593 BUFFER_TRACE(bh, "retaking write access");
1594 ext3_journal_get_write_access(handle, bh);
1599 * Any buffers which are on the journal will be in memory. We find
1600 * them on the hash table so journal_revoke() will run journal_forget()
1601 * on them. We've already detached each block from the file, so
1602 * bforget() in journal_forget() should be safe.
1604 * AKPM: turn on bforget in journal_forget()!!!
1606 for (p = first; p < last; p++) {
1607 u32 nr = le32_to_cpu(*p);
1609 struct buffer_head *bh;
1612 bh = sb_get_hash_table(inode->i_sb, nr);
1613 ext3_forget(handle, 0, inode, bh, nr);
1617 ext3_free_blocks(handle, inode, block_to_free, count);
1621 * ext3_free_data - free a list of data blocks
1622 * @handle: handle for this transaction
1623 * @inode: inode we are dealing with
1624 * @this_bh: indirect buffer_head which contains *@first and *@last
1625 * @first: array of block numbers
1626 * @last: points immediately past the end of array
1628 * We are freeing all blocks referred to from that array (numbers are stored as
1629 * little-endian 32-bit) and updating @inode->i_blocks appropriately.
1631 * We accumulate contiguous runs of blocks to free. Conveniently, if these
1632 * blocks are contiguous then releasing them at one time will only affect one
1633 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
1634 * actually use a lot of journal space.
1636 * @this_bh will be %NULL if @first and @last point into the inode's direct
1639 static void ext3_free_data(handle_t *handle, struct inode *inode,
1640 struct buffer_head *this_bh, u32 *first, u32 *last)
1642 unsigned long block_to_free = 0; /* Starting block # of a run */
1643 unsigned long count = 0; /* Number of blocks in the run */
1644 u32 *block_to_free_p = NULL; /* Pointer into inode/ind
1647 unsigned long nr; /* Current block # */
1648 u32 *p; /* Pointer into inode/ind
1649 for current block */
1652 if (this_bh) { /* For indirect block */
1653 BUFFER_TRACE(this_bh, "get_write_access");
1654 err = ext3_journal_get_write_access(handle, this_bh);
1655 /* Important: if we can't update the indirect pointers
1656 * to the blocks, we can't free them. */
1661 for (p = first; p < last; p++) {
1662 nr = le32_to_cpu(*p);
1664 /* accumulate blocks to free if they're contiguous */
1667 block_to_free_p = p;
1669 } else if (nr == block_to_free + count) {
1672 ext3_clear_blocks(handle, inode, this_bh,
1674 count, block_to_free_p, p);
1676 block_to_free_p = p;
1683 ext3_clear_blocks(handle, inode, this_bh, block_to_free,
1684 count, block_to_free_p, p);
1687 BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
1688 ext3_journal_dirty_metadata(handle, this_bh);
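/*
 * Illustration (editorial): if an indirect block's live entries are the
 * block numbers 100, 101, 102 and 200, the loop above makes only two
 * ext3_clear_blocks() calls - one for the contiguous run 100-102 (count 3)
 * and one for the lone block 200 - so the bitmap and group descriptor
 * buffers are dirtied far less often than a per-block free would require.
 */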
1693 * ext3_free_branches - free an array of branches
1694 * @handle: JBD handle for this transaction
1695 * @inode: inode we are dealing with
1696 * @parent_bh: the buffer_head which contains *@first and *@last
1697 * @first: array of block numbers
1698 * @last: pointer immediately past the end of array
1699 * @depth: depth of the branches to free
1701 * We are freeing all blocks referred to from these branches (numbers are
1702 * stored as little-endian 32-bit) and updating @inode->i_blocks
1705 static void ext3_free_branches(handle_t *handle, struct inode *inode,
1706 struct buffer_head *parent_bh,
1707 u32 *first, u32 *last, int depth)
1712 if (is_handle_aborted(handle))
1716 struct buffer_head *bh;
1717 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
1719 while (--p >= first) {
1720 nr = le32_to_cpu(*p);
1722 continue; /* A hole */
1724 /* Go read the buffer for the next level down */
1725 bh = sb_bread(inode->i_sb, nr);
1728 * A read failure? Report error and clear slot
1732 ext3_error(inode->i_sb, "ext3_free_branches",
1733 "Read failure, inode=%ld, block=%ld",
1738 /* This zaps the entire block. Bottom up. */
1739 BUFFER_TRACE(bh, "free child branches");
1740 ext3_free_branches(handle, inode, bh, (u32*)bh->b_data,
1741 (u32*)bh->b_data + addr_per_block,
1745 * We've probably journalled the indirect block several
1746 * times during the truncate. But it's no longer
1747 * needed and we now drop it from the transaction via
1750 * That's easy if it's exclusively part of this
1751 * transaction. But if it's part of the committing
1752 * transaction then journal_forget() will simply
1753 * brelse() it. That means that if the underlying
1754 * block is reallocated in ext3_get_block(),
1755 * unmap_underlying_metadata() will find this block
1756 * and will try to get rid of it. damn, damn.
1758 * If this block has already been committed to the
1759 * journal, a revoke record will be written. And
1760 * revoke records must be emitted *before* clearing
1761 * this block's bit in the bitmaps.
1763 ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
1766 * Everything below this pointer has been
1767 * released. Now let this top-of-subtree go.
1769 * We want the freeing of this indirect block to be
1770 * atomic in the journal with the updating of the
1771 * bitmap block which owns it. So make some room in
1774 * We zero the parent pointer *after* freeing its
1775 * pointee in the bitmaps, so if extend_transaction()
1776 * for some reason fails to put the bitmap changes and
1777 * the release into the same transaction, recovery
1778 * will merely complain about releasing a free block,
1779 * rather than leaking blocks.
1781 if (is_handle_aborted(handle))
1783 if (try_to_extend_transaction(handle, inode)) {
1784 ext3_mark_inode_dirty(handle, inode);
1785 ext3_journal_test_restart(handle, inode);
1788 ext3_free_blocks(handle, inode, nr, 1);
1792 * The block which we have just freed is
1793 * pointed to by an indirect block: journal it
1795 BUFFER_TRACE(parent_bh, "get_write_access");
1796 if (!ext3_journal_get_write_access(handle,
1799 BUFFER_TRACE(parent_bh,
1800 "call ext3_journal_dirty_metadata");
1801 ext3_journal_dirty_metadata(handle,
1807 /* We have reached the bottom of the tree. */
1808 BUFFER_TRACE(parent_bh, "free data blocks");
1809 ext3_free_data(handle, inode, parent_bh, first, last);
1816 * We block out ext3_get_block() block instantiations across the entire
1817 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
1818 * simultaneously on behalf of the same inode.
1820 * As we work through the truncate and commit bits of it to the journal there
1821 * is one core, guiding principle: the file's tree must always be consistent on
1822 * disk. We must be able to restart the truncate after a crash.
1824 * The file's tree may be transiently inconsistent in memory (although it
1825 * probably isn't), but whenever we close off and commit a journal transaction,
1826 * the contents of (the filesystem + the journal) must be consistent and
1827 * restartable. It's pretty simple, really: bottom up, right to left (although
1828 * left-to-right works OK too).
1830 * Note that at recovery time, journal replay occurs *before* the restart of
1831 * truncate against the orphan inode list.
1833 * The committed inode has the new, desired i_size (which is the same as
1834 * i_disksize in this case). After a crash, ext3_orphan_cleanup() will see
1835 * that this inode's truncate did not complete and it will again call
1836 * ext3_truncate() to have another go. So there will be instantiated blocks
1837 * to the right of the truncation point in a crashed ext3 filesystem. But
1838 * that's fine - as long as they are linked from the inode, the post-crash
1839 * ext3_truncate() run will find them and release them.
1842 void ext3_truncate(struct inode * inode)
1845 u32 *i_data = inode->u.ext3_i.i_data;
1846 int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
1855 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1856 S_ISLNK(inode->i_mode)))
1858 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1861 ext3_discard_prealloc(inode);
1863 handle = start_transaction(inode);
1865 return; /* AKPM: return what? */
1867 blocksize = inode->i_sb->s_blocksize;
1868 last_block = (inode->i_size + blocksize-1)
1869 >> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
1871 ext3_block_truncate_page(handle, inode->i_mapping, inode->i_size);
1874 n = ext3_block_to_path(inode, last_block, offsets);
1876 goto out_stop; /* error */
1879 * OK. This truncate is going to happen. We add the inode to the
1880 * orphan list, so that if this truncate spans multiple transactions,
1881 * and we crash, we will resume the truncate when the filesystem
1882 * recovers. It also marks the inode dirty, to catch the new size.
1884 * Implication: the file must always be in a sane, consistent
1885 * truncatable state while each transaction commits.
1887 if (ext3_orphan_add(handle, inode))
1891 * The orphan list entry will now protect us from any crash which
1892 * occurs before the truncate completes, so it is now safe to propagate
1893 * the new, shorter inode size (held for now in i_size) into the
1894 * on-disk inode. We do this via i_disksize, which is the value which
1895 * ext3 *really* writes onto the disk inode.
1897 inode->u.ext3_i.i_disksize = inode->i_size;
1900 * From here we block out all ext3_get_block() callers who want to
1901 * modify the block allocation tree.
1903 down_write(&inode->u.ext3_i.truncate_sem);
1905 if (n == 1) { /* direct blocks */
1906 ext3_free_data(handle, inode, NULL, i_data+offsets[0],
1907 i_data + EXT3_NDIR_BLOCKS);
1911 partial = ext3_find_shared(inode, n, offsets, chain, &nr);
1912 /* Kill the top of shared branch (not detached) */
1914 if (partial == chain) {
1915 /* Shared branch grows from the inode */
1916 ext3_free_branches(handle, inode, NULL,
1917 &nr, &nr+1, (chain+n-1) - partial);
1920 * We mark the inode dirty prior to restart,
1921 * and prior to stop. No need for it here.
1924 /* Shared branch grows from an indirect block */
1925 BUFFER_TRACE(partial->bh, "get_write_access");
1926 ext3_free_branches(handle, inode, partial->bh,
1928 partial->p+1, (chain+n-1) - partial);
1931 /* Clear the ends of indirect blocks on the shared branch */
1932 while (partial > chain) {
1933 ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
1934 (u32*)partial->bh->b_data + addr_per_block,
1935 (chain+n-1) - partial);
1936 BUFFER_TRACE(partial->bh, "call brelse");
1937 brelse (partial->bh);
1941 /* Kill the remaining (whole) subtrees */
1942 switch (offsets[0]) {
1944 nr = i_data[EXT3_IND_BLOCK];
1946 ext3_free_branches(handle, inode, NULL,
1948 i_data[EXT3_IND_BLOCK] = 0;
1950 case EXT3_IND_BLOCK:
1951 nr = i_data[EXT3_DIND_BLOCK];
1953 ext3_free_branches(handle, inode, NULL,
1955 i_data[EXT3_DIND_BLOCK] = 0;
1957 case EXT3_DIND_BLOCK:
1958 nr = i_data[EXT3_TIND_BLOCK];
1960 ext3_free_branches(handle, inode, NULL,
1962 i_data[EXT3_TIND_BLOCK] = 0;
1964 case EXT3_TIND_BLOCK:
1967 up_write(&inode->u.ext3_i.truncate_sem);
1968 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1969 ext3_mark_inode_dirty(handle, inode);
1971 /* In a multi-transaction truncate, we only make the final
1972 * transaction synchronous */
1977 * If this was a simple ftruncate(), and the file will remain alive
1978 * then we need to clear up the orphan record which we created above.
1979 * However, if this was a real unlink then we were called by
1980 * ext3_delete_inode(), and we allow that function to clean up the
1981 * orphan info for us.
1984 ext3_orphan_del(handle, inode);
1986 ext3_journal_stop(handle, inode);
1990 * ext3_get_inode_loc returns with an extra refcount against the
1991 * inode's underlying buffer_head on success.
1994 int ext3_get_inode_loc (struct inode *inode, struct ext3_iloc *iloc)
1996 struct buffer_head *bh = 0;
1997 unsigned long block;
1998 unsigned long block_group;
1999 unsigned long group_desc;
2001 unsigned long offset;
2002 struct ext3_group_desc * gdp;
2004 if ((inode->i_ino != EXT3_ROOT_INO &&
2005 inode->i_ino != EXT3_ACL_IDX_INO &&
2006 inode->i_ino != EXT3_ACL_DATA_INO &&
2007 inode->i_ino != EXT3_JOURNAL_INO &&
2008 inode->i_ino < EXT3_FIRST_INO(inode->i_sb)) ||
2009 inode->i_ino > le32_to_cpu(
2010 inode->i_sb->u.ext3_sb.s_es->s_inodes_count)) {
2011 ext3_error (inode->i_sb, "ext3_get_inode_loc",
2012 "bad inode number: %lu", inode->i_ino);
2015 block_group = (inode->i_ino - 1) / EXT3_INODES_PER_GROUP(inode->i_sb);
2016 if (block_group >= inode->i_sb->u.ext3_sb.s_groups_count) {
2017 ext3_error (inode->i_sb, "ext3_get_inode_loc",
2018 "group >= groups count");
2021 group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(inode->i_sb);
2022 desc = block_group & (EXT3_DESC_PER_BLOCK(inode->i_sb) - 1);
2023 bh = inode->i_sb->u.ext3_sb.s_group_desc[group_desc];
2025 ext3_error (inode->i_sb, "ext3_get_inode_loc",
2026 "Descriptor not loaded");
2030 gdp = (struct ext3_group_desc *) bh->b_data;
2032 * Figure out the offset within the block group inode table
2034 offset = ((inode->i_ino - 1) % EXT3_INODES_PER_GROUP(inode->i_sb)) *
2035 EXT3_INODE_SIZE(inode->i_sb);
2036 block = le32_to_cpu(gdp[desc].bg_inode_table) +
2037 (offset >> EXT3_BLOCK_SIZE_BITS(inode->i_sb));
2038 if (!(bh = sb_bread(inode->i_sb, block))) {
2039 ext3_error (inode->i_sb, "ext3_get_inode_loc",
2040 "unable to read inode block - "
2041 "inode=%lu, block=%lu", inode->i_ino, block);
2044 offset &= (EXT3_BLOCK_SIZE(inode->i_sb) - 1);
2047 iloc->raw_inode = (struct ext3_inode *) (bh->b_data + offset);
2048 iloc->block_group = block_group;
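/*
 * Worked example (editorial, assuming 4KB blocks, 128-byte on-disk inodes
 * and 8192 inodes per group): for inode 12, block_group = (12 - 1) / 8192
 * = 0 and offset = 11 * 128 = 1408 bytes into that group's inode table, so
 * the raw inode lives in block bg_inode_table + (1408 >> 12) =
 * bg_inode_table, at byte offset 1408 & 4095 = 1408 within that block.
 */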
2056 void ext3_read_inode(struct inode * inode)
2058 struct ext3_iloc iloc;
2059 struct ext3_inode *raw_inode;
2060 struct buffer_head *bh;
2063 if(ext3_get_inode_loc(inode, &iloc))
2066 raw_inode = iloc.raw_inode;
2067 init_rwsem(&inode->u.ext3_i.truncate_sem);
2068 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2069 inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2070 inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2071 if(!(test_opt (inode->i_sb, NO_UID32))) {
2072 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2073 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2075 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2076 inode->i_size = le32_to_cpu(raw_inode->i_size);
2077 inode->i_atime = le32_to_cpu(raw_inode->i_atime);
2078 inode->i_ctime = le32_to_cpu(raw_inode->i_ctime);
2079 inode->i_mtime = le32_to_cpu(raw_inode->i_mtime);
2080 inode->u.ext3_i.i_dtime = le32_to_cpu(raw_inode->i_dtime);
2081 /* We now have enough fields to check if the inode was active or not.
2082 * This is needed because nfsd might try to access dead inodes;
2083 * the test is the same one that e2fsck uses.
2084 * NeilBrown 1999oct15
2086 if (inode->i_nlink == 0) {
2087 if (inode->i_mode == 0 ||
2088 !(inode->i_sb->u.ext3_sb.s_mount_state & EXT3_ORPHAN_FS)) {
2089 /* this inode is deleted */
2093 /* The only unlinked inodes we let through here have
2094 * valid i_mode and are being read by the orphan
2095 * recovery code: that's fine, we're about to complete
2096 * the process of deleting those. */
2098 inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size
2099 * (for stat), not the fs block
2100 * size */
2101 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
2102 inode->i_version = ++event;
2103 inode->u.ext3_i.i_flags = le32_to_cpu(raw_inode->i_flags);
2104 #ifdef EXT3_FRAGMENTS
2105 inode->u.ext3_i.i_faddr = le32_to_cpu(raw_inode->i_faddr);
2106 inode->u.ext3_i.i_frag_no = raw_inode->i_frag;
2107 inode->u.ext3_i.i_frag_size = raw_inode->i_fsize;
2108 #endif
2109 inode->u.ext3_i.i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
2110 if (!S_ISREG(inode->i_mode)) {
2111 inode->u.ext3_i.i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
2112 } else {
2113 inode->i_size |=
2114 ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
2115 }
2116 inode->u.ext3_i.i_disksize = inode->i_size;
2117 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2118 #ifdef EXT3_PREALLOCATE
2119 inode->u.ext3_i.i_prealloc_count = 0;
2120 #endif
2121 inode->u.ext3_i.i_block_group = iloc.block_group;
2124 * NOTE! The in-memory inode i_data array is in little-endian order
2125 * even on big-endian machines: we do NOT byteswap the block numbers!
2127 for (block = 0; block < EXT3_N_BLOCKS; block++)
2128 inode->u.ext3_i.i_data[block] = iloc.raw_inode->i_block[block];
2129 INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan);
2133 if (inode->i_ino == EXT3_ACL_IDX_INO ||
2134 inode->i_ino == EXT3_ACL_DATA_INO)
2135 /* Nothing to do */ ;
2136 else if (S_ISREG(inode->i_mode)) {
2137 inode->i_op = &ext3_file_inode_operations;
2138 inode->i_fop = &ext3_file_operations;
2139 inode->i_mapping->a_ops = &ext3_aops;
2140 } else if (S_ISDIR(inode->i_mode)) {
2141 inode->i_op = &ext3_dir_inode_operations;
2142 inode->i_fop = &ext3_dir_operations;
2143 } else if (S_ISLNK(inode->i_mode)) {
2144 if (!inode->i_blocks)
2145 inode->i_op = &ext3_fast_symlink_inode_operations;
2146 else {
2147 inode->i_op = &page_symlink_inode_operations;
2148 inode->i_mapping->a_ops = &ext3_aops;
2149 }
2150 } else
2151 init_special_inode(inode, inode->i_mode,
2152 le32_to_cpu(iloc.raw_inode->i_block[0]));
2153 /* inode->i_attr_flags = 0; unused */
2154 if (inode->u.ext3_i.i_flags & EXT3_SYNC_FL) {
2155 /* inode->i_attr_flags |= ATTR_FLAG_SYNCRONOUS; unused */
2156 inode->i_flags |= S_SYNC;
2158 if (inode->u.ext3_i.i_flags & EXT3_APPEND_FL) {
2159 /* inode->i_attr_flags |= ATTR_FLAG_APPEND; unused */
2160 inode->i_flags |= S_APPEND;
2162 if (inode->u.ext3_i.i_flags & EXT3_IMMUTABLE_FL) {
2163 /* inode->i_attr_flags |= ATTR_FLAG_IMMUTABLE; unused */
2164 inode->i_flags |= S_IMMUTABLE;
2166 if (inode->u.ext3_i.i_flags & EXT3_NOATIME_FL) {
2167 /* inode->i_attr_flags |= ATTR_FLAG_NOATIME; unused */
2168 inode->i_flags |= S_NOATIME;
2173 make_bad_inode(inode);
2178 * Post the struct inode info into an on-disk inode location in the
2179 * buffer-cache. This gobbles the caller's reference to the
2180 * buffer_head in the inode location struct.
2183 static int ext3_do_update_inode(handle_t *handle,
2184 struct inode *inode,
2185 struct ext3_iloc *iloc)
2187 struct ext3_inode *raw_inode = iloc->raw_inode;
2188 struct buffer_head *bh = iloc->bh;
2189 int err = 0, rc, block;
2192 BUFFER_TRACE(bh, "get_write_access");
2193 err = ext3_journal_get_write_access(handle, bh);
2197 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2198 if(!(test_opt(inode->i_sb, NO_UID32))) {
2199 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2200 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2202 * Fix up interoperability with old kernels. Otherwise, old inodes get
2203 * re-used with the upper 16 bits of the uid/gid intact
2205 if(!inode->u.ext3_i.i_dtime) {
2206 raw_inode->i_uid_high =
2207 cpu_to_le16(high_16_bits(inode->i_uid));
2208 raw_inode->i_gid_high =
2209 cpu_to_le16(high_16_bits(inode->i_gid));
2211 raw_inode->i_uid_high = 0;
2212 raw_inode->i_gid_high = 0;
2215 raw_inode->i_uid_low =
2216 cpu_to_le16(fs_high2lowuid(inode->i_uid));
2217 raw_inode->i_gid_low =
2218 cpu_to_le16(fs_high2lowgid(inode->i_gid));
2219 raw_inode->i_uid_high = 0;
2220 raw_inode->i_gid_high = 0;
2222 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2223 raw_inode->i_size = cpu_to_le32(inode->u.ext3_i.i_disksize);
2224 raw_inode->i_atime = cpu_to_le32(inode->i_atime);
2225 raw_inode->i_ctime = cpu_to_le32(inode->i_ctime);
2226 raw_inode->i_mtime = cpu_to_le32(inode->i_mtime);
2227 raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
2228 raw_inode->i_dtime = cpu_to_le32(inode->u.ext3_i.i_dtime);
2229 raw_inode->i_flags = cpu_to_le32(inode->u.ext3_i.i_flags);
2230 #ifdef EXT3_FRAGMENTS
2231 raw_inode->i_faddr = cpu_to_le32(inode->u.ext3_i.i_faddr);
2232 raw_inode->i_frag = inode->u.ext3_i.i_frag_no;
2233 raw_inode->i_fsize = inode->u.ext3_i.i_frag_size;
2234 #else
2235 /* If we are not tracking these fields in the in-memory inode,
2236 * then preserve them on disk, but still initialise them to zero
2237 * for new inodes. */
2238 if (EXT3_I(inode)->i_state & EXT3_STATE_NEW) {
2239 raw_inode->i_faddr = 0;
2240 raw_inode->i_frag = 0;
2241 raw_inode->i_fsize = 0;
2242 }
2243 #endif
2244 raw_inode->i_file_acl = cpu_to_le32(inode->u.ext3_i.i_file_acl);
2245 if (!S_ISREG(inode->i_mode)) {
2246 raw_inode->i_dir_acl = cpu_to_le32(inode->u.ext3_i.i_dir_acl);
2247 } else {
2248 raw_inode->i_size_high =
2249 cpu_to_le32(inode->u.ext3_i.i_disksize >> 32);
2250 if (inode->u.ext3_i.i_disksize > 0x7fffffffULL) {
2251 struct super_block *sb = inode->i_sb;
2252 if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
2253 EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
2254 EXT3_SB(sb)->s_es->s_rev_level ==
2255 cpu_to_le32(EXT3_GOOD_OLD_REV)) {
2256 /* If this is the first large file
2257 * created, add a flag to the superblock.
2258 */
2259 err = ext3_journal_get_write_access(handle,
2260 sb->u.ext3_sb.s_sbh);
2263 ext3_update_dynamic_rev(sb);
2264 EXT3_SET_RO_COMPAT_FEATURE(sb,
2265 EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
2268 err = ext3_journal_dirty_metadata(handle,
2269 sb->u.ext3_sb.s_sbh);
2273 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
2274 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
2275 raw_inode->i_block[0] =
2276 cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
2277 else for (block = 0; block < EXT3_N_BLOCKS; block++)
2278 raw_inode->i_block[block] = inode->u.ext3_i.i_data[block];
2280 BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
2281 rc = ext3_journal_dirty_metadata(handle, bh);
2284 EXT3_I(inode)->i_state &= ~EXT3_STATE_NEW;
2288 ext3_std_error(inode->i_sb, err);
2293 * ext3_write_inode()
2295 * We are called from a few places:
2297 * - Within generic_file_write() for O_SYNC files.
2298 * Here, there will be no transaction running. We wait for any running
2299 * transaction to commit.
2301 * - Within sys_sync(), kupdate and such.
2302 * We wait on commit, if told to.
2304 * - Within prune_icache() (PF_MEMALLOC == true)
2305 * Here we simply return. We can't afford to block kswapd on the
2306 * journal commit.
2308 * In all cases it is actually safe for us to return without doing anything,
2309 * because the inode has been copied into a raw inode buffer in
2310 * ext3_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
2311 * knfsd.
2313 * Note that we are absolutely dependent upon all inode dirtiers doing the
2314 * right thing: they *must* call mark_inode_dirty() after dirtying info in
2315 * which we are interested.
2317 * It would be a bug for them to not do this. The code:
2319 * mark_inode_dirty(inode)
2320 * stuff();
2321 * inode->i_size = expr;
2323 * is in error because a kswapd-driven write_inode() could occur while
2324 * `stuff()' is running, and the new i_size will be lost. Plus the inode
2325 * will no longer be on the superblock's dirty inode list.
2327 void ext3_write_inode(struct inode *inode, int wait)
2329 if (current->flags & PF_MEMALLOC)
2332 if (ext3_journal_current_handle()) {
2333 jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
2340 ext3_force_commit(inode->i_sb);
2346 * Called from notify_change.
2348 * We want to trap VFS attempts to truncate the file as soon as
2349 * possible. In particular, we want to make sure that when the VFS
2350 * shrinks i_size, we put the inode on the orphan list and modify
2351 * i_disksize immediately, so that during the subsequent flushing of
2352 * dirty pages and freeing of disk blocks, we can guarantee that any
2353 * commit will leave the blocks being flushed in an unused state on
2354 * disk. (On recovery, the inode will get truncated and the blocks will
2355 * be freed, so we have a strong guarantee that no future commit will
2356 * leave these blocks visible to the user.)
2358 * This is only needed for regular files. rmdir() has its own path, and
2359 * we can never truncate a directory except on final unlink (at which
2360 * point i_nlink is zero so recovery is easy.)
2362 * Called with the BKL.
2365 int ext3_setattr(struct dentry *dentry, struct iattr *attr)
2367 struct inode *inode = dentry->d_inode;
2369 const unsigned int ia_valid = attr->ia_valid;
2371 error = inode_change_ok(inode, attr);
2375 if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
2376 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
2377 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
2382 if (attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
2385 handle = ext3_journal_start(inode, 3);
2386 if (IS_ERR(handle)) {
2387 error = PTR_ERR(handle);
2391 error = ext3_orphan_add(handle, inode);
2392 inode->u.ext3_i.i_disksize = attr->ia_size;
2393 rc = ext3_mark_inode_dirty(handle, inode);
2396 ext3_journal_stop(handle, inode);
2399 rc = inode_setattr(inode, attr);
2401 /* If inode_setattr's call to ext3_truncate failed to get a
2402 * transaction handle at all, we need to clean up the in-core
2403 * orphan list manually. */
2405 ext3_orphan_del(NULL, inode);
2408 ext3_std_error(inode->i_sb, error);
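/*
 * Rough sketch of the shrinking-truncate ordering implemented above
 * (illustrative only; names follow the calls visible in this function):
 *
 *	handle = ext3_journal_start(inode, 3);
 *	ext3_orphan_add(handle, inode);          (survive a crash mid-truncate)
 *	inode->u.ext3_i.i_disksize = attr->ia_size;
 *	ext3_mark_inode_dirty(handle, inode);
 *	ext3_journal_stop(handle, inode);
 *	inode_setattr(inode, attr);              (flushes pages, frees blocks)
 *
 * so any commit which happens while blocks are being freed already sees the
 * inode on the orphan list with the new, smaller i_disksize.
 */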
2416 * akpm: how many blocks doth make a writepage()?
2418 * With N blocks per page, it may be:
2419 * N data blocks
2420 * 2 indirect block
2421 * 2 dindirect
2422 * 1 tindirect
2423 * N+5 bitmap blocks (from the above)
2424 * N+5 group descriptor summary blocks
2425 * 1 inode block
2426 * 1 superblock.
2427 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
2429 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
2431 * With ordered or writeback data it's the same, less the N data blocks.
2433 * If the inode's direct blocks can hold an integral number of pages then a
2434 * page cannot straddle two indirect blocks, and we can only touch one indirect
2435 * and dindirect block, and the "5" above becomes "3".
2437 * This still overestimates under most circumstances. If we were to pass the
2438 * start and end offsets in here as well we could do block_to_path() on each
2439 * block and work out the exact number of indirects which are touched. Pah.
2442 int ext3_writepage_trans_blocks(struct inode *inode)
2444 int bpp = ext3_journal_blocks_per_page(inode);
2445 int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
2446 int ret;
2448 if (ext3_should_journal_data(inode))
2449 ret = 3 * (bpp + indirects) + 2;
2450 else
2451 ret = 2 * (bpp + indirects) + 2;
2454 ret += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
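/*
 * Worked example (illustrative): with 1k blocks on a 4k page, bpp == 4 and
 * EXT3_NDIR_BLOCKS % bpp == 0, so indirects == 3 and
 *
 *	data=journal:           ret = 3 * (4 + 3) + 2 = 23
 *	data=ordered/writeback: ret = 2 * (4 + 3) + 2 = 16
 *
 * plus 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota updates where they
 * apply.
 */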
2461 ext3_mark_iloc_dirty(handle_t *handle,
2462 struct inode *inode,
2463 struct ext3_iloc *iloc)
2468 /* the do_update_inode consumes one bh->b_count */
2469 atomic_inc(&iloc->bh->b_count);
2470 err = ext3_do_update_inode(handle, inode, iloc);
2471 /* ext3_do_update_inode() does journal_dirty_metadata */
2472 brelse(iloc->bh);
2473 } else {
2474 printk(KERN_EMERG "%s: called with no handle!\n", __FUNCTION__);
2480 * On success, we end up with an outstanding reference count against
2481 * iloc->bh. This _must_ be cleaned up later.
2485 ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
2486 struct ext3_iloc *iloc)
2490 err = ext3_get_inode_loc(inode, iloc);
2492 BUFFER_TRACE(iloc->bh, "get_write_access");
2493 err = ext3_journal_get_write_access(handle, iloc->bh);
2500 ext3_std_error(inode->i_sb, err);
2505 * akpm: What we do here is to mark the in-core inode as clean
2506 * with respect to inode dirtiness (it may still be data-dirty).
2507 * This means that the in-core inode may be reaped by prune_icache
2508 * without having to perform any I/O. This is a very good thing,
2509 * because *any* task may call prune_icache - even ones which
2510 * have a transaction open against a different journal.
2512 * Is this cheating? Not really. Sure, we haven't written the
2513 * inode out, but prune_icache isn't a user-visible syncing function.
2514 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
2515 * we start and wait on commits.
2517 * Is this efficient/effective? Well, we're being nice to the system
2518 * by cleaning up our inodes proactively so they can be reaped
2519 * without I/O. But we are potentially leaving up to five seconds'
2520 * worth of inodes floating about which prune_icache wants us to
2521 * write out. One way to fix that would be to get prune_icache()
2522 * to do a write_super() to free up some memory. It has the desired
2523 * effect.
2525 int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
2527 struct ext3_iloc iloc;
2528 int err;
2530 err = ext3_reserve_inode_write(handle, inode, &iloc);
2531 if (!err)
2532 err = ext3_mark_iloc_dirty(handle, inode, &iloc);
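/*
 * Typical caller pattern (a sketch, not lifted from this file): any code
 * path which modifies in-core inode fields inside a transaction is expected
 * to do roughly
 *
 *	handle = ext3_journal_start(inode, credits);   (credits: caller-chosen)
 *	... update inode fields ...
 *	ext3_mark_inode_dirty(handle, inode);
 *	ext3_journal_stop(handle, inode);
 *
 * which copies the in-core inode into its backing buffer via
 * ext3_do_update_inode() and hands that buffer to the journal.
 */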
2537 * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
2539 * We're really interested in the case where a file is being extended.
2540 * i_size has been changed by generic_commit_write() and we thus need
2541 * to include the updated inode in the current transaction.
2543 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
2544 * are allocated to the file.
2546 * If the inode is marked synchronous, we don't honour that here - doing
2547 * so would cause a commit on atime updates, which we don't bother doing.
2548 * We handle synchronous inodes at the highest possible level.
2550 void ext3_dirty_inode(struct inode *inode)
2552 handle_t *current_handle = ext3_journal_current_handle();
2556 handle = ext3_journal_start(inode, 2);
2559 if (current_handle &&
2560 current_handle->h_transaction != handle->h_transaction) {
2561 /* This task has a transaction open against a different fs */
2562 printk(KERN_EMERG "%s: transactions do not match!\n",
2565 jbd_debug(5, "marking dirty. outer handle=%p\n",
2567 ext3_mark_inode_dirty(handle, inode);
2569 ext3_journal_stop(handle, inode);
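/*
 * (Sketch of the surrounding VFS wiring, stated as an assumption rather than
 * code from this file: ext3_dirty_inode is reached through the superblock's
 * dirty_inode hook, e.g.
 *
 *	inode->i_size = new_size;
 *	mark_inode_dirty(inode);    -> __mark_inode_dirty -> ext3_dirty_inode
 *
 * so an i_size update made under a running transaction is recorded in that
 * same transaction.)
 */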
2576 * Bind an inode's backing buffer_head into this transaction, to prevent
2577 * it from being flushed to disk early. Unlike
2578 * ext3_reserve_inode_write, this leaves behind no bh reference and
2579 * returns no iloc structure, so the caller needs to repeat the iloc
2580 * lookup to mark the inode dirty later.
2583 ext3_pin_inode(handle_t *handle, struct inode *inode)
2585 struct ext3_iloc iloc;
2589 err = ext3_get_inode_loc(inode, &iloc);
2591 BUFFER_TRACE(iloc.bh, "get_write_access");
2592 err = journal_get_write_access(handle, iloc.bh);
2594 err = ext3_journal_dirty_metadata(handle,
2595 iloc.bh);
2599 ext3_std_error(inode->i_sb, err);
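/*
 * Hypothetical usage (this helper has no caller in the code shown here):
 * pin the inode buffer early in a long transaction and redo the iloc lookup
 * when the inode is finally marked dirty, e.g.
 *
 *	ext3_pin_inode(handle, inode);
 *	... long-running updates ...
 *	ext3_mark_inode_dirty(handle, inode);
 */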
2604 int ext3_change_inode_journal_flag(struct inode *inode, int val)
2611 * We have to be very careful here: changing a data block's
2612 * journaling status dynamically is dangerous. If we write a
2613 * data block to the journal, change the status and then delete
2614 * that block, we risk forgetting to revoke the old log record
2615 * from the journal and so a subsequent replay can corrupt data.
2616 * So, first we make sure that the journal is empty and that
2617 * nobody is changing anything.
2620 journal = EXT3_JOURNAL(inode);
2621 if (is_journal_aborted(journal) || IS_RDONLY(inode))
2622 return -EROFS;
2624 journal_lock_updates(journal);
2625 journal_flush(journal);
2628 * OK, there are no updates running now, and all cached data is
2629 * synced to disk. We are now in a completely consistent state
2630 * which doesn't have anything in the journal, and we know that
2631 * no filesystem updates are running, so it is safe to modify
2632 * the inode's in-core data-journaling state flag now.
2635 if (val)
2636 inode->u.ext3_i.i_flags |= EXT3_JOURNAL_DATA_FL;
2637 else
2638 inode->u.ext3_i.i_flags &= ~EXT3_JOURNAL_DATA_FL;
2640 journal_unlock_updates(journal);
2642 /* Finally we can mark the inode as dirty. */
2644 handle = ext3_journal_start(inode, 1);
2645 if (IS_ERR(handle))
2646 return PTR_ERR(handle);
2648 err = ext3_mark_inode_dirty(handle, inode);
2650 ext3_journal_stop(handle, inode);
2651 ext3_std_error(inode->i_sb, err);
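/*
 * (Assumed caller, for illustration only - not code from this file: the
 * EXT3_IOC_SETFLAGS ioctl path is the natural place to flip the per-inode
 * data-journaling bit, e.g.
 *
 *	if ((flags ^ oldflags) & EXT3_JOURNAL_DATA_FL)
 *		err = ext3_change_inode_journal_flag(inode,
 *				(flags & EXT3_JOURNAL_DATA_FL) != 0);
 *
 * so the flush-the-journal-then-flip ordering above is always respected.)
 */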
2658 * ext3_aops_journal_start().
2660 * <This function died, but the comment lives on>
2662 * We need to take the inode semaphore *outside* the
2663 * journal_start/journal_stop. Otherwise, a different task could do a
2664 * wait_for_commit() while holding ->i_sem, which deadlocks. The rule
2665 * is: transaction open/closes are considered to be a locking operation
2666 * and they nest *inside* ->i_sem.
2667 * ----------------------------------------------------------------------------
2670 * -> generic_file_write()
2671 * -> __alloc_pages()
2673 * -> ext3_writepage()
2675 * And the writepage can be on a different fs while we have a
2676 * transaction open against this one! Bad.
2678 * I tried making the task PF_MEMALLOC here, but that simply results in
2679 * 0-order allocation failures passed back to generic_file_write().
2680 * Instead, we rely on the reentrancy protection in ext3_writepage().
2681 * ----------------------------------------------------------------------------
2682 * When we do the journal_start() here we don't really need to reserve
2683 * any blocks - we won't need any until we hit ext3_prepare_write(),
2684 * which does all the needed journal extending. However! There is a
2685 * problem with quotas:
2695 * ->ext3_prepare_write
2699 * ext3_create (for example)
2701 * ->dquot_initialize
2704 * Deadlock. Thread 1's journal_start blocks because thread 2 has a
2705 * transaction open. Thread 2's transaction will never close because
2706 * thread 2 is stuck waiting for the dquot lock.
2708 * So. We must ensure that thread 1 *never* needs to extend the journal
2709 * for quota writes. We do that by reserving enough journal blocks
2710 * here, in ext3_aops_journal_start() to ensure that the forthcoming "see if we
2711 * need to extend" test in ext3_prepare_write() succeeds.