// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <trace/events/jbd2.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);

static struct kmem_cache *transaction_cache;
int __init jbd2_journal_init_transaction_cache(void)
{
	J_ASSERT(!transaction_cache);
	transaction_cache = kmem_cache_create("jbd2_transaction_s",
					sizeof(transaction_t),
					0,
					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
					NULL);
	if (transaction_cache)
		return 0;
	return -ENOMEM;
}

void jbd2_journal_destroy_transaction_cache(void)
{
	kmem_cache_destroy(transaction_cache);
	transaction_cache = NULL;
}

void jbd2_journal_free_transaction(transaction_t *transaction)
{
	if (unlikely(ZERO_OR_NULL_PTR(transaction)))
		return;
	kmem_cache_free(transaction_cache, transaction);
}

/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply allocate and initialise a new transaction.  Create it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 *	The journal MUST be locked.  We don't perform atomic mallocs on the
 *	new transaction and we can't block without protecting against other
 *	processes trying to touch the journal while it is in transition.
 */

static transaction_t *
jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
	transaction->t_journal = journal;
	transaction->t_state = T_RUNNING;
	transaction->t_start_time = ktime_get();
	transaction->t_tid = journal->j_transaction_sequence++;
	transaction->t_expires = jiffies + journal->j_commit_interval;
	spin_lock_init(&transaction->t_handle_lock);
	atomic_set(&transaction->t_updates, 0);
	atomic_set(&transaction->t_outstanding_credits,
		   atomic_read(&journal->j_reserved_credits));
	atomic_set(&transaction->t_handle_count, 0);
	INIT_LIST_HEAD(&transaction->t_inode_list);
	INIT_LIST_HEAD(&transaction->t_private_list);

	/* Set up the commit timer for the new transaction. */
	journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
	add_timer(&journal->j_commit_timer);

	J_ASSERT(journal->j_running_transaction == NULL);
	journal->j_running_transaction = transaction;
	transaction->t_max_wait = 0;
	transaction->t_start = jiffies;
	transaction->t_requested = 0;

	return transaction;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * Update transaction's maximum wait time, if debugging is enabled.
 *
 * In order for t_max_wait to be reliable, it must be protected by a
 * lock.  But doing so will mean that start_this_handle() can not be
 * run in parallel on SMP systems, which limits our scalability.  So
 * unless debugging is enabled, we no longer update t_max_wait, which
 * means that maximum wait time reported by the jbd2_run_stats
 * tracepoint will always be zero.
 */
static inline void update_t_max_wait(transaction_t *transaction,
				     unsigned long ts)
{
#ifdef CONFIG_JBD2_DEBUG
	if (jbd2_journal_enable_debug &&
	    time_after(transaction->t_start, ts)) {
		ts = jbd2_time_diff(ts, transaction->t_start);
		spin_lock(&transaction->t_handle_lock);
		if (ts > transaction->t_max_wait)
			transaction->t_max_wait = ts;
		spin_unlock(&transaction->t_handle_lock);
	}
#endif
}

/*
 * Wait until running transaction passes to T_FLUSH state and new transaction
 * can thus be started. Also starts the commit if needed. The function expects
 * running transaction to exist and releases j_state_lock.
 */
static void wait_transaction_locked(journal_t *journal)
	__releases(journal->j_state_lock)
{
	DEFINE_WAIT(wait);
	int need_to_start;
	tid_t tid = journal->j_running_transaction->t_tid;

	prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
			TASK_UNINTERRUPTIBLE);
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);
	jbd2_might_wait_for_commit(journal);
	schedule();
	finish_wait(&journal->j_wait_transaction_locked, &wait);
}

/*
 * Wait until running transaction transitions from T_SWITCH to T_FLUSH
 * state and new transaction can thus be started. The function releases
 * j_state_lock.
 */
static void wait_transaction_switching(journal_t *journal)
	__releases(journal->j_state_lock)
{
	DEFINE_WAIT(wait);

	if (WARN_ON(!journal->j_running_transaction ||
		    journal->j_running_transaction->t_state != T_SWITCH))
		return;
	prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
			TASK_UNINTERRUPTIBLE);
	read_unlock(&journal->j_state_lock);
	/*
	 * We don't call jbd2_might_wait_for_commit() here as there's no
	 * waiting for outstanding handles happening anymore in T_SWITCH state
	 * and handling of reserved handles actually relies on that for
	 * correctness.
	 */
	schedule();
	finish_wait(&journal->j_wait_transaction_locked, &wait);
}

static void sub_reserved_credits(journal_t *journal, int blocks)
{
	atomic_sub(blocks, &journal->j_reserved_credits);
	wake_up(&journal->j_wait_reserved);
}

/*
 * Wait until we can add credits for handle to the running transaction.  Called
 * with j_state_lock held for reading. Returns 0 if handle joined the running
 * transaction. Returns 1 if we had to wait, j_state_lock is dropped, and
 * caller must retry.
 */
static int add_transaction_credits(journal_t *journal, int blocks,
				   int rsv_blocks)
{
	transaction_t *t = journal->j_running_transaction;
	int needed;
	int total = blocks + rsv_blocks;

	/*
	 * If the current transaction is locked down for commit, wait
	 * for the lock to be released.
	 */
	if (t->t_state != T_RUNNING) {
		WARN_ON_ONCE(t->t_state >= T_FLUSH);
		wait_transaction_locked(journal);
		return 1;
	}

	/*
	 * If there is not enough space left in the log to write all
	 * potential buffers requested by this operation, we need to
	 * stall pending a log checkpoint to free some more log space.
	 */
	needed = atomic_add_return(total, &t->t_outstanding_credits);
	if (needed > journal->j_max_transaction_buffers) {
		/*
		 * If the current transaction is already too large,
		 * then start to commit it: we can then go back and
		 * attach this handle to a new transaction.
		 */
		atomic_sub(total, &t->t_outstanding_credits);

		/*
		 * Is the number of reserved credits in the current transaction too
		 * big to fit this handle? Wait until reserved credits are freed.
		 */
		if (atomic_read(&journal->j_reserved_credits) + total >
		    journal->j_max_transaction_buffers) {
			read_unlock(&journal->j_state_lock);
			jbd2_might_wait_for_commit(journal);
			wait_event(journal->j_wait_reserved,
				   atomic_read(&journal->j_reserved_credits) + total <=
				   journal->j_max_transaction_buffers);
			return 1;
		}

		wait_transaction_locked(journal);
		return 1;
	}

	/*
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint.  This is *critical* for
	 * correctness: a checkpoint of a buffer which is also
	 * associated with a committing transaction creates a deadlock,
	 * so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction.
	 */
	if (jbd2_log_space_left(journal) < jbd2_space_needed(journal)) {
		atomic_sub(total, &t->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		jbd2_might_wait_for_commit(journal);
		write_lock(&journal->j_state_lock);
		if (jbd2_log_space_left(journal) < jbd2_space_needed(journal))
			__jbd2_log_wait_for_space(journal);
		write_unlock(&journal->j_state_lock);
		return 1;
	}

	/* No reservation? We are done... */
	if (!rsv_blocks)
		return 0;

	needed = atomic_add_return(rsv_blocks, &journal->j_reserved_credits);
	/* We allow at most half of a transaction to be reserved */
	if (needed > journal->j_max_transaction_buffers / 2) {
		sub_reserved_credits(journal, rsv_blocks);
		atomic_sub(total, &t->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		jbd2_might_wait_for_commit(journal);
		wait_event(journal->j_wait_reserved,
			   atomic_read(&journal->j_reserved_credits) + rsv_blocks
			   <= journal->j_max_transaction_buffers / 2);
		return 1;
	}
	return 0;
}

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle,
			     gfp_t gfp_mask)
{
	transaction_t	*transaction, *new_transaction = NULL;
	int		blocks = handle->h_buffer_credits;
	int		rsv_blocks = 0;
	unsigned long ts = jiffies;

	if (handle->h_rsv_handle)
		rsv_blocks = handle->h_rsv_handle->h_buffer_credits;

	/*
	 * Limit the number of reserved credits to 1/2 of maximum transaction
	 * size and limit the number of total credits to not exceed maximum
	 * transaction size per operation.
	 */
	if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
	    (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
		printk(KERN_ERR "JBD2: %s wants too many credits "
		       "credits:%d rsv_credits:%d max:%d\n",
		       current->comm, blocks, rsv_blocks,
		       journal->j_max_transaction_buffers);
		WARN_ON(1);
		return -ENOSPC;
	}

alloc_transaction:
	if (!journal->j_running_transaction) {
		/*
		 * If __GFP_FS is not present, then we may be being called from
		 * inside the fs writeback layer, so we MUST NOT fail.
		 */
		if ((gfp_mask & __GFP_FS) == 0)
			gfp_mask |= __GFP_NOFAIL;
		new_transaction = kmem_cache_zalloc(transaction_cache,
						    gfp_mask);
		if (!new_transaction)
			return -ENOMEM;
	}

	jbd_debug(3, "New handle %p going live.\n", handle);

	/*
	 * We need to hold j_state_lock until t_updates has been incremented,
	 * for proper journal barrier handling
	 */
repeat:
	read_lock(&journal->j_state_lock);
	BUG_ON(journal->j_flags & JBD2_UNMOUNT);
	if (is_journal_aborted(journal) ||
	    (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
		read_unlock(&journal->j_state_lock);
		jbd2_journal_free_transaction(new_transaction);
		return -EROFS;
	}

	/*
	 * Wait on the journal's transaction barrier if necessary. Specifically
	 * we allow reserved handles to proceed because otherwise commit could
	 * deadlock on page writeback not being able to complete.
	 */
	if (!handle->h_reserved && journal->j_barrier_count) {
		read_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked,
			   journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		read_unlock(&journal->j_state_lock);
		if (!new_transaction)
			goto alloc_transaction;
		write_lock(&journal->j_state_lock);
		if (!journal->j_running_transaction &&
		    (handle->h_reserved || !journal->j_barrier_count)) {
			jbd2_get_transaction(journal, new_transaction);
			new_transaction = NULL;
		}
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	transaction = journal->j_running_transaction;

	if (!handle->h_reserved) {
		/* We may have dropped j_state_lock - restart in that case */
		if (add_transaction_credits(journal, blocks, rsv_blocks))
			goto repeat;
	} else {
		/*
		 * We have handle reserved so we are allowed to join T_LOCKED
		 * transaction and we don't have to check for transaction size
		 * and journal space. But we still have to wait while running
		 * transaction is being switched to a committing one as it
		 * won't wait for any handles anymore.
		 */
		if (transaction->t_state == T_SWITCH) {
			wait_transaction_switching(journal);
			goto repeat;
		}
		sub_reserved_credits(journal, blocks);
		handle->h_reserved = 0;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction.
	 */
	update_t_max_wait(transaction, ts);
	handle->h_transaction = transaction;
	handle->h_requested_credits = blocks;
	handle->h_start_jiffies = jiffies;
	atomic_inc(&transaction->t_updates);
	atomic_inc(&transaction->t_handle_count);
	jbd_debug(4, "Handle %p given %d credits (total %d, free %lu)\n",
		  handle, blocks,
		  atomic_read(&transaction->t_outstanding_credits),
		  jbd2_log_space_left(journal));
	read_unlock(&journal->j_state_lock);
	current->journal_info = handle;

	rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
	jbd2_journal_free_transaction(new_transaction);
	/*
	 * Ensure that no allocations done while the transaction is open are
	 * going to recurse back to the fs layer.
	 */
	handle->saved_alloc_context = memalloc_nofs_save();
	return 0;
}

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
	if (!handle)
		return NULL;
	handle->h_buffer_credits = nblocks;
	handle->h_ref = 1;

	return handle;
}

handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int rsv_blocks,
			      gfp_t gfp_mask, unsigned int type,
			      unsigned int line_no)
{
	handle_t *handle = journal_current_handle();
	int err;

	if (!journal)
		return ERR_PTR(-EROFS);

	if (handle) {
		J_ASSERT(handle->h_transaction->t_journal == journal);
		handle->h_ref++;
		return handle;
	}

	handle = new_handle(nblocks);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	if (rsv_blocks) {
		handle_t *rsv_handle;

		rsv_handle = new_handle(rsv_blocks);
		if (!rsv_handle) {
			jbd2_free_handle(handle);
			return ERR_PTR(-ENOMEM);
		}
		rsv_handle->h_reserved = 1;
		rsv_handle->h_journal = journal;
		handle->h_rsv_handle = rsv_handle;
	}

	err = start_this_handle(journal, handle, gfp_mask);
	if (err < 0) {
		if (handle->h_rsv_handle)
			jbd2_free_handle(handle->h_rsv_handle);
		jbd2_free_handle(handle);
		return ERR_PTR(err);
	}
	handle->h_type = type;
	handle->h_line_no = line_no;
	trace_jbd2_handle_start(journal->j_fs_dev->bd_dev,
				handle->h_transaction->t_tid, type,
				line_no, nblocks);

	return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);

/**
 * handle_t *jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space. Additionally, if rsv_blocks > 0, we also create another
 * handle with rsv_blocks reserved blocks in the journal. This handle is
 * stored in h_rsv_handle. It is not attached to any particular transaction
 * and thus doesn't block transaction commit. If the caller uses this reserved
 * handle, it has to set h_rsv_handle to NULL as otherwise jbd2_journal_stop()
 * on the parent handle will dispose the reserved one. Reserved handle has to
 * be converted to a normal handle using jbd2_journal_start_reserved() before
 * it can be used.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
	return jbd2__journal_start(journal, nblocks, 0, GFP_NOFS, 0, 0);
}
EXPORT_SYMBOL(jbd2_journal_start);
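
/*
 * Example (illustrative only, not part of jbd2): a filesystem caller
 * typically brackets a metadata update with jbd2_journal_start() and
 * jbd2_journal_stop().  The buffer and the credit count here are
 * hypothetical; real callers such as ext4 compute credits per operation.
 *
 *	handle_t *handle = jbd2_journal_start(journal, 2);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (!err) {
 *		... modify bh->b_data ...
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *	}
 *	jbd2_journal_stop(handle);
 */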

void jbd2_journal_free_reserved(handle_t *handle)
{
	journal_t *journal = handle->h_journal;

	WARN_ON(!handle->h_reserved);
	sub_reserved_credits(journal, handle->h_buffer_credits);
	jbd2_free_handle(handle);
}
EXPORT_SYMBOL(jbd2_journal_free_reserved);

/**
 * int jbd2_journal_start_reserved() - start reserved handle
 * @handle: handle to start
 * @type: for handle statistics
 * @line_no: for handle statistics
 *
 * Start handle that has been previously reserved with jbd2_journal_reserve().
 * This attaches @handle to the running transaction (or creates one if there's
 * no transaction running). Unlike jbd2_journal_start() this function cannot
 * block on journal commit, checkpointing, or similar stuff. It can block on
 * memory allocation or frozen journal though.
 *
 * Return 0 on success, non-zero on error - handle is freed in that case.
 */
int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
				unsigned int line_no)
{
	journal_t *journal = handle->h_journal;
	int ret = -EIO;

	if (WARN_ON(!handle->h_reserved)) {
		/* Someone passed in normal handle? Just stop it. */
		jbd2_journal_stop(handle);
		return ret;
	}
	/*
	 * Usefulness of mixing of reserved and unreserved handles is
	 * questionable. So far nobody seems to need it so just error out.
	 */
	if (WARN_ON(current->journal_info)) {
		jbd2_journal_free_reserved(handle);
		return ret;
	}

	handle->h_journal = NULL;
	/*
	 * GFP_NOFS is here because callers are likely from writeback or
	 * similarly constrained call sites
	 */
	ret = start_this_handle(journal, handle, GFP_NOFS);
	if (ret < 0) {
		handle->h_journal = journal;
		jbd2_journal_free_reserved(handle);
		return ret;
	}
	handle->h_type = type;
	handle->h_line_no = line_no;
	return 0;
}
EXPORT_SYMBOL(jbd2_journal_start_reserved);
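
/*
 * Example (illustrative only): the reserved-handle flow.  A caller first
 * obtains a handle with reserved credits via jbd2__journal_start() with
 * rsv_blocks > 0, detaches the reserved handle so that jbd2_journal_stop()
 * on the parent won't free it, and later converts it with
 * jbd2_journal_start_reserved() from a context (such as writeback) that
 * must not block on transaction commit.  Names are hypothetical.
 *
 *	handle = jbd2__journal_start(journal, nblocks, rsv_blocks,
 *				     GFP_NOFS, 0, 0);
 *	rsv_handle = handle->h_rsv_handle;
 *	handle->h_rsv_handle = NULL;
 *	...
 *	err = jbd2_journal_start_reserved(rsv_handle, 0, 0);
 */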

/**
 * int jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee that the allocation will succeed - this is a
 * best-effort only.  The calling process MUST be able to deal cleanly with
 * a failure to extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int result;
	int wanted;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	result = 1;

	read_lock(&journal->j_state_lock);

	/* Don't extend a locked-down transaction! */
	if (transaction->t_state != T_RUNNING) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction not running\n", handle, nblocks);
		goto error_out;
	}

	spin_lock(&transaction->t_handle_lock);
	wanted = atomic_add_return(nblocks,
				   &transaction->t_outstanding_credits);

	if (wanted > journal->j_max_transaction_buffers) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "transaction too large\n", handle, nblocks);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		goto unlock;
	}

	if (wanted + (wanted >> JBD2_CONTROL_BLOCKS_SHIFT) >
	    jbd2_log_space_left(journal)) {
		jbd_debug(3, "denied handle %p %d blocks: "
			  "insufficient log space\n", handle, nblocks);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		goto unlock;
	}

	trace_jbd2_handle_extend(journal->j_fs_dev->bd_dev,
				 transaction->t_tid,
				 handle->h_type, handle->h_line_no,
				 handle->h_buffer_credits,
				 nblocks);

	handle->h_buffer_credits += nblocks;
	handle->h_requested_credits += nblocks;
	result = 0;

	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
	spin_unlock(&transaction->t_handle_lock);
error_out:
	read_unlock(&journal->j_state_lock);
	return result;
}

/**
 * int jbd2_journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 * @gfp_mask: memory allocation flags (for start_this_handle)
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits. We preserve reserved handle if there's any attached to the
 * passed in handle.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	tid_t		tid;
	int		need_to_start, ret;

	/* If we've had an abort of any type, don't even think about
	 * actually doing the restart! */
	if (is_handle_aborted(handle))
		return 0;
	journal = transaction->t_journal;

	/*
	 * First unlink the handle from its current transaction, and start the
	 * commit on that.
	 */
	J_ASSERT(atomic_read(&transaction->t_updates) > 0);
	J_ASSERT(journal_current_handle() == handle);

	read_lock(&journal->j_state_lock);
	spin_lock(&transaction->t_handle_lock);
	atomic_sub(handle->h_buffer_credits,
		   &transaction->t_outstanding_credits);
	if (handle->h_rsv_handle) {
		sub_reserved_credits(journal,
				     handle->h_rsv_handle->h_buffer_credits);
	}
	if (atomic_dec_and_test(&transaction->t_updates))
		wake_up(&journal->j_wait_updates);
	tid = transaction->t_tid;
	spin_unlock(&transaction->t_handle_lock);
	handle->h_transaction = NULL;
	current->journal_info = NULL;

	jbd_debug(2, "restarting handle %p\n", handle);
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);
	if (need_to_start)
		jbd2_log_start_commit(journal, tid);

	rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
	handle->h_buffer_credits = nblocks;
	/*
	 * Restore the original nofs context because the journal restart
	 * is basically the same thing as journal stop and start.
	 * start_this_handle will start a new nofs context.
	 */
	memalloc_nofs_restore(handle->saved_alloc_context);
	ret = start_this_handle(journal, handle, gfp_mask);
	return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);

int jbd2_journal_restart(handle_t *handle, int nblocks)
{
	return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);
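
/*
 * Example (illustrative only): the usual extend-or-restart pattern.  When
 * jbd2_journal_extend() cannot grant more credits (returns > 0), the
 * caller restarts the handle against a new transaction.  The credit count
 * is hypothetical.
 *
 *	err = jbd2_journal_extend(handle, needed);
 *	if (err > 0)
 *		err = jbd2_journal_restart(handle, needed);
 *	if (err)
 *		return err;
 */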

/**
 * void jbd2_journal_lock_updates () - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
	DEFINE_WAIT(wait);

	jbd2_might_wait_for_commit(journal);

	write_lock(&journal->j_state_lock);
	++journal->j_barrier_count;

	/* Wait until there are no reserved handles */
	if (atomic_read(&journal->j_reserved_credits)) {
		write_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_reserved,
			   atomic_read(&journal->j_reserved_credits) == 0);
		write_lock(&journal->j_state_lock);
	}

	/* Wait until there are no running updates */
	while (1) {
		transaction_t *transaction = journal->j_running_transaction;

		if (!transaction)
			break;

		spin_lock(&transaction->t_handle_lock);
		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&transaction->t_updates)) {
			spin_unlock(&transaction->t_handle_lock);
			finish_wait(&journal->j_wait_updates, &wait);
			break;
		}
		spin_unlock(&transaction->t_handle_lock);
		write_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_updates, &wait);
		write_lock(&journal->j_state_lock);
	}
	write_unlock(&journal->j_state_lock);

	/*
	 * We have now established a barrier against other normal updates, but
	 * we also need to barrier against other jbd2_journal_lock_updates() calls
	 * to make sure that we serialise special journal-locked operations
	 * too.
	 */
	mutex_lock(&journal->j_barrier);
}

/**
 * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates (journal_t *journal)
{
	J_ASSERT(journal->j_barrier_count != 0);

	mutex_unlock(&journal->j_barrier);
	write_lock(&journal->j_state_lock);
	--journal->j_barrier_count;
	write_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_transaction_locked);
}
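
/*
 * Example (illustrative only): quiescing the journal around a special
 * operation such as an online resize or remount.
 *
 *	jbd2_journal_lock_updates(journal);
 *	... perform the journal-locked operation ...
 *	jbd2_journal_unlock_updates(journal);
 */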

static void warn_dirty_buffer(struct buffer_head *bh)
{
	printk(KERN_WARNING
	       "JBD2: Spotted dirty metadata buffer (dev = %pg, blocknr = %llu). "
	       "There's a risk of filesystem corruption in case of system "
	       "crash.\n",
	       bh->b_bdev, (unsigned long long)bh->b_blocknr);
}

/* Call t_frozen trigger and copy buffer data into jh->b_frozen_data. */
static void jbd2_freeze_jh_data(struct journal_head *jh)
{
	struct page *page;
	int offset;
	char *source;
	struct buffer_head *bh = jh2bh(jh);

	J_EXPECT_JH(jh, buffer_uptodate(bh), "Possible IO failure.\n");
	page = bh->b_page;
	offset = offset_in_page(bh->b_data);
	source = kmap_atomic(page);
	/* Fire data frozen trigger just before we copy the data */
	jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers);
	memcpy(jh->b_frozen_data, source + offset, bh->b_size);
	kunmap_atomic(source);

	/*
	 * Now that the frozen data is saved off, we need to store any matching
	 * triggers.
	 */
	jh->b_frozen_triggers = jh->b_triggers;
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
			int force_copy)
{
	struct buffer_head *bh;
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int error;
	char *frozen_buffer = NULL;
	unsigned long start_lock, time_lock;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

	JBUFFER_TRACE(jh, "entry");
repeat:
	bh = jh2bh(jh);

	/* @@@ Need to check for errors here at some point. */
	start_lock = jiffies;
	lock_buffer(bh);
	jbd_lock_bh_state(bh);

	/* If it takes too long to lock the buffer, trace it */
	time_lock = jbd2_time_diff(start_lock, jiffies);
	if (time_lock > HZ/10)
		trace_jbd2_lock_buffer_stall(bh->b_bdev->bd_dev,
			jiffies_to_msecs(time_lock));

	/* We now hold the buffer lock so it is safe to query the buffer
	 * state.  Is the buffer dirty?
	 *
	 * If so, there are two possibilities.  The buffer may be
	 * non-journaled, and undergoing a quite legitimate writeback.
	 * Otherwise, it is journaled, and we don't expect dirty buffers
	 * in that state (the buffers should be marked JBD_Dirty
	 * instead.)  So either the IO is being done under our own
	 * control and this is a bug, or it's a third party IO such as
	 * dump(8) (which may leave the buffer scheduled for read ---
	 * ie. locked but not dirty) or tune2fs (which may actually have
	 * the buffer dirtied, ugh.)  */

	if (buffer_dirty(bh)) {
		/*
		 * First question: is this buffer already part of the current
		 * transaction or the existing committing transaction?
		 */
		if (jh->b_transaction) {
			J_ASSERT_JH(jh,
				jh->b_transaction == transaction ||
				jh->b_transaction ==
					journal->j_committing_transaction);
			if (jh->b_next_transaction)
				J_ASSERT_JH(jh, jh->b_next_transaction ==
							transaction);
			warn_dirty_buffer(bh);
		}
		/*
		 * In any case we need to clean the dirty flag and we must
		 * do it under the buffer lock to be sure we don't race
		 * with running write-out.
		 */
		JBUFFER_TRACE(jh, "Journalling dirty buffer");
		clear_buffer_dirty(bh);
		set_buffer_jbddirty(bh);
	}

	unlock_buffer(bh);

	error = -EROFS;
	if (is_handle_aborted(handle)) {
		jbd_unlock_bh_state(bh);
		goto out;
	}
	error = 0;

	/*
	 * The buffer is already part of this transaction if b_transaction or
	 * b_next_transaction points to it
	 */
	if (jh->b_transaction == transaction ||
	    jh->b_next_transaction == transaction)
		goto done;

	/*
	 * this is the first time this transaction is touching this buffer,
	 * reset the modified flag
	 */
	jh->b_modified = 0;

	/*
	 * If the buffer is not journaled right now, we need to make sure it
	 * doesn't get written to disk before the caller actually commits the
	 * new data
	 */
	if (!jh->b_transaction) {
		JBUFFER_TRACE(jh, "no transaction");
		J_ASSERT_JH(jh, !jh->b_next_transaction);
		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		/*
		 * Make sure all stores to jh (b_modified, b_frozen_data) are
		 * visible before attaching it to the running transaction.
		 * Paired with barrier in jbd2_write_access_granted()
		 */
		smp_wmb();
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
		goto done;
	}
	/*
	 * If there is already a copy-out version of this buffer, then we don't
	 * need to make another one
	 */
	if (jh->b_frozen_data) {
		JBUFFER_TRACE(jh, "has frozen data");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		goto attach_next;
	}

	JBUFFER_TRACE(jh, "owned by older transaction");
	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, jh->b_transaction == journal->j_committing_transaction);

	/*
	 * There is one case we have to be very careful about.  If the
	 * committing transaction is currently writing this buffer out to disk
	 * and has NOT made a copy-out, then we cannot modify the buffer
	 * contents at all right now.  The essence of copy-out is that it is
	 * the extra copy, not the primary copy, which gets journaled.  If the
	 * primary copy is already going to disk then we cannot do copy-out
	 * here.
	 */
	if (buffer_shadow(bh)) {
		JBUFFER_TRACE(jh, "on shadow: sleep");
		jbd_unlock_bh_state(bh);
		wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE);
		goto repeat;
	}

	/*
	 * Only do the copy if the currently-owning transaction still needs it.
	 * If buffer isn't on BJ_Metadata list, the committing transaction is
	 * past that stage (here we use the fact that BH_Shadow is set under
	 * bh_state lock together with refiling to BJ_Shadow list and at this
	 * point we know the buffer doesn't have BH_Shadow set).
	 *
	 * Subtle point, though: if this is a get_undo_access, then we will be
	 * relying on the frozen_data to contain the new value of the
	 * committed_data record after the transaction, so we HAVE to force the
	 * frozen_data copy in that case.
	 */
	if (jh->b_jlist == BJ_Metadata || force_copy) {
		JBUFFER_TRACE(jh, "generate frozen data");
		if (!frozen_buffer) {
			JBUFFER_TRACE(jh, "allocate memory for buffer");
			jbd_unlock_bh_state(bh);
			frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size,
						   GFP_NOFS | __GFP_NOFAIL);
			goto repeat;
		}
		jh->b_frozen_data = frozen_buffer;
		frozen_buffer = NULL;
		jbd2_freeze_jh_data(jh);
	}
attach_next:
	/*
	 * Make sure all stores to jh (b_modified, b_frozen_data) are visible
	 * before attaching it to the running transaction. Paired with barrier
	 * in jbd2_write_access_granted()
	 */
	smp_wmb();
	jh->b_next_transaction = transaction;

done:
	jbd_unlock_bh_state(bh);

	/*
	 * If we are about to journal a buffer, then any revoke pending on it is
	 * no longer valid
	 */
	jbd2_journal_cancel_revoke(handle, jh);

out:
	if (unlikely(frozen_buffer))	/* It's usually NULL */
		jbd2_free(frozen_buffer, bh->b_size);

	JBUFFER_TRACE(jh, "exit");
	return error;
}

/* Fast check whether buffer is already attached to the required transaction */
static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
				      bool undo)
{
	struct journal_head *jh;
	bool ret = false;

	/* Dirty buffers require special handling... */
	if (buffer_dirty(bh))
		return false;

	/*
	 * RCU protects us from dereferencing freed pages. So the checks we do
	 * are guaranteed not to oops. However the jh slab object can get freed
	 * & reallocated while we work with it. So we have to be careful. When
	 * we see jh attached to the running transaction, we know it must stay
	 * so until the transaction is committed. Thus jh won't be freed and
	 * will be attached to the same bh while we run.  However it can
	 * happen jh gets freed, reallocated, and attached to the transaction
	 * just after we get pointer to it from bh. So we have to be careful
	 * and recheck jh still belongs to our bh before we return success.
	 */
	rcu_read_lock();
	if (!buffer_jbd(bh))
		goto out;
	/* This should be bh2jh() but that doesn't work with inline functions */
	jh = READ_ONCE(bh->b_private);
	if (!jh)
		goto out;
	/* For undo access buffer must have data copied */
	if (undo && !jh->b_committed_data)
		goto out;
	if (jh->b_transaction != handle->h_transaction &&
	    jh->b_next_transaction != handle->h_transaction)
		goto out;
	/*
	 * There are two reasons for the barrier here:
	 * 1) Make sure to fetch b_bh after we did previous checks so that we
	 * detect when jh went through free, realloc, attach to transaction
	 * while we were checking. Paired with implicit barrier in that path.
	 * 2) So that access to bh done after jbd2_write_access_granted()
	 * doesn't get reordered and see inconsistent state of concurrent
	 * do_get_write_access().
	 */
	smp_mb();
	if (unlikely(jh->b_bh != bh))
		goto out;
	ret = true;
out:
	rcu_read_unlock();
	return ret;
}

/**
 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 *
 * Returns: error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're ``write()ing`` a buffer which is also part of a shared mapping.
 */

int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
	struct journal_head *jh;
	int rc;

	if (jbd2_write_access_granted(handle, bh, false))
		return 0;

	jh = jbd2_journal_add_journal_head(bh);
	/* We do not want to get caught playing with fields which the
	 * log thread also manipulates.  Make sure that the buffer
	 * completes any outstanding IO before proceeding. */
	rc = do_get_write_access(handle, jh, 0);
	jbd2_journal_put_journal_head(jh);
	return rc;
}
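
/*
 * Example (illustrative only): the write-access protocol.  The buffer's
 * contents may only be modified between jbd2_journal_get_write_access()
 * and jbd2_journal_dirty_metadata(); modifying first and asking later
 * would race with a concurrent commit's copy-out of the old contents.
 *
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (err)
 *		return err;
 *	... update the on-disk structure in bh->b_data ...
 *	err = jbd2_journal_dirty_metadata(handle, bh);
 */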

/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * int jbd2_journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh = jbd2_journal_add_journal_head(bh);
	int err;

	jbd_debug(5, "journal_head %p\n", jh);
	err = -EROFS;
	if (is_handle_aborted(handle))
		goto out;
	journal = transaction->t_journal;
	err = 0;

	JBUFFER_TRACE(jh, "entry");
	/*
	 * The buffer may already belong to this transaction due to pre-zeroing
	 * in the filesystem's new_block code.  It may also be on the previous,
	 * committing transaction's lists, but it HAS to be in Forget state in
	 * that case: the transaction must have deleted the buffer for it to be
	 * reused here.
	 */
	jbd_lock_bh_state(bh);
	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
		jh->b_transaction == NULL ||
		(jh->b_transaction == journal->j_committing_transaction &&
			  jh->b_jlist == BJ_Forget)));

	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

	if (jh->b_transaction == NULL) {
		/*
		 * Previous jbd2_journal_forget() could have left the buffer
		 * with jbddirty bit set because it was being committed. When
		 * the commit finished, we've filed the buffer for
		 * checkpointing and marked it dirty. Now we are reallocating
		 * the buffer so the transaction freeing it must have
		 * committed and so it's safe to clear the dirty bit.
		 */
		clear_buffer_dirty(jh2bh(jh));
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		spin_lock(&journal->j_list_lock);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
	} else if (jh->b_transaction == journal->j_committing_transaction) {
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "set next transaction");
		spin_lock(&journal->j_list_lock);
		jh->b_next_transaction = transaction;
		spin_unlock(&journal->j_list_lock);
	}
	jbd_unlock_bh_state(bh);

	/*
	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
	 * blocks which contain freed but then revoked metadata.  We need
	 * to cancel the revoke in case we end up freeing it yet again
	 * and the reallocating as data - this would cause a second revoke,
	 * which hits an assertion error.
	 */
	JBUFFER_TRACE(jh, "cancelling revoke");
	jbd2_journal_cancel_revoke(handle, jh);
out:
	jbd2_journal_put_journal_head(jh);
	return err;
}

/**
 * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
	int err;
	struct journal_head *jh;
	char *committed_data = NULL;

	if (jbd2_write_access_granted(handle, bh, true))
		return 0;

	jh = jbd2_journal_add_journal_head(bh);
	JBUFFER_TRACE(jh, "entry");

	/*
	 * Do this first --- it can drop the journal lock, so we want to
	 * make sure that obtaining the committed_data is done
	 * atomically wrt. completion of any outstanding commits.
	 */
	err = do_get_write_access(handle, jh, 1);
	if (err)
		goto out;

repeat:
	if (!jh->b_committed_data)
		committed_data = jbd2_alloc(jh2bh(jh)->b_size,
					    GFP_NOFS|__GFP_NOFAIL);

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data) {
		/* Copy out the current buffer contents into the
		 * preserved, committed copy. */
		JBUFFER_TRACE(jh, "generate b_committed data");
		if (!committed_data) {
			jbd_unlock_bh_state(bh);
			goto repeat;
		}

		jh->b_committed_data = committed_data;
		committed_data = NULL;
		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
	}
	jbd_unlock_bh_state(bh);
out:
	jbd2_journal_put_journal_head(jh);
	if (unlikely(committed_data))
		jbd2_free(committed_data, bh->b_size);
	return err;
}
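
/*
 * Example (illustrative only): undo access is used where the committed
 * state must stay observable, classically for block bitmap updates when
 * freeing blocks.  The bitmap_bh name is hypothetical.
 *
 *	err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *	if (err)
 *		return err;
 *	... clear bits in bitmap_bh->b_data ...
 *	err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 */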

/**
 * void jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
			       struct jbd2_buffer_trigger_type *type)
{
	struct journal_head *jh = jbd2_journal_grab_journal_head(bh);

	if (WARN_ON(!jh))
		return;
	jh->b_triggers = type;
	jbd2_journal_put_journal_head(jh);
}
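
/*
 * Example (illustrative only): registering a trigger so a checksum can be
 * recomputed just before the block's frozen copy is written to the
 * journal, in the way ocfs2 uses triggers.  The callback and the struct
 * instance are hypothetical.
 *
 *	static void my_frozen(struct jbd2_buffer_trigger_type *type,
 *			      struct buffer_head *bh, void *data, size_t size)
 *	{
 *		... recompute the checksum over data[0..size) ...
 *	}
 *
 *	static struct jbd2_buffer_trigger_type my_triggers = {
 *		.t_frozen = my_frozen,
 *	};
 *
 *	jbd2_journal_set_triggers(bh, &my_triggers);
 */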

void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
				struct jbd2_buffer_trigger_type *triggers)
{
	struct buffer_head *bh = jh2bh(jh);

	if (!triggers || !triggers->t_frozen)
		return;

	triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}

void jbd2_buffer_abort_trigger(struct journal_head *jh,
			       struct jbd2_buffer_trigger_type *triggers)
{
	if (!triggers || !triggers->t_abort)
		return;

	triggers->t_abort(triggers, jh2bh(jh));
}

/**
 * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * Mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer must have previously had jbd2_journal_get_write_access()
 * called so that it has a valid journal_head attached to the buffer
 * head.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh;
	int ret = 0;

	if (is_handle_aborted(handle))
		return -EROFS;
	if (!buffer_jbd(bh)) {
		ret = -EUCLEAN;
		goto out;
	}
	/*
	 * We don't grab jh reference here since the buffer must be part
	 * of the running transaction.
	 */
	jh = bh2jh(bh);
	/*
	 * This and the following assertions are unreliable since we may see jh
	 * in inconsistent state unless we grab bh_state lock. But this is
	 * crucial to catch bugs so let's do a reliable check until the
	 * lockless handling is fully proven.
	 */
	if (jh->b_transaction != transaction &&
	    jh->b_next_transaction != transaction) {
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_next_transaction == transaction);
		jbd_unlock_bh_state(bh);
	}
	if (jh->b_modified == 1) {
		/* If it's in our transaction it must be in BJ_Metadata list. */
		if (jh->b_transaction == transaction &&
		    jh->b_jlist != BJ_Metadata) {
			jbd_lock_bh_state(bh);
			if (jh->b_transaction == transaction &&
			    jh->b_jlist != BJ_Metadata)
				pr_err("JBD2: assertion failure: h_type=%u "
				       "h_line_no=%u block_no=%llu jlist=%u\n",
				       handle->h_type, handle->h_line_no,
				       (unsigned long long) bh->b_blocknr,
				       jh->b_jlist);
			J_ASSERT_JH(jh, jh->b_transaction != transaction ||
					jh->b_jlist == BJ_Metadata);
			jbd_unlock_bh_state(bh);
		}
		goto out;
	}

	journal = transaction->t_journal;
	jbd_debug(5, "journal_head %p\n", jh);
	JBUFFER_TRACE(jh, "entry");

	jbd_lock_bh_state(bh);

	if (jh->b_modified == 0) {
		/*
		 * This buffer's got modified and becoming part
		 * of the transaction. This needs to be done
		 * once a transaction -bzzz
		 */
		if (handle->h_buffer_credits <= 0) {
			ret = -ENOSPC;
			goto out_unlock_bh;
		}
		jh->b_modified = 1;
		handle->h_buffer_credits--;
	}

	/*
	 * fastpath, to avoid expensive locking.  If this buffer is already
	 * on the running transaction's metadata list there is nothing to do.
	 * Nobody can take it off again because there is a handle open.
	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
	 * result in this test being false, so we go in and take the locks.
	 */
	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
		JBUFFER_TRACE(jh, "fastpath");
		if (unlikely(jh->b_transaction !=
			     journal->j_running_transaction)) {
			printk(KERN_ERR "JBD2: %s: "
			       "jh->b_transaction (%llu, %p, %u) != "
			       "journal->j_running_transaction (%p, %u)\n",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       jh->b_transaction,
			       jh->b_transaction ? jh->b_transaction->t_tid : 0,
			       journal->j_running_transaction,
			       journal->j_running_transaction ?
			       journal->j_running_transaction->t_tid : 0);
			ret = -EINVAL;
		}
		goto out_unlock_bh;
	}

	set_buffer_jbddirty(bh);

	/*
	 * Metadata already on the current transaction list doesn't
	 * need to be filed.  Metadata on another transaction's list must
	 * be committing, and will be refiled once the commit completes:
	 * leave it alone for now.
	 */
	if (jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "already on other transaction");
		if (unlikely(((jh->b_transaction !=
			       journal->j_committing_transaction)) ||
			     (jh->b_next_transaction != transaction))) {
			printk(KERN_ERR "jbd2_journal_dirty_metadata: %s: "
			       "bad jh for block %llu: "
			       "transaction (%p, %u), "
			       "jh->b_transaction (%p, %u), "
			       "jh->b_next_transaction (%p, %u), jlist %u\n",
			       journal->j_devname,
			       (unsigned long long) bh->b_blocknr,
			       transaction, transaction->t_tid,
			       jh->b_transaction,
			       jh->b_transaction ?
			       jh->b_transaction->t_tid : 0,
			       jh->b_next_transaction,
			       jh->b_next_transaction ?
			       jh->b_next_transaction->t_tid : 0,
			       jh->b_jlist);
			WARN_ON(1);
			ret = -EINVAL;
		}
		/* And this case is illegal: we can't reuse another
		 * transaction's data buffer, ever. */
		goto out_unlock_bh;
	}

	/* That test should have eliminated the following case: */
	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

	JBUFFER_TRACE(jh, "file as BJ_Metadata");
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, transaction, BJ_Metadata);
	spin_unlock(&journal->j_list_lock);
out_unlock_bh:
	jbd_unlock_bh_state(bh);
out:
	JBUFFER_TRACE(jh, "exit");
	return ret;
}

/**
 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	struct journal_head *jh;
	int drop_reserve = 0;
	int err = 0;
	int was_modified = 0;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	BUFFER_TRACE(bh, "entry");

	jbd_lock_bh_state(bh);

	if (!buffer_jbd(bh))
		goto not_jbd;
	jh = bh2jh(bh);

	/* Critical error: attempting to delete a bitmap buffer, maybe?
	 * Don't do any jbd operations, and return an error. */
	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
			 "inconsistent data on disk")) {
		err = -EIO;
		goto not_jbd;
	}

	/* keep track of whether or not this transaction modified us */
	was_modified = jh->b_modified;

	/*
	 * The buffer's going from the transaction, we must drop
	 * all references -bzzz
	 */
	jh->b_modified = 0;

	if (jh->b_transaction == transaction) {
		J_ASSERT_JH(jh, !jh->b_frozen_data);

		/* If we are forgetting a buffer which is already part
		 * of this transaction, then we can just drop it from
		 * the transaction immediately. */
		clear_buffer_dirty(bh);
		clear_buffer_jbddirty(bh);

		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

		/*
		 * we only want to drop a reference if this transaction
		 * modified the buffer
		 */
		if (was_modified)
			drop_reserve = 1;

		/*
		 * We are no longer going to journal this buffer.
		 * However, the commit of this transaction is still
		 * important to the buffer: the delete that we are now
		 * processing might obsolete an old log entry, so by
		 * committing, we can satisfy the buffer's checkpoint.
		 *
		 * So, if we have a checkpoint on the buffer, we should
		 * now refile the buffer on our BJ_Forget list so that
		 * we know to remove the checkpoint after we commit.
		 */
		spin_lock(&journal->j_list_lock);
		if (jh->b_cp_transaction) {
			__jbd2_journal_temp_unlink_buffer(jh);
			__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		} else {
			__jbd2_journal_unfile_buffer(jh);
			if (!buffer_jbd(bh)) {
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				__bforget(bh);
				goto drop;
			}
		}
		spin_unlock(&journal->j_list_lock);
	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
		/* However, if the buffer is still owned by a prior
		 * (committing) transaction, we can't drop it yet... */
		JBUFFER_TRACE(jh, "belongs to older transaction");
		/* ... but we CAN drop it from the new transaction if we
		 * have also modified it since the original commit. */

		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction == transaction);
			spin_lock(&journal->j_list_lock);
			jh->b_next_transaction = NULL;
			spin_unlock(&journal->j_list_lock);

			/*
			 * only drop a reference if this transaction modified
			 * the buffer
			 */
			if (was_modified)
				drop_reserve = 1;
		}
	}

not_jbd:
	jbd_unlock_bh_state(bh);
	__brelse(bh);
drop:
	if (drop_reserve) {
		/* no need to reserve log space for this block -bzzz */
		handle->h_buffer_credits++;
	}
	return err;
}

/**
 * int jbd2_journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * jbd2_journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a jbd2_journal_abort has been executed since the
 * transaction began.
 */
int jbd2_journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;
	int err = 0, wait_for_commit = 0;
	tid_t tid;
	pid_t pid;

	if (!transaction) {
		/*
		 * Handle is already detached from the transaction so
		 * there is nothing to do other than decrease a refcount,
		 * or free the handle if refcount drops to zero
		 */
		if (--handle->h_ref > 0) {
			jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
				  handle->h_ref);
			return err;
		} else {
			if (handle->h_rsv_handle)
				jbd2_free_handle(handle->h_rsv_handle);
			goto free_and_exit;
		}
	}
	journal = transaction->t_journal;

	J_ASSERT(journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else
		J_ASSERT(atomic_read(&transaction->t_updates) > 0);

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);
	trace_jbd2_handle_stats(journal->j_fs_dev->bd_dev,
				transaction->t_tid,
				handle->h_type, handle->h_line_no,
				jiffies - handle->h_start_jiffies,
				handle->h_sync, handle->h_requested_credits,
				(handle->h_requested_credits -
				 handle->h_buffer_credits));

	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this
	 * transaction.  Keep doing that while new threads continue to
	 * arrive.  It doesn't cost much - we're about to run a commit
	 * and sleep on IO anyway.  Speeds up many-threaded, many-dir
	 * operations by 30x or more...
	 *
	 * We try and optimize the sleep time against what the
	 * underlying disk can do, instead of having a static sleep
	 * time.  This is useful for the case where our storage is so
	 * fast that it is more optimal to go ahead and force a flush
	 * and wait for the transaction to be committed than it is to
	 * wait for an arbitrary amount of time for new writers to
	 * join the transaction.  We achieve this by measuring how
	 * long it takes to commit a transaction, and compare it with
	 * how long this transaction has been running, and if run time
	 * < commit time then we sleep for the delta and commit.  This
	 * greatly helps super fast disks that would see slowdowns as
	 * more threads started doing fsyncs.
	 *
	 * But don't do this if this process was the most recent one
	 * to perform a synchronous write.  We do this to detect the
	 * case where a single process is doing a stream of sync
	 * writes.  No point in waiting for joiners in that case.
	 *
	 * Setting max_batch_time to 0 disables this completely.
	 */
	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid &&
	    journal->j_max_batch_time) {
		u64 commit_time, trans_time;

		journal->j_last_sync_writer = pid;

		read_lock(&journal->j_state_lock);
		commit_time = journal->j_average_commit_time;
		read_unlock(&journal->j_state_lock);

		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
						   transaction->t_start_time));

		commit_time = max_t(u64, commit_time,
				    1000*journal->j_min_batch_time);
		commit_time = min_t(u64, commit_time,
				    1000*journal->j_max_batch_time);

		if (trans_time < commit_time) {
			ktime_t expires = ktime_add_ns(ktime_get(),
						       commit_time);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
		}
	}

	if (handle->h_sync)
		transaction->t_synchronous_commit = 1;
	current->journal_info = NULL;
	atomic_sub(handle->h_buffer_credits,
		   &transaction->t_outstanding_credits);

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the current
	 * transaction is occupying too much of the log, or if the
	 * transaction is too old now.
	 */
	if (handle->h_sync ||
	    (atomic_read(&transaction->t_outstanding_credits) >
	     journal->j_max_transaction_buffers) ||
	    time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */

		jbd_debug(2, "transaction too old, requesting commit for "
					"handle %p\n", handle);
		/* This is non-blocking */
		jbd2_log_start_commit(journal, transaction->t_tid);

		/*
		 * Special case: JBD2_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			wait_for_commit = 1;
	}

	/*
	 * Once we drop t_updates, if it goes to zero the transaction
	 * could start committing on us and eventually disappear.  So
	 * once we do this, we must not dereference transaction
	 * pointer again.
	 */
	tid = transaction->t_tid;
	if (atomic_dec_and_test(&transaction->t_updates)) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);

	if (wait_for_commit)
		err = jbd2_log_wait_commit(journal, tid);

	if (handle->h_rsv_handle)
		jbd2_journal_free_reserved(handle->h_rsv_handle);
free_and_exit:
	/*
	 * Scope of the GFP_NOFS context is over here and so we can restore the
	 * original alloc context.
	 */
	memalloc_nofs_restore(handle->saved_alloc_context);
	jbd2_free_handle(handle);
	return err;
}
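
/*
 * Example (illustrative only): an fsync-style caller marks the handle
 * synchronous before stopping it, so jbd2_journal_stop() starts a commit
 * and (batching permitting) waits for it to complete.
 *
 *	handle->h_sync = 1;
 *	err = jbd2_journal_stop(handle);
 */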

/*
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 *
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */

static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (!*list) {
		jh->b_tnext = jh->b_tprev = jh;
		*list = jh;
	} else {
		/* Insert at the tail of the list to preserve order */
		struct journal_head *first = *list, *last = first->b_tprev;
		jh->b_tprev = last;
		jh->b_tnext = first;
		last->b_tnext = first->b_tprev = jh;
	}
}

/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with j_list_lock held, and the journal may not be locked.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */

static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (*list == jh) {
		*list = jh->b_tnext;
		if (*list == jh)
			*list = NULL;
	}
	jh->b_tprev->b_tnext = jh->b_tnext;
	jh->b_tnext->b_tprev = jh->b_tprev;
}

/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_buffers, t_forget, t_shadow_list, t_log_list or
 * t_reserved_list.  If the caller is holding onto a copy of one of these
 * pointers, it could go bad.  Generally the caller needs to re-read the
 * pointer from the transaction_t.
 *
 * Called under j_list_lock.
 */
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
{
	struct journal_head **list = NULL;
	transaction_t *transaction;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	transaction = jh->b_transaction;
	if (transaction)
		assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != NULL);

	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_del_buffer(list, jh);
	jh->b_jlist = BJ_None;
	if (transaction && is_journal_aborted(transaction->t_journal))
		clear_buffer_jbddirty(bh);
	else if (test_clear_buffer_jbddirty(bh))
		mark_buffer_dirty(bh);	/* Expose it to the VM */
}

/*
 * Remove buffer from all transactions.
 *
 * Called with bh_state lock and j_list_lock
 *
 * jh and bh may be already freed when this function returns.
 */
static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
{
	__jbd2_journal_temp_unlink_buffer(jh);
	jh->b_transaction = NULL;
	jbd2_journal_put_journal_head(jh);
}

void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	/* Get reference so that buffer cannot be freed before we unlock it */
	get_bh(bh);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_unfile_buffer(jh);
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	__brelse(bh);
}

/*
 * Called from jbd2_journal_try_to_free_buffers().
 *
 * Called under jbd_lock_bh_state(bh)
 */
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
	struct journal_head *jh;

	jh = bh2jh(bh);

	if (buffer_locked(bh) || buffer_dirty(bh))
		goto out;

	if (jh->b_next_transaction != NULL || jh->b_transaction != NULL)
		goto out;

	spin_lock(&journal->j_list_lock);
	if (jh->b_cp_transaction != NULL) {
		/* written-back checkpointed metadata buffer */
		JBUFFER_TRACE(jh, "remove from checkpoint list");
		__jbd2_journal_remove_checkpoint(jh);
	}
	spin_unlock(&journal->j_list_lock);
out:
	return;
}

/**
 * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: to try and free
 * @gfp_mask: we use the mask to detect how hard we should try to release
 * buffers. If __GFP_DIRECT_RECLAIM and __GFP_FS are set, we wait for commit
 * code to release the buffers.
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat. We aren't protected by the
 * BKL here. We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
 * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * Return 0 on failure, 1 on success
 */
int jbd2_journal_try_to_free_buffers(journal_t *journal,
				struct page *page, gfp_t gfp_mask)
{
	struct buffer_head *head;
	struct buffer_head *bh;
	int ret = 0;

	J_ASSERT(PageLocked(page));

	head = page_buffers(page);
	bh = head;
	do {
		struct journal_head *jh;

		/*
		 * We take our own ref against the journal_head here to avoid
		 * having to add tons of locking around each instance of
		 * jbd2_journal_put_journal_head().
		 */
		jh = jbd2_journal_grab_journal_head(bh);
		if (!jh)
			continue;

		jbd_lock_bh_state(bh);
		__journal_try_to_free_buffer(journal, bh);
		jbd2_journal_put_journal_head(jh);
		jbd_unlock_bh_state(bh);
		if (buffer_jbd(bh))
			goto busy;
	} while ((bh = bh->b_this_page) != head);

	ret = try_to_free_buffers(page);

busy:
	return ret;
}
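
/*
 * Illustrative sketch (not from the original file): a filesystem would
 * typically call this from its ->releasepage() implementation; the helper
 * myfs_get_journal() below is hypothetical:
 *
 *	static int myfs_releasepage(struct page *page, gfp_t wait)
 *	{
 *		journal_t *journal = myfs_get_journal(page->mapping->host);
 *
 *		if (PageChecked(page))
 *			return 0;
 *		return jbd2_journal_try_to_free_buffers(journal, page, wait);
 *	}
 */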

/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jbd_lock_bh_state(bh).
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
	int may_free = 1;
	struct buffer_head *bh = jh2bh(jh);

	if (jh->b_cp_transaction) {
		JBUFFER_TRACE(jh, "on running+cp transaction");
		__jbd2_journal_temp_unlink_buffer(jh);
		/*
		 * We don't want to write the buffer anymore, clear the
		 * bit so that we don't confuse checks in
		 * __journal_file_buffer
		 */
		clear_buffer_dirty(bh);
		__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
		may_free = 0;
	} else {
		JBUFFER_TRACE(jh, "on running transaction");
		__jbd2_journal_unfile_buffer(jh);
	}
	return may_free;
}

/*
 * jbd2_journal_invalidatepage
 *
 * This code is tricky.  It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage on the
 * data.
 *
 *  This is done in ext3 by defining an ext3_setattr method which
 *  updates i_size before truncate gets going.  By maintaining this
 *  invariant, we can be sure that it is safe to throw away any buffers
 *  attached to the current transaction: once the transaction commits,
 *  we know that the data will not be needed.
 *
 *  Note however that we can *not* throw away data belonging to the
 *  previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction.
 *
 *  The bitmap committed_data images guarantee this: any block which is
 *  allocated in one transaction and removed in the next will be marked
 *  as in-use in the committed_data bitmap, so cannot be reused until
 *  the next transaction to delete the block commits.  This means that
 *  leaving committing buffers dirty is quite safe: the disk blocks
 *  cannot be reallocated to a different file and so buffer aliasing is
 *  not possible.
 *
 * The above applies mainly to ordered data mode.  In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode.  --sct
 */

/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here.  Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
				int partial_page)
{
	transaction_t *transaction;
	struct journal_head *jh;
	int may_free = 1;

	BUFFER_TRACE(bh, "entry");

	/*
	 * It is safe to proceed here without the j_list_lock because the
	 * buffers cannot be stolen by try_to_free_buffers as long as we are
	 * holding the page lock. --sct
	 */

	if (!buffer_jbd(bh))
		goto zap_buffer_unlocked;

	/* OK, we have data buffer in journaled mode */
	write_lock(&journal->j_state_lock);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	jh = jbd2_journal_grab_journal_head(bh);
	if (!jh)
		goto zap_buffer_no_jh;

	/*
	 * We cannot remove the buffer from checkpoint lists until the
	 * transaction adding inode to orphan list (let's call it T)
	 * is committed.  Otherwise if the transaction changing the
	 * buffer would be cleaned from the journal before T is
	 * committed, a crash will cause that the correct contents of
	 * the buffer will be lost.  On the other hand we have to
	 * clear the buffer dirty bit at latest at the moment when the
	 * transaction marking the buffer as freed in the filesystem
	 * structures is committed because from that moment on the
	 * block can be reallocated and used by a different page.
	 * Since the block hasn't been freed yet but the inode has
	 * already been added to orphan list, it is safe for us to add
	 * the buffer to BJ_Forget list of the newest transaction.
	 *
	 * Also we have to clear buffer_mapped flag of a truncated buffer
	 * because the buffer_head may be attached to the page straddling
	 * i_size (can happen only when blocksize < pagesize) and thus the
	 * buffer_head can be reused when the file is extended again. So we end
	 * up keeping around invalidated buffers attached to transactions'
	 * BJ_Forget list just to stop checkpointing code from cleaning up
	 * the transaction this buffer was modified in.
	 */
	transaction = jh->b_transaction;
	if (transaction == NULL) {
		/* First case: not on any transaction.  If it
		 * has no checkpoint link, then we can zap it:
		 * it's a writeback-mode buffer so we don't care
		 * if it hits disk safely. */
		if (!jh->b_cp_transaction) {
			JBUFFER_TRACE(jh, "not on any transaction: zap");
			goto zap_buffer;
		}

		if (!buffer_dirty(bh)) {
			/* bdflush has written it.  We can drop it now */
			__jbd2_journal_remove_checkpoint(jh);
			goto zap_buffer;
		}

		/* OK, it must be in the journal but still not
		 * written fully to disk: it's metadata or
		 * journaled data... */

		if (journal->j_running_transaction) {
			/* ... and once the current transaction has
			 * committed, the buffer won't be needed any
			 * longer. */
			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
			may_free = __dispose_buffer(jh,
					journal->j_running_transaction);
			goto zap_buffer;
		} else {
			/* There is no currently-running transaction. So the
			 * orphan record which we wrote for this file must have
			 * passed into commit.  We must attach this buffer to
			 * the committing transaction, if it exists. */
			if (journal->j_committing_transaction) {
				JBUFFER_TRACE(jh, "give to committing trans");
				may_free = __dispose_buffer(jh,
					journal->j_committing_transaction);
				goto zap_buffer;
			} else {
				/* The orphan record's transaction has
				 * committed.  We can cleanse this buffer */
				clear_buffer_jbddirty(bh);
				__jbd2_journal_remove_checkpoint(jh);
				goto zap_buffer;
			}
		}
	} else if (transaction == journal->j_committing_transaction) {
		JBUFFER_TRACE(jh, "on committing transaction");
		/*
		 * The buffer is committing, we simply cannot touch
		 * it. If the page is straddling i_size we have to wait
		 * for commit and try again.
		 */
		if (partial_page) {
			jbd2_journal_put_journal_head(jh);
			spin_unlock(&journal->j_list_lock);
			jbd_unlock_bh_state(bh);
			write_unlock(&journal->j_state_lock);
			return -EBUSY;
		}
		/*
		 * OK, buffer won't be reachable after truncate. We just set
		 * j_next_transaction to the running transaction (if there is
		 * one) and mark buffer as freed so that commit code knows it
		 * should clear dirty bits when it is done with the buffer.
		 */
		set_buffer_freed(bh);
		if (journal->j_running_transaction && buffer_jbddirty(bh))
			jh->b_next_transaction = journal->j_running_transaction;
		jbd2_journal_put_journal_head(jh);
		spin_unlock(&journal->j_list_lock);
		jbd_unlock_bh_state(bh);
		write_unlock(&journal->j_state_lock);
		return 0;
	} else {
		/* Good, the buffer belongs to the running transaction.
		 * We are writing our own transaction's data, not any
		 * previous one's, so it is safe to throw it away
		 * (remember that we expect the filesystem to have set
		 * i_size already for this truncate so recovery will not
		 * expose the disk blocks we are discarding here.) */
		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
		JBUFFER_TRACE(jh, "on running transaction");
		may_free = __dispose_buffer(jh, transaction);
	}

zap_buffer:
	/*
	 * This is tricky. Although the buffer is truncated, it may be reused
	 * if blocksize < pagesize and it is attached to the page straddling
	 * EOF. Since the buffer might have been added to BJ_Forget list of the
	 * running transaction, journal_get_write_access() won't clear
	 * b_modified and credit accounting gets confused. So clear b_modified
	 * here.
	 */
	jh->b_modified = 0;
	jbd2_journal_put_journal_head(jh);
zap_buffer_no_jh:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	write_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
	clear_buffer_dirty(bh);
	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
	bh->b_bdev = NULL;
	return may_free;
}

/**
 * void jbd2_journal_invalidatepage()
 * @journal: journal to use for flush...
 * @page:    page to flush
 * @offset:  start of the range to invalidate
 * @length:  length of the range to invalidate
 *
 * Reap page buffers containing data in the specified range of the page.
 * Can return -EBUSY if buffers are part of the committing transaction and
 * the page is straddling i_size. Caller then has to wait for current commit
 * and try again.
 */
int jbd2_journal_invalidatepage(journal_t *journal,
				struct page *page,
				unsigned int offset,
				unsigned int length)
{
	struct buffer_head *head, *bh, *next;
	unsigned int stop = offset + length;
	unsigned int curr_off = 0;
	int partial_page = (offset || length < PAGE_SIZE);
	int may_free = 1;
	int ret = 0;

	if (!PageLocked(page))
		BUG();
	if (!page_has_buffers(page))
		return 0;

	BUG_ON(stop > PAGE_SIZE || stop < length);

	/* We will potentially be playing with lists other than just the
	 * data lists (especially for journaled data mode), so be
	 * cautious in our locking. */

	head = bh = page_buffers(page);
	do {
		unsigned int next_off = curr_off + bh->b_size;
		next = bh->b_this_page;

		if (next_off > stop)
			return 0;

		if (offset <= curr_off) {
			/* This block is wholly outside the truncation point */
			lock_buffer(bh);
			ret = journal_unmap_buffer(journal, bh, partial_page);
			unlock_buffer(bh);
			if (ret < 0)
				return ret;
			may_free &= ret;
		}
		curr_off = next_off;
		bh = next;

	} while (bh != head);

	if (!partial_page) {
		if (may_free && try_to_free_buffers(page))
			J_ASSERT(!page_has_buffers(page));
	}
	return 0;
}
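
/*
 * Illustrative sketch (not from the original file): a journalling
 * filesystem's ->invalidatepage() can forward to this helper and, on
 * -EBUSY, wait for the committing transaction before retrying; the retry
 * helper named below is hypothetical:
 *
 *	ret = jbd2_journal_invalidatepage(journal, page, offset, length);
 *	if (ret == -EBUSY)
 *		ret = myfs_wait_for_commit_and_retry(page, offset, length);
 */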

/*
 * File a buffer on the given transaction list.
 */
void __jbd2_journal_file_buffer(struct journal_head *jh,
			transaction_t *transaction, int jlist)
{
	struct journal_head **list = NULL;
	int was_dirty = 0;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
				jh->b_transaction == NULL);

	if (jh->b_transaction && jh->b_jlist == jlist)
		return;

	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
	    jlist == BJ_Shadow || jlist == BJ_Forget) {
		/*
		 * For metadata buffers, we track dirty bit in buffer_jbddirty
		 * instead of buffer_dirty. We should not see a dirty bit set
		 * here because we clear it in do_get_write_access but e.g.
		 * tune2fs can modify the sb and set the dirty bit at any time
		 * so we try to gracefully handle that.
		 */
		if (buffer_dirty(bh))
			warn_dirty_buffer(bh);
		if (test_clear_buffer_dirty(bh) ||
		    test_clear_buffer_jbddirty(bh))
			was_dirty = 1;
	}

	if (jh->b_transaction)
		__jbd2_journal_temp_unlink_buffer(jh);
	else
		jbd2_journal_grab_journal_head(bh);
	jh->b_transaction = transaction;

	switch (jlist) {
	case BJ_None:
		J_ASSERT_JH(jh, !jh->b_committed_data);
		J_ASSERT_JH(jh, !jh->b_frozen_data);
		return;
	case BJ_Metadata:
		transaction->t_nr_buffers++;
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	}

	__blist_add_buffer(list, jh);
	jh->b_jlist = jlist;

	if (was_dirty)
		set_buffer_jbddirty(bh);
}

void jbd2_journal_file_buffer(struct journal_head *jh,
				transaction_t *transaction, int jlist)
{
	jbd_lock_bh_state(jh2bh(jh));
	spin_lock(&transaction->t_journal->j_list_lock);
	__jbd2_journal_file_buffer(jh, transaction, jlist);
	spin_unlock(&transaction->t_journal->j_list_lock);
	jbd_unlock_bh_state(jh2bh(jh));
}

/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely. If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 *
 * Called under j_list_lock
 * Called under jbd_lock_bh_state(jh2bh(jh))
 *
 * jh and bh may be already free when this function returns
 */
void __jbd2_journal_refile_buffer(struct journal_head *jh)
{
	int was_dirty, jlist;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	if (jh->b_transaction)
		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

	/* If the buffer is now unused, just drop it. */
	if (jh->b_next_transaction == NULL) {
		__jbd2_journal_unfile_buffer(jh);
		return;
	}

	/*
	 * It has been modified by a later transaction: add it to the new
	 * transaction's metadata list.
	 */

	was_dirty = test_clear_buffer_jbddirty(bh);
	__jbd2_journal_temp_unlink_buffer(jh);
	/*
	 * We set b_transaction here because b_next_transaction will inherit
	 * our jh reference and thus __jbd2_journal_file_buffer() must not
	 * take a new one.
	 */
	jh->b_transaction = jh->b_next_transaction;
	jh->b_next_transaction = NULL;
	if (buffer_freed(bh))
		jlist = BJ_Forget;
	else if (jh->b_modified)
		jlist = BJ_Metadata;
	else
		jlist = BJ_Reserved;
	__jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

	if (was_dirty)
		set_buffer_jbddirty(bh);
}

/*
 * __jbd2_journal_refile_buffer() with necessary locking added. We take our
 * bh reference so that we can safely unlock bh.
 *
 * The jh and bh may be freed by this call.
 */
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	/* Get reference so that buffer cannot be freed before we unlock it */
	get_bh(bh);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_refile_buffer(jh);
	jbd_unlock_bh_state(bh);
	spin_unlock(&journal->j_list_lock);
	__brelse(bh);
}

/*
 * File inode in the inode list of the handle's transaction
 */
static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
				   unsigned long flags)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal;

	if (is_handle_aborted(handle))
		return -EROFS;
	journal = transaction->t_journal;

	jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
			transaction->t_tid);

	/*
	 * First check whether inode isn't already on the transaction's
	 * lists without taking the lock. Note that this check is safe
	 * without the lock as we cannot race with somebody removing inode
	 * from the transaction. The reason is that we remove inode from the
	 * transaction only in journal_release_jbd_inode() and when we commit
	 * the transaction. We are guarded from the first case by holding
	 * a reference to the inode. We are safe against the second case
	 * because if jinode->i_transaction == transaction, commit code
	 * cannot touch the transaction because we hold reference to it,
	 * and if jinode->i_next_transaction == transaction, commit code
	 * will only file the inode where we want it.
	 */
	if ((jinode->i_transaction == transaction ||
	    jinode->i_next_transaction == transaction) &&
	    (jinode->i_flags & flags) == flags)
		return 0;

	spin_lock(&journal->j_list_lock);
	jinode->i_flags |= flags;
	/* Is inode already attached where we need it? */
	if (jinode->i_transaction == transaction ||
	    jinode->i_next_transaction == transaction)
		goto done;

	/*
	 * We only ever set this variable to 1 so the test is safe. Since
	 * t_need_data_flush is likely to be set, we do the test to save some
	 * cacheline bouncing.
	 */
	if (!transaction->t_need_data_flush)
		transaction->t_need_data_flush = 1;
	/* On some different transaction's list - should be
	 * the committing one */
	if (jinode->i_transaction) {
		J_ASSERT(jinode->i_next_transaction == NULL);
		J_ASSERT(jinode->i_transaction ==
					journal->j_committing_transaction);
		jinode->i_next_transaction = transaction;
		goto done;
	}
	/* Not on any transaction list... */
	J_ASSERT(!jinode->i_next_transaction);
	jinode->i_transaction = transaction;
	list_add(&jinode->i_list, &transaction->t_inode_list);
done:
	spin_unlock(&journal->j_list_lock);

	return 0;
}

int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *jinode)
{
	return jbd2_journal_file_inode(handle, jinode,
				       JI_WRITE_DATA | JI_WAIT_DATA);
}

int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *jinode)
{
	return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA);
}
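
/*
 * Illustrative sketch (not from the original file): in ordered mode a
 * filesystem attaches the inode to the running transaction after
 * allocating blocks for a write so that commit flushes the new data
 * before the metadata; the ei->jinode field below is hypothetical:
 *
 *	err = jbd2_journal_inode_add_write(handle, &ei->jinode);
 *	if (err)
 *		return err;
 */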

/*
 * File truncate and transaction commit interact with each other in a
 * non-trivial way.  If a transaction writing data block A is
 * committing, we cannot discard the data by truncate until we have
 * written them.  Otherwise if we crashed after the transaction with
 * write has committed but before the transaction with truncate has
 * committed, we could see stale data in block A.  This function is a
 * helper to solve this problem.  It starts writeout of the truncated
 * part in case it is in the committing transaction.
 *
 * Filesystem code must call this function when inode is journaled in
 * ordered mode before truncation happens and after the inode has been
 * placed on orphan list with the new inode size. The second condition
 * avoids the race that someone writes new data and we start
 * committing the transaction after this function has been called but
 * before a transaction for truncate is started (and furthermore it
 * allows us to optimize the case where the addition to orphan list
 * happens in the same transaction as write --- we don't have to write
 * any data in such case).
 */
int jbd2_journal_begin_ordered_truncate(journal_t *journal,
					struct jbd2_inode *jinode,
					loff_t new_size)
{
	transaction_t *inode_trans, *commit_trans;
	int ret = 0;

	/* This is a quick check to avoid locking if not necessary */
	if (!jinode->i_transaction)
		goto out;
	/* Locks are here just to force reading of recent values, it is
	 * enough that the transaction was not committing before we started
	 * a transaction adding the inode to orphan list */
	read_lock(&journal->j_state_lock);
	commit_trans = journal->j_committing_transaction;
	read_unlock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	inode_trans = jinode->i_transaction;
	spin_unlock(&journal->j_list_lock);
	if (inode_trans == commit_trans) {
		ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
			new_size, LLONG_MAX);
		if (ret)
			jbd2_journal_abort(journal, ret);
	}
out:
	return ret;
}
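
/*
 * Illustrative sketch (not from the original file): called from a
 * hypothetical myfs_setattr() once the inode is on the orphan list with
 * its new size, before the truncate transaction is started:
 *
 *	err = jbd2_journal_begin_ordered_truncate(journal, &ei->jinode,
 *						  attr->ia_size);
 *	if (err)
 *		goto err_out;
 */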