/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/lm_interface.h>
#include <linux/delay.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "util.h"
#include "dir.h"

#define PULL 1

/**
 * gfs2_struct2blk - compute the number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */

unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
                             unsigned int ssize)
{
        unsigned int blks;
        unsigned int first, second;

        blks = 1;
        first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;

        if (nstruct > first) {
                second = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_meta_header)) / ssize;
                blks += DIV_ROUND_UP(nstruct - first, second);
        }

        return blks;
}
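
/*
 * Worked example of the calculation above (illustrative only; the sizes used
 * here are assumed round figures, not the real on-disk structure sizes):
 * with sb_bsize = 4096, a 96-byte log descriptor and 8-byte entries, the
 * first block holds (4096 - 96) / 8 = 500 entries; with an assumed 24-byte
 * meta header each continuation block holds (4096 - 24) / 8 = 509.  So 1200
 * entries need 1 + DIV_ROUND_UP(1200 - 500, 509) = 3 blocks.
 */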

/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @ai: the AIL entry to start I/O on
 *
 */

static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;
        int retry;

        BUG_ON(!spin_is_locked(&sdp->sd_log_lock));

        do {
                retry = 0;

                list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
                                                 bd_ail_st_list) {
                        bh = bd->bd_bh;

                        gfs2_assert(sdp, bd->bd_ail == ai);

                        if (!buffer_busy(bh)) {
                                if (!buffer_uptodate(bh)) {
                                        gfs2_log_unlock(sdp);
                                        gfs2_io_error_bh(sdp, bh);
                                        gfs2_log_lock(sdp);
                                }
                                list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
                                continue;
                        }

                        if (!buffer_dirty(bh))
                                continue;

                        list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);

                        gfs2_log_unlock(sdp);
                        wait_on_buffer(bh);
                        ll_rw_block(WRITE, 1, &bh);
                        gfs2_log_lock(sdp);

                        retry = 1;
                        break;
                }
        } while (retry);
}

/**
 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
 * @sdp: the filesystem
 * @ai: the AIL entry
 * @flags: DIO_ALL to keep scanning past busy buffers
 *
 * Returns: 1 if the entry's ai_ail1_list is now empty, 0 otherwise
 */

static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
{
        struct gfs2_bufdata *bd, *s;
        struct buffer_head *bh;

        list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
                                         bd_ail_st_list) {
                bh = bd->bd_bh;

                gfs2_assert(sdp, bd->bd_ail == ai);

                if (buffer_busy(bh)) {
                        if (flags & DIO_ALL)
                                continue;
                        else
                                break;
                }

                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);

                list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
        }

        return list_empty(&ai->ai_ail1_list);
}

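/*
 * Start I/O on the whole AIL1 list.  Each pass through the list is tagged
 * with a fresh sd_ail_sync_gen value; ai_sync_gen records which entries have
 * already been handled in this pass, so the scan can be restarted after
 * gfs2_ail1_start_one() drops and retakes the log lock.
 */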
static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
{
        struct list_head *head;
        u64 sync_gen;
        struct list_head *first;
        struct gfs2_ail *first_ai, *ai, *tmp;
        int done = 0;

        gfs2_log_lock(sdp);
        head = &sdp->sd_ail1_list;
        if (list_empty(head)) {
                gfs2_log_unlock(sdp);
                return;
        }
        sync_gen = sdp->sd_ail_sync_gen++;

        first = head->prev;
        first_ai = list_entry(first, struct gfs2_ail, ai_list);
        first_ai->ai_sync_gen = sync_gen;
        gfs2_ail1_start_one(sdp, first_ai); /* This may drop log lock */

        if (flags & DIO_ALL)
                first = NULL;

        while (!done) {
                if (first && (head->prev != first ||
                              gfs2_ail1_empty_one(sdp, first_ai, 0)))
                        break;

                done = 1;
                list_for_each_entry_safe_reverse(ai, tmp, head, ai_list) {
                        if (ai->ai_sync_gen >= sync_gen)
                                continue;
                        ai->ai_sync_gen = sync_gen;
                        gfs2_ail1_start_one(sdp, ai); /* This may drop log lock */
                        done = 0;
                        break;
                }
        }

        gfs2_log_unlock(sdp);
}

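/*
 * Scan the AIL1 list and move any fully written-back entries over to AIL2.
 * Returns true once nothing is left on AIL1.
 */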
int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
{
        struct gfs2_ail *ai, *s;
        int ret;

        gfs2_log_lock(sdp);

        list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
                if (gfs2_ail1_empty_one(sdp, ai, flags))
                        list_move(&ai->ai_list, &sdp->sd_ail2_list);
                else if (!(flags & DIO_ALL))
                        break;
        }

        ret = list_empty(&sdp->sd_ail1_list);

        gfs2_log_unlock(sdp);

        return ret;
}


/**
 * gfs2_ail2_empty_one - Release the buffers on a synced AIL entry
 * @sdp: the filesystem
 * @ai: the AIL entry
 *
 */

static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
        struct list_head *head = &ai->ai_ail2_list;
        struct gfs2_bufdata *bd;
        struct gfs2_inode *bh_ip;

        while (!list_empty(head)) {
                bd = list_entry(head->prev, struct gfs2_bufdata,
                                bd_ail_st_list);
                gfs2_assert(sdp, bd->bd_ail == ai);
                bd->bd_ail = NULL;
                list_del(&bd->bd_ail_st_list);
                list_del(&bd->bd_ail_gl_list);
                atomic_dec(&bd->bd_gl->gl_ail_count);
                if (bd->bd_bh->b_page->mapping) {
                        bh_ip = GFS2_I(bd->bd_bh->b_page->mapping->host);
                        gfs2_meta_cache_flush(bh_ip);
                }
                brelse(bd->bd_bh);
        }
}

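/*
 * Remove (and free) any AIL2 entries that fall between the old tail and the
 * new tail of the circular journal.  Illustrative example with made-up
 * numbers: if old_tail = 100 and new_tail = 500, an entry with ai_first = 200
 * satisfies (old_tail <= ai_first) && (ai_first < new_tail) and is removed.
 * If the tail wraps (say old_tail = 8000, new_tail = 50 in an 8192-block
 * journal), either condition on its own is enough.
 */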
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        struct gfs2_ail *ai, *safe;
        unsigned int old_tail = sdp->sd_log_tail;
        int wrap = (new_tail < old_tail);
        int a, b, rm;

        gfs2_log_lock(sdp);

        list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
                a = (old_tail <= ai->ai_first);
                b = (ai->ai_first < new_tail);
                rm = (wrap) ? (a || b) : (a && b);
                if (!rm)
                        continue;

                gfs2_ail2_empty_one(sdp, ai);
                list_del(&ai->ai_list);
                gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
                gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
                kfree(ai);
        }

        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_reserve - Make a log reservation
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks to reserve
 *
 * Note that we never give out the last few blocks of the journal. That's
 * because a small number of header blocks is associated with each log flush.
 * The exact number can't be known until flush time, so we ensure that we
 * always have just enough free blocks to avoid running out during a log
 * flush.
 *
 * Returns: errno
 */

int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
        unsigned int try = 0;
        unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);

        if (gfs2_assert_warn(sdp, blks) ||
            gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
                return -EINVAL;

        mutex_lock(&sdp->sd_log_reserve_mutex);
        gfs2_log_lock(sdp);
        while (sdp->sd_log_blks_free <= (blks + reserved_blks)) {
                gfs2_log_unlock(sdp);
                gfs2_ail1_empty(sdp, 0);
                gfs2_log_flush(sdp, NULL);

                if (try++)
                        gfs2_ail1_start(sdp, 0);
                gfs2_log_lock(sdp);
        }
        sdp->sd_log_blks_free -= blks;
        gfs2_log_unlock(sdp);
        mutex_unlock(&sdp->sd_log_reserve_mutex);

        down_read(&sdp->sd_log_flush_lock);

        return 0;
}
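
/*
 * Illustrative arithmetic for the reserved_blks hold-back above: it scales
 * with how many filesystem blocks fit in 4096 bytes, so with a 4096-byte
 * block size 6 * (4096 / 4096) = 6 blocks are kept in reserve, while with a
 * 1024-byte block size 6 * (4096 / 1024) = 24 blocks are.
 */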

/**
 * gfs2_log_release - Release a given number of log blocks
 * @sdp: The GFS2 superblock
 * @blks: The number of blocks
 *
 */

void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{

        gfs2_log_lock(sdp);
        sdp->sd_log_blks_free += blks;
        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
        gfs2_log_unlock(sdp);
        up_read(&sdp->sd_log_flush_lock);
}

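/*
 * Map a journal-relative block number (lbn) to its physical disk block using
 * the journal inode's block map.  A mapping error or an unmapped block makes
 * the gfs2_assert_withdraw() below fire.
 */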
static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
{
        struct inode *inode = sdp->sd_jdesc->jd_inode;
        int error;
        struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

        bh_map.b_size = 1 << inode->i_blkbits;
        error = gfs2_block_map(inode, lbn, 0, &bh_map);
        if (error || !bh_map.b_blocknr)
                printk(KERN_INFO "error=%d, dbn=%llu lbn=%u\n", error,
                       (unsigned long long)bh_map.b_blocknr, lbn);
        gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);

        return bh_map.b_blocknr;
}

/**
 * log_distance - Compute distance between two journal blocks
 * @sdp: The GFS2 superblock
 * @newer: The most recent journal block of the pair
 * @older: The older journal block of the pair
 *
 *   Compute the distance (in the journal direction) between two
 *   blocks in the journal
 *
 * Returns: the distance in blocks
 */

static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
                                        unsigned int older)
{
        int dist;

        dist = newer - older;
        if (dist < 0)
                dist += sdp->sd_jdesc->jd_blocks;

        return dist;
}
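
/*
 * Example of the wrap handling above (made-up journal size): in an
 * 8192-block journal, log_distance(sdp, 10, 8000) = 10 - 8000 + 8192 = 202,
 * i.e. the write position advanced 202 blocks even though it wrapped past
 * the end of the journal.
 */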

/**
 * calc_reserved - Calculate the number of blocks to reserve when
 *                 refunding a transaction's unused buffers.
 * @sdp: The GFS2 superblock
 *
 * This is complex.  We need to reserve room for all our currently used
 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
 * all our journaled data buffers for journaled files (e.g. files in the
 * meta_fs like rindex, or files for which chattr +j was done.)
 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
 * will count it as free space (sd_log_blks_free) and corruption will follow.
 *
 * We can have metadata bufs and jdata bufs in the same journal.  So each
 * type gets its own log header, for which we need to reserve a block.
 * In fact, each type has the potential for needing more than one header
 * in cases where we have more buffers than will fit on a journal page.
 * Metadata journal entries take up half the space of journaled buffer entries.
 * Thus, metadata entries have buf_limit (502) and journaled buffers have
 * databuf_limit (251) before they cause a wrap around.
 *
 * Also, we need to reserve blocks for revoke journal entries and one for an
 * overall header for the lot.
 *
 * Returns: the number of blocks reserved
 */
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
        unsigned int reserved = 0;
        unsigned int mbuf_limit, metabufhdrs_needed;
        unsigned int dbuf_limit, databufhdrs_needed;
        unsigned int revokes = 0;

        mbuf_limit = buf_limit(sdp);
        metabufhdrs_needed = (sdp->sd_log_commited_buf +
                              (mbuf_limit - 1)) / mbuf_limit;
        dbuf_limit = databuf_limit(sdp);
        databufhdrs_needed = (sdp->sd_log_commited_databuf +
                              (dbuf_limit - 1)) / dbuf_limit;

        if (sdp->sd_log_commited_revoke)
                revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
                                          sizeof(u64));

        reserved = sdp->sd_log_commited_buf + metabufhdrs_needed +
                sdp->sd_log_commited_databuf + databufhdrs_needed +
                revokes;
        /* One for the overall header */
        if (reserved)
                reserved++;
        return reserved;
}
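
/*
 * Worked example (illustrative numbers only, assuming the 4K-block limits
 * quoted in the comment above): with 600 committed metadata buffers, 100
 * committed journaled-data buffers and no revokes,
 * metabufhdrs_needed = (600 + 501) / 502 = 2 and
 * databufhdrs_needed = (100 + 250) / 251 = 1, so
 * reserved = 600 + 2 + 100 + 1 + 0, plus 1 for the overall header = 704.
 */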

static unsigned int current_tail(struct gfs2_sbd *sdp)
{
        struct gfs2_ail *ai;
        unsigned int tail;

        gfs2_log_lock(sdp);

        if (list_empty(&sdp->sd_ail1_list)) {
                tail = sdp->sd_log_head;
        } else {
                ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
                tail = ai->ai_first;
        }

        gfs2_log_unlock(sdp);

        return tail;
}

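/*
 * Advance the flush head by one block, wrapping back to block 0 when the end
 * of the journal is reached and noting the wrap in sd_log_flush_wrapped.
 */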
static inline void log_incr_head(struct gfs2_sbd *sdp)
{
        if (sdp->sd_log_flush_head == sdp->sd_log_tail)
                gfs2_assert_withdraw(sdp, sdp->sd_log_flush_head == sdp->sd_log_head);

        if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
                sdp->sd_log_flush_head = 0;
                sdp->sd_log_flush_wrapped = 1;
        }
}

/**
 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
 * @sdp: The GFS2 superblock
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
{
        u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct gfs2_log_buf *lb;
        struct buffer_head *bh;

        lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
        list_add(&lb->lb_list, &sdp->sd_log_flush_list);

        bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
        unlock_buffer(bh);

        log_incr_head(sdp);

        return bh;
}

/**
 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
 * @sdp: the filesystem
 * @real: the buffer_head whose data the fake buffer_head should point to
 *
 * Returns: the buffer_head
 */

struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
                                      struct buffer_head *real)
{
        u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct gfs2_log_buf *lb;
        struct buffer_head *bh;

        lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
        list_add(&lb->lb_list, &sdp->sd_log_flush_list);
        lb->lb_real = real;

        bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
        atomic_set(&bh->b_count, 1);
        bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
        set_bh_page(bh, real->b_page, bh_offset(real));
        bh->b_blocknr = blkno;
        bh->b_size = sdp->sd_sb.sb_bsize;
        bh->b_bdev = sdp->sd_vfs->s_bdev;

        log_incr_head(sdp);

        return bh;
}

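/*
 * Move the log tail forward to new_tail: drop any AIL2 entries that now fall
 * outside the live region of the journal and return the freed distance to
 * sd_log_blks_free.
 */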
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
        unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

        ail2_empty(sdp, new_tail);

        gfs2_log_lock(sdp);
        sdp->sd_log_blks_free += dist;
        gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
        gfs2_log_unlock(sdp);

        sdp->sd_log_tail = new_tail;
}

/**
 * log_write_header - Write a journal log header at the current flush head
 * @sdp: The GFS2 superblock
 * @flags: log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT)
 * @pull: non-zero (PULL) if the caller expects the tail to move
 *
 */

static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
{
        u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
        struct buffer_head *bh;
        struct gfs2_log_header *lh;
        unsigned int tail;
        u32 hash;

        bh = sb_getblk(sdp->sd_vfs, blkno);
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
        unlock_buffer(bh);

        gfs2_ail1_empty(sdp, 0);
        tail = current_tail(sdp);

        lh = (struct gfs2_log_header *)bh->b_data;
        memset(lh, 0, sizeof(struct gfs2_log_header));
        lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
        lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
        lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
        lh->lh_flags = cpu_to_be32(flags);
        lh->lh_tail = cpu_to_be32(tail);
        lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
        hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
        lh->lh_hash = cpu_to_be32(hash);

        set_buffer_dirty(bh);
        if (sync_dirty_buffer(bh))
                gfs2_io_error_bh(sdp, bh);
        brelse(bh);

        if (sdp->sd_log_tail != tail)
                log_pull_tail(sdp, tail);
        else
                gfs2_assert_withdraw(sdp, !pull);

        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
        log_incr_head(sdp);
}

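/*
 * Wait for every buffer queued on sd_log_flush_list to finish its I/O, then
 * write a commit header.  If nothing was actually queued, just try to pull
 * the tail forward instead of writing an unnecessary header.
 */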
static void log_flush_commit(struct gfs2_sbd *sdp)
{
        struct list_head *head = &sdp->sd_log_flush_list;
        struct gfs2_log_buf *lb;
        struct buffer_head *bh;
        int flushcount = 0;

        while (!list_empty(head)) {
                lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
                list_del(&lb->lb_list);
                bh = lb->lb_bh;

                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        gfs2_io_error_bh(sdp, bh);
                if (lb->lb_real) {
                        while (atomic_read(&bh->b_count) != 1)  /* Grrrr... */
                                schedule();
                        free_buffer_head(bh);
                } else
                        brelse(bh);
                kfree(lb);
                flushcount++;
        }

        /* If nothing was journaled, the header is unplanned and unwanted. */
        if (flushcount) {
                log_write_header(sdp, 0, 0);
        } else {
                unsigned int tail;
                tail = current_tail(sdp);

                gfs2_ail1_empty(sdp, 0);
                if (sdp->sd_log_tail != tail)
                        log_pull_tail(sdp, tail);
        }
}

/**
 * gfs2_log_flush - flush incore transaction(s)
 * @sdp: the filesystem
 * @gl: The glock structure to flush.  If NULL, flush the whole incore log
 *
 */

void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
        struct gfs2_ail *ai;

        down_write(&sdp->sd_log_flush_lock);

        if (gl) {
                gfs2_log_lock(sdp);
                if (list_empty(&gl->gl_le.le_list)) {
                        gfs2_log_unlock(sdp);
                        up_write(&sdp->sd_log_flush_lock);
                        return;
                }
                gfs2_log_unlock(sdp);
        }

        ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ai->ai_ail1_list);
        INIT_LIST_HEAD(&ai->ai_ail2_list);

        gfs2_assert_withdraw(sdp,
                             sdp->sd_log_num_buf + sdp->sd_log_num_jdata ==
                             sdp->sd_log_commited_buf +
                             sdp->sd_log_commited_databuf);
        gfs2_assert_withdraw(sdp,
                        sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;
        ai->ai_first = sdp->sd_log_flush_head;

        lops_before_commit(sdp);
        if (!list_empty(&sdp->sd_log_flush_list))
                log_flush_commit(sdp);
        else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle) {
                gfs2_log_lock(sdp);
                sdp->sd_log_blks_free--; /* Adjust for unreserved buffer */
                gfs2_log_unlock(sdp);
                log_write_header(sdp, 0, PULL);
        }
        lops_after_commit(sdp, ai);

        gfs2_log_lock(sdp);
        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_blks_reserved = 0;
        sdp->sd_log_commited_buf = 0;
        sdp->sd_log_commited_databuf = 0;
        sdp->sd_log_commited_revoke = 0;

        if (!list_empty(&ai->ai_ail1_list)) {
                list_add(&ai->ai_list, &sdp->sd_ail1_list);
                ai = NULL;
        }
        gfs2_log_unlock(sdp);

        sdp->sd_vfs->s_dirt = 0;
        up_write(&sdp->sd_log_flush_lock);

        kfree(ai);
}

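/*
 * Fold a finished transaction's block usage into the per-log counters and
 * give back any reservation it did not use.  Illustrative example with
 * made-up numbers: if a transaction reserved 10 blocks (tr_reserved) but the
 * recalculated requirement only grew by 7 (reserved - sd_log_blks_reserved),
 * sd_log_blks_free gains 3 blocks.
 */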
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        unsigned int reserved;
        unsigned int old;

        gfs2_log_lock(sdp);

        sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
        sdp->sd_log_commited_databuf += tr->tr_num_databuf_new -
                tr->tr_num_databuf_rm;
        gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
                             (((int)sdp->sd_log_commited_databuf) >= 0));
        sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
        gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);
        reserved = calc_reserved(sdp);
        old = sdp->sd_log_blks_free;
        sdp->sd_log_blks_free += tr->tr_reserved -
                                 (reserved - sdp->sd_log_blks_reserved);

        gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free >= old);
        gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free <=
                             sdp->sd_jdesc->jd_blocks);

        sdp->sd_log_blks_reserved = reserved;

        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_commit - Commit a transaction to the log
 * @sdp: the filesystem
 * @tr: the transaction
 *
 */

void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
        log_refund(sdp, tr);
        lops_incore_commit(sdp, tr);

        sdp->sd_vfs->s_dirt = 1;
        up_read(&sdp->sd_log_flush_lock);

        gfs2_log_lock(sdp);
        if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
                wake_up_process(sdp->sd_logd_process);
        gfs2_log_unlock(sdp);
}

/**
 * gfs2_log_shutdown - write a shutdown header into a journal
 * @sdp: the filesystem
 *
 */

void gfs2_log_shutdown(struct gfs2_sbd *sdp)
{
        down_write(&sdp->sd_log_flush_lock);

        gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_jdata);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
        gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
        gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));

        sdp->sd_log_flush_head = sdp->sd_log_head;
        sdp->sd_log_flush_wrapped = 0;

        log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT,
                         (sdp->sd_log_tail == current_tail(sdp)) ? 0 : PULL);

        gfs2_assert_warn(sdp, sdp->sd_log_blks_free == sdp->sd_jdesc->jd_blocks);
        gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
        gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));

        sdp->sd_log_head = sdp->sd_log_flush_head;
        sdp->sd_log_tail = sdp->sd_log_head;

        up_write(&sdp->sd_log_flush_lock);
}


/**
 * gfs2_meta_syncfs - sync all the buffers in a filesystem
 * @sdp: the filesystem
 *
 */

void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
{
        gfs2_log_flush(sdp, NULL);
        for (;;) {
                gfs2_ail1_start(sdp, DIO_ALL);
                if (gfs2_ail1_empty(sdp, DIO_ALL))
                        break;
                msleep(10);
        }
}