[GFS2] Fix an oops in glock dumping
[powerpc.git] fs/gfs2/glock.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

struct glock_iter {
        int hash;                     /* hash bucket index         */
        struct gfs2_sbd *sdp;         /* incore superblock         */
        struct gfs2_glock *gl;        /* current glock struct      */
        struct seq_file *seq;         /* sequence file for debugfs */
        char string[512];             /* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
        defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif
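
/*
 * Usage sketch (illustrative, mirroring the callers later in this file):
 * any walk of a gl_hash_table chain takes the corresponding bucket lock,
 * e.g.
 *
 *      read_lock(gl_lock_addr(hash));
 *      gl = search_bucket(hash, sdp, &name);
 *      read_unlock(gl_lock_addr(hash));
 *
 * On uniprocessor builds gl_lock_addr() returns NULL. That is safe here
 * only because read_lock()/write_lock() expand to no-ops in that
 * configuration, so the pointer is never dereferenced.
 */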

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
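
/*
 * Example: a holder asking for LM_ST_SHARED is satisfied by a glock
 * already held in LM_ST_EXCLUSIVE, and a holder passing LM_FLAG_ANY is
 * satisfied by any state other than LM_ST_UNLOCKED. GL_EXACT defeats
 * both relaxations and demands an exact state match.
 */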

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                BUG_ON(spin_is_locked(&gl->gl_spin));
                gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
                gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
                gfs2_assert(sdp, list_empty(&gl->gl_holders));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_hash = hash;
        gl->gl_owner_pid = 0;
        gl->gl_ip = 0;
        gl->gl_ops = glops;
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        gl->gl_vn = 0;
        gl->gl_stamp = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        lops_init_le(&gl->gl_le, &gfs2_glock_lops);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner_pid = current->pid;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}
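
/*
 * Typical holder life cycle (an illustrative sketch; flags and error
 * handling are elided):
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *      error = gfs2_glock_nq(&gh);
 *      if (!error) {
 *              (use the object that gl protects)
 *              gfs2_glock_dq(&gh);
 *      }
 *      gfs2_holder_uninit(&gh);
 */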

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags = 0;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int just_schedule(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        smp_mb__after_clear_bit();
        wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
        might_sleep();
        wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        gfs2_glock_xmote_th(gh->gh_gl, gh);
                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        gfs2_holder_wake(gh);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gl->gl_demote_state ||
            gl->gl_state == LM_ST_UNLOCKED) {
                gfs2_demote_wake(gl);
                return 0;
        }
        set_bit(GLF_LOCK, &gl->gl_flags);
        if (gl->gl_demote_state == LM_ST_UNLOCKED ||
            gl->gl_state != LM_ST_EXCLUSIVE) {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_drop_th(gl);
        } else {
                spin_unlock(&gl->gl_spin);
                gfs2_glock_xmote_th(gl, NULL);
        }
        spin_lock(&gl->gl_spin);

        return 0;
}

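/*
 * Queueing discipline: run_queue() below services requests in strict
 * priority order. Glmutex requests on gl_waiters1 go first, then any
 * pending demote, then promote requests on gl_waiters3, and the loop
 * stops as soon as one of the rq_* handlers reports that the queue is
 * blocked.
 */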
/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */
static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                        blocked = rq_demote(gl);
                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);
        if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
                BUG();

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        } else {
                gl->gl_owner_pid = current->pid;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                clear_bit(HIF_WAIT, &gh.gh_iflags);
                smp_mb();
                wake_up_bit(&gh.gh_iflags, HIF_WAIT);
        }
        spin_unlock(&gl->gl_spin);

        wait_on_holder(&gh);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                acquired = 0;
        } else {
                gl->gl_owner_pid = current->pid;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner_pid = 0;
        gl->gl_ip = 0;
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @remote: 1 if this is a callback from the lock module, 0 for a local request
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state, int remote)
{
        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
                gl->gl_demote_state = state;
                gl->gl_demote_time = jiffies;
                if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
                    gl->gl_object) {
                        struct inode *inode = igrab(gl->gl_object);
                        spin_unlock(&gl->gl_spin);
                        if (inode) {
                                d_prune_aliases(inode);
                                iput(inode);
                        }
                        return;
                }
        } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                        gl->gl_demote_state != state) {
                gl->gl_demote_state = LM_ST_UNLOCKED;
        }
        spin_unlock(&gl->gl_spin);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, 0);
        }

        /*  Deal with each possible exit condition  */

        if (!gh) {
                gl->gl_stamp = jiffies;
                if (ret & LM_OUT_CANCELED) {
                        op_done = 0;
                } else {
                        spin_lock(&gl->gl_spin);
                        if (gl->gl_state != gl->gl_demote_state) {
                                gl->gl_req_bh = NULL;
                                spin_unlock(&gl->gl_spin);
                                gfs2_glock_drop_th(gl);
                                gfs2_glock_put(gl);
                                return;
                        }
                        gfs2_demote_wake(gl);
                        spin_unlock(&gl->gl_spin);
                }
        } else {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                        goto out;
                gh->gh_error = GLR_CANCELED;
                if (ret & LM_OUT_CANCELED)
                        goto out;
                if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                        list_add_tail(&gh->gh_list, &gl->gl_holders);
                        gh->gh_error = 0;
                        set_bit(HIF_HOLDER, &gh->gh_iflags);
                        set_bit(HIF_FIRST, &gh->gh_iflags);
                        op_done = 0;
                        goto out;
                }
                gh->gh_error = GLR_TRYFAILED;
                if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                        goto out;
                gh->gh_error = -EINVAL;
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder driving the state change (or NULL for a demote request)
 *
 */

static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int flags = gh ? gh->gh_flags : 0;
        unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);
        gfs2_demote_wake(gl);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        if (glops->go_drop_th)
                glops->go_drop_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh && !(gl->gl_req_gh &&
                                     (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_on_holder(gh);
        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, pid_t pid)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner_pid == pid)
                        return gh;
        }

        return NULL;
}

static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        if (gi) {
                /* Bound the write; gi->string is fixed-size scratch space */
                vsnprintf(gi->string, sizeof(gi->string), fmt, args);
                seq_printf(gi->seq, "%s", gi->string);
        } else
                vprintk(fmt, args);
        va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner_pid);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
                printk(KERN_INFO "lock type : %d lock state : %d\n",
                                existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
                printk(KERN_INFO "lock type : %d lock state : %d\n",
                                gl->gl_name.ln_type, gl->gl_state);
                BUG();
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        return error;
}
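
/*
 * Asynchronous use (an illustrative sketch): with GL_ASYNC set,
 * gfs2_glock_nq() queues the holder and returns immediately; the caller
 * later collects the result:
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *      gfs2_glock_nq(&gh);
 *      (do other work)
 *      if (gfs2_glock_poll(&gh))
 *              error = gfs2_glock_wait(&gh);
 */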

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED, 0);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                spin_lock(&gl->gl_spin);
                gl->gl_stamp = jiffies;
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if the first lock orders after the second, -1 if before,
 *          0 if equal
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        struct gfs2_holder *tmp[4];
        struct gfs2_holder **pph = tmp;
        int error = 0;

        switch(num_gh) {
        case 0:
                return 0;
        case 1:
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        default:
                if (num_gh <= 4)
                        break;
                pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
                if (!pph)
                        return -ENOMEM;
        }

        error = nq_m_sync(num_gh, ghs, pph);

        if (pph != tmp)
                kfree(pph);

        return error;
}

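/*
 * Example (illustrative; gl_a and gl_b stand for any two glocks):
 * nq_m_sync() sorts the holders by lock number before acquiring them, so
 * concurrent callers always lock in the same order and cannot deadlock
 * against each other:
 *
 *      struct gfs2_holder ghs[2];
 *
 *      gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *      gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *      error = gfs2_glock_nq_m(2, ghs);
 */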
1372
1373 /**
1374  * gfs2_glock_dq_m - release multiple glocks
1375  * @num_gh: the number of structures
1376  * @ghs: an array of struct gfs2_holder structures
1377  *
1378  */
1379
1380 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1381 {
1382         unsigned int x;
1383
1384         for (x = 0; x < num_gh; x++)
1385                 gfs2_glock_dq(&ghs[x]);
1386 }
1387
1388 /**
1389  * gfs2_glock_dq_uninit_m - release multiple glocks
1390  * @num_gh: the number of structures
1391  * @ghs: an array of struct gfs2_holder structures
1392  *
1393  */
1394
1395 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1396 {
1397         unsigned int x;
1398
1399         for (x = 0; x < num_gh; x++)
1400                 gfs2_glock_dq_uninit(&ghs[x]);
1401 }
1402
1403 /**
1404  * gfs2_lvb_hold - attach a LVB from a glock
1405  * @gl: The glock in question
1406  *
1407  */
1408
1409 int gfs2_lvb_hold(struct gfs2_glock *gl)
1410 {
1411         int error;
1412
1413         gfs2_glmutex_lock(gl);
1414
1415         if (!atomic_read(&gl->gl_lvb_count)) {
1416                 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1417                 if (error) {
1418                         gfs2_glmutex_unlock(gl);
1419                         return error;
1420                 }
1421                 gfs2_glock_hold(gl);
1422         }
1423         atomic_inc(&gl->gl_lvb_count);
1424
1425         gfs2_glmutex_unlock(gl);
1426
1427         return 0;
1428 }
1429
1430 /**
1431  * gfs2_lvb_unhold - detach a LVB from a glock
1432  * @gl: The glock in question
1433  *
1434  */
1435
1436 void gfs2_lvb_unhold(struct gfs2_glock *gl)
1437 {
1438         gfs2_glock_hold(gl);
1439         gfs2_glmutex_lock(gl);
1440
1441         gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1442         if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1443                 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1444                 gl->gl_lvb = NULL;
1445                 gfs2_glock_put(gl);
1446         }
1447
1448         gfs2_glmutex_unlock(gl);
1449         gfs2_glock_put(gl);
1450 }
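
/*
 * Pairing rule: every successful gfs2_lvb_hold() must be balanced by a
 * gfs2_lvb_unhold(). The LVB is only fetched from the lock module on the
 * first hold and is released again when gl_lvb_count drops back to zero.
 */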

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        handle_callback(gl, state, 1);

        spin_lock(&gl->gl_spin);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl)) {
                        up_read(&gfs2_umount_flush_sem);
                        return;
                }
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket index
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          unsigned int hash)
{
        struct gfs2_glock *gl, *prev = NULL;
        int has_entries = 0;
        struct hlist_head *head = &gl_hash_table[hash].hb_list;

        read_lock(gl_lock_addr(hash));
        /* Can't use hlist_for_each_entry - don't want prefetch here */
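        /* The bucket lock is dropped while the examiner runs, so take a
           reference on the current glock (released via prev on the next
           pass) to keep our place in the chain valid. */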
1626         if (hlist_empty(head))
1627                 goto out;
1628         gl = list_entry(head->first, struct gfs2_glock, gl_list);
1629         while(1) {
1630                 if (gl->gl_sbd == sdp) {
1631                         gfs2_glock_hold(gl);
1632                         read_unlock(gl_lock_addr(hash));
1633                         if (prev)
1634                                 gfs2_glock_put(prev);
1635                         prev = gl;
1636                         examiner(gl);
1637                         has_entries = 1;
1638                         read_lock(gl_lock_addr(hash));
1639                 }
1640                 if (gl->gl_list.next == NULL)
1641                         break;
1642                 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1643         }
1644 out:
1645         read_unlock(gl_lock_addr(hash));
1646         if (prev)
1647                 gfs2_glock_put(prev);
1648         return has_entries;
1649 }
1650
1651 /**
1652  * scan_glock - look at a glock and see if we can reclaim it
1653  * @gl: the glock to look at
1654  *
1655  */
1656
1657 static void scan_glock(struct gfs2_glock *gl)
1658 {
1659         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1660                 return;
1661
1662         if (gfs2_glmutex_trylock(gl)) {
1663                 if (list_empty(&gl->gl_holders) &&
1664                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1665                         goto out_schedule;
1666                 gfs2_glmutex_unlock(gl);
1667         }
1668         return;
1669
1670 out_schedule:
1671         gfs2_glmutex_unlock(gl);
1672         gfs2_glock_schedule_for_reclaim(gl);
1673 }
1674
1675 /**
1676  * gfs2_scand_internal - Look for glocks and inodes to toss from memory
1677  * @sdp: the filesystem
1678  *
1679  */
1680
1681 void gfs2_scand_internal(struct gfs2_sbd *sdp)
1682 {
1683         unsigned int x;
1684
1685         for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1686                 examine_bucket(scan_glock, sdp, x);
1687 }
1688
1689 /**
1690  * clear_glock - look at a glock and see if we can free it from glock cache
1691  * @gl: the glock to look at
1692  *
1693  */
1694
1695 static void clear_glock(struct gfs2_glock *gl)
1696 {
1697         struct gfs2_sbd *sdp = gl->gl_sbd;
1698         int released;
1699
1700         spin_lock(&sdp->sd_reclaim_lock);
1701         if (!list_empty(&gl->gl_reclaim)) {
1702                 list_del_init(&gl->gl_reclaim);
1703                 atomic_dec(&sdp->sd_reclaim_count);
1704                 spin_unlock(&sdp->sd_reclaim_lock);
1705                 released = gfs2_glock_put(gl);
1706                 gfs2_assert(sdp, !released);
1707         } else {
1708                 spin_unlock(&sdp->sd_reclaim_lock);
1709         }
1710
1711         if (gfs2_glmutex_trylock(gl)) {
1712                 if (list_empty(&gl->gl_holders) &&
1713                     gl->gl_state != LM_ST_UNLOCKED)
1714                         handle_callback(gl, LM_ST_UNLOCKED, 0);
1715                 gfs2_glmutex_unlock(gl);
1716         }
1717 }
1718
1719 /**
1720  * gfs2_gl_hash_clear - Empty out the glock hash table
1721  * @sdp: the filesystem
1722  * @wait: wait until it's all gone
1723  *
1724  * Called when unmounting the filesystem, or when inter-node lock manager
1725  * requests DROPLOCKS because it is running out of capacity.
1726  */
1727
1728 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1729 {
1730         unsigned long t;
1731         unsigned int x;
1732         int cont;
1733
1734         t = jiffies;
1735
1736         for (;;) {
1737                 cont = 0;
1738                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1739                         if (examine_bucket(clear_glock, sdp, x))
1740                                 cont = 1;
1741                 }
1742
1743                 if (!wait || !cont)
1744                         break;
1745
1746                 if (time_after_eq(jiffies,
1747                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1748                         fs_warn(sdp, "Unmount seems to be stalled. "
1749                                      "Dumping lock state...\n");
1750                         gfs2_dump_lockstate(sdp);
1751                         t = jiffies;
1752                 }
1753
1754                 down_write(&gfs2_umount_flush_sem);
1755                 invalidate_inodes(sdp->sd_vfs);
1756                 up_write(&gfs2_umount_flush_sem);
1757                 msleep(10);
1758         }
1759 }
1760
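/*
 * Typical use (sketch): at unmount, gfs2_gl_hash_clear(sdp, 1) loops
 * until every glock belonging to @sdp is gone, warning and dumping the
 * lock state if that stalls; a DROPLOCKS callback from the lock module
 * uses gfs2_gl_hash_clear(sdp, 0) for a single, non-blocking sweep.
 */
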
1761 /*
1762  *  Diagnostic routines to help debug distributed deadlock
1763  */
1764
1765 static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
1766                               unsigned long address)
1767 {
1768         char buffer[KSYM_SYMBOL_LEN];
1769
1770         sprint_symbol(buffer, address);
1771         print_dbg(gi, fmt, buffer);
1772 }
1773
/**
 * dump_holder - print information about a glock holder
 * @gi: the glock iterator (NULL means dump to the console)
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success (output is emitted via print_dbg)
 */
1781
1782 static int dump_holder(struct glock_iter *gi, char *str,
1783                        struct gfs2_holder *gh)
1784 {
1785         unsigned int x;
1786         struct task_struct *gh_owner;
1787
1788         print_dbg(gi, "  %s\n", str);
1789         if (gh->gh_owner_pid) {
1790                 print_dbg(gi, "    owner = %ld ", (long)gh->gh_owner_pid);
1791                 gh_owner = find_task_by_pid(gh->gh_owner_pid);
1792                 if (gh_owner)
1793                         print_dbg(gi, "(%s)\n", gh_owner->comm);
1794                 else
1795                         print_dbg(gi, "(ended)\n");
1796         } else
1797                 print_dbg(gi, "    owner = -1\n");
1798         print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
1799         print_dbg(gi, "    gh_flags =");
1800         for (x = 0; x < 32; x++)
1801                 if (gh->gh_flags & (1 << x))
1802                         print_dbg(gi, " %u", x);
1803         print_dbg(gi, " \n");
1804         print_dbg(gi, "    error = %d\n", gh->gh_error);
1805         print_dbg(gi, "    gh_iflags =");
1806         for (x = 0; x < 32; x++)
1807                 if (test_bit(x, &gh->gh_iflags))
1808                         print_dbg(gi, " %u", x);
1809         print_dbg(gi, " \n");
1810         gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);
1811
1812         return 0;
1813 }
1814
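/*
 * Example of the output produced above, with illustrative values (the
 * flag lists are the numbers of the bits that are currently set):
 *
 *	  Holder
 *	    owner = 2914 (df)
 *	    gh_state = 3
 *	    gh_flags = 3 5
 *	    error = 0
 *	    gh_iflags = 1 6 7
 *	    initialized at: gfs2_glock_nq_init+0x32/0x60 [gfs2]
 */
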
/**
 * dump_inode - print information about an inode
 * @gi: the glock iterator (NULL means dump to the console)
 * @ip: the inode
 *
 * Returns: 0 on success
 */
1821
1822 static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
1823 {
1824         unsigned int x;
1825
1826         print_dbg(gi, "  Inode:\n");
1827         print_dbg(gi, "    num = %llu/%llu\n",
1828                   (unsigned long long)ip->i_no_formal_ino,
1829                   (unsigned long long)ip->i_no_addr);
1830         print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
1831         print_dbg(gi, "    i_flags =");
1832         for (x = 0; x < 32; x++)
1833                 if (test_bit(x, &ip->i_flags))
1834                         print_dbg(gi, " %u", x);
1835         print_dbg(gi, " \n");
1836         return 0;
1837 }
1838
/**
 * dump_glock - print information about a glock
 * @gi: the glock iterator (NULL means dump to the console)
 * @gl: the glock
 *
 * Returns: 0 on success
 */
1846
1847 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1848 {
1849         struct gfs2_holder *gh;
1850         unsigned int x;
1851         int error = -ENOBUFS;
1852         struct task_struct *gl_owner;
1853
1854         spin_lock(&gl->gl_spin);
1855
1856         print_dbg(gi, "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
1857                    (unsigned long long)gl->gl_name.ln_number);
1858         print_dbg(gi, "  gl_flags =");
1859         for (x = 0; x < 32; x++) {
1860                 if (test_bit(x, &gl->gl_flags))
1861                         print_dbg(gi, " %u", x);
1862         }
1863         if (!test_bit(GLF_LOCK, &gl->gl_flags))
1864                 print_dbg(gi, " (unlocked)");
1865         print_dbg(gi, " \n");
1866         print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
1867         print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
1868         if (gl->gl_owner_pid) {
1869                 gl_owner = find_task_by_pid(gl->gl_owner_pid);
1870                 if (gl_owner)
1871                         print_dbg(gi, "  gl_owner = pid %d (%s)\n",
1872                                   gl->gl_owner_pid, gl_owner->comm);
1873                 else
1874                         print_dbg(gi, "  gl_owner = %d (ended)\n",
1875                                   gl->gl_owner_pid);
1876         } else
1877                 print_dbg(gi, "  gl_owner = -1\n");
1878         print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
1879         print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1880         print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1881         print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1882         print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
1883         print_dbg(gi, "  le = %s\n",
1884                    (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
1885         print_dbg(gi, "  reclaim = %s\n",
1886                    (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1887         if (gl->gl_aspace)
1888                 print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1889                            gl->gl_aspace->i_mapping->nrpages);
1890         else
1891                 print_dbg(gi, "  aspace = no\n");
1892         print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
1893         if (gl->gl_req_gh) {
1894                 error = dump_holder(gi, "Request", gl->gl_req_gh);
1895                 if (error)
1896                         goto out;
1897         }
1898         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1899                 error = dump_holder(gi, "Holder", gh);
1900                 if (error)
1901                         goto out;
1902         }
1903         list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1904                 error = dump_holder(gi, "Waiter1", gh);
1905                 if (error)
1906                         goto out;
1907         }
1908         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1909                 error = dump_holder(gi, "Waiter3", gh);
1910                 if (error)
1911                         goto out;
1912         }
1913         if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
		print_dbg(gi, "  Demotion req to state %u (%llu us ago)\n",
1915                           gl->gl_demote_state, (unsigned long long)
1916                           (jiffies - gl->gl_demote_time)*(1000000/HZ));
1917         }
1918         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1919                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1920                         list_empty(&gl->gl_holders)) {
1921                         error = dump_inode(gi, gl->gl_object);
1922                         if (error)
1923                                 goto out;
		} else {
			/* A busy inode is reported, but is not an error:
			   the old "error = -ENOBUFS" here was dead, since
			   error is unconditionally zeroed below */
			print_dbg(gi, "  Inode: busy\n");
		}
1928         }
1929
1930         error = 0;
1931
1932 out:
1933         spin_unlock(&gl->gl_spin);
1934         return error;
1935 }
1936
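/*
 * Example of a complete glock dump, with illustrative values; the
 * Holder/Waiter stanzas come from dump_holder() and the Inode stanza
 * from dump_inode() above:
 *
 *	Glock 0xffff81002f78ca80 (2, 24)
 *	  gl_flags =  (unlocked)
 *	  gl_ref = 2
 *	  gl_state = 3
 *	  gl_owner = -1
 *	  gl_ip = 0
 *	  req_gh = no
 *	  req_bh = no
 *	  lvb_count = 0
 *	  object = yes
 *	  le = no
 *	  reclaim = no
 *	  aspace = 0xffff81002f78cbe8 nrpages = 0
 *	  ail = 0
 *	  Inode:
 *	    num = 24/24
 *	    type = 4
 *	    i_flags =
 */
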
/**
 * gfs2_dump_lockstate - dump the current lockstate to the console
 * @sdp: the filesystem
 *
 * Returns: 0 on success, or the first error returned by dump_glock
 */
1945
1946 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1947 {
1948         struct gfs2_glock *gl;
1949         struct hlist_node *h;
1950         unsigned int x;
1951         int error = 0;
1952
1953         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1954
1955                 read_lock(gl_lock_addr(x));
1956
1957                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1958                         if (gl->gl_sbd != sdp)
1959                                 continue;
1960
1961                         error = dump_glock(NULL, gl);
1962                         if (error)
1963                                 break;
1964                 }
1965
1966                 read_unlock(gl_lock_addr(x));
1967
1968                 if (error)
1969                         break;
1970         }
1971
	return error;
1974 }
1975
1976 int __init gfs2_glock_init(void)
1977 {
	unsigned int i;

	for (i = 0; i < GFS2_GL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
#ifdef GL_HASH_LOCK_SZ
	for (i = 0; i < GL_HASH_LOCK_SZ; i++)
		rwlock_init(&gl_hash_locks[i]);
#endif
1987         return 0;
1988 }
1989
static int gfs2_glock_iter_next(struct glock_iter *gi)
{
	struct gfs2_glock *gl;
	struct hlist_node *next;

	/* Step along the current hash chain, transferring our reference
	   from the old glock to its successor before the old one is put */
	read_lock(gl_lock_addr(gi->hash));
	gl = gi->gl;
	if (gl) {
		/* Check for the end of the chain explicitly rather than
		   relying on hlist_entry(NULL, ...) yielding NULL, which
		   is only true if gl_list is the structure's first member */
		next = gl->gl_list.next;
		gi->gl = next ? hlist_entry(next, struct gfs2_glock,
					    gl_list) : NULL;
		if (gi->gl)
			gfs2_glock_hold(gi->gl);
	}
	read_unlock(gl_lock_addr(gi->hash));
	if (gl)
		gfs2_glock_put(gl);

	/* End of chain: advance to the first entry of the next non-empty
	   bucket, or report that the table is exhausted */
	while (gi->gl == NULL) {
		gi->hash++;
		if (gi->hash >= GFS2_GL_HASH_SIZE)
			return 1;
		read_lock(gl_lock_addr(gi->hash));
		next = gl_hash_table[gi->hash].hb_list.first;
		gi->gl = next ? hlist_entry(next, struct gfs2_glock,
					    gl_list) : NULL;
		if (gi->gl)
			gfs2_glock_hold(gi->gl);
		read_unlock(gl_lock_addr(gi->hash));
	}
	return 0;
}
2019
2020 static void gfs2_glock_iter_free(struct glock_iter *gi)
2021 {
2022         if (gi->gl)
2023                 gfs2_glock_put(gi->gl);
2024         kfree(gi);
2025 }
2026
2027 static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
2028 {
2029         struct glock_iter *gi;
2030
	gi = kmalloc(sizeof(*gi), GFP_KERNEL);
2032         if (!gi)
2033                 return NULL;
2034
2035         gi->sdp = sdp;
2036         gi->hash = 0;
2037         gi->seq = NULL;
2038         memset(gi->string, 0, sizeof(gi->string));
2039
	read_lock(gl_lock_addr(gi->hash));
	/* An empty bucket is checked explicitly: don't rely on structure
	   layout to make hlist_entry() map NULL back to NULL */
	if (gl_hash_table[gi->hash].hb_list.first)
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
	else
		gi->gl = NULL;
	if (gi->gl)
		gfs2_glock_hold(gi->gl);
	read_unlock(gl_lock_addr(gi->hash));
2046
2047         if (!gi->gl && gfs2_glock_iter_next(gi)) {
2048                 gfs2_glock_iter_free(gi);
2049                 return NULL;
2050         }
2051
2052         return gi;
2053 }
2054
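/*
 * Lifecycle sketch for the three helpers above (this mirrors how the
 * seq_file callbacks below end up using them):
 *
 *	struct glock_iter *gi = gfs2_glock_iter_init(sdp);
 *
 *	while (gi) {
 *		dump_glock(gi, gi->gl);
 *		if (gfs2_glock_iter_next(gi)) {
 *			gfs2_glock_iter_free(gi);
 *			gi = NULL;
 *		}
 *	}
 *
 * The iterator always holds a reference on gi->gl, so the glock being
 * dumped cannot be freed underneath the dump.
 */
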
2055 static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2056 {
2057         struct glock_iter *gi;
2058         loff_t n = *pos;
2059
2060         gi = gfs2_glock_iter_init(file->private);
2061         if (!gi)
2062                 return NULL;
2063
	while (n--) {
2065                 if (gfs2_glock_iter_next(gi)) {
2066                         gfs2_glock_iter_free(gi);
2067                         return NULL;
2068                 }
2069         }
2070
2071         return gi;
2072 }
2073
2074 static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2075                                  loff_t *pos)
2076 {
2077         struct glock_iter *gi = iter_ptr;
2078
2079         (*pos)++;
2080
2081         if (gfs2_glock_iter_next(gi)) {
2082                 gfs2_glock_iter_free(gi);
2083                 return NULL;
2084         }
2085
2086         return gi;
2087 }
2088
2089 static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2090 {
2091         struct glock_iter *gi = iter_ptr;
2092         if (gi)
2093                 gfs2_glock_iter_free(gi);
2094 }
2095
static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
{
	struct glock_iter *gi = iter_ptr;

	/* Route print_dbg() output into the seq_file buffer rather than
	   to the console */
	gi->seq = file;
	dump_glock(gi, gi->gl);

	return 0;
}
2105
2106 static struct seq_operations gfs2_glock_seq_ops = {
2107         .start = gfs2_glock_seq_start,
2108         .next  = gfs2_glock_seq_next,
2109         .stop  = gfs2_glock_seq_stop,
2110         .show  = gfs2_glock_seq_show,
2111 };
2112
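/*
 * For reference, the seq_file core drives the operations above roughly
 * like this when the glocks file is read (a sketch of the generic
 * protocol, not gfs2-specific code):
 *
 *	p = start(seq, &pos);           iterator positioned at *pos
 *	while (p) {
 *		show(seq, p);           one glock into the output buffer
 *		p = next(seq, p, &pos); NULL once the table is exhausted
 *	}
 *	stop(seq, p);                   release anything still held
 */
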
2113 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2114 {
2115         struct seq_file *seq;
2116         int ret;
2117
2118         ret = seq_open(file, &gfs2_glock_seq_ops);
2119         if (ret)
2120                 return ret;
2121
2122         seq = file->private_data;
2123         seq->private = inode->i_private;
2124
2125         return 0;
2126 }
2127
2128 static const struct file_operations gfs2_debug_fops = {
2129         .owner   = THIS_MODULE,
2130         .open    = gfs2_debugfs_open,
2131         .read    = seq_read,
2132         .llseek  = seq_lseek,
2133         .release = seq_release
2134 };
2135
2136 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2137 {
2138         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2139         if (!sdp->debugfs_dir)
2140                 return -ENOMEM;
2141         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2142                                                          S_IFREG | S_IRUGO,
2143                                                          sdp->debugfs_dir, sdp,
2144                                                          &gfs2_debug_fops);
2145         if (!sdp->debugfs_dentry_glocks)
2146                 return -ENOMEM;
2147
2148         return 0;
2149 }
2150
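/*
 * With debugfs mounted in its usual place, the above creates
 * /sys/kernel/debug/gfs2/<table_name>/glocks, so the glock state of a
 * mounted filesystem can be inspected with, e.g.:
 *
 *	cat /sys/kernel/debug/gfs2/mycluster:gfs1/glocks
 *
 * ("mycluster:gfs1" is an illustrative lock table name.)
 */
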
2151 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2152 {
2153         if (sdp && sdp->debugfs_dir) {
2154                 if (sdp->debugfs_dentry_glocks) {
2155                         debugfs_remove(sdp->debugfs_dentry_glocks);
2156                         sdp->debugfs_dentry_glocks = NULL;
2157                 }
2158                 debugfs_remove(sdp->debugfs_dir);
2159                 sdp->debugfs_dir = NULL;
2160         }
2161 }
2162
2163 int gfs2_register_debugfs(void)
2164 {
2165         gfs2_root = debugfs_create_dir("gfs2", NULL);
2166         return gfs2_root ? 0 : -ENOMEM;
2167 }
2168
2169 void gfs2_unregister_debugfs(void)
2170 {
2171         debugfs_remove(gfs2_root);
2172         gfs2_root = NULL;
2173 }