[GFS2] Replace revoke structure with bufdata structure
fs/gfs2/glock.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/delay.h>
16 #include <linux/sort.h>
17 #include <linux/jhash.h>
18 #include <linux/kallsyms.h>
19 #include <linux/gfs2_ondisk.h>
20 #include <linux/list.h>
21 #include <linux/lm_interface.h>
22 #include <linux/wait.h>
23 #include <linux/module.h>
24 #include <linux/rwsem.h>
25 #include <asm/uaccess.h>
26 #include <linux/seq_file.h>
27 #include <linux/debugfs.h>
28 #include <linux/kthread.h>
29 #include <linux/freezer.h>
30 #include <linux/workqueue.h>
31 #include <linux/jiffies.h>
32
33 #include "gfs2.h"
34 #include "incore.h"
35 #include "glock.h"
36 #include "glops.h"
37 #include "inode.h"
38 #include "lm.h"
39 #include "lops.h"
40 #include "meta_io.h"
41 #include "quota.h"
42 #include "super.h"
43 #include "util.h"
44
45 struct gfs2_gl_hash_bucket {
46         struct hlist_head hb_list;
47 };
48
49 struct glock_iter {
50         int hash;                     /* hash bucket index         */
51         struct gfs2_sbd *sdp;         /* incore superblock         */
52         struct gfs2_glock *gl;        /* current glock struct      */
53         struct seq_file *seq;         /* sequence file for debugfs */
54         char string[512];             /* scratch space             */
55 };
56
57 typedef void (*glock_examiner) (struct gfs2_glock * gl);
58
59 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
60 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
61 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
62 static void gfs2_glock_drop_th(struct gfs2_glock *gl);
63 static void run_queue(struct gfs2_glock *gl);
64
65 static DECLARE_RWSEM(gfs2_umount_flush_sem);
66 static struct dentry *gfs2_root;
67 static struct task_struct *scand_process;
68 static unsigned int scand_secs = 5;
69 static struct workqueue_struct *glock_workqueue;
70
71 #define GFS2_GL_HASH_SHIFT      15
72 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
73 #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
74
75 static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
77
78 /*
79  * Despite what you might think, the numbers below are not arbitrary :-)
80  * They are taken from the ipv4 routing hash code, which is well tested
81  * and thus should be nearly optimal. Later on we might tweak the numbers
82  * but for now this should be fine.
83  *
84  * The reason for putting the locks in a separate array from the list heads
85  * is that we can have fewer locks than list heads and save memory. We use
86  * the same hash function for both, but with a different hash mask.
87  */
88 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
89         defined(CONFIG_PROVE_LOCKING)
90
91 #ifdef CONFIG_LOCKDEP
92 # define GL_HASH_LOCK_SZ        256
93 #else
94 # if NR_CPUS >= 32
95 #  define GL_HASH_LOCK_SZ       4096
96 # elif NR_CPUS >= 16
97 #  define GL_HASH_LOCK_SZ       2048
98 # elif NR_CPUS >= 8
99 #  define GL_HASH_LOCK_SZ       1024
100 # elif NR_CPUS >= 4
101 #  define GL_HASH_LOCK_SZ       512
102 # else
103 #  define GL_HASH_LOCK_SZ       256
104 # endif
105 #endif
106
107 /* We never want more locks than chains */
108 #if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
109 # undef GL_HASH_LOCK_SZ
110 # define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
111 #endif
112
113 static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
114
115 static inline rwlock_t *gl_lock_addr(unsigned int x)
116 {
117         return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
118 }
119 #else /* not SMP, so no spinlocks required */
120 static inline rwlock_t *gl_lock_addr(unsigned int x)
121 {
122         return NULL;
123 }
124 #endif
125
126 /**
127  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
128  * @actual: the current state of the lock
129  * @requested: the lock state that was requested by the caller
130  * @flags: the modifier flags passed in by the caller
131  *
132  * Returns: 1 if the locks are compatible, 0 otherwise
133  */
134
135 static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
136                                    int flags)
137 {
138         if (actual == requested)
139                 return 1;
140
141         if (flags & GL_EXACT)
142                 return 0;
143
144         if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
145                 return 1;
146
147         if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
148                 return 1;
149
150         return 0;
151 }
152
153 /**
154  * gl_hash() - Turn glock number into hash bucket number
155  * @sdp: The GFS2 superblock
     * @name: The lock name
156  *
157  * Returns: The number of the corresponding hash bucket
158  */
159
160 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
161                             const struct lm_lockname *name)
162 {
163         unsigned int h;
164
165         h = jhash(&name->ln_number, sizeof(u64), 0);
166         h = jhash(&name->ln_type, sizeof(unsigned int), h);
167         h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
168         h &= GFS2_GL_HASH_MASK;
169
170         return h;
171 }
172
173 /**
174  * glock_free() - Perform a few checks and then release struct gfs2_glock
175  * @gl: The glock to release
176  *
177  * Also calls lock module to release its internal structure for this glock.
178  *
179  */
180
181 static void glock_free(struct gfs2_glock *gl)
182 {
183         struct gfs2_sbd *sdp = gl->gl_sbd;
184         struct inode *aspace = gl->gl_aspace;
185
186         gfs2_lm_put_lock(sdp, gl->gl_lock);
187
188         if (aspace)
189                 gfs2_aspace_put(aspace);
190
191         kmem_cache_free(gfs2_glock_cachep, gl);
192 }
193
194 /**
195  * gfs2_glock_hold() - increment reference count on glock
196  * @gl: The glock to hold
197  *
198  */
199
200 void gfs2_glock_hold(struct gfs2_glock *gl)
201 {
202         atomic_inc(&gl->gl_ref);
203 }
204
205 /**
206  * gfs2_glock_put() - Decrement reference count on glock
207  * @gl: The glock to put
208  *
209  */
210
211 int gfs2_glock_put(struct gfs2_glock *gl)
212 {
213         int rv = 0;
214         struct gfs2_sbd *sdp = gl->gl_sbd;
215
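            /* Take the bucket lock before dropping the last reference, so that
               removing the glock from its hash chain is atomic with the count
               reaching zero. */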
216         write_lock(gl_lock_addr(gl->gl_hash));
217         if (atomic_dec_and_test(&gl->gl_ref)) {
218                 hlist_del(&gl->gl_list);
219                 write_unlock(gl_lock_addr(gl->gl_hash));
220                 BUG_ON(spin_is_locked(&gl->gl_spin));
221                 gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
222                 gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
223                 gfs2_assert(sdp, list_empty(&gl->gl_holders));
224                 gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
225                 gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
226                 glock_free(gl);
227                 rv = 1;
228                 goto out;
229         }
230         write_unlock(gl_lock_addr(gl->gl_hash));
231 out:
232         return rv;
233 }
234
235 /**
236  * search_bucket() - Find struct gfs2_glock by lock number
237  * @hash: the index of the bucket to search
     * @sdp: the filesystem
238  * @name: The lock name
239  *
240  * Returns: NULL, or the struct gfs2_glock with the requested number
241  */
242
243 static struct gfs2_glock *search_bucket(unsigned int hash,
244                                         const struct gfs2_sbd *sdp,
245                                         const struct lm_lockname *name)
246 {
247         struct gfs2_glock *gl;
248         struct hlist_node *h;
249
250         hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
251                 if (!lm_name_equal(&gl->gl_name, name))
252                         continue;
253                 if (gl->gl_sbd != sdp)
254                         continue;
255
256                 atomic_inc(&gl->gl_ref);
257
258                 return gl;
259         }
260
261         return NULL;
262 }
263
264 /**
265  * gfs2_glock_find() - Find glock by lock number
266  * @sdp: The GFS2 superblock
267  * @name: The lock name
268  *
269  * Returns: NULL, or the struct gfs2_glock with the requested number
270  */
271
272 static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
273                                           const struct lm_lockname *name)
274 {
275         unsigned int hash = gl_hash(sdp, name);
276         struct gfs2_glock *gl;
277
278         read_lock(gl_lock_addr(hash));
279         gl = search_bucket(hash, sdp, name);
280         read_unlock(gl_lock_addr(hash));
281
282         return gl;
283 }
284
285 static void glock_work_func(struct work_struct *work)
286 {
287         struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
288
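            /* Deferred work for a glock: turn any pending (delayed) demote into
               a real demote request now that the delay has expired, then run
               the request queue. */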
289         spin_lock(&gl->gl_spin);
290         if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
291                 set_bit(GLF_DEMOTE, &gl->gl_flags);
292         run_queue(gl);
293         spin_unlock(&gl->gl_spin);
294         gfs2_glock_put(gl);
295 }
296
297 /**
298  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
299  * @sdp: The GFS2 superblock
300  * @number: the lock number
301  * @glops: The glock_operations to use
302  * @create: If 0, don't create the glock if it doesn't exist
303  * @glp: the glock is returned here
304  *
305  * This does not lock a glock, just finds/creates structures for one.
306  *
307  * Returns: errno
308  */
309
310 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
311                    const struct gfs2_glock_operations *glops, int create,
312                    struct gfs2_glock **glp)
313 {
314         struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
315         struct gfs2_glock *gl, *tmp;
316         unsigned int hash = gl_hash(sdp, &name);
317         int error;
318
319         read_lock(gl_lock_addr(hash));
320         gl = search_bucket(hash, sdp, &name);
321         read_unlock(gl_lock_addr(hash));
322
323         if (gl || !create) {
324                 *glp = gl;
325                 return 0;
326         }
327
328         gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
329         if (!gl)
330                 return -ENOMEM;
331
332         gl->gl_flags = 0;
333         gl->gl_name = name;
334         atomic_set(&gl->gl_ref, 1);
335         gl->gl_state = LM_ST_UNLOCKED;
336         gl->gl_demote_state = LM_ST_EXCLUSIVE;
337         gl->gl_hash = hash;
338         gl->gl_owner_pid = 0;
339         gl->gl_ip = 0;
340         gl->gl_ops = glops;
341         gl->gl_req_gh = NULL;
342         gl->gl_req_bh = NULL;
343         gl->gl_vn = 0;
344         gl->gl_stamp = jiffies;
345         gl->gl_tchange = jiffies;
346         gl->gl_object = NULL;
347         gl->gl_sbd = sdp;
348         gl->gl_aspace = NULL;
349         lops_init_le(&gl->gl_le, &gfs2_glock_lops);
350         INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
351
352         /* If this glock protects actual on-disk data or metadata blocks,
353            create a VFS inode to manage the pages/buffers holding them. */
354         if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
355                 gl->gl_aspace = gfs2_aspace_get(sdp);
356                 if (!gl->gl_aspace) {
357                         error = -ENOMEM;
358                         goto fail;
359                 }
360         }
361
362         error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
363         if (error)
364                 goto fail_aspace;
365
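            /* We dropped the bucket lock to allocate, so another thread may have
               created and inserted the same glock in the meantime.  Check again
               under the write lock; if it is there, use it and free ours. */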
366         write_lock(gl_lock_addr(hash));
367         tmp = search_bucket(hash, sdp, &name);
368         if (tmp) {
369                 write_unlock(gl_lock_addr(hash));
370                 glock_free(gl);
371                 gl = tmp;
372         } else {
373                 hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
374                 write_unlock(gl_lock_addr(hash));
375         }
376
377         *glp = gl;
378
379         return 0;
380
381 fail_aspace:
382         if (gl->gl_aspace)
383                 gfs2_aspace_put(gl->gl_aspace);
384 fail:
385         kmem_cache_free(gfs2_glock_cachep, gl);
386         return error;
387 }
388
389 /**
390  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
391  * @gl: the glock
392  * @state: the state we're requesting
393  * @flags: the modifier flags
394  * @gh: the holder structure
395  *
396  */
397
398 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
399                       struct gfs2_holder *gh)
400 {
401         INIT_LIST_HEAD(&gh->gh_list);
402         gh->gh_gl = gl;
403         gh->gh_ip = (unsigned long)__builtin_return_address(0);
404         gh->gh_owner_pid = current->pid;
405         gh->gh_state = state;
406         gh->gh_flags = flags;
407         gh->gh_error = 0;
408         gh->gh_iflags = 0;
409         gfs2_glock_hold(gl);
410 }
411
412 /**
413  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
414  * @state: the state we're requesting
415  * @flags: the modifier flags
416  * @gh: the holder structure
417  *
418  * Don't mess with the glock.
419  *
420  */
421
422 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
423 {
424         gh->gh_state = state;
425         gh->gh_flags = flags;
426         gh->gh_iflags = 0;
427         gh->gh_ip = (unsigned long)__builtin_return_address(0);
428 }
429
430 /**
431  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
432  * @gh: the holder structure
433  *
434  */
435
436 void gfs2_holder_uninit(struct gfs2_holder *gh)
437 {
438         gfs2_glock_put(gh->gh_gl);
439         gh->gh_gl = NULL;
440         gh->gh_ip = 0;
441 }
442
443 static void gfs2_holder_wake(struct gfs2_holder *gh)
444 {
445         clear_bit(HIF_WAIT, &gh->gh_iflags);
446         smp_mb__after_clear_bit();
447         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
448 }
449
450 static int just_schedule(void *word)
451 {
452         schedule();
453         return 0;
454 }
455
456 static void wait_on_holder(struct gfs2_holder *gh)
457 {
458         might_sleep();
459         wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
460 }
461
462 static void gfs2_demote_wake(struct gfs2_glock *gl)
463 {
464         BUG_ON(!spin_is_locked(&gl->gl_spin));
465         gl->gl_demote_state = LM_ST_EXCLUSIVE;
466         clear_bit(GLF_DEMOTE, &gl->gl_flags);
467         smp_mb__after_clear_bit();
468         wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
469 }
470
471 static void wait_on_demote(struct gfs2_glock *gl)
472 {
473         might_sleep();
474         wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
475 }
476
477 /**
478  * rq_mutex - process a mutex request in the queue
479  * @gh: the glock holder
480  *
481  * Returns: 1 if the queue is blocked
482  */
483
484 static int rq_mutex(struct gfs2_holder *gh)
485 {
486         struct gfs2_glock *gl = gh->gh_gl;
487
488         list_del_init(&gh->gh_list);
489         /*  gh->gh_error never examined.  */
490         set_bit(GLF_LOCK, &gl->gl_flags);
491         clear_bit(HIF_WAIT, &gh->gh_iflags);
492         smp_mb();
493         wake_up_bit(&gh->gh_iflags, HIF_WAIT);
494
495         return 1;
496 }
497
498 /**
499  * rq_promote - process a promote request in the queue
500  * @gh: the glock holder
501  *
502  * Acquire a new inter-node lock, or change a lock state to more restrictive.
503  *
504  * Returns: 1 if the queue is blocked
505  */
506
507 static int rq_promote(struct gfs2_holder *gh)
508 {
509         struct gfs2_glock *gl = gh->gh_gl;
510         struct gfs2_sbd *sdp = gl->gl_sbd;
511
512         if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
513                 if (list_empty(&gl->gl_holders)) {
514                         gl->gl_req_gh = gh;
515                         set_bit(GLF_LOCK, &gl->gl_flags);
516                         spin_unlock(&gl->gl_spin);
517
518                         if (atomic_read(&sdp->sd_reclaim_count) >
519                             gfs2_tune_get(sdp, gt_reclaim_limit) &&
520                             !(gh->gh_flags & LM_FLAG_PRIORITY)) {
521                                 gfs2_reclaim_glock(sdp);
522                                 gfs2_reclaim_glock(sdp);
523                         }
524
525                         gfs2_glock_xmote_th(gh->gh_gl, gh);
526                         spin_lock(&gl->gl_spin);
527                 }
528                 return 1;
529         }
530
531         if (list_empty(&gl->gl_holders)) {
532                 set_bit(HIF_FIRST, &gh->gh_iflags);
533                 set_bit(GLF_LOCK, &gl->gl_flags);
534         } else {
535                 struct gfs2_holder *next_gh;
536                 if (gh->gh_state == LM_ST_EXCLUSIVE)
537                         return 1;
538                 next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
539                                      gh_list);
540                 if (next_gh->gh_state == LM_ST_EXCLUSIVE)
541                          return 1;
542         }
543
544         list_move_tail(&gh->gh_list, &gl->gl_holders);
545         gh->gh_error = 0;
546         set_bit(HIF_HOLDER, &gh->gh_iflags);
547
548         gfs2_holder_wake(gh);
549
550         return 0;
551 }
552
553 /**
554  * rq_demote - process a demote request in the queue
555  * @gl: the glock
556  *
557  * Returns: 1 if the queue is blocked
558  */
559
560 static int rq_demote(struct gfs2_glock *gl)
561 {
562         if (!list_empty(&gl->gl_holders))
563                 return 1;
564
565         if (gl->gl_state == gl->gl_demote_state ||
566             gl->gl_state == LM_ST_UNLOCKED) {
567                 gfs2_demote_wake(gl);
568                 return 0;
569         }
570         set_bit(GLF_LOCK, &gl->gl_flags);
571         if (gl->gl_demote_state == LM_ST_UNLOCKED ||
572             gl->gl_state != LM_ST_EXCLUSIVE) {
573                 spin_unlock(&gl->gl_spin);
574                 gfs2_glock_drop_th(gl);
575         } else {
576                 spin_unlock(&gl->gl_spin);
577                 gfs2_glock_xmote_th(gl, NULL);
578         }
579         spin_lock(&gl->gl_spin);
580
581         return 0;
582 }
583
584 /**
585  * run_queue - process holder structures on a glock
586  * @gl: the glock
587  *
588  */
589 static void run_queue(struct gfs2_glock *gl)
590 {
591         struct gfs2_holder *gh;
592         int blocked = 1;
593
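            /* Requests are serviced in strict priority order: glmutex requests
               on gl_waiters1 first, then a pending demote, then promote requests
               on gl_waiters3.  Stop as soon as one of them blocks. */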
594         for (;;) {
595                 if (test_bit(GLF_LOCK, &gl->gl_flags))
596                         break;
597
598                 if (!list_empty(&gl->gl_waiters1)) {
599                         gh = list_entry(gl->gl_waiters1.next,
600                                         struct gfs2_holder, gh_list);
601
602                         if (test_bit(HIF_MUTEX, &gh->gh_iflags))
603                                 blocked = rq_mutex(gh);
604                         else
605                                 gfs2_assert_warn(gl->gl_sbd, 0);
606
607                 } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
608                         blocked = rq_demote(gl);
609                 } else if (!list_empty(&gl->gl_waiters3)) {
610                         gh = list_entry(gl->gl_waiters3.next,
611                                         struct gfs2_holder, gh_list);
612
613                         if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
614                                 blocked = rq_promote(gh);
615                         else
616                                 gfs2_assert_warn(gl->gl_sbd, 0);
617
618                 } else
619                         break;
620
621                 if (blocked)
622                         break;
623         }
624 }
625
626 /**
627  * gfs2_glmutex_lock - acquire a local lock on a glock
628  * @gl: the glock
629  *
630  * Gives caller exclusive access to manipulate a glock structure.
631  */
632
633 static void gfs2_glmutex_lock(struct gfs2_glock *gl)
634 {
635         struct gfs2_holder gh;
636
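            /* Queue a dummy holder marked HIF_MUTEX.  If GLF_LOCK is free we take
               it immediately; otherwise rq_mutex() clears HIF_WAIT when the mutex
               is handed to us, and wait_on_holder() sleeps until then. */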
637         gfs2_holder_init(gl, 0, 0, &gh);
638         set_bit(HIF_MUTEX, &gh.gh_iflags);
639         if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
640                 BUG();
641
642         spin_lock(&gl->gl_spin);
643         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
644                 list_add_tail(&gh.gh_list, &gl->gl_waiters1);
645         } else {
646                 gl->gl_owner_pid = current->pid;
647                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
648                 clear_bit(HIF_WAIT, &gh.gh_iflags);
649                 smp_mb();
650                 wake_up_bit(&gh.gh_iflags, HIF_WAIT);
651         }
652         spin_unlock(&gl->gl_spin);
653
654         wait_on_holder(&gh);
655         gfs2_holder_uninit(&gh);
656 }
657
658 /**
659  * gfs2_glmutex_trylock - try to acquire a local lock on a glock
660  * @gl: the glock
661  *
662  * Returns: 1 if the glock is acquired
663  */
664
665 static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
666 {
667         int acquired = 1;
668
669         spin_lock(&gl->gl_spin);
670         if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
671                 acquired = 0;
672         } else {
673                 gl->gl_owner_pid = current->pid;
674                 gl->gl_ip = (unsigned long)__builtin_return_address(0);
675         }
676         spin_unlock(&gl->gl_spin);
677
678         return acquired;
679 }
680
681 /**
682  * gfs2_glmutex_unlock - release a local lock on a glock
683  * @gl: the glock
684  *
685  */
686
687 static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
688 {
689         spin_lock(&gl->gl_spin);
690         clear_bit(GLF_LOCK, &gl->gl_flags);
691         gl->gl_owner_pid = 0;
692         gl->gl_ip = 0;
693         run_queue(gl);
694         BUG_ON(!spin_is_locked(&gl->gl_spin));
695         spin_unlock(&gl->gl_spin);
696 }
697
698 /**
699  * handle_callback - process a demote request
700  * @gl: the glock
701  * @state: the state the caller wants us to change to
     * @remote: non-zero if the request came from another node via the lock module
     * @delay: number of jiffies to wait before acting on the demote
702  *
703  * There are only two requests that we are going to see in actual
704  * practice: LM_ST_SHARED and LM_ST_UNLOCKED
705  */
706
707 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
708                             int remote, unsigned long delay)
709 {
710         int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
711
712         spin_lock(&gl->gl_spin);
713         set_bit(bit, &gl->gl_flags);
714         if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
715                 gl->gl_demote_state = state;
716                 gl->gl_demote_time = jiffies;
717                 if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
718                     gl->gl_object) {
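                            /* A remote demote of an iopen glock means another node
                               wants the inode; prune our unused dentries so the
                               inode, and with it this glock, can be released. */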
719                         struct inode *inode = igrab(gl->gl_object);
720                         spin_unlock(&gl->gl_spin);
721                         if (inode) {
722                                 d_prune_aliases(inode);
723                                 iput(inode);
724                         }
725                         return;
726                 }
727         } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
728                         gl->gl_demote_state != state) {
729                 gl->gl_demote_state = LM_ST_UNLOCKED;
730         }
731         spin_unlock(&gl->gl_spin);
732 }
733
734 /**
735  * state_change - record that the glock is now in a different state
736  * @gl: the glock
737  * @new_state: the new state
738  *
739  */
740
741 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
742 {
743         int held1, held2;
744
745         held1 = (gl->gl_state != LM_ST_UNLOCKED);
746         held2 = (new_state != LM_ST_UNLOCKED);
747
748         if (held1 != held2) {
749                 if (held2)
750                         gfs2_glock_hold(gl);
751                 else
752                         gfs2_glock_put(gl);
753         }
754
755         gl->gl_state = new_state;
756         gl->gl_tchange = jiffies;
757 }
758
759 /**
760  * xmote_bh - Called after the lock module is done acquiring a lock
761  * @gl: The glock in question
762  * @ret: the int returned from the lock module
763  *
764  */
765
766 static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
767 {
768         struct gfs2_sbd *sdp = gl->gl_sbd;
769         const struct gfs2_glock_operations *glops = gl->gl_ops;
770         struct gfs2_holder *gh = gl->gl_req_gh;
771         int prev_state = gl->gl_state;
772         int op_done = 1;
773
774         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
775         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
776         gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
777
778         state_change(gl, ret & LM_OUT_ST_MASK);
779
780         if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
781                 if (glops->go_inval)
782                         glops->go_inval(gl, DIO_METADATA);
783         } else if (gl->gl_state == LM_ST_DEFERRED) {
784                 /* We might not want to do this here.
785                    Look at moving to the inode glops. */
786                 if (glops->go_inval)
787                         glops->go_inval(gl, 0);
788         }
789
790         /*  Deal with each possible exit condition  */
791
792         if (!gh) {
793                 gl->gl_stamp = jiffies;
794                 if (ret & LM_OUT_CANCELED) {
795                         op_done = 0;
796                 } else {
797                         spin_lock(&gl->gl_spin);
798                         if (gl->gl_state != gl->gl_demote_state) {
799                                 gl->gl_req_bh = NULL;
800                                 spin_unlock(&gl->gl_spin);
801                                 gfs2_glock_drop_th(gl);
802                                 gfs2_glock_put(gl);
803                                 return;
804                         }
805                         gfs2_demote_wake(gl);
806                         spin_unlock(&gl->gl_spin);
807                 }
808         } else {
809                 spin_lock(&gl->gl_spin);
810                 list_del_init(&gh->gh_list);
811                 gh->gh_error = -EIO;
812                 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) 
813                         goto out;
814                 gh->gh_error = GLR_CANCELED;
815                 if (ret & LM_OUT_CANCELED) 
816                         goto out;
817                 if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
818                         list_add_tail(&gh->gh_list, &gl->gl_holders);
819                         gh->gh_error = 0;
820                         set_bit(HIF_HOLDER, &gh->gh_iflags);
821                         set_bit(HIF_FIRST, &gh->gh_iflags);
822                         op_done = 0;
823                         goto out;
824                 }
825                 gh->gh_error = GLR_TRYFAILED;
826                 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
827                         goto out;
828                 gh->gh_error = -EINVAL;
829                 if (gfs2_assert_withdraw(sdp, 0) == -1)
830                         fs_err(sdp, "ret = 0x%.8X\n", ret);
831 out:
832                 spin_unlock(&gl->gl_spin);
833         }
834
835         if (glops->go_xmote_bh)
836                 glops->go_xmote_bh(gl);
837
838         if (op_done) {
839                 spin_lock(&gl->gl_spin);
840                 gl->gl_req_gh = NULL;
841                 gl->gl_req_bh = NULL;
842                 clear_bit(GLF_LOCK, &gl->gl_flags);
843                 spin_unlock(&gl->gl_spin);
844         }
845
846         gfs2_glock_put(gl);
847
848         if (gh)
849                 gfs2_holder_wake(gh);
850 }
851
852 /**
853  * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
854  * @gl: The glock in question
855  * @gh: the holder supplying the requested state and flags, or NULL when
856  *      demoting to gl->gl_demote_state
857  *
858  */
859
860 static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
861 {
862         struct gfs2_sbd *sdp = gl->gl_sbd;
863         int flags = gh ? gh->gh_flags : 0;
864         unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
865         const struct gfs2_glock_operations *glops = gl->gl_ops;
866         int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
867                                  LM_FLAG_NOEXP | LM_FLAG_ANY |
868                                  LM_FLAG_PRIORITY);
869         unsigned int lck_ret;
870
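            /* With a holder this is a promote to gh->gh_state; without one it is
               a demote to gl->gl_demote_state.  Demotes to LM_ST_UNLOCKED go
               through gfs2_glock_drop_th() instead, hence the assertion below. */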
871         if (glops->go_xmote_th)
872                 glops->go_xmote_th(gl);
873
874         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
875         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
876         gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
877         gfs2_assert_warn(sdp, state != gl->gl_state);
878
879         gfs2_glock_hold(gl);
880         gl->gl_req_bh = xmote_bh;
881
882         lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
883
884         if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
885                 return;
886
887         if (lck_ret & LM_OUT_ASYNC)
888                 gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
889         else
890                 xmote_bh(gl, lck_ret);
891 }
892
893 /**
894  * drop_bh - Called after a lock module unlock completes
895  * @gl: the glock
896  * @ret: the return status
897  *
898  * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
899  * Doesn't drop the reference on the glock the top half took out
900  *
901  */
902
903 static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
904 {
905         struct gfs2_sbd *sdp = gl->gl_sbd;
906         const struct gfs2_glock_operations *glops = gl->gl_ops;
907         struct gfs2_holder *gh = gl->gl_req_gh;
908
909         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
910         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
911         gfs2_assert_warn(sdp, !ret);
912
913         state_change(gl, LM_ST_UNLOCKED);
914
915         if (glops->go_inval)
916                 glops->go_inval(gl, DIO_METADATA);
917
918         if (gh) {
919                 spin_lock(&gl->gl_spin);
920                 list_del_init(&gh->gh_list);
921                 gh->gh_error = 0;
922                 spin_unlock(&gl->gl_spin);
923         }
924
925         spin_lock(&gl->gl_spin);
926         gfs2_demote_wake(gl);
927         gl->gl_req_gh = NULL;
928         gl->gl_req_bh = NULL;
929         clear_bit(GLF_LOCK, &gl->gl_flags);
930         spin_unlock(&gl->gl_spin);
931
932         gfs2_glock_put(gl);
933
934         if (gh)
935                 gfs2_holder_wake(gh);
936 }
937
938 /**
939  * gfs2_glock_drop_th - call into the lock module to unlock a lock
940  * @gl: the glock
941  *
942  */
943
944 static void gfs2_glock_drop_th(struct gfs2_glock *gl)
945 {
946         struct gfs2_sbd *sdp = gl->gl_sbd;
947         const struct gfs2_glock_operations *glops = gl->gl_ops;
948         unsigned int ret;
949
950         if (glops->go_drop_th)
951                 glops->go_drop_th(gl);
952
953         gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
954         gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
955         gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
956
957         gfs2_glock_hold(gl);
958         gl->gl_req_bh = drop_bh;
959
960         ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
961
962         if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
963                 return;
964
965         if (!ret)
966                 drop_bh(gl, ret);
967         else
968                 gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
969 }
970
971 /**
972  * do_cancels - cancel requests for locks stuck waiting on an expire flag
973  * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
974  *
975  * Don't cancel GL_NOCANCEL requests.
976  */
977
978 static void do_cancels(struct gfs2_holder *gh)
979 {
980         struct gfs2_glock *gl = gh->gh_gl;
981
982         spin_lock(&gl->gl_spin);
983
984         while (gl->gl_req_gh != gh &&
985                !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
986                !list_empty(&gh->gh_list)) {
987                 if (gl->gl_req_bh && !(gl->gl_req_gh &&
988                                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
989                         spin_unlock(&gl->gl_spin);
990                         gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
991                         msleep(100);
992                         spin_lock(&gl->gl_spin);
993                 } else {
994                         spin_unlock(&gl->gl_spin);
995                         msleep(100);
996                         spin_lock(&gl->gl_spin);
997                 }
998         }
999
1000         spin_unlock(&gl->gl_spin);
1001 }
1002
1003 /**
1004  * glock_wait_internal - wait on a glock acquisition
1005  * @gh: the glock holder
1006  *
1007  * Returns: 0 on success
1008  */
1009
1010 static int glock_wait_internal(struct gfs2_holder *gh)
1011 {
1012         struct gfs2_glock *gl = gh->gh_gl;
1013         struct gfs2_sbd *sdp = gl->gl_sbd;
1014         const struct gfs2_glock_operations *glops = gl->gl_ops;
1015
1016         if (test_bit(HIF_ABORTED, &gh->gh_iflags))
1017                 return -EIO;
1018
1019         if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
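                     /* A "try" request that is still sitting on the wait queue
                        (the lock module was never asked) fails immediately with
                        GLR_TRYFAILED instead of sleeping. */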
1020                 spin_lock(&gl->gl_spin);
1021                 if (gl->gl_req_gh != gh &&
1022                     !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
1023                     !list_empty(&gh->gh_list)) {
1024                         list_del_init(&gh->gh_list);
1025                         gh->gh_error = GLR_TRYFAILED;
1026                         run_queue(gl);
1027                         spin_unlock(&gl->gl_spin);
1028                         return gh->gh_error;
1029                 }
1030                 spin_unlock(&gl->gl_spin);
1031         }
1032
1033         if (gh->gh_flags & LM_FLAG_PRIORITY)
1034                 do_cancels(gh);
1035
1036         wait_on_holder(gh);
1037         if (gh->gh_error)
1038                 return gh->gh_error;
1039
1040         gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
1041         gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
1042                                                    gh->gh_flags));
1043
1044         if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
1045                 gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1046
1047                 if (glops->go_lock) {
1048                         gh->gh_error = glops->go_lock(gh);
1049                         if (gh->gh_error) {
1050                                 spin_lock(&gl->gl_spin);
1051                                 list_del_init(&gh->gh_list);
1052                                 spin_unlock(&gl->gl_spin);
1053                         }
1054                 }
1055
1056                 spin_lock(&gl->gl_spin);
1057                 gl->gl_req_gh = NULL;
1058                 gl->gl_req_bh = NULL;
1059                 clear_bit(GLF_LOCK, &gl->gl_flags);
1060                 run_queue(gl);
1061                 spin_unlock(&gl->gl_spin);
1062         }
1063
1064         return gh->gh_error;
1065 }
1066
1067 static inline struct gfs2_holder *
1068 find_holder_by_owner(struct list_head *head, pid_t pid)
1069 {
1070         struct gfs2_holder *gh;
1071
1072         list_for_each_entry(gh, head, gh_list) {
1073                 if (gh->gh_owner_pid == pid)
1074                         return gh;
1075         }
1076
1077         return NULL;
1078 }
1079
1080 static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
1081 {
1082         va_list args;
1083
1084         va_start(args, fmt);
1085         if (gi) {
1086                 vsnprintf(gi->string, sizeof(gi->string), fmt, args);
1087                 seq_printf(gi->seq, "%s", gi->string);
1088         }
1089         else
1090                 vprintk(fmt, args);
1091         va_end(args);
1092 }
1093
1094 /**
1095  * add_to_queue - Add a holder to the wait queue (but look for recursion)
1096  * @gh: the holder structure to add
1097  *
1098  */
1099
1100 static void add_to_queue(struct gfs2_holder *gh)
1101 {
1102         struct gfs2_glock *gl = gh->gh_gl;
1103         struct gfs2_holder *existing;
1104
1105         BUG_ON(!gh->gh_owner_pid);
1106         if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1107                 BUG();
1108
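             /* A process may not queue a holder for a glock it already holds or
                is already waiting for; that would deadlock against itself, so
                dump both holders and BUG. */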
1109         existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner_pid);
1110         if (existing) {
1111                 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1112                 printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
1113                 printk(KERN_INFO "lock type : %d lock state : %d\n",
1114                                 existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
1115                 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1116                 printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
1117                 printk(KERN_INFO "lock type : %d lock state : %d\n",
1118                                 gl->gl_name.ln_type, gl->gl_state);
1119                 BUG();
1120         }
1121
1122         existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner_pid);
1123         if (existing) {
1124                 print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1125                 print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1126                 BUG();
1127         }
1128
1129         if (gh->gh_flags & LM_FLAG_PRIORITY)
1130                 list_add(&gh->gh_list, &gl->gl_waiters3);
1131         else
1132                 list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1133 }
1134
1135 /**
1136  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1137  * @gh: the holder structure
1138  *
1139  * if (gh->gh_flags & GL_ASYNC), this never returns an error
1140  *
1141  * Returns: 0, GLR_TRYFAILED, or errno on failure
1142  */
1143
1144 int gfs2_glock_nq(struct gfs2_holder *gh)
1145 {
1146         struct gfs2_glock *gl = gh->gh_gl;
1147         struct gfs2_sbd *sdp = gl->gl_sbd;
1148         int error = 0;
1149
1150 restart:
1151         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1152                 set_bit(HIF_ABORTED, &gh->gh_iflags);
1153                 return -EIO;
1154         }
1155
1156         set_bit(HIF_PROMOTE, &gh->gh_iflags);
1157
1158         spin_lock(&gl->gl_spin);
1159         add_to_queue(gh);
1160         run_queue(gl);
1161         spin_unlock(&gl->gl_spin);
1162
1163         if (!(gh->gh_flags & GL_ASYNC)) {
1164                 error = glock_wait_internal(gh);
1165                 if (error == GLR_CANCELED) {
1166                         msleep(100);
1167                         goto restart;
1168                 }
1169         }
1170
1171         return error;
1172 }
1173
1174 /**
1175  * gfs2_glock_poll - poll to see if an async request has been completed
1176  * @gh: the holder
1177  *
1178  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1179  */
1180
1181 int gfs2_glock_poll(struct gfs2_holder *gh)
1182 {
1183         struct gfs2_glock *gl = gh->gh_gl;
1184         int ready = 0;
1185
1186         spin_lock(&gl->gl_spin);
1187
1188         if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1189                 ready = 1;
1190         else if (list_empty(&gh->gh_list)) {
1191                 if (gh->gh_error == GLR_CANCELED) {
1192                         spin_unlock(&gl->gl_spin);
1193                         msleep(100);
1194                         if (gfs2_glock_nq(gh))
1195                                 return 1;
1196                         return 0;
1197                 } else
1198                         ready = 1;
1199         }
1200
1201         spin_unlock(&gl->gl_spin);
1202
1203         return ready;
1204 }
1205
1206 /**
1207  * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
1208  * @gh: the holder structure
1209  *
1210  * Returns: 0, GLR_TRYFAILED, or errno on failure
1211  */
1212
1213 int gfs2_glock_wait(struct gfs2_holder *gh)
1214 {
1215         int error;
1216
1217         error = glock_wait_internal(gh);
1218         if (error == GLR_CANCELED) {
1219                 msleep(100);
1220                 gh->gh_flags &= ~GL_ASYNC;
1221                 error = gfs2_glock_nq(gh);
1222         }
1223
1224         return error;
1225 }
1226
1227 /**
1228  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1229  * @gh: the glock holder
1230  *
1231  */
1232
1233 void gfs2_glock_dq(struct gfs2_holder *gh)
1234 {
1235         struct gfs2_glock *gl = gh->gh_gl;
1236         const struct gfs2_glock_operations *glops = gl->gl_ops;
1237         unsigned delay = 0;
1238
1239         if (gh->gh_flags & GL_NOCACHE)
1240                 handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1241
1242         gfs2_glmutex_lock(gl);
1243
1244         spin_lock(&gl->gl_spin);
1245         list_del_init(&gh->gh_list);
1246
1247         if (list_empty(&gl->gl_holders)) {
1248                 spin_unlock(&gl->gl_spin);
1249
1250                 if (glops->go_unlock)
1251                         glops->go_unlock(gh);
1252
1253                 spin_lock(&gl->gl_spin);
1254                 gl->gl_stamp = jiffies;
1255         }
1256
1257         clear_bit(GLF_LOCK, &gl->gl_flags);
1258         spin_unlock(&gl->gl_spin);
1259
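             /* Hand the glock to the workqueue so it can process the queue (and
                any deferred demote) after the hold-time delay.  The queued work
                owns the reference taken here; drop it if the work was already
                queued. */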
1260         gfs2_glock_hold(gl);
1261         if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1262             !test_bit(GLF_DEMOTE, &gl->gl_flags))
1263                 delay = gl->gl_ops->go_min_hold_time;
1264         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1265                 gfs2_glock_put(gl);
1266 }
1267
1268 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1269 {
1270         struct gfs2_glock *gl = gh->gh_gl;
1271         gfs2_glock_dq(gh);
1272         wait_on_demote(gl);
1273 }
1274
1275 /**
1276  * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
1277  * @gh: the holder structure
1278  *
1279  */
1280
1281 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1282 {
1283         gfs2_glock_dq(gh);
1284         gfs2_holder_uninit(gh);
1285 }
1286
1287 /**
1288  * gfs2_glock_nq_num - acquire a glock based on lock number
1289  * @sdp: the filesystem
1290  * @number: the lock number
1291  * @glops: the glock operations for the type of glock
1292  * @state: the state to acquire the glock in
1293  * @flags: modifier flags for the acquisition
1294  * @gh: the struct gfs2_holder
1295  *
1296  * Returns: errno
1297  */
1298
1299 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1300                       const struct gfs2_glock_operations *glops,
1301                       unsigned int state, int flags, struct gfs2_holder *gh)
1302 {
1303         struct gfs2_glock *gl;
1304         int error;
1305
1306         error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1307         if (!error) {
1308                 error = gfs2_glock_nq_init(gl, state, flags, gh);
1309                 gfs2_glock_put(gl);
1310         }
1311
1312         return error;
1313 }
1314
1315 /**
1316  * glock_compare - Compare two struct gfs2_glock structures for sorting
1317  * @arg_a: the first structure
1318  * @arg_b: the second structure
1319  *
1320  */
1321
1322 static int glock_compare(const void *arg_a, const void *arg_b)
1323 {
1324         const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1325         const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1326         const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1327         const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1328
1329         if (a->ln_number > b->ln_number)
1330                 return 1;
1331         if (a->ln_number < b->ln_number)
1332                 return -1;
1333         BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1334         return 0;
1335 }
1336
1337 /**
1338  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1339  * @num_gh: the number of structures
1340  * @ghs: an array of struct gfs2_holder structures
1341  *
1342  * Returns: 0 on success (all glocks acquired),
1343  *          errno on failure (no glocks acquired)
1344  */
1345
1346 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1347                      struct gfs2_holder **p)
1348 {
1349         unsigned int x;
1350         int error = 0;
1351
1352         for (x = 0; x < num_gh; x++)
1353                 p[x] = &ghs[x];
1354
1355         sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1356
1357         for (x = 0; x < num_gh; x++) {
1358                 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1359
1360                 error = gfs2_glock_nq(p[x]);
1361                 if (error) {
1362                         while (x--)
1363                                 gfs2_glock_dq(p[x]);
1364                         break;
1365                 }
1366         }
1367
1368         return error;
1369 }
1370
1371 /**
1372  * gfs2_glock_nq_m - acquire multiple glocks
1373  * @num_gh: the number of structures
1374  * @ghs: an array of struct gfs2_holder structures
1375  *
1376  *
1377  * Returns: 0 on success (all glocks acquired),
1378  *          errno on failure (no glocks acquired)
1379  */
1380
1381 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1382 {
1383         struct gfs2_holder *tmp[4];
1384         struct gfs2_holder **pph = tmp;
1385         int error = 0;
1386
1387         switch(num_gh) {
1388         case 0:
1389                 return 0;
1390         case 1:
1391                 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1392                 return gfs2_glock_nq(ghs);
1393         default:
1394                 if (num_gh <= 4)
1395                         break;
1396                 pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1397                 if (!pph)
1398                         return -ENOMEM;
1399         }
1400
1401         error = nq_m_sync(num_gh, ghs, pph);
1402
1403         if (pph != tmp)
1404                 kfree(pph);
1405
1406         return error;
1407 }
1408
1409 /**
1410  * gfs2_glock_dq_m - release multiple glocks
1411  * @num_gh: the number of structures
1412  * @ghs: an array of struct gfs2_holder structures
1413  *
1414  */
1415
1416 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1417 {
1418         unsigned int x;
1419
1420         for (x = 0; x < num_gh; x++)
1421                 gfs2_glock_dq(&ghs[x]);
1422 }
1423
1424 /**
1425  * gfs2_glock_dq_uninit_m - release multiple glocks
1426  * @num_gh: the number of structures
1427  * @ghs: an array of struct gfs2_holder structures
1428  *
1429  */
1430
1431 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1432 {
1433         unsigned int x;
1434
1435         for (x = 0; x < num_gh; x++)
1436                 gfs2_glock_dq_uninit(&ghs[x]);
1437 }
1438
1439 /**
1440  * gfs2_lvb_hold - attach a LVB to a glock
1441  * @gl: The glock in question
1442  *
1443  */
1444
1445 int gfs2_lvb_hold(struct gfs2_glock *gl)
1446 {
1447         int error;
1448
1449         gfs2_glmutex_lock(gl);
1450
1451         if (!atomic_read(&gl->gl_lvb_count)) {
1452                 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1453                 if (error) {
1454                         gfs2_glmutex_unlock(gl);
1455                         return error;
1456                 }
1457                 gfs2_glock_hold(gl);
1458         }
1459         atomic_inc(&gl->gl_lvb_count);
1460
1461         gfs2_glmutex_unlock(gl);
1462
1463         return 0;
1464 }
1465
1466 /**
1467  * gfs2_lvb_unhold - detach a LVB from a glock
1468  * @gl: The glock in question
1469  *
1470  */
1471
1472 void gfs2_lvb_unhold(struct gfs2_glock *gl)
1473 {
1474         gfs2_glock_hold(gl);
1475         gfs2_glmutex_lock(gl);
1476
1477         gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1478         if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1479                 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1480                 gl->gl_lvb = NULL;
1481                 gfs2_glock_put(gl);
1482         }
1483
1484         gfs2_glmutex_unlock(gl);
1485         gfs2_glock_put(gl);
1486 }
1487
1488 static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1489                         unsigned int state)
1490 {
1491         struct gfs2_glock *gl;
1492         unsigned long delay = 0;
1493         unsigned long holdtime;
1494         unsigned long now = jiffies;
1495
1496         gl = gfs2_glock_find(sdp, name);
1497         if (!gl)
1498                 return;
1499
1500         holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
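             /* Honour the minimum hold time: if the glock changed state recently,
                delay the demote so the lock is not bounced straight back to the
                other node. */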
1501         if (time_before(now, holdtime))
1502                 delay = holdtime - now;
1503
1504         handle_callback(gl, state, 1, delay);
1505         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1506                 gfs2_glock_put(gl);
1507 }
1508
1509 /**
1510  * gfs2_glock_cb - Callback used by locking module
1511  * @cb_data: Pointer to the superblock
1512  * @type: Type of callback
1513  * @data: Type dependent data pointer
1514  *
1515  * Called by the locking module when it wants to tell us something.
1516  * Either we need to drop a lock, one of our ASYNC requests completed, or
1517  * a journal from another client needs to be recovered.
1518  */
1519
1520 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1521 {
1522         struct gfs2_sbd *sdp = cb_data;
1523
1524         switch (type) {
1525         case LM_CB_NEED_E:
1526                 blocking_cb(sdp, data, LM_ST_UNLOCKED);
1527                 return;
1528
1529         case LM_CB_NEED_D:
1530                 blocking_cb(sdp, data, LM_ST_DEFERRED);
1531                 return;
1532
1533         case LM_CB_NEED_S:
1534                 blocking_cb(sdp, data, LM_ST_SHARED);
1535                 return;
1536
1537         case LM_CB_ASYNC: {
1538                 struct lm_async_cb *async = data;
1539                 struct gfs2_glock *gl;
1540
1541                 down_read(&gfs2_umount_flush_sem);
1542                 gl = gfs2_glock_find(sdp, &async->lc_name);
1543                 if (gfs2_assert_warn(sdp, gl))
1544                         return;
1545                 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1546                         gl->gl_req_bh(gl, async->lc_ret);
1547                 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1548                         gfs2_glock_put(gl);
1549                 up_read(&gfs2_umount_flush_sem);
1550                 return;
1551         }
1552
1553         case LM_CB_NEED_RECOVERY:
1554                 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1555                 if (sdp->sd_recoverd_process)
1556                         wake_up_process(sdp->sd_recoverd_process);
1557                 return;
1558
1559         case LM_CB_DROPLOCKS:
1560                 gfs2_gl_hash_clear(sdp, NO_WAIT);
1561                 gfs2_quota_scan(sdp);
1562                 return;
1563
1564         default:
1565                 gfs2_assert_warn(sdp, 0);
1566                 return;
1567         }
1568 }
1569
1570 /**
1571  * demote_ok - Check to see if it's ok to unlock a glock
1572  * @gl: the glock
1573  *
1574  * Returns: 1 if it's ok
1575  */
1576
1577 static int demote_ok(struct gfs2_glock *gl)
1578 {
1579         const struct gfs2_glock_operations *glops = gl->gl_ops;
1580         int demote = 1;
1581
1582         if (test_bit(GLF_STICKY, &gl->gl_flags))
1583                 demote = 0;
1584         else if (glops->go_demote_ok)
1585                 demote = glops->go_demote_ok(gl);
1586
1587         return demote;
1588 }
1589
1590 /**
1591  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1592  * @gl: the glock
1593  *
1594  */
1595
1596 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1597 {
1598         struct gfs2_sbd *sdp = gl->gl_sbd;
1599
1600         spin_lock(&sdp->sd_reclaim_lock);
1601         if (list_empty(&gl->gl_reclaim)) {
1602                 gfs2_glock_hold(gl);
1603                 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1604                 atomic_inc(&sdp->sd_reclaim_count);
1605         }
1606         spin_unlock(&sdp->sd_reclaim_lock);
1607
1608         wake_up(&sdp->sd_reclaim_wq);
1609 }
1610
1611 /**
1612  * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1613  * @sdp: the filesystem
1614  *
1615  * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
1616  * different glock and we notice that there are a lot of glocks in the
1617  * reclaim list.
1618  *
1619  */
1620
1621 void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1622 {
1623         struct gfs2_glock *gl;
1624
1625         spin_lock(&sdp->sd_reclaim_lock);
1626         if (list_empty(&sdp->sd_reclaim_list)) {
1627                 spin_unlock(&sdp->sd_reclaim_lock);
1628                 return;
1629         }
1630         gl = list_entry(sdp->sd_reclaim_list.next,
1631                         struct gfs2_glock, gl_reclaim);
1632         list_del_init(&gl->gl_reclaim);
1633         spin_unlock(&sdp->sd_reclaim_lock);
1634
1635         atomic_dec(&sdp->sd_reclaim_count);
1636         atomic_inc(&sdp->sd_reclaimed);
1637
1638         if (gfs2_glmutex_trylock(gl)) {
1639                 if (list_empty(&gl->gl_holders) &&
1640                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1641                         handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1642                 gfs2_glmutex_unlock(gl);
1643         }
1644
1645         gfs2_glock_put(gl);
1646 }
1647
1648 /**
1649  * examine_bucket - Call a function for each glock in a hash bucket
1650  * @examiner: the function
1651  * @sdp: the filesystem
1652  * @bucket: the bucket
1653  *
1654  * Returns: 1 if the bucket has entries
1655  */
1656
1657 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1658                           unsigned int hash)
1659 {
1660         struct gfs2_glock *gl, *prev = NULL;
1661         int has_entries = 0;
1662         struct hlist_head *head = &gl_hash_table[hash].hb_list;
1663
1664         read_lock(gl_lock_addr(hash));
1665         /* Can't use hlist_for_each_entry - don't want prefetch here */
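             /* Each glock is held before the bucket lock is dropped and kept held
                (as "prev") until we have moved past it, so the entry cannot be
                freed and its ->next pointer stays valid across the examiner call. */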
1666         if (hlist_empty(head))
1667                 goto out;
1668         gl = list_entry(head->first, struct gfs2_glock, gl_list);
1669         while(1) {
1670                 if (!sdp || gl->gl_sbd == sdp) {
1671                         gfs2_glock_hold(gl);
1672                         read_unlock(gl_lock_addr(hash));
1673                         if (prev)
1674                                 gfs2_glock_put(prev);
1675                         prev = gl;
1676                         examiner(gl);
1677                         has_entries = 1;
1678                         read_lock(gl_lock_addr(hash));
1679                 }
1680                 if (gl->gl_list.next == NULL)
1681                         break;
1682                 gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1683         }
1684 out:
1685         read_unlock(gl_lock_addr(hash));
1686         if (prev)
1687                 gfs2_glock_put(prev);
1688         cond_resched();
1689         return has_entries;
1690 }
1691
1692 /**
1693  * scan_glock - look at a glock and see if we can reclaim it
1694  * @gl: the glock to look at
1695  *
1696  */
1697
1698 static void scan_glock(struct gfs2_glock *gl)
1699 {
1700         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1701                 return;
1702
1703         if (gfs2_glmutex_trylock(gl)) {
1704                 if (list_empty(&gl->gl_holders) &&
1705                     gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1706                         goto out_schedule;
1707                 gfs2_glmutex_unlock(gl);
1708         }
1709         return;
1710
1711 out_schedule:
1712         gfs2_glmutex_unlock(gl);
1713         gfs2_glock_schedule_for_reclaim(gl);
1714 }
1715
1716 /**
1717  * clear_glock - look at a glock and see if we can free it from the glock cache
1718  * @gl: the glock to look at
1719  *
1720  */
1721
1722 static void clear_glock(struct gfs2_glock *gl)
1723 {
1724         struct gfs2_sbd *sdp = gl->gl_sbd;
1725         int released;
1726
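        /*
         * If the glock is still queued for reclaim, remove it and drop the
         * reference the reclaim list was holding.  The caller
         * (examine_bucket) holds its own reference, so this cannot be the
         * final put - hence the assert below.
         */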
1727         spin_lock(&sdp->sd_reclaim_lock);
1728         if (!list_empty(&gl->gl_reclaim)) {
1729                 list_del_init(&gl->gl_reclaim);
1730                 atomic_dec(&sdp->sd_reclaim_count);
1731                 spin_unlock(&sdp->sd_reclaim_lock);
1732                 released = gfs2_glock_put(gl);
1733                 gfs2_assert(sdp, !released);
1734         } else {
1735                 spin_unlock(&sdp->sd_reclaim_lock);
1736         }
1737
1738         if (gfs2_glmutex_trylock(gl)) {
1739                 if (list_empty(&gl->gl_holders) &&
1740                     gl->gl_state != LM_ST_UNLOCKED)
1741                         handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
1742                 gfs2_glmutex_unlock(gl);
1743         }
1744 }
1745
1746 /**
1747  * gfs2_gl_hash_clear - Empty out the glock hash table
1748  * @sdp: the filesystem
1749  * @wait: wait until it's all gone
1750  *
1751  * Called when unmounting the filesystem, or when inter-node lock manager
1752  * requests DROPLOCKS because it is running out of capacity.
1753  */
1754
1755 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1756 {
1757         unsigned long t;
1758         unsigned int x;
1759         int cont;
1760
1761         t = jiffies;
1762
1763         for (;;) {
1764                 cont = 0;
1765                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1766                         if (examine_bucket(clear_glock, sdp, x))
1767                                 cont = 1;
1768                 }
1769
1770                 if (!wait || !cont)
1771                         break;
1772
1773                 if (time_after_eq(jiffies,
1774                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1775                         fs_warn(sdp, "Unmount seems to be stalled. "
1776                                      "Dumping lock state...\n");
1777                         gfs2_dump_lockstate(sdp);
1778                         t = jiffies;
1779                 }
1780
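                /*
                 * Toss unused in-core inodes so that the glocks they are
                 * still attached to can be freed on a later pass.
                 */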
1781                 down_write(&gfs2_umount_flush_sem);
1782                 invalidate_inodes(sdp->sd_vfs);
1783                 up_write(&gfs2_umount_flush_sem);
1784                 msleep(10);
1785         }
1786 }
1787
1788 /*
1789  *  Diagnostic routines to help debug distributed deadlocks
1790  */
1791
1792 static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
1793                               unsigned long address)
1794 {
1795         char buffer[KSYM_SYMBOL_LEN];
1796
1797         sprint_symbol(buffer, address);
1798         print_dbg(gi, fmt, buffer);
1799 }
1800
1801 /**
1802  * dump_holder - print information about a glock holder
1803  * @str: a string naming the type of holder
1804  * @gh: the glock holder
1805  *
1806  * Returns: 0 on success, -ENOBUFS when we run out of space
1807  */
1808
1809 static int dump_holder(struct glock_iter *gi, char *str,
1810                        struct gfs2_holder *gh)
1811 {
1812         unsigned int x;
1813         struct task_struct *gh_owner;
1814
1815         print_dbg(gi, "  %s\n", str);
1816         if (gh->gh_owner_pid) {
1817                 print_dbg(gi, "    owner = %ld ", (long)gh->gh_owner_pid);
1818                 gh_owner = find_task_by_pid(gh->gh_owner_pid);
1819                 if (gh_owner)
1820                         print_dbg(gi, "(%s)\n", gh_owner->comm);
1821                 else
1822                         print_dbg(gi, "(ended)\n");
1823         } else
1824                 print_dbg(gi, "    owner = -1\n");
1825         print_dbg(gi, "    gh_state = %u\n", gh->gh_state);
1826         print_dbg(gi, "    gh_flags =");
1827         for (x = 0; x < 32; x++)
1828                 if (gh->gh_flags & (1 << x))
1829                         print_dbg(gi, " %u", x);
1830         print_dbg(gi, " \n");
1831         print_dbg(gi, "    error = %d\n", gh->gh_error);
1832         print_dbg(gi, "    gh_iflags =");
1833         for (x = 0; x < 32; x++)
1834                 if (test_bit(x, &gh->gh_iflags))
1835                         print_dbg(gi, " %u", x);
1836         print_dbg(gi, " \n");
1837         gfs2_print_symbol(gi, "    initialized at: %s\n", gh->gh_ip);
1838
1839         return 0;
1840 }
1841
1842 /**
1843  * dump_inode - print information about an inode
1844  * @ip: the inode
1845  *
1846  * Returns: 0 on success, -ENOBUFS when we run out of space
1847  */
1848
1849 static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
1850 {
1851         unsigned int x;
1852
1853         print_dbg(gi, "  Inode:\n");
1854         print_dbg(gi, "    num = %llu/%llu\n",
1855                   (unsigned long long)ip->i_no_formal_ino,
1856                   (unsigned long long)ip->i_no_addr);
1857         print_dbg(gi, "    type = %u\n", IF2DT(ip->i_inode.i_mode));
1858         print_dbg(gi, "    i_flags =");
1859         for (x = 0; x < 32; x++)
1860                 if (test_bit(x, &ip->i_flags))
1861                         print_dbg(gi, " %u", x);
1862         print_dbg(gi, " \n");
1863         return 0;
1864 }
1865
1866 /**
1867  * dump_glock - print information about a glock
1868  * @gi: the glock iterator
1869  * @gl: the glock
1870  *
1871  * Returns: 0 on success, -ENOBUFS when we run out of space
1872  */
1873
1874 static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
1875 {
1876         struct gfs2_holder *gh;
1877         unsigned int x;
1878         int error = -ENOBUFS;
1879         struct task_struct *gl_owner;
1880
1881         spin_lock(&gl->gl_spin);
1882
1883         print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
1884                    (unsigned long long)gl->gl_name.ln_number);
1885         print_dbg(gi, "  gl_flags =");
1886         for (x = 0; x < 32; x++) {
1887                 if (test_bit(x, &gl->gl_flags))
1888                         print_dbg(gi, " %u", x);
1889         }
1890         if (!test_bit(GLF_LOCK, &gl->gl_flags))
1891                 print_dbg(gi, " (unlocked)");
1892         print_dbg(gi, " \n");
1893         print_dbg(gi, "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
1894         print_dbg(gi, "  gl_state = %u\n", gl->gl_state);
1895         if (gl->gl_owner_pid) {
1896                 gl_owner = find_task_by_pid(gl->gl_owner_pid);
1897                 if (gl_owner)
1898                         print_dbg(gi, "  gl_owner = pid %d (%s)\n",
1899                                   gl->gl_owner_pid, gl_owner->comm);
1900                 else
1901                         print_dbg(gi, "  gl_owner = %d (ended)\n",
1902                                   gl->gl_owner_pid);
1903         } else
1904                 print_dbg(gi, "  gl_owner = -1\n");
1905         print_dbg(gi, "  gl_ip = %lu\n", gl->gl_ip);
1906         print_dbg(gi, "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1907         print_dbg(gi, "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1908         print_dbg(gi, "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1909         print_dbg(gi, "  object = %s\n", (gl->gl_object) ? "yes" : "no");
1910         print_dbg(gi, "  le = %s\n",
1911                    (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
1912         print_dbg(gi, "  reclaim = %s\n",
1913                    (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
1914         if (gl->gl_aspace)
1915                 print_dbg(gi, "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1916                            gl->gl_aspace->i_mapping->nrpages);
1917         else
1918                 print_dbg(gi, "  aspace = no\n");
1919         print_dbg(gi, "  ail = %d\n", atomic_read(&gl->gl_ail_count));
1920         if (gl->gl_req_gh) {
1921                 error = dump_holder(gi, "Request", gl->gl_req_gh);
1922                 if (error)
1923                         goto out;
1924         }
1925         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1926                 error = dump_holder(gi, "Holder", gh);
1927                 if (error)
1928                         goto out;
1929         }
1930         list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
1931                 error = dump_holder(gi, "Waiter1", gh);
1932                 if (error)
1933                         goto out;
1934         }
1935         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
1936                 error = dump_holder(gi, "Waiter3", gh);
1937                 if (error)
1938                         goto out;
1939         }
1940         if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1941                 print_dbg(gi, "  Demotion req to state %u (%llu uS ago)\n",
1942                           gl->gl_demote_state, (unsigned long long)
1943                           (jiffies - gl->gl_demote_time)*(1000000/HZ));
1944         }
1945         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
1946                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
1947                         list_empty(&gl->gl_holders)) {
1948                         error = dump_inode(gi, gl->gl_object);
1949                         if (error)
1950                                 goto out;
1951                 } else {
1952                         error = -ENOBUFS;
1953                         print_dbg(gi, "  Inode: busy\n");
1954                 }
1955         }
1956
1957         error = 0;
1958
1959 out:
1960         spin_unlock(&gl->gl_spin);
1961         return error;
1962 }
1963
1964 /**
1965  * gfs2_dump_lockstate - print out the current lockstate
1966  * @sdp: the filesystem
1967  *
1968  * Dumps the lock state of every glock belonging to @sdp to the console.
1971  */
1972
1973 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
1974 {
1975         struct gfs2_glock *gl;
1976         struct hlist_node *h;
1977         unsigned int x;
1978         int error = 0;
1979
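        /*
         * Walk every hash bucket under its read lock and dump each glock
         * that belongs to this superblock, stopping at the first error.
         */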
1980         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1981
1982                 read_lock(gl_lock_addr(x));
1983
1984                 hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
1985                         if (gl->gl_sbd != sdp)
1986                                 continue;
1987
1988                         error = dump_glock(NULL, gl);
1989                         if (error)
1990                                 break;
1991                 }
1992
1993                 read_unlock(gl_lock_addr(x));
1994
1995                 if (error)
1996                         break;
1997         }
1998
2000         return error;
2001 }
2002
2003 /**
2004  * gfs2_scand - Look for cached glocks and inodes to toss from memory
2005  * @sdp: Pointer to GFS2 superblock
2006  *
2007  * One of these daemons runs, finding candidates to add to sd_reclaim_list.
2008  * See gfs2_glockd()
2009  */
2010
2011 static int gfs2_scand(void *data)
2012 {
2013         unsigned x;
2014         unsigned delay;
2015
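        /*
         * Sweep every hash bucket with scan_glock(), then sleep for
         * scand_secs (at least one second) before the next pass.
         */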
2016         while (!kthread_should_stop()) {
2017                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
2018                         examine_bucket(scan_glock, NULL, x);
2019                 if (freezing(current))
2020                         refrigerator();
2021                 delay = scand_secs;
2022                 if (delay < 1)
2023                         delay = 1;
2024                 schedule_timeout_interruptible(delay * HZ);
2025         }
2026
2027         return 0;
2028 }
2029
2032 int __init gfs2_glock_init(void)
2033 {
2034         unsigned i;
2035         for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
2036                 INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
2037         }
2038 #ifdef GL_HASH_LOCK_SZ
2039         for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
2040                 rwlock_init(&gl_hash_locks[i]);
2041         }
2042 #endif
2043
2044         scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
2045         if (IS_ERR(scand_process))
2046                 return PTR_ERR(scand_process);
2047
2048         glock_workqueue = create_workqueue("glock_workqueue");
2049         if (!glock_workqueue) { /* create_workqueue() returns NULL on failure */
2050                 kthread_stop(scand_process);
2051                 return -ENOMEM;
2052         }
2053
2054         return 0;
2055 }
2056
2057 void gfs2_glock_exit(void)
2058 {
2059         destroy_workqueue(glock_workqueue);
2060         kthread_stop(scand_process);
2061 }
2062
2063 module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
2064 MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
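/*
 * Illustrative usage (assumes gfs2 is built as a module): the scan interval
 * can be set at load time, e.g. "modprobe gfs2 scand_secs=10", or changed by
 * root at runtime via /sys/module/gfs2/parameters/scand_secs, as permitted
 * by the mode bits above.
 */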
2065
2066 static int gfs2_glock_iter_next(struct glock_iter *gi)
2067 {
2068         struct gfs2_glock *gl;
2069
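        /*
         * Step to the next glock in the current hash chain; once the chain
         * is exhausted, move on to the first entry of the next non-empty
         * bucket.  Glocks belonging to a different superblock are skipped
         * by restarting.
         */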
2070 restart:
2071         read_lock(gl_lock_addr(gi->hash));
2072         gl = gi->gl;
2073         if (gl) {
2074                 gi->gl = hlist_entry(gl->gl_list.next,
2075                                      struct gfs2_glock, gl_list);
2076                 if (gi->gl)
2077                         gfs2_glock_hold(gi->gl);
2078         }
2079         read_unlock(gl_lock_addr(gi->hash));
2080         if (gl)
2081                 gfs2_glock_put(gl);
2082         if (gl && gi->gl == NULL)
2083                 gi->hash++;
2084         while(gi->gl == NULL) {
2085                 if (gi->hash >= GFS2_GL_HASH_SIZE)
2086                         return 1;
2087                 read_lock(gl_lock_addr(gi->hash));
2088                 gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
2089                                      struct gfs2_glock, gl_list);
2090                 if (gi->gl)
2091                         gfs2_glock_hold(gi->gl);
2092                 read_unlock(gl_lock_addr(gi->hash));
2093                 gi->hash++;
2094         }
2095
2096         if (gi->sdp != gi->gl->gl_sbd)
2097                 goto restart;
2098
2099         return 0;
2100 }
2101
2102 static void gfs2_glock_iter_free(struct glock_iter *gi)
2103 {
2104         if (gi->gl)
2105                 gfs2_glock_put(gi->gl);
2106         kfree(gi);
2107 }
2108
2109 static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
2110 {
2111         struct glock_iter *gi;
2112
2113         gi = kmalloc(sizeof (*gi), GFP_KERNEL);
2114         if (!gi)
2115                 return NULL;
2116
2117         gi->sdp = sdp;
2118         gi->hash = 0;
2119         gi->seq = NULL;
2120         gi->gl = NULL;
2121         memset(gi->string, 0, sizeof(gi->string));
2122
2123         if (gfs2_glock_iter_next(gi)) {
2124                 gfs2_glock_iter_free(gi);
2125                 return NULL;
2126         }
2127
2128         return gi;
2129 }
2130
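/*
 * seq_file start: build a fresh iterator and advance it *pos steps, so that
 * a read which restarts part-way through the table resumes at the right
 * glock.
 */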
2131 static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
2132 {
2133         struct glock_iter *gi;
2134         loff_t n = *pos;
2135
2136         gi = gfs2_glock_iter_init(file->private);
2137         if (!gi)
2138                 return NULL;
2139
2140         while(n--) {
2141                 if (gfs2_glock_iter_next(gi)) {
2142                         gfs2_glock_iter_free(gi);
2143                         return NULL;
2144                 }
2145         }
2146
2147         return gi;
2148 }
2149
2150 static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
2151                                  loff_t *pos)
2152 {
2153         struct glock_iter *gi = iter_ptr;
2154
2155         (*pos)++;
2156
2157         if (gfs2_glock_iter_next(gi)) {
2158                 gfs2_glock_iter_free(gi);
2159                 return NULL;
2160         }
2161
2162         return gi;
2163 }
2164
2165 static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
2166 {
2167         struct glock_iter *gi = iter_ptr;
2168         if (gi)
2169                 gfs2_glock_iter_free(gi);
2170 }
2171
2172 static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
2173 {
2174         struct glock_iter *gi = iter_ptr;
2175
2176         gi->seq = file;
2177         dump_glock(gi, gi->gl);
2178
2179         return 0;
2180 }
2181
2182 static const struct seq_operations gfs2_glock_seq_ops = {
2183         .start = gfs2_glock_seq_start,
2184         .next  = gfs2_glock_seq_next,
2185         .stop  = gfs2_glock_seq_stop,
2186         .show  = gfs2_glock_seq_show,
2187 };
2188
2189 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
2190 {
2191         struct seq_file *seq;
2192         int ret;
2193
2194         ret = seq_open(file, &gfs2_glock_seq_ops);
2195         if (ret)
2196                 return ret;
2197
2198         seq = file->private_data;
2199         seq->private = inode->i_private;
2200
2201         return 0;
2202 }
2203
2204 static const struct file_operations gfs2_debug_fops = {
2205         .owner   = THIS_MODULE,
2206         .open    = gfs2_debugfs_open,
2207         .read    = seq_read,
2208         .llseek  = seq_lseek,
2209         .release = seq_release
2210 };
2211
2212 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2213 {
2214         sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2215         if (!sdp->debugfs_dir)
2216                 return -ENOMEM;
2217         sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
2218                                                          S_IFREG | S_IRUGO,
2219                                                          sdp->debugfs_dir, sdp,
2220                                                          &gfs2_debug_fops);
2221         if (!sdp->debugfs_dentry_glocks)
2222                 return -ENOMEM;
2223
2224         return 0;
2225 }
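/*
 * With debugfs mounted (typically at /sys/kernel/debug), the resulting lock
 * dump can then be read from /sys/kernel/debug/gfs2/<table name>/glocks.
 */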
2226
2227 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2228 {
2229         if (sdp && sdp->debugfs_dir) {
2230                 if (sdp->debugfs_dentry_glocks) {
2231                         debugfs_remove(sdp->debugfs_dentry_glocks);
2232                         sdp->debugfs_dentry_glocks = NULL;
2233                 }
2234                 debugfs_remove(sdp->debugfs_dir);
2235                 sdp->debugfs_dir = NULL;
2236         }
2237 }
2238
2239 int gfs2_register_debugfs(void)
2240 {
2241         gfs2_root = debugfs_create_dir("gfs2", NULL);
2242         return gfs2_root ? 0 : -ENOMEM;
2243 }
2244
2245 void gfs2_unregister_debugfs(void)
2246 {
2247         debugfs_remove(gfs2_root);
2248         gfs2_root = NULL;
2249 }