xref: /openbmc/linux/fs/gfs2/glock.c (revision b004157ab5b374a498a5874cda68c389219d23e7)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/delay.h>
16 #include <linux/sort.h>
17 #include <linux/jhash.h>
18 #include <linux/kallsyms.h>
19 #include <linux/gfs2_ondisk.h>
20 #include <linux/list.h>
21 #include <linux/lm_interface.h>
22 #include <asm/uaccess.h>
23 
24 #include "gfs2.h"
25 #include "incore.h"
26 #include "glock.h"
27 #include "glops.h"
28 #include "inode.h"
29 #include "lm.h"
30 #include "lops.h"
31 #include "meta_io.h"
32 #include "quota.h"
33 #include "super.h"
34 #include "util.h"
35 
36 struct greedy {
37 	struct gfs2_holder gr_gh;
38 	struct work_struct gr_work;
39 };
40 
41 struct gfs2_gl_hash_bucket {
42 	struct hlist_head hb_list;
43 };
44 
45 typedef void (*glock_examiner) (struct gfs2_glock *gl);
46 
47 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
48 static int dump_glock(struct gfs2_glock *gl);
49 static int dump_inode(struct gfs2_inode *ip);
50 
51 #define GFS2_GL_HASH_SHIFT      15
52 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
53 #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
54 
55 static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
56 
57 /*
58  * Despite what you might think, the numbers below are not arbitrary :-)
59  * They are taken from the ipv4 routing hash code, which is well tested
60  * and thus should be nearly optimal. Later on we might tweak the numbers
61  * but for now this should be fine.
62  *
63  * The reason for putting the locks in a separate array from the list heads
64  * is that we can have fewer locks than list heads and save memory. We use
65  * the same hash function for both, but with a different hash mask.
66  */
67 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
68 	defined(CONFIG_PROVE_LOCKING)
69 
70 #ifdef CONFIG_LOCKDEP
71 # define GL_HASH_LOCK_SZ        256
72 #else
73 # if NR_CPUS >= 32
74 #  define GL_HASH_LOCK_SZ       4096
75 # elif NR_CPUS >= 16
76 #  define GL_HASH_LOCK_SZ       2048
77 # elif NR_CPUS >= 8
78 #  define GL_HASH_LOCK_SZ       1024
79 # elif NR_CPUS >= 4
80 #  define GL_HASH_LOCK_SZ       512
81 # else
82 #  define GL_HASH_LOCK_SZ       256
83 # endif
84 #endif
85 
86 /* We never want more locks than chains */
87 #if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
88 # undef GL_HASH_LOCK_SZ
89 # define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
90 #endif
91 
92 static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
93 
94 static inline rwlock_t *gl_lock_addr(unsigned int x)
95 {
96 	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
97 }
98 #else /* not SMP, so no spinlocks required */
99 static inline rwlock_t *gl_lock_addr(unsigned int x)
100 {
101 	return NULL;
102 }
103 #endif
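
/*
 * Illustrative read-side pattern for the tables above (this mirrors
 * gfs2_glock_find() below): the hash value selects both a bucket chain
 * and, through the smaller GL_HASH_LOCK_SZ mask, the rwlock guarding it.
 *
 *	unsigned int hash = gl_hash(sdp, &name);
 *
 *	read_lock(gl_lock_addr(hash));
 *	gl = search_bucket(hash, sdp, &name);
 *	read_unlock(gl_lock_addr(hash));
 */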
104 
105 /**
106  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
107  * @actual: the current state of the lock
108  * @requested: the lock state that was requested by the caller
109  * @flags: the modifier flags passed in by the caller
110  *
111  * Returns: 1 if the locks are compatible, 0 otherwise
112  */
113 
114 static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
115 				   int flags)
116 {
117 	if (actual == requested)
118 		return 1;
119 
120 	if (flags & GL_EXACT)
121 		return 0;
122 
123 	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
124 		return 1;
125 
126 	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
127 		return 1;
128 
129 	return 0;
130 }
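
/*
 * Example (no new semantics, just the rules above): a holder requesting
 * LM_ST_SHARED is satisfied by a glock already held in LM_ST_EXCLUSIVE,
 * unless GL_EXACT forbids that relaxation; with LM_FLAG_ANY, any state
 * other than LM_ST_UNLOCKED is acceptable.
 */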
131 
132 /**
133  * gl_hash() - Turn glock number into hash bucket number
134  * @sdp: The GFS2 superblock
135  * @name: The lock name
136  * Returns: The number of the corresponding hash bucket
137  */
138 
139 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
140 			    const struct lm_lockname *name)
141 {
142 	unsigned int h;
143 
144 	h = jhash(&name->ln_number, sizeof(u64), 0);
145 	h = jhash(&name->ln_type, sizeof(unsigned int), h);
146 	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
147 	h &= GFS2_GL_HASH_MASK;
148 
149 	return h;
150 }
151 
152 /**
153  * glock_free() - Perform a few checks and then release struct gfs2_glock
154  * @gl: The glock to release
155  *
156  * Also calls lock module to release its internal structure for this glock.
157  *
158  */
159 
160 static void glock_free(struct gfs2_glock *gl)
161 {
162 	struct gfs2_sbd *sdp = gl->gl_sbd;
163 	struct inode *aspace = gl->gl_aspace;
164 
165 	gfs2_lm_put_lock(sdp, gl->gl_lock);
166 
167 	if (aspace)
168 		gfs2_aspace_put(aspace);
169 
170 	kmem_cache_free(gfs2_glock_cachep, gl);
171 }
172 
173 /**
174  * gfs2_glock_hold() - increment reference count on glock
175  * @gl: The glock to hold
176  *
177  */
178 
179 void gfs2_glock_hold(struct gfs2_glock *gl)
180 {
181 	atomic_inc(&gl->gl_ref);
182 }
183 
184 /**
185  * gfs2_glock_put() - Decrement reference count on glock
186  * @gl: The glock to put
187  *
188  */
189 
190 int gfs2_glock_put(struct gfs2_glock *gl)
191 {
192 	int rv = 0;
193 	struct gfs2_sbd *sdp = gl->gl_sbd;
194 
195 	write_lock(gl_lock_addr(gl->gl_hash));
196 	if (atomic_dec_and_test(&gl->gl_ref)) {
197 		hlist_del(&gl->gl_list);
198 		write_unlock(gl_lock_addr(gl->gl_hash));
199 		BUG_ON(spin_is_locked(&gl->gl_spin));
200 		gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
201 		gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
202 		gfs2_assert(sdp, list_empty(&gl->gl_holders));
203 		gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
204 		gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
205 		gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
206 		glock_free(gl);
207 		rv = 1;
208 		goto out;
209 	}
210 	write_unlock(gl_lock_addr(gl->gl_hash));
211 out:
212 	return rv;
213 }
214 
215 /**
216  * queue_empty - check to see if a glock's queue is empty
217  * @gl: the glock
218  * @head: the head of the queue to check
219  *
220  * This function protects the list in the event that a process already
221  * has a holder on the list and is adding a second holder for itself.
222  * The glmutex lock is what generally prevents processes from working
223  * on the same glock at once, but the special case of adding a second
224  * holder for yourself ("recursive" locking) doesn't involve locking
225  * glmutex, making the spin lock necessary.
226  *
227  * Returns: 1 if the queue is empty
228  */
229 
230 static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
231 {
232 	int empty;
233 	spin_lock(&gl->gl_spin);
234 	empty = list_empty(head);
235 	spin_unlock(&gl->gl_spin);
236 	return empty;
237 }
238 
239 /**
240  * search_bucket() - Find struct gfs2_glock by lock number
241  * @hash: the hash bucket index
242  * @sdp: the filesystem superblock
243  * @name: The lock name
244  * Returns: NULL, or the struct gfs2_glock with the requested number
245  */
246 
247 static struct gfs2_glock *search_bucket(unsigned int hash,
248 					const struct gfs2_sbd *sdp,
249 					const struct lm_lockname *name)
250 {
251 	struct gfs2_glock *gl;
252 	struct hlist_node *h;
253 
254 	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
255 		if (!lm_name_equal(&gl->gl_name, name))
256 			continue;
257 		if (gl->gl_sbd != sdp)
258 			continue;
259 
260 		atomic_inc(&gl->gl_ref);
261 
262 		return gl;
263 	}
264 
265 	return NULL;
266 }
267 
268 /**
269  * gfs2_glock_find() - Find glock by lock number
270  * @sdp: The GFS2 superblock
271  * @name: The lock name
272  *
273  * Returns: NULL, or the struct gfs2_glock with the requested number
274  */
275 
276 static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
277 					  const struct lm_lockname *name)
278 {
279 	unsigned int hash = gl_hash(sdp, name);
280 	struct gfs2_glock *gl;
281 
282 	read_lock(gl_lock_addr(hash));
283 	gl = search_bucket(hash, sdp, name);
284 	read_unlock(gl_lock_addr(hash));
285 
286 	return gl;
287 }
288 
289 /**
290  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
291  * @sdp: The GFS2 superblock
292  * @number: the lock number
293  * @glops: The glock_operations to use
294  * @create: If 0, don't create the glock if it doesn't exist
295  * @glp: the glock is returned here
296  *
297  * This does not lock a glock, just finds/creates structures for one.
298  *
299  * Returns: errno
300  */
301 
302 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
303 		   const struct gfs2_glock_operations *glops, int create,
304 		   struct gfs2_glock **glp)
305 {
306 	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
307 	struct gfs2_glock *gl, *tmp;
308 	unsigned int hash = gl_hash(sdp, &name);
309 	int error;
310 
311 	read_lock(gl_lock_addr(hash));
312 	gl = search_bucket(hash, sdp, &name);
313 	read_unlock(gl_lock_addr(hash));
314 
315 	if (gl || !create) {
316 		*glp = gl;
317 		return 0;
318 	}
319 
320 	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
321 	if (!gl)
322 		return -ENOMEM;
323 
324 	gl->gl_flags = 0;
325 	gl->gl_name = name;
326 	atomic_set(&gl->gl_ref, 1);
327 	gl->gl_state = LM_ST_UNLOCKED;
328 	gl->gl_hash = hash;
329 	gl->gl_owner = NULL;
330 	gl->gl_ip = 0;
331 	gl->gl_ops = glops;
332 	gl->gl_req_gh = NULL;
333 	gl->gl_req_bh = NULL;
334 	gl->gl_vn = 0;
335 	gl->gl_stamp = jiffies;
336 	gl->gl_object = NULL;
337 	gl->gl_sbd = sdp;
338 	gl->gl_aspace = NULL;
339 	lops_init_le(&gl->gl_le, &gfs2_glock_lops);
340 
341 	/* If this glock protects actual on-disk data or metadata blocks,
342 	   create a VFS inode to manage the pages/buffers holding them. */
343 	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
344 		gl->gl_aspace = gfs2_aspace_get(sdp);
345 		if (!gl->gl_aspace) {
346 			error = -ENOMEM;
347 			goto fail;
348 		}
349 	}
350 
351 	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
352 	if (error)
353 		goto fail_aspace;
354 
355 	write_lock(gl_lock_addr(hash));
356 	tmp = search_bucket(hash, sdp, &name);
357 	if (tmp) {
358 		write_unlock(gl_lock_addr(hash));
359 		glock_free(gl);
360 		gl = tmp;
361 	} else {
362 		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
363 		write_unlock(gl_lock_addr(hash));
364 	}
365 
366 	*glp = gl;
367 
368 	return 0;
369 
370 fail_aspace:
371 	if (gl->gl_aspace)
372 		gfs2_aspace_put(gl->gl_aspace);
373 fail:
374 	kmem_cache_free(gfs2_glock_cachep, gl);
375 	return error;
376 }
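
/*
 * Usage sketch (illustrative; gfs2_glock_nq_num() below does exactly
 * this): get a reference to a glock, use it, and drop the reference.
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	... enqueue holders, etc. ...
 *	gfs2_glock_put(gl);
 */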
377 
378 /**
379  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
380  * @gl: the glock
381  * @state: the state we're requesting
382  * @flags: the modifier flags
383  * @gh: the holder structure
384  *
385  */
386 
387 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
388 		      struct gfs2_holder *gh)
389 {
390 	INIT_LIST_HEAD(&gh->gh_list);
391 	gh->gh_gl = gl;
392 	gh->gh_ip = (unsigned long)__builtin_return_address(0);
393 	gh->gh_owner = current;
394 	gh->gh_state = state;
395 	gh->gh_flags = flags;
396 	gh->gh_error = 0;
397 	gh->gh_iflags = 0;
398 	init_completion(&gh->gh_wait);
399 
400 	if (gh->gh_state == LM_ST_EXCLUSIVE)
401 		gh->gh_flags |= GL_LOCAL_EXCL;
402 
403 	gfs2_glock_hold(gl);
404 }
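
/*
 * Usage sketch (illustrative): callers typically embed a holder on the
 * stack and pair these calls as follows (gfs2_glock_nq_init() in
 * glock.h combines the first two steps):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... access the protected object ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */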
405 
406 /**
407  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
408  * @state: the state we're requesting
409  * @flags: the modifier flags
410  * @gh: the holder structure
411  *
412  * Don't mess with the glock.
413  *
414  */
415 
416 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
417 {
418 	gh->gh_state = state;
419 	gh->gh_flags = flags;
420 	if (gh->gh_state == LM_ST_EXCLUSIVE)
421 		gh->gh_flags |= GL_LOCAL_EXCL;
422 
423 	gh->gh_iflags &= 1 << HIF_ALLOCED;
424 	gh->gh_ip = (unsigned long)__builtin_return_address(0);
425 }
426 
427 /**
428  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
429  * @gh: the holder structure
430  *
431  */
432 
433 void gfs2_holder_uninit(struct gfs2_holder *gh)
434 {
435 	gfs2_glock_put(gh->gh_gl);
436 	gh->gh_gl = NULL;
437 	gh->gh_ip = 0;
438 }
439 
440 /**
441  * gfs2_holder_get - get a struct gfs2_holder structure
442  * @gl: the glock
443  * @state: the state we're requesting
444  * @flags: the modifier flags
445  * @gfp_flags: the memory allocation flags for the new holder
446  *
447  * Figure out how big an impact this function has.  Either:
448  * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
449  * 2) Leave it like it is
450  *
451  * Returns: the holder structure, NULL on ENOMEM
452  */
453 
454 static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
455 					   unsigned int state,
456 					   int flags, gfp_t gfp_flags)
457 {
458 	struct gfs2_holder *gh;
459 
460 	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
461 	if (!gh)
462 		return NULL;
463 
464 	gfs2_holder_init(gl, state, flags, gh);
465 	set_bit(HIF_ALLOCED, &gh->gh_iflags);
466 	gh->gh_ip = (unsigned long)__builtin_return_address(0);
467 	return gh;
468 }
469 
470 /**
471  * gfs2_holder_put - get rid of a struct gfs2_holder structure
472  * @gh: the holder structure
473  *
474  */
475 
476 static void gfs2_holder_put(struct gfs2_holder *gh)
477 {
478 	gfs2_holder_uninit(gh);
479 	kfree(gh);
480 }
481 
482 /**
483  * rq_mutex - process a mutex request in the queue
484  * @gh: the glock holder
485  *
486  * Returns: 1 if the queue is blocked
487  */
488 
489 static int rq_mutex(struct gfs2_holder *gh)
490 {
491 	struct gfs2_glock *gl = gh->gh_gl;
492 
493 	list_del_init(&gh->gh_list);
494 	/*  gh->gh_error never examined.  */
495 	set_bit(GLF_LOCK, &gl->gl_flags);
496 	complete(&gh->gh_wait);
497 
498 	return 1;
499 }
500 
501 /**
502  * rq_promote - process a promote request in the queue
503  * @gh: the glock holder
504  *
505  * Acquire a new inter-node lock, or change a lock state to more restrictive.
506  *
507  * Returns: 1 if the queue is blocked
508  */
509 
510 static int rq_promote(struct gfs2_holder *gh)
511 {
512 	struct gfs2_glock *gl = gh->gh_gl;
513 	struct gfs2_sbd *sdp = gl->gl_sbd;
514 	const struct gfs2_glock_operations *glops = gl->gl_ops;
515 
516 	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
517 		if (list_empty(&gl->gl_holders)) {
518 			gl->gl_req_gh = gh;
519 			set_bit(GLF_LOCK, &gl->gl_flags);
520 			spin_unlock(&gl->gl_spin);
521 
522 			if (atomic_read(&sdp->sd_reclaim_count) >
523 			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
524 			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
525 				gfs2_reclaim_glock(sdp);
526 				gfs2_reclaim_glock(sdp);
527 			}
528 
529 			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
530 			spin_lock(&gl->gl_spin);
531 		}
532 		return 1;
533 	}
534 
535 	if (list_empty(&gl->gl_holders)) {
536 		set_bit(HIF_FIRST, &gh->gh_iflags);
537 		set_bit(GLF_LOCK, &gl->gl_flags);
538 	} else {
539 		struct gfs2_holder *next_gh;
540 		if (gh->gh_flags & GL_LOCAL_EXCL)
541 			return 1;
542 		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
543 				     gh_list);
544 		if (next_gh->gh_flags & GL_LOCAL_EXCL)
545 			return 1;
546 	}
547 
548 	list_move_tail(&gh->gh_list, &gl->gl_holders);
549 	gh->gh_error = 0;
550 	set_bit(HIF_HOLDER, &gh->gh_iflags);
551 
552 	complete(&gh->gh_wait);
553 
554 	return 0;
555 }
556 
557 /**
558  * rq_demote - process a demote request in the queue
559  * @gh: the glock holder
560  *
561  * Returns: 1 if the queue is blocked
562  */
563 
564 static int rq_demote(struct gfs2_holder *gh)
565 {
566 	struct gfs2_glock *gl = gh->gh_gl;
567 	const struct gfs2_glock_operations *glops = gl->gl_ops;
568 
569 	if (!list_empty(&gl->gl_holders))
570 		return 1;
571 
572 	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
573 		list_del_init(&gh->gh_list);
574 		gh->gh_error = 0;
575 		spin_unlock(&gl->gl_spin);
576 		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
577 			gfs2_holder_put(gh);
578 		else
579 			complete(&gh->gh_wait);
580 		spin_lock(&gl->gl_spin);
581 	} else {
582 		gl->gl_req_gh = gh;
583 		set_bit(GLF_LOCK, &gl->gl_flags);
584 		spin_unlock(&gl->gl_spin);
585 
586 		if (gh->gh_state == LM_ST_UNLOCKED ||
587 		    gl->gl_state != LM_ST_EXCLUSIVE)
588 			glops->go_drop_th(gl);
589 		else
590 			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
591 
592 		spin_lock(&gl->gl_spin);
593 	}
594 
595 	return 0;
596 }
597 
598 /**
599  * rq_greedy - process a queued request to drop greedy status
600  * @gh: the glock holder
601  *
602  * Returns: 1 if the queue is blocked
603  */
604 
605 static int rq_greedy(struct gfs2_holder *gh)
606 {
607 	struct gfs2_glock *gl = gh->gh_gl;
608 
609 	list_del_init(&gh->gh_list);
610 	/*  gh->gh_error never examined.  */
611 	clear_bit(GLF_GREEDY, &gl->gl_flags);
612 	spin_unlock(&gl->gl_spin);
613 
614 	gfs2_holder_uninit(gh);
615 	kfree(container_of(gh, struct greedy, gr_gh));
616 
617 	spin_lock(&gl->gl_spin);
618 
619 	return 0;
620 }
621 
622 /**
623  * run_queue - process holder structures on a glock
624  * @gl: the glock
625  * Must be called with gl_spin held. Stops at the first request that blocks.
626  */
627 static void run_queue(struct gfs2_glock *gl)
628 {
629 	struct gfs2_holder *gh;
630 	int blocked = 1;
631 
632 	for (;;) {
633 		if (test_bit(GLF_LOCK, &gl->gl_flags))
634 			break;
635 
636 		if (!list_empty(&gl->gl_waiters1)) {
637 			gh = list_entry(gl->gl_waiters1.next,
638 					struct gfs2_holder, gh_list);
639 
640 			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
641 				blocked = rq_mutex(gh);
642 			else
643 				gfs2_assert_warn(gl->gl_sbd, 0);
644 
645 		} else if (!list_empty(&gl->gl_waiters2) &&
646 			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
647 			gh = list_entry(gl->gl_waiters2.next,
648 					struct gfs2_holder, gh_list);
649 
650 			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
651 				blocked = rq_demote(gh);
652 			else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
653 				blocked = rq_greedy(gh);
654 			else
655 				gfs2_assert_warn(gl->gl_sbd, 0);
656 
657 		} else if (!list_empty(&gl->gl_waiters3)) {
658 			gh = list_entry(gl->gl_waiters3.next,
659 					struct gfs2_holder, gh_list);
660 
661 			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
662 				blocked = rq_promote(gh);
663 			else
664 				gfs2_assert_warn(gl->gl_sbd, 0);
665 
666 		} else
667 			break;
668 
669 		if (blocked)
670 			break;
671 	}
672 }
673 
674 /**
675  * gfs2_glmutex_lock - acquire a local lock on a glock
676  * @gl: the glock
677  *
678  * Gives caller exclusive access to manipulate a glock structure.
679  */
680 
681 static void gfs2_glmutex_lock(struct gfs2_glock *gl)
682 {
683 	struct gfs2_holder gh;
684 
685 	gfs2_holder_init(gl, 0, 0, &gh);
686 	set_bit(HIF_MUTEX, &gh.gh_iflags);
687 
688 	spin_lock(&gl->gl_spin);
689 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
690 		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
691 	} else {
692 		gl->gl_owner = current;
693 		gl->gl_ip = (unsigned long)__builtin_return_address(0);
694 		complete(&gh.gh_wait);
695 	}
696 	spin_unlock(&gl->gl_spin);
697 
698 	wait_for_completion(&gh.gh_wait);
699 	gfs2_holder_uninit(&gh);
700 }
701 
702 /**
703  * gfs2_glmutex_trylock - try to acquire a local lock on a glock
704  * @gl: the glock
705  *
706  * Returns: 1 if the glock is acquired
707  */
708 
709 static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
710 {
711 	int acquired = 1;
712 
713 	spin_lock(&gl->gl_spin);
714 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
715 		acquired = 0;
716 	} else {
717 		gl->gl_owner = current;
718 		gl->gl_ip = (unsigned long)__builtin_return_address(0);
719 	}
720 	spin_unlock(&gl->gl_spin);
721 
722 	return acquired;
723 }
724 
725 /**
726  * gfs2_glmutex_unlock - release a local lock on a glock
727  * @gl: the glock
728  *
729  */
730 
731 static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
732 {
733 	spin_lock(&gl->gl_spin);
734 	clear_bit(GLF_LOCK, &gl->gl_flags);
735 	gl->gl_owner = NULL;
736 	gl->gl_ip = 0;
737 	run_queue(gl);
738 	BUG_ON(!spin_is_locked(&gl->gl_spin));
739 	spin_unlock(&gl->gl_spin);
740 }
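
/*
 * Illustrative pattern (matches gfs2_reclaim_glock() and scan_glock()
 * below): the glmutex serializes state-machine work on a single glock
 * without gl_spin being held across calls into the lock module.
 *
 *	if (gfs2_glmutex_trylock(gl)) {
 *		... examine or change the glock's state ...
 *		gfs2_glmutex_unlock(gl);
 *	}
 */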
741 
742 /**
743  * handle_callback - add a demote request to a lock's queue
744  * @gl: the glock
745  * @state: the state the caller wants us to change to
746  *
747  * Note: This may fail silently if we are out of memory.
748  */
749 
750 static void handle_callback(struct gfs2_glock *gl, unsigned int state)
751 {
752 	struct gfs2_holder *gh, *new_gh = NULL;
753 
754 restart:
755 	spin_lock(&gl->gl_spin);
756 
757 	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
758 		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
759 		    gl->gl_req_gh != gh) {
760 			if (gh->gh_state != state)
761 				gh->gh_state = LM_ST_UNLOCKED;
762 			goto out;
763 		}
764 	}
765 
766 	if (new_gh) {
767 		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
768 		new_gh = NULL;
769 	} else {
770 		spin_unlock(&gl->gl_spin);
771 
772 		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
773 		if (!new_gh)
774 			return;
775 		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
776 		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
777 
778 		goto restart;
779 	}
780 
781 out:
782 	spin_unlock(&gl->gl_spin);
783 
784 	if (new_gh)
785 		gfs2_holder_put(new_gh);
786 }
787 
788 /**
789  * state_change - record that the glock is now in a different state
790  * @gl: the glock
791  * @new_state: the new state
792  *
793  */
794 
795 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
796 {
797 	int held1, held2;
798 
799 	held1 = (gl->gl_state != LM_ST_UNLOCKED);
800 	held2 = (new_state != LM_ST_UNLOCKED);
801 
802 	if (held1 != held2) {
803 		if (held2)
804 			gfs2_glock_hold(gl);
805 		else
806 			gfs2_glock_put(gl);
807 	}
808 
809 	gl->gl_state = new_state;
810 }
811 
812 /**
813  * xmote_bh - Called after the lock module is done acquiring a lock
814  * @gl: The glock in question
815  * @ret: the int returned from the lock module
816  *
817  */
818 
819 static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
820 {
821 	struct gfs2_sbd *sdp = gl->gl_sbd;
822 	const struct gfs2_glock_operations *glops = gl->gl_ops;
823 	struct gfs2_holder *gh = gl->gl_req_gh;
824 	int prev_state = gl->gl_state;
825 	int op_done = 1;
826 
827 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
828 	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
829 	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
830 
831 	state_change(gl, ret & LM_OUT_ST_MASK);
832 
833 	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
834 		if (glops->go_inval)
835 			glops->go_inval(gl, DIO_METADATA);
836 	} else if (gl->gl_state == LM_ST_DEFERRED) {
837 		/* We might not want to do this here.
838 		   Look at moving to the inode glops. */
839 		if (glops->go_inval)
840 			glops->go_inval(gl, 0);
841 	}
842 
843 	/*  Deal with each possible exit condition  */
844 
845 	if (!gh)
846 		gl->gl_stamp = jiffies;
847 	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
848 		spin_lock(&gl->gl_spin);
849 		list_del_init(&gh->gh_list);
850 		gh->gh_error = -EIO;
851 		spin_unlock(&gl->gl_spin);
852 	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
853 		spin_lock(&gl->gl_spin);
854 		list_del_init(&gh->gh_list);
855 		if (gl->gl_state == gh->gh_state ||
856 		    gl->gl_state == LM_ST_UNLOCKED) {
857 			gh->gh_error = 0;
858 		} else {
859 			if (gfs2_assert_warn(sdp, gh->gh_flags &
860 					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
861 				fs_warn(sdp, "ret = 0x%.8X\n", ret);
862 			gh->gh_error = GLR_TRYFAILED;
863 		}
864 		spin_unlock(&gl->gl_spin);
865 
866 		if (ret & LM_OUT_CANCELED)
867 			handle_callback(gl, LM_ST_UNLOCKED);
868 
869 	} else if (ret & LM_OUT_CANCELED) {
870 		spin_lock(&gl->gl_spin);
871 		list_del_init(&gh->gh_list);
872 		gh->gh_error = GLR_CANCELED;
873 		spin_unlock(&gl->gl_spin);
874 
875 	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
876 		spin_lock(&gl->gl_spin);
877 		list_move_tail(&gh->gh_list, &gl->gl_holders);
878 		gh->gh_error = 0;
879 		set_bit(HIF_HOLDER, &gh->gh_iflags);
880 		spin_unlock(&gl->gl_spin);
881 
882 		set_bit(HIF_FIRST, &gh->gh_iflags);
883 
884 		op_done = 0;
885 
886 	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
887 		spin_lock(&gl->gl_spin);
888 		list_del_init(&gh->gh_list);
889 		gh->gh_error = GLR_TRYFAILED;
890 		spin_unlock(&gl->gl_spin);
891 
892 	} else {
893 		if (gfs2_assert_withdraw(sdp, 0) == -1)
894 			fs_err(sdp, "ret = 0x%.8X\n", ret);
895 	}
896 
897 	if (glops->go_xmote_bh)
898 		glops->go_xmote_bh(gl);
899 
900 	if (op_done) {
901 		spin_lock(&gl->gl_spin);
902 		gl->gl_req_gh = NULL;
903 		gl->gl_req_bh = NULL;
904 		clear_bit(GLF_LOCK, &gl->gl_flags);
905 		run_queue(gl);
906 		spin_unlock(&gl->gl_spin);
907 	}
908 
909 	gfs2_glock_put(gl);
910 
911 	if (gh) {
912 		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
913 			gfs2_holder_put(gh);
914 		else
915 			complete(&gh->gh_wait);
916 	}
917 }
918 
919 /**
920  * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
921  * @gl: The glock in question
922  * @state: the requested state
923  * @flags: modifier flags to the lock call
924  *
925  */
926 
927 void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
928 {
929 	struct gfs2_sbd *sdp = gl->gl_sbd;
930 	const struct gfs2_glock_operations *glops = gl->gl_ops;
931 	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
932 				 LM_FLAG_NOEXP | LM_FLAG_ANY |
933 				 LM_FLAG_PRIORITY);
934 	unsigned int lck_ret;
935 
936 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
937 	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
938 	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
939 	gfs2_assert_warn(sdp, state != gl->gl_state);
940 
941 	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
942 		glops->go_sync(gl);
943 
944 	gfs2_glock_hold(gl);
945 	gl->gl_req_bh = xmote_bh;
946 
947 	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
948 
949 	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
950 		return;
951 
952 	if (lck_ret & LM_OUT_ASYNC)
953 		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
954 	else
955 		xmote_bh(gl, lck_ret);
956 }
957 
958 /**
959  * drop_bh - Called after a lock module unlock completes
960  * @gl: the glock
961  * @ret: the return status
962  *
963  * Wakes up (or frees) the process waiting on the struct gfs2_holder (if any)
964  * and drops the glock reference that gfs2_glock_drop_th() took out.
965  *
966  */
967 
968 static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
969 {
970 	struct gfs2_sbd *sdp = gl->gl_sbd;
971 	const struct gfs2_glock_operations *glops = gl->gl_ops;
972 	struct gfs2_holder *gh = gl->gl_req_gh;
973 
974 	clear_bit(GLF_PREFETCH, &gl->gl_flags);
975 
976 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
977 	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
978 	gfs2_assert_warn(sdp, !ret);
979 
980 	state_change(gl, LM_ST_UNLOCKED);
981 
982 	if (glops->go_inval)
983 		glops->go_inval(gl, DIO_METADATA);
984 
985 	if (gh) {
986 		spin_lock(&gl->gl_spin);
987 		list_del_init(&gh->gh_list);
988 		gh->gh_error = 0;
989 		spin_unlock(&gl->gl_spin);
990 	}
991 
992 	if (glops->go_drop_bh)
993 		glops->go_drop_bh(gl);
994 
995 	spin_lock(&gl->gl_spin);
996 	gl->gl_req_gh = NULL;
997 	gl->gl_req_bh = NULL;
998 	clear_bit(GLF_LOCK, &gl->gl_flags);
999 	run_queue(gl);
1000 	spin_unlock(&gl->gl_spin);
1001 
1002 	gfs2_glock_put(gl);
1003 
1004 	if (gh) {
1005 		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
1006 			gfs2_holder_put(gh);
1007 		else
1008 			complete(&gh->gh_wait);
1009 	}
1010 }
1011 
1012 /**
1013  * gfs2_glock_drop_th - call into the lock module to unlock a lock
1014  * @gl: the glock
1015  *
1016  */
1017 
1018 void gfs2_glock_drop_th(struct gfs2_glock *gl)
1019 {
1020 	struct gfs2_sbd *sdp = gl->gl_sbd;
1021 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1022 	unsigned int ret;
1023 
1024 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1025 	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
1026 	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
1027 
1028 	if (gl->gl_state == LM_ST_EXCLUSIVE && glops->go_sync)
1029 		glops->go_sync(gl);
1030 
1031 	gfs2_glock_hold(gl);
1032 	gl->gl_req_bh = drop_bh;
1033 
1034 	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
1035 
1036 	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
1037 		return;
1038 
1039 	if (!ret)
1040 		drop_bh(gl, ret);
1041 	else
1042 		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
1043 }
1044 
1045 /**
1046  * do_cancels - cancel requests for locks stuck waiting on an expire flag
1047  * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
1048  *
1049  * Don't cancel GL_NOCANCEL requests.
1050  */
1051 
1052 static void do_cancels(struct gfs2_holder *gh)
1053 {
1054 	struct gfs2_glock *gl = gh->gh_gl;
1055 
1056 	spin_lock(&gl->gl_spin);
1057 
1058 	while (gl->gl_req_gh != gh &&
1059 	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
1060 	       !list_empty(&gh->gh_list)) {
1061 		if (gl->gl_req_bh && !(gl->gl_req_gh &&
1062 				     (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
1063 			spin_unlock(&gl->gl_spin);
1064 			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
1065 			msleep(100);
1066 			spin_lock(&gl->gl_spin);
1067 		} else {
1068 			spin_unlock(&gl->gl_spin);
1069 			msleep(100);
1070 			spin_lock(&gl->gl_spin);
1071 		}
1072 	}
1073 
1074 	spin_unlock(&gl->gl_spin);
1075 }
1076 
1077 /**
1078  * glock_wait_internal - wait on a glock acquisition
1079  * @gh: the glock holder
1080  *
1081  * Returns: 0 on success
1082  */
1083 
1084 static int glock_wait_internal(struct gfs2_holder *gh)
1085 {
1086 	struct gfs2_glock *gl = gh->gh_gl;
1087 	struct gfs2_sbd *sdp = gl->gl_sbd;
1088 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1089 
1090 	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
1091 		return -EIO;
1092 
1093 	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1094 		spin_lock(&gl->gl_spin);
1095 		if (gl->gl_req_gh != gh &&
1096 		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
1097 		    !list_empty(&gh->gh_list)) {
1098 			list_del_init(&gh->gh_list);
1099 			gh->gh_error = GLR_TRYFAILED;
1100 			run_queue(gl);
1101 			spin_unlock(&gl->gl_spin);
1102 			return gh->gh_error;
1103 		}
1104 		spin_unlock(&gl->gl_spin);
1105 	}
1106 
1107 	if (gh->gh_flags & LM_FLAG_PRIORITY)
1108 		do_cancels(gh);
1109 
1110 	wait_for_completion(&gh->gh_wait);
1111 
1112 	if (gh->gh_error)
1113 		return gh->gh_error;
1114 
1115 	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
1116 	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
1117 						   gh->gh_flags));
1118 
1119 	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
1120 		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
1121 
1122 		if (glops->go_lock) {
1123 			gh->gh_error = glops->go_lock(gh);
1124 			if (gh->gh_error) {
1125 				spin_lock(&gl->gl_spin);
1126 				list_del_init(&gh->gh_list);
1127 				spin_unlock(&gl->gl_spin);
1128 			}
1129 		}
1130 
1131 		spin_lock(&gl->gl_spin);
1132 		gl->gl_req_gh = NULL;
1133 		gl->gl_req_bh = NULL;
1134 		clear_bit(GLF_LOCK, &gl->gl_flags);
1135 		run_queue(gl);
1136 		spin_unlock(&gl->gl_spin);
1137 	}
1138 
1139 	return gh->gh_error;
1140 }
1141 
1142 static inline struct gfs2_holder *
1143 find_holder_by_owner(struct list_head *head, struct task_struct *owner)
1144 {
1145 	struct gfs2_holder *gh;
1146 
1147 	list_for_each_entry(gh, head, gh_list) {
1148 		if (gh->gh_owner == owner)
1149 			return gh;
1150 	}
1151 
1152 	return NULL;
1153 }
1154 
1155 /**
1156  * add_to_queue - Add a holder to the wait queue (but look for recursion)
1157  * @gh: the holder structure to add
1158  *
1159  */
1160 
1161 static void add_to_queue(struct gfs2_holder *gh)
1162 {
1163 	struct gfs2_glock *gl = gh->gh_gl;
1164 	struct gfs2_holder *existing;
1165 
1166 	BUG_ON(!gh->gh_owner);
1167 
1168 	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
1169 	if (existing) {
1170 		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1171 		printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
1172 		printk(KERN_INFO "lock type : %d lock state : %d\n",
1173 				existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
1174 		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1175 		printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
1176 		printk(KERN_INFO "lock type : %d lock state : %d\n",
1177 				gl->gl_name.ln_type, gl->gl_state);
1178 		BUG();
1179 	}
1180 
1181 	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
1182 	if (existing) {
1183 		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
1184 		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
1185 		BUG();
1186 	}
1187 
1188 	if (gh->gh_flags & LM_FLAG_PRIORITY)
1189 		list_add(&gh->gh_list, &gl->gl_waiters3);
1190 	else
1191 		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
1192 }
1193 
1194 /**
1195  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1196  * @gh: the holder structure
1197  *
1198  * If (gh->gh_flags & GL_ASYNC), this never returns an error; the caller
1199  * must complete the request with gfs2_glock_poll()/gfs2_glock_wait().
1200  * Returns: 0, GLR_TRYFAILED, or errno on failure
1201  */
1202 
1203 int gfs2_glock_nq(struct gfs2_holder *gh)
1204 {
1205 	struct gfs2_glock *gl = gh->gh_gl;
1206 	struct gfs2_sbd *sdp = gl->gl_sbd;
1207 	int error = 0;
1208 
1209 restart:
1210 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1211 		set_bit(HIF_ABORTED, &gh->gh_iflags);
1212 		return -EIO;
1213 	}
1214 
1215 	set_bit(HIF_PROMOTE, &gh->gh_iflags);
1216 
1217 	spin_lock(&gl->gl_spin);
1218 	add_to_queue(gh);
1219 	run_queue(gl);
1220 	spin_unlock(&gl->gl_spin);
1221 
1222 	if (!(gh->gh_flags & GL_ASYNC)) {
1223 		error = glock_wait_internal(gh);
1224 		if (error == GLR_CANCELED) {
1225 			msleep(100);
1226 			goto restart;
1227 		}
1228 	}
1229 
1230 	clear_bit(GLF_PREFETCH, &gl->gl_flags);
1231 
1232 	return error;
1233 }
1234 
1235 /**
1236  * gfs2_glock_poll - poll to see if an async request has been completed
1237  * @gh: the holder
1238  *
1239  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1240  */
1241 
1242 int gfs2_glock_poll(struct gfs2_holder *gh)
1243 {
1244 	struct gfs2_glock *gl = gh->gh_gl;
1245 	int ready = 0;
1246 
1247 	spin_lock(&gl->gl_spin);
1248 
1249 	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1250 		ready = 1;
1251 	else if (list_empty(&gh->gh_list)) {
1252 		if (gh->gh_error == GLR_CANCELED) {
1253 			spin_unlock(&gl->gl_spin);
1254 			msleep(100);
1255 			if (gfs2_glock_nq(gh))
1256 				return 1;
1257 			return 0;
1258 		} else
1259 			ready = 1;
1260 	}
1261 
1262 	spin_unlock(&gl->gl_spin);
1263 
1264 	return ready;
1265 }
1266 
1267 /**
1268  * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
1269  * @gh: the holder structure
1270  *
1271  * Returns: 0, GLR_TRYFAILED, or errno on failure
1272  */
1273 
1274 int gfs2_glock_wait(struct gfs2_holder *gh)
1275 {
1276 	int error;
1277 
1278 	error = glock_wait_internal(gh);
1279 	if (error == GLR_CANCELED) {
1280 		msleep(100);
1281 		gh->gh_flags &= ~GL_ASYNC;
1282 		error = gfs2_glock_nq(gh);
1283 	}
1284 
1285 	return error;
1286 }
1287 
1288 /**
1289  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1290  * @gh: the glock holder
1291  *
1292  */
1293 
1294 void gfs2_glock_dq(struct gfs2_holder *gh)
1295 {
1296 	struct gfs2_glock *gl = gh->gh_gl;
1297 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1298 
1299 	if (gh->gh_flags & GL_NOCACHE)
1300 		handle_callback(gl, LM_ST_UNLOCKED);
1301 
1302 	gfs2_glmutex_lock(gl);
1303 
1304 	spin_lock(&gl->gl_spin);
1305 	list_del_init(&gh->gh_list);
1306 
1307 	if (list_empty(&gl->gl_holders)) {
1308 		spin_unlock(&gl->gl_spin);
1309 
1310 		if (glops->go_unlock)
1311 			glops->go_unlock(gh);
1312 
1313 		gl->gl_stamp = jiffies;
1314 
1315 		spin_lock(&gl->gl_spin);
1316 	}
1317 
1318 	clear_bit(GLF_LOCK, &gl->gl_flags);
1319 	run_queue(gl);
1320 	spin_unlock(&gl->gl_spin);
1321 }
1322 
1323 /**
1324  * gfs2_glock_prefetch - Try to prefetch a glock
1325  * @gl: the glock
1326  * @state: the state to prefetch in
1327  * @flags: flags passed to go_xmote_th()
1328  *
1329  */
1330 
1331 static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
1332 				int flags)
1333 {
1334 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1335 
1336 	spin_lock(&gl->gl_spin);
1337 
1338 	if (test_bit(GLF_LOCK, &gl->gl_flags) || !list_empty(&gl->gl_holders) ||
1339 	    !list_empty(&gl->gl_waiters1) || !list_empty(&gl->gl_waiters2) ||
1340 	    !list_empty(&gl->gl_waiters3) ||
1341 	    relaxed_state_ok(gl->gl_state, state, flags)) {
1342 		spin_unlock(&gl->gl_spin);
1343 		return;
1344 	}
1345 
1346 	set_bit(GLF_PREFETCH, &gl->gl_flags);
1347 	set_bit(GLF_LOCK, &gl->gl_flags);
1348 	spin_unlock(&gl->gl_spin);
1349 
1350 	glops->go_xmote_th(gl, state, flags);
1351 }
1352 
1353 static void greedy_work(void *data)
1354 {
1355 	struct greedy *gr = data;
1356 	struct gfs2_holder *gh = &gr->gr_gh;
1357 	struct gfs2_glock *gl = gh->gh_gl;
1358 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1359 
1360 	clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1361 
1362 	if (glops->go_greedy)
1363 		glops->go_greedy(gl);
1364 
1365 	spin_lock(&gl->gl_spin);
1366 
1367 	if (list_empty(&gl->gl_waiters2)) {
1368 		clear_bit(GLF_GREEDY, &gl->gl_flags);
1369 		spin_unlock(&gl->gl_spin);
1370 		gfs2_holder_uninit(gh);
1371 		kfree(gr);
1372 	} else {
1373 		gfs2_glock_hold(gl);
1374 		list_add_tail(&gh->gh_list, &gl->gl_waiters2);
1375 		run_queue(gl);
1376 		spin_unlock(&gl->gl_spin);
1377 		gfs2_glock_put(gl);
1378 	}
1379 }
1380 
1381 /**
1382  * gfs2_glock_be_greedy - defer remote demote requests on a glock for a while
1383  * @gl: the glock
1384  * @time: how long to be greedy, in jiffies
1385  *
1386  * Returns: 0 if go_greedy will be called, 1 otherwise
1387  */
1388 
1389 int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
1390 {
1391 	struct greedy *gr;
1392 	struct gfs2_holder *gh;
1393 
1394 	if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
1395 	    test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
1396 		return 1;
1397 
1398 	gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
1399 	if (!gr) {
1400 		clear_bit(GLF_GREEDY, &gl->gl_flags);
1401 		return 1;
1402 	}
1403 	gh = &gr->gr_gh;
1404 
1405 	gfs2_holder_init(gl, 0, 0, gh);
1406 	set_bit(HIF_GREEDY, &gh->gh_iflags);
1407 	INIT_WORK(&gr->gr_work, greedy_work, gr);
1408 
1409 	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
1410 	schedule_delayed_work(&gr->gr_work, time);
1411 
1412 	return 0;
1413 }
1414 
1415 /**
1416  * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1417  * @gh: the holder structure
1418  *
1419  */
1420 
1421 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1422 {
1423 	gfs2_glock_dq(gh);
1424 	gfs2_holder_uninit(gh);
1425 }
1426 
1427 /**
1428  * gfs2_glock_nq_num - acquire a glock based on lock number
1429  * @sdp: the filesystem
1430  * @number: the lock number
1431  * @glops: the glock operations for the type of glock
1432  * @state: the state to acquire the glock in
1433  * @flags: modifier flags for the acquisition
1434  * @gh: the struct gfs2_holder
1435  *
1436  * Returns: errno
1437  */
1438 
1439 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1440 		      const struct gfs2_glock_operations *glops,
1441 		      unsigned int state, int flags, struct gfs2_holder *gh)
1442 {
1443 	struct gfs2_glock *gl;
1444 	int error;
1445 
1446 	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1447 	if (!error) {
1448 		error = gfs2_glock_nq_init(gl, state, flags, gh);
1449 		gfs2_glock_put(gl);
1450 	}
1451 
1452 	return error;
1453 }
1454 
1455 /**
1456  * glock_compare - Compare two struct gfs2_glock structures for sorting
1457  * @arg_a: the first structure
1458  * @arg_b: the second structure
1459  *
1460  */
1461 
1462 static int glock_compare(const void *arg_a, const void *arg_b)
1463 {
1464 	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1465 	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1466 	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1467 	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1468 
1469 	if (a->ln_number > b->ln_number)
1470 		return 1;
1471 	if (a->ln_number < b->ln_number)
1472 		return -1;
1473 	if (gh_a->gh_state == LM_ST_SHARED && gh_b->gh_state == LM_ST_EXCLUSIVE)
1474 		return 1;
1475 	if (!(gh_a->gh_flags & GL_LOCAL_EXCL) && (gh_b->gh_flags & GL_LOCAL_EXCL))
1476 		return 1;
1477 	return 0;
1478 }
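
/*
 * Sorting holders with glock_compare() gives every caller the same
 * global acquisition order, which is what makes nq_m_sync() below
 * deadlock free: if two tasks both need locks 5 and 9, both take 5
 * first, so neither can wait on 5 while holding 9.
 */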
1479 
1480 /**
1481  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1482  * @num_gh: the number of structures
1483  * @ghs: an array of struct gfs2_holder structures
1484  *
1485  * Returns: 0 on success (all glocks acquired),
1486  *          errno on failure (no glocks acquired)
1487  */
1488 
1489 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1490 		     struct gfs2_holder **p)
1491 {
1492 	unsigned int x;
1493 	int error = 0;
1494 
1495 	for (x = 0; x < num_gh; x++)
1496 		p[x] = &ghs[x];
1497 
1498 	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1499 
1500 	for (x = 0; x < num_gh; x++) {
1501 		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1502 
1503 		error = gfs2_glock_nq(p[x]);
1504 		if (error) {
1505 			while (x--)
1506 				gfs2_glock_dq(p[x]);
1507 			break;
1508 		}
1509 	}
1510 
1511 	return error;
1512 }
1513 
1514 /**
1515  * gfs2_glock_nq_m - acquire multiple glocks
1516  * @num_gh: the number of structures
1517  * @ghs: an array of struct gfs2_holder structures
1518  *
1519  * Figure out how big an impact this function has.  Either:
1520  * 1) Replace this code with code that calls gfs2_glock_prefetch()
1521  * 2) Forget async stuff and just call nq_m_sync()
1522  * 3) Leave it like it is
1523  *
1524  * Returns: 0 on success (all glocks acquired),
1525  *          errno on failure (no glocks acquired)
1526  */
1527 
1528 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1529 {
1530 	int *e;
1531 	unsigned int x;
1532 	int borked = 0, serious = 0;
1533 	int error = 0;
1534 
1535 	if (!num_gh)
1536 		return 0;
1537 
1538 	if (num_gh == 1) {
1539 		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1540 		return gfs2_glock_nq(ghs);
1541 	}
1542 
1543 	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
1544 	if (!e)
1545 		return -ENOMEM;
1546 
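	/* Note: e does double duty: first as an array of per-holder error
	   codes, then (via the cast below) as the sorted pointer array for
	   nq_m_sync(). This is safe because each element was allocated
	   with sizeof(struct gfs2_holder *) >= sizeof(int). */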
1547 	for (x = 0; x < num_gh; x++) {
1548 		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
1549 		error = gfs2_glock_nq(&ghs[x]);
1550 		if (error) {
1551 			borked = 1;
1552 			serious = error;
1553 			num_gh = x;
1554 			break;
1555 		}
1556 	}
1557 
1558 	for (x = 0; x < num_gh; x++) {
1559 		error = e[x] = glock_wait_internal(&ghs[x]);
1560 		if (error) {
1561 			borked = 1;
1562 			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
1563 				serious = error;
1564 		}
1565 	}
1566 
1567 	if (!borked) {
1568 		kfree(e);
1569 		return 0;
1570 	}
1571 
1572 	for (x = 0; x < num_gh; x++)
1573 		if (!e[x])
1574 			gfs2_glock_dq(&ghs[x]);
1575 
1576 	if (serious)
1577 		error = serious;
1578 	else {
1579 		for (x = 0; x < num_gh; x++)
1580 			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
1581 					  &ghs[x]);
1582 		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
1583 	}
1584 
1585 	kfree(e);
1586 
1587 	return error;
1588 }
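
/*
 * Usage sketch (illustrative, with two hypothetical glocks gl1/gl2):
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl2, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error) {
 *		...
 *		gfs2_glock_dq_m(2, ghs);
 *	}
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */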
1589 
1590 /**
1591  * gfs2_glock_dq_m - release multiple glocks
1592  * @num_gh: the number of structures
1593  * @ghs: an array of struct gfs2_holder structures
1594  *
1595  */
1596 
1597 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1598 {
1599 	unsigned int x;
1600 
1601 	for (x = 0; x < num_gh; x++)
1602 		gfs2_glock_dq(&ghs[x]);
1603 }
1604 
1605 /**
1606  * gfs2_glock_dq_uninit_m - release multiple glocks
1607  * @num_gh: the number of structures
1608  * @ghs: an array of struct gfs2_holder structures
1609  *
1610  */
1611 
1612 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1613 {
1614 	unsigned int x;
1615 
1616 	for (x = 0; x < num_gh; x++)
1617 		gfs2_glock_dq_uninit(&ghs[x]);
1618 }
1619 
1620 /**
1621  * gfs2_glock_prefetch_num - prefetch a glock based on lock number
1622  * @sdp: the filesystem
1623  * @number: the lock number
1624  * @glops: the glock operations for the type of glock
1625  * @state: the state to acquire the glock in
1626  * @flags: modifier flags for the acquisition
1627  *
1629  */
1630 
1631 void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, u64 number,
1632 			     const struct gfs2_glock_operations *glops,
1633 			     unsigned int state, int flags)
1634 {
1635 	struct gfs2_glock *gl;
1636 	int error;
1637 
1638 	if (atomic_read(&sdp->sd_reclaim_count) <
1639 	    gfs2_tune_get(sdp, gt_reclaim_limit)) {
1640 		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1641 		if (!error) {
1642 			gfs2_glock_prefetch(gl, state, flags);
1643 			gfs2_glock_put(gl);
1644 		}
1645 	}
1646 }
1647 
1648 /**
1649  * gfs2_lvb_hold - attach a LVB to a glock
1650  * @gl: The glock in question
1651  * Returns: errno
1652  */
1653 
1654 int gfs2_lvb_hold(struct gfs2_glock *gl)
1655 {
1656 	int error;
1657 
1658 	gfs2_glmutex_lock(gl);
1659 
1660 	if (!atomic_read(&gl->gl_lvb_count)) {
1661 		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1662 		if (error) {
1663 			gfs2_glmutex_unlock(gl);
1664 			return error;
1665 		}
1666 		gfs2_glock_hold(gl);
1667 	}
1668 	atomic_inc(&gl->gl_lvb_count);
1669 
1670 	gfs2_glmutex_unlock(gl);
1671 
1672 	return 0;
1673 }
1674 
1675 /**
1676  * gfs2_lvb_unhold - detach a LVB from a glock
1677  * @gl: The glock in question
1678  *
1679  */
1680 
1681 void gfs2_lvb_unhold(struct gfs2_glock *gl)
1682 {
1683 	gfs2_glock_hold(gl);
1684 	gfs2_glmutex_lock(gl);
1685 
1686 	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1687 	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1688 		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1689 		gl->gl_lvb = NULL;
1690 		gfs2_glock_put(gl);
1691 	}
1692 
1693 	gfs2_glmutex_unlock(gl);
1694 	gfs2_glock_put(gl);
1695 }
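
/*
 * Illustrative pairing: every successful gfs2_lvb_hold() must be
 * balanced by a gfs2_lvb_unhold() once the caller is done with the
 * lock value block.
 *
 *	error = gfs2_lvb_hold(gl);
 *	if (!error) {
 *		... read or update the data behind gl->gl_lvb ...
 *		gfs2_lvb_unhold(gl);
 *	}
 */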
1696 
1697 static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1698 			unsigned int state)
1699 {
1700 	struct gfs2_glock *gl;
1701 
1702 	gl = gfs2_glock_find(sdp, name);
1703 	if (!gl)
1704 		return;
1705 
1706 	if (gl->gl_ops->go_callback)
1707 		gl->gl_ops->go_callback(gl, state);
1708 	handle_callback(gl, state);
1709 
1710 	spin_lock(&gl->gl_spin);
1711 	run_queue(gl);
1712 	spin_unlock(&gl->gl_spin);
1713 
1714 	gfs2_glock_put(gl);
1715 }
1716 
1717 /**
1718  * gfs2_glock_cb - Callback used by locking module
1719  * @sdp: Pointer to the superblock
1720  * @type: Type of callback
1721  * @data: Type dependent data pointer
1722  *
1723  * Called by the locking module when it wants to tell us something.
1724  * Either we need to drop a lock, one of our ASYNC requests completed, or
1725  * a journal from another client needs to be recovered.
1726  */
1727 
1728 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
1729 {
1730 	struct gfs2_sbd *sdp = cb_data;
1731 
1732 	switch (type) {
1733 	case LM_CB_NEED_E:
1734 		blocking_cb(sdp, data, LM_ST_UNLOCKED);
1735 		return;
1736 
1737 	case LM_CB_NEED_D:
1738 		blocking_cb(sdp, data, LM_ST_DEFERRED);
1739 		return;
1740 
1741 	case LM_CB_NEED_S:
1742 		blocking_cb(sdp, data, LM_ST_SHARED);
1743 		return;
1744 
1745 	case LM_CB_ASYNC: {
1746 		struct lm_async_cb *async = data;
1747 		struct gfs2_glock *gl;
1748 
1749 		gl = gfs2_glock_find(sdp, &async->lc_name);
1750 		if (gfs2_assert_warn(sdp, gl))
1751 			return;
1752 		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1753 			gl->gl_req_bh(gl, async->lc_ret);
1754 		gfs2_glock_put(gl);
1755 		return;
1756 	}
1757 
1758 	case LM_CB_NEED_RECOVERY:
1759 		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1760 		if (sdp->sd_recoverd_process)
1761 			wake_up_process(sdp->sd_recoverd_process);
1762 		return;
1763 
1764 	case LM_CB_DROPLOCKS:
1765 		gfs2_gl_hash_clear(sdp, NO_WAIT);
1766 		gfs2_quota_scan(sdp);
1767 		return;
1768 
1769 	default:
1770 		gfs2_assert_warn(sdp, 0);
1771 		return;
1772 	}
1773 }
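
/*
 * Illustrative invocation from a lock module (hypothetical module code;
 * cb_data is the superblock pointer the module was given at mount time,
 * and blkno is a hypothetical lock number):
 *
 *	struct lm_lockname name = { .ln_number = blkno,
 *				    .ln_type = LM_TYPE_INODE };
 *
 *	gfs2_glock_cb(cb_data, LM_CB_NEED_S, &name);
 */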
1774 
1775 /**
1776  * demote_ok - Check to see if it's ok to unlock a glock
1777  * @gl: the glock
1778  *
1779  * Returns: 1 if it's ok
1780  */
1781 
1782 static int demote_ok(struct gfs2_glock *gl)
1783 {
1784 	struct gfs2_sbd *sdp = gl->gl_sbd;
1785 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1786 	int demote = 1;
1787 
1788 	if (test_bit(GLF_STICKY, &gl->gl_flags))
1789 		demote = 0;
1790 	else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
1791 		demote = time_after_eq(jiffies, gl->gl_stamp +
1792 				    gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
1793 	else if (glops->go_demote_ok)
1794 		demote = glops->go_demote_ok(gl);
1795 
1796 	return demote;
1797 }
1798 
1799 /**
1800  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
1801  * @gl: the glock
1802  *
1803  */
1804 
1805 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
1806 {
1807 	struct gfs2_sbd *sdp = gl->gl_sbd;
1808 
1809 	spin_lock(&sdp->sd_reclaim_lock);
1810 	if (list_empty(&gl->gl_reclaim)) {
1811 		gfs2_glock_hold(gl);
1812 		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
1813 		atomic_inc(&sdp->sd_reclaim_count);
1814 	}
1815 	spin_unlock(&sdp->sd_reclaim_lock);
1816 
1817 	wake_up(&sdp->sd_reclaim_wq);
1818 }
1819 
1820 /**
1821  * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
1822  * @sdp: the filesystem
1823  *
1824  * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
1825  * different glock and we notice that there are a lot of glocks in the
1826  * reclaim list.
1827  *
1828  */
1829 
1830 void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1831 {
1832 	struct gfs2_glock *gl;
1833 
1834 	spin_lock(&sdp->sd_reclaim_lock);
1835 	if (list_empty(&sdp->sd_reclaim_list)) {
1836 		spin_unlock(&sdp->sd_reclaim_lock);
1837 		return;
1838 	}
1839 	gl = list_entry(sdp->sd_reclaim_list.next,
1840 			struct gfs2_glock, gl_reclaim);
1841 	list_del_init(&gl->gl_reclaim);
1842 	spin_unlock(&sdp->sd_reclaim_lock);
1843 
1844 	atomic_dec(&sdp->sd_reclaim_count);
1845 	atomic_inc(&sdp->sd_reclaimed);
1846 
1847 	if (gfs2_glmutex_trylock(gl)) {
1848 		if (queue_empty(gl, &gl->gl_holders) &&
1849 		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1850 			handle_callback(gl, LM_ST_UNLOCKED);
1851 		gfs2_glmutex_unlock(gl);
1852 	}
1853 
1854 	gfs2_glock_put(gl);
1855 }
1856 
1857 /**
1858  * examine_bucket - Call a function for each glock in a hash bucket
1859  * @examiner: the function
1860  * @sdp: the filesystem
1861  * @bucket: the bucket
1862  *
1863  * Returns: 1 if the bucket has entries
1864  */
1865 
1866 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
1867 			  unsigned int hash)
1868 {
1869 	struct gfs2_glock *gl, *prev = NULL;
1870 	int has_entries = 0;
1871 	struct hlist_head *head = &gl_hash_table[hash].hb_list;
1872 
1873 	read_lock(gl_lock_addr(hash));
1874 	/* Can't use hlist_for_each_entry - don't want prefetch here */
1875 	if (hlist_empty(head))
1876 		goto out;
1877 	gl = list_entry(head->first, struct gfs2_glock, gl_list);
1878 	while(1) {
1879 		if (gl->gl_sbd == sdp) {
1880 			gfs2_glock_hold(gl);
1881 			read_unlock(gl_lock_addr(hash));
1882 			if (prev)
1883 				gfs2_glock_put(prev);
1884 			prev = gl;
1885 			examiner(gl);
1886 			has_entries = 1;
1887 			read_lock(gl_lock_addr(hash));
1888 		}
1889 		if (gl->gl_list.next == NULL)
1890 			break;
1891 		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
1892 	}
1893 out:
1894 	read_unlock(gl_lock_addr(hash));
1895 	if (prev)
1896 		gfs2_glock_put(prev);
1897 	return has_entries;
1898 }
1899 
1900 /**
1901  * scan_glock - look at a glock and see if we can reclaim it
1902  * @gl: the glock to look at
1903  *
1904  */
1905 
1906 static void scan_glock(struct gfs2_glock *gl)
1907 {
1908 	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
1909 		return;
1910 
1911 	if (gfs2_glmutex_trylock(gl)) {
1912 		if (queue_empty(gl, &gl->gl_holders) &&
1913 		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
1914 			goto out_schedule;
1915 		gfs2_glmutex_unlock(gl);
1916 	}
1917 	return;
1918 
1919 out_schedule:
1920 	gfs2_glmutex_unlock(gl);
1921 	gfs2_glock_schedule_for_reclaim(gl);
1922 }
1923 
1924 /**
1925  * gfs2_scand_internal - Look for glocks and inodes to toss from memory
1926  * @sdp: the filesystem
1927  *
1928  */
1929 
1930 void gfs2_scand_internal(struct gfs2_sbd *sdp)
1931 {
1932 	unsigned int x;
1933 
1934 	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1935 		examine_bucket(scan_glock, sdp, x);
1936 }
1937 
1938 /**
1939  * clear_glock - look at a glock and see if we can free it from glock cache
1940  * @gl: the glock to look at
1941  *
1942  */
1943 
1944 static void clear_glock(struct gfs2_glock *gl)
1945 {
1946 	struct gfs2_sbd *sdp = gl->gl_sbd;
1947 	int released;
1948 
1949 	spin_lock(&sdp->sd_reclaim_lock);
1950 	if (!list_empty(&gl->gl_reclaim)) {
1951 		list_del_init(&gl->gl_reclaim);
1952 		atomic_dec(&sdp->sd_reclaim_count);
1953 		spin_unlock(&sdp->sd_reclaim_lock);
1954 		released = gfs2_glock_put(gl);
1955 		gfs2_assert(sdp, !released);
1956 	} else {
1957 		spin_unlock(&sdp->sd_reclaim_lock);
1958 	}
1959 
1960 	if (gfs2_glmutex_trylock(gl)) {
1961 		if (queue_empty(gl, &gl->gl_holders) &&
1962 		    gl->gl_state != LM_ST_UNLOCKED)
1963 			handle_callback(gl, LM_ST_UNLOCKED);
1964 		gfs2_glmutex_unlock(gl);
1965 	}
1966 }
1967 
1968 /**
1969  * gfs2_gl_hash_clear - Empty out the glock hash table
1970  * @sdp: the filesystem
1971  * @wait: wait until it's all gone
1972  *
1973  * Called when unmounting the filesystem, or when inter-node lock manager
1974  * requests DROPLOCKS because it is running out of capacity.
1975  */
1976 
1977 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
1978 {
1979 	unsigned long t;
1980 	unsigned int x;
1981 	int cont;
1982 
1983 	t = jiffies;
1984 
1985 	for (;;) {
1986 		cont = 0;
1987 		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
1988 			if (examine_bucket(clear_glock, sdp, x))
1989 				cont = 1;
1990 		}
1991 
1992 		if (!wait || !cont)
1993 			break;
1994 
1995 		if (time_after_eq(jiffies,
1996 				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
1997 			fs_warn(sdp, "Unmount seems to be stalled. "
1998 				     "Dumping lock state...\n");
1999 			gfs2_dump_lockstate(sdp);
2000 			t = jiffies;
2001 		}
2002 
2003 		invalidate_inodes(sdp->sd_vfs);
2004 		msleep(10);
2005 	}
2006 }
2007 
2008 /*
2009  *  Diagnostic routines to help debug distributed deadlock
2010  */
2011 
2012 /**
2013  * dump_holder - print information about a glock holder
2014  * @str: a string naming the type of holder
2015  * @gh: the glock holder
2016  *
2017  * Returns: 0 on success, -ENOBUFS when we run out of space
2018  */
2019 
2020 static int dump_holder(char *str, struct gfs2_holder *gh)
2021 {
2022 	unsigned int x;
2023 	int error = -ENOBUFS;
2024 
2025 	printk(KERN_INFO "  %s\n", str);
2026 	printk(KERN_INFO "    owner = %ld\n",
2027 		   (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
2028 	printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
2029 	printk(KERN_INFO "    gh_flags =");
2030 	for (x = 0; x < 32; x++)
2031 		if (gh->gh_flags & (1 << x))
2032 			printk(" %u", x);
2033 	printk(" \n");
2034 	printk(KERN_INFO "    error = %d\n", gh->gh_error);
2035 	printk(KERN_INFO "    gh_iflags =");
2036 	for (x = 0; x < 32; x++)
2037 		if (test_bit(x, &gh->gh_iflags))
2038 			printk(" %u", x);
2039 	printk(" \n");
2040 	print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);
2041 
2042 	error = 0;
2043 
2044 	return error;
2045 }
2046 
2047 /**
2048  * dump_inode - print information about an inode
2049  * @ip: the inode
2050  *
2051  * Returns: 0 on success, -ENOBUFS when we run out of space
2052  */
2053 
2054 static int dump_inode(struct gfs2_inode *ip)
2055 {
2056 	unsigned int x;
2057 	int error = -ENOBUFS;
2058 
2059 	printk(KERN_INFO "  Inode:\n");
2060 	printk(KERN_INFO "    num = %llu %llu\n",
2061 		    (unsigned long long)ip->i_num.no_formal_ino,
2062 		    (unsigned long long)ip->i_num.no_addr);
2063 	printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_inode.i_mode));
2064 	printk(KERN_INFO "    i_flags =");
2065 	for (x = 0; x < 32; x++)
2066 		if (test_bit(x, &ip->i_flags))
2067 			printk(" %u", x);
2068 	printk(" \n");
2069 
2070 	error = 0;
2071 
2072 	return error;
2073 }
2074 
2075 /**
2076  * dump_glock - print information about a glock
2077  * @gl: the glock
2078  * @count: where we are in the buffer
2079  *
2080  * Returns: 0 on success, -ENOBUFS when we run out of space
2081  */
2082 
2083 static int dump_glock(struct gfs2_glock *gl)
2084 {
2085 	struct gfs2_holder *gh;
2086 	unsigned int x;
2087 	int error = -ENOBUFS;
2088 
2089 	spin_lock(&gl->gl_spin);
2090 
2091 	printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
2092 	       (unsigned long long)gl->gl_name.ln_number);
2093 	printk(KERN_INFO "  gl_flags =");
2094 	for (x = 0; x < 32; x++) {
2095 		if (test_bit(x, &gl->gl_flags))
2096 			printk(" %u", x);
2097 	}
2098 	printk(" \n");
2099 	printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
2100 	printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
2101 	printk(KERN_INFO "  gl_owner = %s\n", gl->gl_owner ? gl->gl_owner->comm : "none");
2102 	print_symbol(KERN_INFO "  gl_ip = %s\n", gl->gl_ip);
2103 	printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
2104 	printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
2105 	printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
2106 	printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
2107 	printk(KERN_INFO "  le = %s\n",
2108 		   (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
2109 	printk(KERN_INFO "  reclaim = %s\n",
2110 		    (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
2111 	if (gl->gl_aspace)
2112 		printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
2113 		       gl->gl_aspace->i_mapping->nrpages);
2114 	else
2115 		printk(KERN_INFO "  aspace = no\n");
2116 	printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
2117 	if (gl->gl_req_gh) {
2118 		error = dump_holder("Request", gl->gl_req_gh);
2119 		if (error)
2120 			goto out;
2121 	}
2122 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
2123 		error = dump_holder("Holder", gh);
2124 		if (error)
2125 			goto out;
2126 	}
2127 	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
2128 		error = dump_holder("Waiter1", gh);
2129 		if (error)
2130 			goto out;
2131 	}
2132 	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
2133 		error = dump_holder("Waiter2", gh);
2134 		if (error)
2135 			goto out;
2136 	}
2137 	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
2138 		error = dump_holder("Waiter3", gh);
2139 		if (error)
2140 			goto out;
2141 	}
2142 	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
2143 		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
2144 		    list_empty(&gl->gl_holders)) {
2145 			error = dump_inode(gl->gl_object);
2146 			if (error)
2147 				goto out;
2148 		} else {
2149 			error = -ENOBUFS;
2150 			printk(KERN_INFO "  Inode: busy\n");
2151 		}
2152 	}
2153 
2154 	error = 0;
2155 
2156 out:
2157 	spin_unlock(&gl->gl_spin);
2158 	return error;
2159 }
2160 
2161 /**
2162  * gfs2_dump_lockstate - print out the current lockstate
2163  * @sdp: the filesystem
2164  *
2165  * Dumps the state of every glock belonging to @sdp to the console.
2166  *
2167  * Returns: 0 on success, or the first error returned by dump_glock()
2168  */
2169 
2170 static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
2171 {
2172 	struct gfs2_glock *gl;
2173 	struct hlist_node *h;
2174 	unsigned int x;
2175 	int error = 0;
2176 
2177 	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2178 
2179 		read_lock(gl_lock_addr(x));
2180 
2181 		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
2182 			if (gl->gl_sbd != sdp)
2183 				continue;
2184 
2185 			error = dump_glock(gl);
2186 			if (error)
2187 				break;
2188 		}
2189 
2190 		read_unlock(gl_lock_addr(x));
2191 
2192 		if (error)
2193 			break;
2194 	}
2195 
2197 	return error;
2198 }
2199 
2200 int __init gfs2_glock_init(void)
2201 {
2202 	unsigned i;
2203 	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
2204 		INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
2205 	}
2206 #ifdef GL_HASH_LOCK_SZ
2207 	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
2208 		rwlock_init(&gl_hash_locks[i]);
2209 	}
2210 #endif
2211 	return 0;
2212 }
2213 
2214