/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index         */
	struct gfs2_sbd *sdp;		/* incore superblock         */
	struct gfs2_glock *gl;		/* current glock struct      */
	char string[512];		/* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
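
/*
 * Note that the superblock pointer itself is folded into the hash, so
 * identical lock numbers from different mounts spread across the table;
 * search_bucket() additionally compares gl_sbd to disambiguate any
 * collisions within a bucket.
 */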

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

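/**
 * gfs2_glock_free - Defer freeing of a glock until after an RCU grace period
 * @gl: The glock to free
 *
 * Also drops one count from sd_glock_disposal, waking up anyone waiting
 * in gfs2_gl_hash_clear() once the last glock has been disposed of.
 */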
void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

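/**
 * gfs2_glock_add_to_lru - Add a glock to (or move it to the end of) the LRU
 * @gl: The glock
 *
 * Glocks on the LRU are candidates for the shrinker; a glock already on
 * the list is moved to the tail so that it is reclaimed last.
 */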
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.
 */

static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	if (demote_ok(gl))
		gfs2_glock_add_to_lru(gl);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_test(&gl->gl_ref)) {
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		gfs2_glock_remove_from_lru(gl);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}

/**
 * search_bucket() - Find struct gfs2_glock by lock name
 * @hash: the hash bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested name
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
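
/*
 * In brief: an exclusive request, or any request queued behind one, is
 * only granted from the head of the queue; a request for the state the
 * glock already holds is granted; GL_EXACT requests are never granted
 * in any other state; a shared or deferred request may also be granted
 * under an exclusively held glock when the head of the queue wants the
 * same state; and LM_FLAG_ANY is satisfied by anything but unlocked.
 */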

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: Zero, or LM_OUT_ERROR if an error occurred
 *
 * Waiting holders get -EIO on LM_OUT_ERROR; otherwise any queued "try"
 * requests are failed with GLR_TRYFAILED.
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type-specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

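/*
 * LM_ST_EXCLUSIVE in gl_demote_state is used as a sentinel meaning "no
 * demote pending", since a remote node never asks us to demote *to* the
 * exclusive state; gfs2_demote_wake() resets it and wakes any waiters.
 */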
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}


/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}


/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

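/*
 * delete_work_func - look up the inode behind an unlinked inode glock and
 * prune its dentry aliases, so that the final iput() can deallocate the
 * inode. (The work is queued on gfs2_delete_workqueue by callers outside
 * this file.)
 */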
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

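/*
 * glock_work_func - the main glock workqueue handler: processes any
 * pending DLM reply via finish_xmote(), turns a pending demote into an
 * active one once the minimum hold time has expired, and then runs the
 * holder queue.
 */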
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}
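
/*
 * A typical calling pattern (cf. gfs2_glock_nq_num() below): look up or
 * create the glock, attach a holder, then drop the creation reference
 * since the holder takes its own:
 *
 *	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
 *	if (!error) {
 *		error = gfs2_glock_nq_init(gl, state, flags, &gh);
 *		gfs2_glock_put(gl);
 *	}
 */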

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

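/*
 * Both helpers below sleep uninterruptibly until the relevant flag bit
 * is cleared by gfs2_holder_wake() or gfs2_demote_wake() respectively.
 */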
static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		/* Bound the write to the scratch buffer, and don't let the
		   formatted result be re-interpreted as a format string */
		vscnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_puts(seq, gi->string);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
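
/*
 * With GL_ASYNC set, gfs2_glock_nq() only queues the request; a sketch
 * of the asynchronous pattern, using gfs2_glock_poll() to test for
 * completion and gfs2_glock_wait() to collect the result:
 *
 *	gfs2_holder_init(gl, state, flags | GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);
 *	...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);
 */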

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
		__gfs2_glock_schedule_for_reclaim(gl);
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}
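
/*
 * Sorting holders by lock number before acquisition gives every caller
 * a single global ordering, which is what makes nq_m_sync() below
 * deadlock-free; the BUG_ON() above asserts that two distinct glocks
 * never compare equal (same number and same type).
 */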

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: placeholder for the sorted holder pointers
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */
static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

/**
 * gfs2_glock_dq_uninit_m - release and uninitialize multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq_uninit(&ghs[num_gh]);
}

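/**
 * gfs2_glock_cb - Callback used by the locking module to request a demote
 * @gl: The glock
 * @state: The state the remote node wants us to drop to
 *
 * The demote is delayed while the glock has been held for less than its
 * go_min_hold_time, so that locks are not immediately bounced away again.
 */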
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_ops->go_min_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation, or
 * (c) there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_spin lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
	}

	spin_unlock(&gl->gl_spin);
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	smp_wmb();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

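/*
 * gfs2_shrink_glock_memory - Shrinker callback for the glock LRU
 *
 * Walks up to @nr glocks on the LRU, demoting those that demote_ok()
 * approves of; busy glocks are skipped and put back on the list.
 * Returns the remaining LRU size scaled by vfs_cache_pressure, or -1
 * when the allocation context does not permit fs recursion.
 */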
static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		clear_bit(GLF_LRU, &gl->gl_flags);
		atomic_dec(&lru_count);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			clear_bit(GLF_LOCK, &gl->gl_flags);
			smp_mb__after_clear_bit();
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
		set_bit(GLF_LRU, &gl->gl_flags);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket's hash index
 *
 */

static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
			examiner(gl);
	}
	rcu_read_unlock();
	cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	return 0;
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	*p = 0;
	return buf;
}

/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
		  state2str(gl->gl_state),
		  gl->gl_name.ln_type,
		  (unsigned long long)gl->gl_name.ln_number,
		  gflags2str(gflags_buf, gl),
		  state2str(gl->gl_target),
		  state2str(gl->gl_demote_state), dtime,
		  atomic_read(&gl->gl_ail_count),
		  atomic_read(&gl->gl_revokes),
		  atomic_read(&gl->gl_ref));

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}

	/* alloc_workqueue() returns NULL on failure, never an ERR_PTR */
	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
		} else {
			gi->gl = glock_hash_chain(gi->hash);
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
		}
	/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);

	return 0;
}

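/*
 * The seq_file iterator below holds rcu_read_lock() from ->start to
 * ->stop while walking the hash table; gfs2_glock_iter_next() drops it
 * itself when it runs off the end of the table.
 */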
static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;
	rcu_read_lock();

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi))
		return NULL;

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}