xref: /openbmc/linux/fs/gfs2/glock.c (revision 54525552)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/buffer_head.h>
14 #include <linux/delay.h>
15 #include <linux/sort.h>
16 #include <linux/jhash.h>
17 #include <linux/kallsyms.h>
18 #include <linux/gfs2_ondisk.h>
19 #include <linux/list.h>
20 #include <linux/wait.h>
21 #include <linux/module.h>
22 #include <asm/uaccess.h>
23 #include <linux/seq_file.h>
24 #include <linux/debugfs.h>
25 #include <linux/kthread.h>
26 #include <linux/freezer.h>
27 #include <linux/workqueue.h>
28 #include <linux/jiffies.h>
29 #include <linux/rcupdate.h>
30 #include <linux/rculist_bl.h>
31 #include <linux/bit_spinlock.h>
32 
33 #include "gfs2.h"
34 #include "incore.h"
35 #include "glock.h"
36 #include "glops.h"
37 #include "inode.h"
38 #include "lops.h"
39 #include "meta_io.h"
40 #include "quota.h"
41 #include "super.h"
42 #include "util.h"
43 #include "bmap.h"
44 #define CREATE_TRACE_POINTS
45 #include "trace_gfs2.h"
46 
47 struct gfs2_glock_iter {
48 	int hash;			/* hash bucket index         */
49 	struct gfs2_sbd *sdp;		/* incore superblock         */
50 	struct gfs2_glock *gl;		/* current glock struct      */
51 	char string[512];		/* scratch space             */
52 };
53 
54 typedef void (*glock_examiner) (struct gfs2_glock *gl);
55 
56 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
57 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
58 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
59 
60 static struct dentry *gfs2_root;
61 static struct workqueue_struct *glock_workqueue;
62 struct workqueue_struct *gfs2_delete_workqueue;
63 static LIST_HEAD(lru_list);
64 static atomic_t lru_count = ATOMIC_INIT(0);
65 static DEFINE_SPINLOCK(lru_lock);
66 
67 #define GFS2_GL_HASH_SHIFT      15
68 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
69 #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
70 
71 static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
73 
74 /**
75  * gl_hash() - Turn glock number into hash bucket number
76  * @sdp: The incore superblock
77  * @name: The lock name
78  * Returns: The number of the corresponding hash bucket
79  */
80 
81 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
82 			    const struct lm_lockname *name)
83 {
84 	unsigned int h;
85 
86 	h = jhash(&name->ln_number, sizeof(u64), 0);
87 	h = jhash(&name->ln_type, sizeof(unsigned int), h);
88 	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
89 	h &= GFS2_GL_HASH_MASK;
90 
91 	return h;
92 }
93 
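/*
 * Illustrative sketch (editorial, not part of the original file): two
 * lock names land in the same bucket only if number, type and
 * superblock all hash alike, e.g.
 *
 *	struct lm_lockname name = {
 *		.ln_number = 0x27bc,
 *		.ln_type = LM_TYPE_INODE,
 *	};
 *	unsigned int bucket = gl_hash(sdp, &name);  (0..GFS2_GL_HASH_MASK)
 *
 * Folding the sdp pointer into the hash keeps glocks from different
 * mounts spread across the table rather than sharing chains.
 */
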
94 static inline void spin_lock_bucket(unsigned int hash)
95 {
96 	hlist_bl_lock(&gl_hash_table[hash]);
97 }
98 
99 static inline void spin_unlock_bucket(unsigned int hash)
100 {
101 	hlist_bl_unlock(&gl_hash_table[hash]);
102 }
103 
104 static void gfs2_glock_dealloc(struct rcu_head *rcu)
105 {
106 	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
107 
108 	if (gl->gl_ops->go_flags & GLOF_ASPACE)
109 		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
110 	else
111 		kmem_cache_free(gfs2_glock_cachep, gl);
112 }
113 
114 void gfs2_glock_free(struct gfs2_glock *gl)
115 {
116 	struct gfs2_sbd *sdp = gl->gl_sbd;
117 
118 	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
119 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
120 		wake_up(&sdp->sd_glock_wait);
121 }
122 
123 /**
124  * gfs2_glock_hold() - increment reference count on glock
125  * @gl: The glock to hold
126  *
127  */
128 
129 void gfs2_glock_hold(struct gfs2_glock *gl)
130 {
131 	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
132 	atomic_inc(&gl->gl_ref);
133 }
134 
135 /**
136  * demote_ok - Check to see if it's ok to unlock a glock
137  * @gl: the glock
138  *
139  * Returns: 1 if it's ok
140  */
141 
142 static int demote_ok(const struct gfs2_glock *gl)
143 {
144 	const struct gfs2_glock_operations *glops = gl->gl_ops;
145 
146 	if (gl->gl_state == LM_ST_UNLOCKED)
147 		return 0;
148 	if (!list_empty(&gl->gl_holders))
149 		return 0;
150 	if (glops->go_demote_ok)
151 		return glops->go_demote_ok(gl);
152 	return 1;
153 }
154 
155 
156 void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
157 {
158 	spin_lock(&lru_lock);
159 
160 	if (!list_empty(&gl->gl_lru))
161 		list_del_init(&gl->gl_lru);
162 	else
163 		atomic_inc(&lru_count);
164 
165 	list_add_tail(&gl->gl_lru, &lru_list);
166 	set_bit(GLF_LRU, &gl->gl_flags);
167 	spin_unlock(&lru_lock);
168 }
169 
170 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
171 {
172 	spin_lock(&lru_lock);
173 	if (!list_empty(&gl->gl_lru)) {
174 		list_del_init(&gl->gl_lru);
175 		atomic_dec(&lru_count);
176 		clear_bit(GLF_LRU, &gl->gl_flags);
177 	}
178 	spin_unlock(&lru_lock);
179 }
180 
181 /**
182  * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
183  * @gl: the glock
184  *
185  * If the glock is demotable, then we add it (or move it) to the end
186  * of the glock LRU list.
187  */
188 
189 static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
190 {
191 	if (demote_ok(gl))
192 		gfs2_glock_add_to_lru(gl);
193 }
194 
195 /**
196  * gfs2_glock_put_nolock() - Decrement reference count on glock
197  * @gl: The glock to put
198  *
199  * This function should only be used if the caller has its own reference
200  * to the glock, in addition to the one it is dropping.
201  */
202 
203 void gfs2_glock_put_nolock(struct gfs2_glock *gl)
204 {
205 	if (atomic_dec_and_test(&gl->gl_ref))
206 		GLOCK_BUG_ON(gl, 1);
207 }
208 
209 /**
210  * gfs2_glock_put() - Decrement reference count on glock
211  * @gl: The glock to put
212  *
213  */
214 
215 void gfs2_glock_put(struct gfs2_glock *gl)
216 {
217 	struct gfs2_sbd *sdp = gl->gl_sbd;
218 	struct address_space *mapping = gfs2_glock2aspace(gl);
219 
220 	if (atomic_dec_and_test(&gl->gl_ref)) {
221 		spin_lock_bucket(gl->gl_hash);
222 		hlist_bl_del_rcu(&gl->gl_list);
223 		spin_unlock_bucket(gl->gl_hash);
224 		gfs2_glock_remove_from_lru(gl);
225 		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
226 		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
227 		trace_gfs2_glock_put(gl);
228 		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
229 	}
230 }
231 
232 /**
233  * search_bucket() - Find struct gfs2_glock by lock number
234  * @hash: the bucket number to search
235  * @sdp: the incore superblock
236  * @name: The lock name
237  * Returns: NULL, or the struct gfs2_glock with the requested number
238  */
239 
240 static struct gfs2_glock *search_bucket(unsigned int hash,
241 					const struct gfs2_sbd *sdp,
242 					const struct lm_lockname *name)
243 {
244 	struct gfs2_glock *gl;
245 	struct hlist_bl_node *h;
246 
247 	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
248 		if (!lm_name_equal(&gl->gl_name, name))
249 			continue;
250 		if (gl->gl_sbd != sdp)
251 			continue;
252 		if (atomic_inc_not_zero(&gl->gl_ref))
253 			return gl;
254 	}
255 
256 	return NULL;
257 }
258 
259 /**
260  * may_grant - check if it's ok to grant a new lock
261  * @gl: The glock
262  * @gh: The lock request which we wish to grant
263  *
264  * Returns: true if it's ok to grant the lock
265  */
266 
267 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
268 {
269 	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
270 	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
271 	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
272 		return 0;
273 	if (gl->gl_state == gh->gh_state)
274 		return 1;
275 	if (gh->gh_flags & GL_EXACT)
276 		return 0;
277 	if (gl->gl_state == LM_ST_EXCLUSIVE) {
278 		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
279 			return 1;
280 		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
281 			return 1;
282 	}
283 	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
284 		return 1;
285 	return 0;
286 }
287 
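/*
 * Editorial summary of may_grant() above (derived from the code, for
 * quick reference):
 *
 *  - a request whose state matches the current glock state is granted;
 *  - GL_EXACT requests are granted only on such an exact match;
 *  - an EX request, or any request queued behind an EX holder at the
 *    head of the queue, is granted only when it is itself the head;
 *  - with the glock cached EX, SH or DF requests can still be granted
 *    provided the queue head holds the same (SH or DF) state;
 *  - LM_FLAG_ANY is satisfied by any state except unlocked.
 */
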
288 static void gfs2_holder_wake(struct gfs2_holder *gh)
289 {
290 	clear_bit(HIF_WAIT, &gh->gh_iflags);
291 	smp_mb__after_clear_bit();
292 	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
293 }
294 
295 /**
296  * do_error - Something unexpected has happened during a lock request
297  *
298  */
299 
300 static inline void do_error(struct gfs2_glock *gl, const int ret)
301 {
302 	struct gfs2_holder *gh, *tmp;
303 
304 	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
305 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
306 			continue;
307 		if (ret & LM_OUT_ERROR)
308 			gh->gh_error = -EIO;
309 		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
310 			gh->gh_error = GLR_TRYFAILED;
311 		else
312 			continue;
313 		list_del_init(&gh->gh_list);
314 		trace_gfs2_glock_queue(gh, 0);
315 		gfs2_holder_wake(gh);
316 	}
317 }
318 
319 /**
320  * do_promote - promote as many requests as possible on the current queue
321  * @gl: The glock
322  *
323  * Returns: 1 if there is a blocked holder at the head of the list, or 2
324  *          if a type specific operation is underway, or 0 otherwise.
325  */
326 
327 static int do_promote(struct gfs2_glock *gl)
328 __releases(&gl->gl_spin)
329 __acquires(&gl->gl_spin)
330 {
331 	const struct gfs2_glock_operations *glops = gl->gl_ops;
332 	struct gfs2_holder *gh, *tmp;
333 	int ret;
334 
335 restart:
336 	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
337 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
338 			continue;
339 		if (may_grant(gl, gh)) {
340 			if (gh->gh_list.prev == &gl->gl_holders &&
341 			    glops->go_lock) {
342 				spin_unlock(&gl->gl_spin);
343 				/* FIXME: eliminate this eventually */
344 				ret = glops->go_lock(gh);
345 				spin_lock(&gl->gl_spin);
346 				if (ret) {
347 					if (ret == 1)
348 						return 2;
349 					gh->gh_error = ret;
350 					list_del_init(&gh->gh_list);
351 					trace_gfs2_glock_queue(gh, 0);
352 					gfs2_holder_wake(gh);
353 					goto restart;
354 				}
355 				set_bit(HIF_HOLDER, &gh->gh_iflags);
356 				trace_gfs2_promote(gh, 1);
357 				gfs2_holder_wake(gh);
358 				goto restart;
359 			}
360 			set_bit(HIF_HOLDER, &gh->gh_iflags);
361 			trace_gfs2_promote(gh, 0);
362 			gfs2_holder_wake(gh);
363 			continue;
364 		}
365 		if (gh->gh_list.prev == &gl->gl_holders)
366 			return 1;
367 		do_error(gl, 0);
368 		break;
369 	}
370 	return 0;
371 }
372 
373 /**
374  * find_first_waiter - find the first gh that's waiting for the glock
375  * @gl: the glock
376  */
377 
378 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
379 {
380 	struct gfs2_holder *gh;
381 
382 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
383 		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
384 			return gh;
385 	}
386 	return NULL;
387 }
388 
389 /**
390  * state_change - record that the glock is now in a different state
391  * @gl: the glock
392  * @new_state: the new state
393  *
394  */
395 
396 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
397 {
398 	int held1, held2;
399 
400 	held1 = (gl->gl_state != LM_ST_UNLOCKED);
401 	held2 = (new_state != LM_ST_UNLOCKED);
402 
403 	if (held1 != held2) {
404 		if (held2)
405 			gfs2_glock_hold(gl);
406 		else
407 			gfs2_glock_put_nolock(gl);
408 	}
409 	if (held1 && held2 && list_empty(&gl->gl_holders))
410 		clear_bit(GLF_QUEUED, &gl->gl_flags);
411 
412 	gl->gl_state = new_state;
413 	gl->gl_tchange = jiffies;
414 }
415 
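/*
 * Editorial note on the hold/put pair in state_change(): a glock in
 * any state other than LM_ST_UNLOCKED owns one reference to itself,
 * so the lock state pins the structure in memory:
 *
 *	UN -> SH   gfs2_glock_hold(gl)         ref gained
 *	SH -> EX   (no change, still locked)
 *	EX -> UN   gfs2_glock_put_nolock(gl)   ref dropped
 *
 * gfs2_glock_put_nolock() is safe here because the caller still holds
 * its own reference, so the count cannot reach zero.
 */
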
416 static void gfs2_demote_wake(struct gfs2_glock *gl)
417 {
418 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
419 	clear_bit(GLF_DEMOTE, &gl->gl_flags);
420 	smp_mb__after_clear_bit();
421 	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
422 }
423 
424 /**
425  * finish_xmote - The DLM has replied to one of our lock requests
426  * @gl: The glock
427  * @ret: The status from the DLM
428  *
429  */
430 
431 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
432 {
433 	const struct gfs2_glock_operations *glops = gl->gl_ops;
434 	struct gfs2_holder *gh;
435 	unsigned state = ret & LM_OUT_ST_MASK;
436 	int rv;
437 
438 	spin_lock(&gl->gl_spin);
439 	trace_gfs2_glock_state_change(gl, state);
440 	state_change(gl, state);
441 	gh = find_first_waiter(gl);
442 
443 	/* Demote to UN request arrived during demote to SH or DF */
444 	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
445 	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
446 		gl->gl_target = LM_ST_UNLOCKED;
447 
448 	/* Check for state != intended state */
449 	if (unlikely(state != gl->gl_target)) {
450 		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
451 			/* move to back of queue and try next entry */
452 			if (ret & LM_OUT_CANCELED) {
453 				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
454 					list_move_tail(&gh->gh_list, &gl->gl_holders);
455 				gh = find_first_waiter(gl);
456 				gl->gl_target = gh->gh_state;
457 				goto retry;
458 			}
459 			/* Some error or failed "try lock" - report it */
460 			if ((ret & LM_OUT_ERROR) ||
461 			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
462 				gl->gl_target = gl->gl_state;
463 				do_error(gl, ret);
464 				goto out;
465 			}
466 		}
467 		switch(state) {
468 		/* Unlocked due to conversion deadlock, try again */
469 		case LM_ST_UNLOCKED:
470 retry:
471 			do_xmote(gl, gh, gl->gl_target);
472 			break;
473 		/* Conversion fails, unlock and try again */
474 		case LM_ST_SHARED:
475 		case LM_ST_DEFERRED:
476 			do_xmote(gl, gh, LM_ST_UNLOCKED);
477 			break;
478 		default: /* Everything else */
479 			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
480 			GLOCK_BUG_ON(gl, 1);
481 		}
482 		spin_unlock(&gl->gl_spin);
483 		return;
484 	}
485 
486 	/* Fast path - we got what we asked for */
487 	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
488 		gfs2_demote_wake(gl);
489 	if (state != LM_ST_UNLOCKED) {
490 		if (glops->go_xmote_bh) {
491 			spin_unlock(&gl->gl_spin);
492 			rv = glops->go_xmote_bh(gl, gh);
493 			spin_lock(&gl->gl_spin);
494 			if (rv) {
495 				do_error(gl, rv);
496 				goto out;
497 			}
498 		}
499 		rv = do_promote(gl);
500 		if (rv == 2)
501 			goto out_locked;
502 	}
503 out:
504 	clear_bit(GLF_LOCK, &gl->gl_flags);
505 out_locked:
506 	spin_unlock(&gl->gl_spin);
507 }
508 
509 /**
510  * do_xmote - Calls the DLM to change the state of a lock
511  * @gl: The lock state
512  * @gh: The holder (only for promotes)
513  * @target: The target lock state
514  *
515  */
516 
517 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
518 __releases(&gl->gl_spin)
519 __acquires(&gl->gl_spin)
520 {
521 	const struct gfs2_glock_operations *glops = gl->gl_ops;
522 	struct gfs2_sbd *sdp = gl->gl_sbd;
523 	unsigned int lck_flags = gh ? gh->gh_flags : 0;
524 	int ret;
525 
526 	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
527 		      LM_FLAG_PRIORITY);
528 	GLOCK_BUG_ON(gl, gl->gl_state == target);
529 	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
530 	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
531 	    glops->go_inval) {
532 		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
533 		do_error(gl, 0); /* Fail queued try locks */
534 	}
535 	gl->gl_req = target;
536 	spin_unlock(&gl->gl_spin);
537 	if (glops->go_xmote_th)
538 		glops->go_xmote_th(gl);
539 	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
540 		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
541 	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
542 
543 	gfs2_glock_hold(gl);
544 	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
545 		/* lock_dlm */
546 		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
547 		GLOCK_BUG_ON(gl, ret);
548 	} else { /* lock_nolock */
549 		finish_xmote(gl, target);
550 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
551 			gfs2_glock_put(gl);
552 	}
553 
554 	spin_lock(&gl->gl_spin);
555 }
556 
557 /**
558  * find_first_holder - find the first "holder" gh
559  * @gl: the glock
560  */
561 
562 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
563 {
564 	struct gfs2_holder *gh;
565 
566 	if (!list_empty(&gl->gl_holders)) {
567 		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
568 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
569 			return gh;
570 	}
571 	return NULL;
572 }
573 
574 /**
575  * run_queue - do all outstanding tasks related to a glock
576  * @gl: The glock in question
577  * @nonblock: True if we must not block in run_queue
578  *
579  */
580 
581 static void run_queue(struct gfs2_glock *gl, const int nonblock)
582 __releases(&gl->gl_spin)
583 __acquires(&gl->gl_spin)
584 {
585 	struct gfs2_holder *gh = NULL;
586 	int ret;
587 
588 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
589 		return;
590 
591 	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
592 
593 	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
594 	    gl->gl_demote_state != gl->gl_state) {
595 		if (find_first_holder(gl))
596 			goto out_unlock;
597 		if (nonblock)
598 			goto out_sched;
599 		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
600 		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
601 		gl->gl_target = gl->gl_demote_state;
602 	} else {
603 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
604 			gfs2_demote_wake(gl);
605 		ret = do_promote(gl);
606 		if (ret == 0)
607 			goto out_unlock;
608 		if (ret == 2)
609 			goto out;
610 		gh = find_first_waiter(gl);
611 		gl->gl_target = gh->gh_state;
612 		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
613 			do_error(gl, 0); /* Fail queued try locks */
614 	}
615 	do_xmote(gl, gh, gl->gl_target);
616 out:
617 	return;
618 
619 out_sched:
620 	clear_bit(GLF_LOCK, &gl->gl_flags);
621 	smp_mb__after_clear_bit();
622 	gfs2_glock_hold(gl);
623 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
624 		gfs2_glock_put_nolock(gl);
625 	return;
626 
627 out_unlock:
628 	clear_bit(GLF_LOCK, &gl->gl_flags);
629 	smp_mb__after_clear_bit();
630 	return;
631 }
632 
633 static void delete_work_func(struct work_struct *work)
634 {
635 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
636 	struct gfs2_sbd *sdp = gl->gl_sbd;
637 	struct gfs2_inode *ip;
638 	struct inode *inode;
639 	u64 no_addr = gl->gl_name.ln_number;
640 
641 	ip = gl->gl_object;
642 	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */
643 
644 	if (ip)
645 		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
646 	else
647 		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
648 	if (inode && !IS_ERR(inode)) {
649 		d_prune_aliases(inode);
650 		iput(inode);
651 	}
652 	gfs2_glock_put(gl);
653 }
654 
655 static void glock_work_func(struct work_struct *work)
656 {
657 	unsigned long delay = 0;
658 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
659 	int drop_ref = 0;
660 
661 	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
662 		finish_xmote(gl, gl->gl_reply);
663 		drop_ref = 1;
664 	}
665 	spin_lock(&gl->gl_spin);
666 	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
667 	    gl->gl_state != LM_ST_UNLOCKED &&
668 	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
669 		unsigned long holdtime, now = jiffies;
670 		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
671 		if (time_before(now, holdtime))
672 			delay = holdtime - now;
673 		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
674 	}
675 	run_queue(gl, 0);
676 	spin_unlock(&gl->gl_spin);
677 	if (!delay ||
678 	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
679 		gfs2_glock_put(gl);
680 	if (drop_ref)
681 		gfs2_glock_put(gl);
682 }
683 
684 /**
685  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
686  * @sdp: The GFS2 superblock
687  * @number: the lock number
688  * @glops: The glock_operations to use
689  * @create: If 0, don't create the glock if it doesn't exist
690  * @glp: the glock is returned here
691  *
692  * This does not lock a glock, just finds/creates structures for one.
693  *
694  * Returns: errno
695  */
696 
697 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
698 		   const struct gfs2_glock_operations *glops, int create,
699 		   struct gfs2_glock **glp)
700 {
701 	struct super_block *s = sdp->sd_vfs;
702 	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
703 	struct gfs2_glock *gl, *tmp;
704 	unsigned int hash = gl_hash(sdp, &name);
705 	struct address_space *mapping;
706 	struct kmem_cache *cachep;
707 
708 	rcu_read_lock();
709 	gl = search_bucket(hash, sdp, &name);
710 	rcu_read_unlock();
711 
712 	*glp = gl;
713 	if (gl)
714 		return 0;
715 	if (!create)
716 		return -ENOENT;
717 
718 	if (glops->go_flags & GLOF_ASPACE)
719 		cachep = gfs2_glock_aspace_cachep;
720 	else
721 		cachep = gfs2_glock_cachep;
722 	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
723 	if (!gl)
724 		return -ENOMEM;
725 
726 	atomic_inc(&sdp->sd_glock_disposal);
727 	gl->gl_flags = 0;
728 	gl->gl_name = name;
729 	atomic_set(&gl->gl_ref, 1);
730 	gl->gl_state = LM_ST_UNLOCKED;
731 	gl->gl_target = LM_ST_UNLOCKED;
732 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
733 	gl->gl_hash = hash;
734 	gl->gl_ops = glops;
735 	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
736 	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
737 	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
738 	gl->gl_tchange = jiffies;
739 	gl->gl_object = NULL;
740 	gl->gl_sbd = sdp;
741 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
742 	INIT_WORK(&gl->gl_delete, delete_work_func);
743 
744 	mapping = gfs2_glock2aspace(gl);
745 	if (mapping) {
746 		mapping->a_ops = &gfs2_meta_aops;
747 		mapping->host = s->s_bdev->bd_inode;
748 		mapping->flags = 0;
749 		mapping_set_gfp_mask(mapping, GFP_NOFS);
750 		mapping->assoc_mapping = NULL;
751 		mapping->backing_dev_info = s->s_bdi;
752 		mapping->writeback_index = 0;
753 	}
754 
755 	spin_lock_bucket(hash);
756 	tmp = search_bucket(hash, sdp, &name);
757 	if (tmp) {
758 		spin_unlock_bucket(hash);
759 		kmem_cache_free(cachep, gl);
760 		atomic_dec(&sdp->sd_glock_disposal);
761 		gl = tmp;
762 	} else {
763 		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
764 		spin_unlock_bucket(hash);
765 	}
766 
767 	*glp = gl;
768 
769 	return 0;
770 }
771 
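/*
 * Illustrative usage (a minimal sketch, not lifted from a real caller):
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_put(gl);
 *
 * This only finds or creates the structure; taking the lock itself is
 * a separate step through a holder (see gfs2_holder_init below).
 */
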
772 /**
773  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
774  * @gl: the glock
775  * @state: the state we're requesting
776  * @flags: the modifier flags
777  * @gh: the holder structure
778  *
779  */
780 
781 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
782 		      struct gfs2_holder *gh)
783 {
784 	INIT_LIST_HEAD(&gh->gh_list);
785 	gh->gh_gl = gl;
786 	gh->gh_ip = (unsigned long)__builtin_return_address(0);
787 	gh->gh_owner_pid = get_pid(task_pid(current));
788 	gh->gh_state = state;
789 	gh->gh_flags = flags;
790 	gh->gh_error = 0;
791 	gh->gh_iflags = 0;
792 	gfs2_glock_hold(gl);
793 }
794 
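/*
 * Typical holder lifecycle (illustrative sketch):
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);	(queue and wait for the grant)
 *	if (!error) {
 *		... access the object the glock protects ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);	(drops the reference taken here)
 *
 * gfs2_glock_nq_init() in glock.h combines the first two steps.
 */
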
795 /**
796  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
797  * @state: the state we're requesting
798  * @flags: the modifier flags
799  * @gh: the holder structure
800  *
801  * Don't mess with the glock.
802  *
803  */
804 
805 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
806 {
807 	gh->gh_state = state;
808 	gh->gh_flags = flags;
809 	gh->gh_iflags = 0;
810 	gh->gh_ip = (unsigned long)__builtin_return_address(0);
811 	if (gh->gh_owner_pid)
812 		put_pid(gh->gh_owner_pid);
813 	gh->gh_owner_pid = get_pid(task_pid(current));
814 }
815 
816 /**
817  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
818  * @gh: the holder structure
819  *
820  */
821 
822 void gfs2_holder_uninit(struct gfs2_holder *gh)
823 {
824 	put_pid(gh->gh_owner_pid);
825 	gfs2_glock_put(gh->gh_gl);
826 	gh->gh_gl = NULL;
827 	gh->gh_ip = 0;
828 }
829 
830 /**
831  * gfs2_glock_holder_wait
832  * @word: unused
833  *
834  * This function and gfs2_glock_demote_wait both show up in the WCHAN
835  * field. Thus I've separated these otherwise identical functions in
836  * order to be more informative to the user.
837  */
838 
839 static int gfs2_glock_holder_wait(void *word)
840 {
841 	schedule();
842 	return 0;
843 }
844 
845 static int gfs2_glock_demote_wait(void *word)
846 {
847 	schedule();
848 	return 0;
849 }
850 
851 static void wait_on_holder(struct gfs2_holder *gh)
852 {
853 	might_sleep();
854 	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
855 }
856 
857 static void wait_on_demote(struct gfs2_glock *gl)
858 {
859 	might_sleep();
860 	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
861 }
862 
863 /**
864  * handle_callback - process a demote request
865  * @gl: the glock
866  * @state: the state the caller wants us to change to
867  * @delay: zero to demote immediately, nonzero to mark a pending demote
868  * There are only two requests that we are going to see in actual
869  * practice: LM_ST_SHARED and LM_ST_UNLOCKED
870  */
871 
872 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
873 			    unsigned long delay)
874 {
875 	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
876 
877 	set_bit(bit, &gl->gl_flags);
878 	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
879 		gl->gl_demote_state = state;
880 		gl->gl_demote_time = jiffies;
881 	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
882 			gl->gl_demote_state != state) {
883 		gl->gl_demote_state = LM_ST_UNLOCKED;
884 	}
885 	if (gl->gl_ops->go_callback)
886 		gl->gl_ops->go_callback(gl);
887 	trace_gfs2_demote_rq(gl);
888 }
889 
890 /**
891  * gfs2_glock_wait - wait on a glock acquisition
892  * @gh: the glock holder
893  *
894  * Returns: 0 on success
895  */
896 
897 int gfs2_glock_wait(struct gfs2_holder *gh)
898 {
899 	wait_on_holder(gh);
900 	return gh->gh_error;
901 }
902 
903 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
904 {
905 	struct va_format vaf;
906 	va_list args;
907 
908 	va_start(args, fmt);
909 
910 	if (seq) {
911 		struct gfs2_glock_iter *gi = seq->private;
912 		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
913 		seq_puts(seq, gi->string);
914 	} else {
915 		vaf.fmt = fmt;
916 		vaf.va = &args;
917 
918 		printk(KERN_ERR " %pV", &vaf);
919 	}
920 
921 	va_end(args);
922 }
923 
924 /**
925  * add_to_queue - Add a holder to the wait queue (but look for recursion)
926  * @gh: the holder structure to add
927  *
928  * Eventually we should move the recursive locking trap to a
929  * debugging option or something like that. This is the fast
930  * path and needs to have the minimum number of distractions.
931  *
932  */
933 
934 static inline void add_to_queue(struct gfs2_holder *gh)
935 __releases(&gl->gl_spin)
936 __acquires(&gl->gl_spin)
937 {
938 	struct gfs2_glock *gl = gh->gh_gl;
939 	struct gfs2_sbd *sdp = gl->gl_sbd;
940 	struct list_head *insert_pt = NULL;
941 	struct gfs2_holder *gh2;
942 	int try_lock = 0;
943 
944 	BUG_ON(gh->gh_owner_pid == NULL);
945 	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
946 		BUG();
947 
948 	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
949 		if (test_bit(GLF_LOCK, &gl->gl_flags))
950 			try_lock = 1;
951 		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
952 			goto fail;
953 	}
954 
955 	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
956 		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
957 		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
958 			goto trap_recursive;
959 		if (try_lock &&
960 		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
961 		    !may_grant(gl, gh)) {
962 fail:
963 			gh->gh_error = GLR_TRYFAILED;
964 			gfs2_holder_wake(gh);
965 			return;
966 		}
967 		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
968 			continue;
969 		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
970 			insert_pt = &gh2->gh_list;
971 	}
972 	set_bit(GLF_QUEUED, &gl->gl_flags);
973 	trace_gfs2_glock_queue(gh, 1);
974 	if (likely(insert_pt == NULL)) {
975 		list_add_tail(&gh->gh_list, &gl->gl_holders);
976 		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
977 			goto do_cancel;
978 		return;
979 	}
980 	list_add_tail(&gh->gh_list, insert_pt);
981 do_cancel:
982 	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
983 	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
984 		spin_unlock(&gl->gl_spin);
985 		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
986 			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
987 		spin_lock(&gl->gl_spin);
988 	}
989 	return;
990 
991 trap_recursive:
992 	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
993 	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
994 	printk(KERN_ERR "lock type: %d req lock state : %d\n",
995 	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
996 	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
997 	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
998 	printk(KERN_ERR "lock type: %d req lock state : %d\n",
999 	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
1000 	__dump_glock(NULL, gl);
1001 	BUG();
1002 }
1003 
1004 /**
1005  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1006  * @gh: the holder structure
1007  *
1008  * if (gh->gh_flags & GL_ASYNC), this never returns an error
1009  *
1010  * Returns: 0, GLR_TRYFAILED, or errno on failure
1011  */
1012 
1013 int gfs2_glock_nq(struct gfs2_holder *gh)
1014 {
1015 	struct gfs2_glock *gl = gh->gh_gl;
1016 	struct gfs2_sbd *sdp = gl->gl_sbd;
1017 	int error = 0;
1018 
1019 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1020 		return -EIO;
1021 
1022 	if (test_bit(GLF_LRU, &gl->gl_flags))
1023 		gfs2_glock_remove_from_lru(gl);
1024 
1025 	spin_lock(&gl->gl_spin);
1026 	add_to_queue(gh);
1027 	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
1028 	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1029 		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1030 	run_queue(gl, 1);
1031 	spin_unlock(&gl->gl_spin);
1032 
1033 	if (!(gh->gh_flags & GL_ASYNC))
1034 		error = gfs2_glock_wait(gh);
1035 
1036 	return error;
1037 }
1038 
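/*
 * GL_ASYNC usage (illustrative): queue without blocking, then poll or
 * wait for the grant later.
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);	(returns 0 immediately for GL_ASYNC)
 *	...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);	(will not block now)
 */
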
1039 /**
1040  * gfs2_glock_poll - poll to see if an async request has been completed
1041  * @gh: the holder
1042  *
1043  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1044  */
1045 
1046 int gfs2_glock_poll(struct gfs2_holder *gh)
1047 {
1048 	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1049 }
1050 
1051 /**
1052  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1053  * @gh: the glock holder
1054  *
1055  */
1056 
1057 void gfs2_glock_dq(struct gfs2_holder *gh)
1058 {
1059 	struct gfs2_glock *gl = gh->gh_gl;
1060 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1061 	unsigned delay = 0;
1062 	int fast_path = 0;
1063 
1064 	spin_lock(&gl->gl_spin);
1065 	if (gh->gh_flags & GL_NOCACHE)
1066 		handle_callback(gl, LM_ST_UNLOCKED, 0);
1067 
1068 	list_del_init(&gh->gh_list);
1069 	if (find_first_holder(gl) == NULL) {
1070 		if (glops->go_unlock) {
1071 			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1072 			spin_unlock(&gl->gl_spin);
1073 			glops->go_unlock(gh);
1074 			spin_lock(&gl->gl_spin);
1075 			clear_bit(GLF_LOCK, &gl->gl_flags);
1076 		}
1077 		if (list_empty(&gl->gl_holders) &&
1078 		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1079 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
1080 			fast_path = 1;
1081 	}
1082 	if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
1083 		__gfs2_glock_schedule_for_reclaim(gl);
1084 	trace_gfs2_glock_queue(gh, 0);
1085 	spin_unlock(&gl->gl_spin);
1086 	if (likely(fast_path))
1087 		return;
1088 
1089 	gfs2_glock_hold(gl);
1090 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1091 	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
1092 		delay = gl->gl_ops->go_min_hold_time;
1093 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1094 		gfs2_glock_put(gl);
1095 }
1096 
1097 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1098 {
1099 	struct gfs2_glock *gl = gh->gh_gl;
1100 	gfs2_glock_dq(gh);
1101 	wait_on_demote(gl);
1102 }
1103 
1104 /**
1105  * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1106  * @gh: the holder structure
1107  *
1108  */
1109 
1110 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1111 {
1112 	gfs2_glock_dq(gh);
1113 	gfs2_holder_uninit(gh);
1114 }
1115 
1116 /**
1117  * gfs2_glock_nq_num - acquire a glock based on lock number
1118  * @sdp: the filesystem
1119  * @number: the lock number
1120  * @glops: the glock operations for the type of glock
1121  * @state: the state to acquire the glock in
1122  * @flags: modifier flags for the acquisition
1123  * @gh: the struct gfs2_holder
1124  *
1125  * Returns: errno
1126  */
1127 
1128 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1129 		      const struct gfs2_glock_operations *glops,
1130 		      unsigned int state, int flags, struct gfs2_holder *gh)
1131 {
1132 	struct gfs2_glock *gl;
1133 	int error;
1134 
1135 	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1136 	if (!error) {
1137 		error = gfs2_glock_nq_init(gl, state, flags, gh);
1138 		gfs2_glock_put(gl);
1139 	}
1140 
1141 	return error;
1142 }
1143 
1144 /**
1145  * glock_compare - Compare the glocks of two holders for sorting
1146  * @arg_a: the first structure
1147  * @arg_b: the second structure
1148  *
1149  */
1150 
1151 static int glock_compare(const void *arg_a, const void *arg_b)
1152 {
1153 	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1154 	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1155 	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1156 	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1157 
1158 	if (a->ln_number > b->ln_number)
1159 		return 1;
1160 	if (a->ln_number < b->ln_number)
1161 		return -1;
1162 	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1163 	return 0;
1164 }
1165 
1166 /**
1167  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1168  * @num_gh: the number of structures
1169  * @ghs: an array of struct gfs2_holder structures
1170  *
1171  * Returns: 0 on success (all glocks acquired),
1172  *          errno on failure (no glocks acquired)
1173  */
1174 
1175 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1176 		     struct gfs2_holder **p)
1177 {
1178 	unsigned int x;
1179 	int error = 0;
1180 
1181 	for (x = 0; x < num_gh; x++)
1182 		p[x] = &ghs[x];
1183 
1184 	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1185 
1186 	for (x = 0; x < num_gh; x++) {
1187 		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1188 
1189 		error = gfs2_glock_nq(p[x]);
1190 		if (error) {
1191 			while (x--)
1192 				gfs2_glock_dq(p[x]);
1193 			break;
1194 		}
1195 	}
1196 
1197 	return error;
1198 }
1199 
1200 /**
1201  * gfs2_glock_nq_m - acquire multiple glocks
1202  * @num_gh: the number of structures
1203  * @ghs: an array of struct gfs2_holder structures
1204  *
1205  *
1206  * Returns: 0 on success (all glocks acquired),
1207  *          errno on failure (no glocks acquired)
1208  */
1209 
1210 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1211 {
1212 	struct gfs2_holder *tmp[4];
1213 	struct gfs2_holder **pph = tmp;
1214 	int error = 0;
1215 
1216 	switch(num_gh) {
1217 	case 0:
1218 		return 0;
1219 	case 1:
1220 		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1221 		return gfs2_glock_nq(ghs);
1222 	default:
1223 		if (num_gh <= 4)
1224 			break;
1225 		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1226 		if (!pph)
1227 			return -ENOMEM;
1228 	}
1229 
1230 	error = nq_m_sync(num_gh, ghs, pph);
1231 
1232 	if (pph != tmp)
1233 		kfree(pph);
1234 
1235 	return error;
1236 }
1237 
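/*
 * Illustrative multi-lock acquisition (a sketch; gl_a and gl_b stand
 * for any two glocks a caller such as rename must hold together):
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);	(sorted, deadlock-free)
 *	...
 *	gfs2_glock_dq_uninit_m(2, ghs);
 */
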
1238 /**
1239  * gfs2_glock_dq_m - release multiple glocks
1240  * @num_gh: the number of structures
1241  * @ghs: an array of struct gfs2_holder structures
1242  *
1243  */
1244 
1245 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1246 {
1247 	while (num_gh--)
1248 		gfs2_glock_dq(&ghs[num_gh]);
1249 }
1250 
1251 /**
1252  * gfs2_glock_dq_uninit_m - release multiple glocks
1253  * @num_gh: the number of structures
1254  * @ghs: an array of struct gfs2_holder structures
1255  *
1256  */
1257 
1258 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1259 {
1260 	while (num_gh--)
1261 		gfs2_glock_dq_uninit(&ghs[num_gh]);
1262 }
1263 
1264 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1265 {
1266 	unsigned long delay = 0;
1267 	unsigned long holdtime;
1268 	unsigned long now = jiffies;
1269 
1270 	gfs2_glock_hold(gl);
1271 	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
1272 	if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
1273 		if (time_before(now, holdtime))
1274 			delay = holdtime - now;
1275 		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1276 			delay = gl->gl_ops->go_min_hold_time;
1277 	}
1278 
1279 	spin_lock(&gl->gl_spin);
1280 	handle_callback(gl, state, delay);
1281 	spin_unlock(&gl->gl_spin);
1282 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1283 		gfs2_glock_put(gl);
1284 }
1285 
1286 /**
1287  * gfs2_should_freeze - Figure out if glock should be frozen
1288  * @gl: The glock in question
1289  *
1290  * Glocks are not frozen if (a) the result of the dlm operation is
1291  * an error, (b) the locking operation was an unlock operation or
1292  * (c) there is a "noexp" flagged request anywhere in the queue
1293  *
1294  * Returns: 1 if freezing should occur, 0 otherwise
1295  */
1296 
1297 static int gfs2_should_freeze(const struct gfs2_glock *gl)
1298 {
1299 	const struct gfs2_holder *gh;
1300 
1301 	if (gl->gl_reply & ~LM_OUT_ST_MASK)
1302 		return 0;
1303 	if (gl->gl_target == LM_ST_UNLOCKED)
1304 		return 0;
1305 
1306 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1307 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1308 			continue;
1309 		if (LM_FLAG_NOEXP & gh->gh_flags)
1310 			return 0;
1311 	}
1312 
1313 	return 1;
1314 }
1315 
1316 /**
1317  * gfs2_glock_complete - Callback used by locking
1318  * @gl: Pointer to the glock
1319  * @ret: The return value from the dlm
1320  *
1321  * The gl_reply field is under the gl_spin lock so that it is ok
1322  * to use a bitfield shared with other glock state fields.
1323  */
1324 
1325 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1326 {
1327 	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
1328 
1329 	spin_lock(&gl->gl_spin);
1330 	gl->gl_reply = ret;
1331 
1332 	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
1333 		if (gfs2_should_freeze(gl)) {
1334 			set_bit(GLF_FROZEN, &gl->gl_flags);
1335 			spin_unlock(&gl->gl_spin);
1336 			return;
1337 		}
1338 	}
1339 
1340 	spin_unlock(&gl->gl_spin);
1341 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1342 	smp_wmb();
1343 	gfs2_glock_hold(gl);
1344 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1345 		gfs2_glock_put(gl);
1346 }
1347 
1348 
1349 static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
1350 {
1351 	struct gfs2_glock *gl;
1352 	int may_demote;
1353 	int nr_skipped = 0;
1354 	LIST_HEAD(skipped);
1355 
1356 	if (nr == 0)
1357 		goto out;
1358 
1359 	if (!(gfp_mask & __GFP_FS))
1360 		return -1;
1361 
1362 	spin_lock(&lru_lock);
1363 	while(nr && !list_empty(&lru_list)) {
1364 		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1365 		list_del_init(&gl->gl_lru);
1366 		clear_bit(GLF_LRU, &gl->gl_flags);
1367 		atomic_dec(&lru_count);
1368 
1369 		/* Test for being demotable */
1370 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1371 			gfs2_glock_hold(gl);
1372 			spin_unlock(&lru_lock);
1373 			spin_lock(&gl->gl_spin);
1374 			may_demote = demote_ok(gl);
1375 			if (may_demote) {
1376 				handle_callback(gl, LM_ST_UNLOCKED, 0);
1377 				nr--;
1378 			}
1379 			clear_bit(GLF_LOCK, &gl->gl_flags);
1380 			smp_mb__after_clear_bit();
1381 			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1382 				gfs2_glock_put_nolock(gl);
1383 			spin_unlock(&gl->gl_spin);
1384 			spin_lock(&lru_lock);
1385 			continue;
1386 		}
1387 		nr_skipped++;
1388 		list_add(&gl->gl_lru, &skipped);
1389 		set_bit(GLF_LRU, &gl->gl_flags);
1390 	}
1391 	list_splice(&skipped, &lru_list);
1392 	atomic_add(nr_skipped, &lru_count);
1393 	spin_unlock(&lru_lock);
1394 out:
1395 	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
1396 }
1397 
1398 static struct shrinker glock_shrinker = {
1399 	.shrink = gfs2_shrink_glock_memory,
1400 	.seeks = DEFAULT_SEEKS,
1401 };
1402 
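/*
 * Editorial note: under this shrinker API, ->shrink returns the
 * approximate number of reclaimable objects (here lru_count scaled by
 * sysctl_vfs_cache_pressure), or -1 when nothing can be done for the
 * given gfp_mask.  That is why gfs2_shrink_glock_memory() bails out
 * for allocations without __GFP_FS: demoting a glock can issue
 * filesystem I/O, which must not happen on that path.
 */
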
1403 /**
1404  * examine_bucket - Call a function for each glock in a hash bucket
1405  * @examiner: the function
1406  * @sdp: the filesystem
1407  * @hash: the hash bucket number
1408  *
1409  */
1410 
1411 static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
1412 			  unsigned int hash)
1413 {
1414 	struct gfs2_glock *gl;
1415 	struct hlist_bl_head *head = &gl_hash_table[hash];
1416 	struct hlist_bl_node *pos;
1417 
1418 	rcu_read_lock();
1419 	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
1420 		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
1421 			examiner(gl);
1422 	}
1423 	rcu_read_unlock();
1424 	cond_resched();
1425 }
1426 
1427 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1428 {
1429 	unsigned x;
1430 
1431 	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1432 		examine_bucket(examiner, sdp, x);
1433 }
1434 
1435 
1436 /**
1437  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1438  * @gl: The glock to thaw
1439  *
1440  * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
1441  * so this has to result in the ref count being dropped by one.
1442  */
1443 
1444 static void thaw_glock(struct gfs2_glock *gl)
1445 {
1446 	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1447 		return;
1448 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1449 	gfs2_glock_hold(gl);
1450 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1451 		gfs2_glock_put(gl);
1452 }
1453 
1454 /**
1455  * clear_glock - look at a glock and see if we can free it from glock cache
1456  * @gl: the glock to look at
1457  *
1458  */
1459 
1460 static void clear_glock(struct gfs2_glock *gl)
1461 {
1462 	gfs2_glock_remove_from_lru(gl);
1463 
1464 	spin_lock(&gl->gl_spin);
1465 	if (gl->gl_state != LM_ST_UNLOCKED)
1466 		handle_callback(gl, LM_ST_UNLOCKED, 0);
1467 	spin_unlock(&gl->gl_spin);
1468 	gfs2_glock_hold(gl);
1469 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1470 		gfs2_glock_put(gl);
1471 }
1472 
1473 /**
1474  * gfs2_glock_thaw - Thaw any frozen glocks
1475  * @sdp: The super block
1476  *
1477  */
1478 
1479 void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1480 {
1481 	glock_hash_walk(thaw_glock, sdp);
1482 }
1483 
1484 static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1485 {
1486 	int ret;
1487 	spin_lock(&gl->gl_spin);
1488 	ret = __dump_glock(seq, gl);
1489 	spin_unlock(&gl->gl_spin);
1490 	return ret;
1491 }
1492 
1493 static void dump_glock_func(struct gfs2_glock *gl)
1494 {
1495 	dump_glock(NULL, gl);
1496 }
1497 
1498 /**
1499  * gfs2_gl_hash_clear - Empty out the glock hash table
1500  * @sdp: the filesystem
1502  *
1503  * Called when unmounting the filesystem.
1504  */
1505 
1506 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1507 {
1508 	glock_hash_walk(clear_glock, sdp);
1509 	flush_workqueue(glock_workqueue);
1510 	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
1511 	glock_hash_walk(dump_glock_func, sdp);
1512 }
1513 
1514 void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1515 {
1516 	struct gfs2_glock *gl = ip->i_gl;
1517 	int ret;
1518 
1519 	ret = gfs2_truncatei_resume(ip);
1520 	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
1521 
1522 	spin_lock(&gl->gl_spin);
1523 	clear_bit(GLF_LOCK, &gl->gl_flags);
1524 	run_queue(gl, 1);
1525 	spin_unlock(&gl->gl_spin);
1526 }
1527 
1528 static const char *state2str(unsigned state)
1529 {
1530 	switch(state) {
1531 	case LM_ST_UNLOCKED:
1532 		return "UN";
1533 	case LM_ST_SHARED:
1534 		return "SH";
1535 	case LM_ST_DEFERRED:
1536 		return "DF";
1537 	case LM_ST_EXCLUSIVE:
1538 		return "EX";
1539 	}
1540 	return "??";
1541 }
1542 
1543 static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1544 {
1545 	char *p = buf;
1546 	if (flags & LM_FLAG_TRY)
1547 		*p++ = 't';
1548 	if (flags & LM_FLAG_TRY_1CB)
1549 		*p++ = 'T';
1550 	if (flags & LM_FLAG_NOEXP)
1551 		*p++ = 'e';
1552 	if (flags & LM_FLAG_ANY)
1553 		*p++ = 'A';
1554 	if (flags & LM_FLAG_PRIORITY)
1555 		*p++ = 'p';
1556 	if (flags & GL_ASYNC)
1557 		*p++ = 'a';
1558 	if (flags & GL_EXACT)
1559 		*p++ = 'E';
1560 	if (flags & GL_NOCACHE)
1561 		*p++ = 'c';
1562 	if (test_bit(HIF_HOLDER, &iflags))
1563 		*p++ = 'H';
1564 	if (test_bit(HIF_WAIT, &iflags))
1565 		*p++ = 'W';
1566 	if (test_bit(HIF_FIRST, &iflags))
1567 		*p++ = 'F';
1568 	*p = 0;
1569 	return buf;
1570 }
1571 
1572 /**
1573  * dump_holder - print information about a glock holder
1574  * @seq: the seq_file struct
1575  * @gh: the glock holder
1576  *
1577  * Returns: 0 on success, -ENOBUFS when we run out of space
1578  */
1579 
1580 static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1581 {
1582 	struct task_struct *gh_owner = NULL;
1583 	char flags_buf[32];
1584 
1585 	if (gh->gh_owner_pid)
1586 		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1587 	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1588 		       state2str(gh->gh_state),
1589 		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1590 		       gh->gh_error,
1591 		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1592 		       gh_owner ? gh_owner->comm : "(ended)",
1593 		       (void *)gh->gh_ip);
1594 	return 0;
1595 }
1596 
1597 static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
1598 {
1599 	const unsigned long *gflags = &gl->gl_flags;
1600 	char *p = buf;
1601 
1602 	if (test_bit(GLF_LOCK, gflags))
1603 		*p++ = 'l';
1604 	if (test_bit(GLF_DEMOTE, gflags))
1605 		*p++ = 'D';
1606 	if (test_bit(GLF_PENDING_DEMOTE, gflags))
1607 		*p++ = 'd';
1608 	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1609 		*p++ = 'p';
1610 	if (test_bit(GLF_DIRTY, gflags))
1611 		*p++ = 'y';
1612 	if (test_bit(GLF_LFLUSH, gflags))
1613 		*p++ = 'f';
1614 	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1615 		*p++ = 'i';
1616 	if (test_bit(GLF_REPLY_PENDING, gflags))
1617 		*p++ = 'r';
1618 	if (test_bit(GLF_INITIAL, gflags))
1619 		*p++ = 'I';
1620 	if (test_bit(GLF_FROZEN, gflags))
1621 		*p++ = 'F';
1622 	if (test_bit(GLF_QUEUED, gflags))
1623 		*p++ = 'q';
1624 	if (test_bit(GLF_LRU, gflags))
1625 		*p++ = 'L';
1626 	if (gl->gl_object)
1627 		*p++ = 'o';
1628 	*p = 0;
1629 	return buf;
1630 }
1631 
1632 /**
1633  * __dump_glock - print information about a glock
1634  * @seq: The seq_file struct
1635  * @gl: the glock
1636  *
1637  * The file format is as follows:
1638  * One line per object, capital letters are used to indicate objects
1639  * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1640  * other objects are indented by a single space and follow the glock to
1641  * which they are related. Fields are indicated by lower case letters
1642  * followed by a colon and the field value, except for strings which are in
1643  * [] so that it's possible to see if they are composed of spaces for
1644  * example. The fields are n = number (id of the object), f = flags,
1645  * t = type, s = state, r = refcount, e = error, p = pid.
1646  *
1647  * Returns: 0 on success, -ENOBUFS when we run out of space
1648  */
1649 
1650 static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1651 {
1652 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1653 	unsigned long long dtime;
1654 	const struct gfs2_holder *gh;
1655 	char gflags_buf[32];
1656 	int error = 0;
1657 
1658 	dtime = jiffies - gl->gl_demote_time;
1659 	dtime *= 1000000/HZ; /* demote time in uSec */
1660 	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1661 		dtime = 0;
1662 	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
1663 		  state2str(gl->gl_state),
1664 		  gl->gl_name.ln_type,
1665 		  (unsigned long long)gl->gl_name.ln_number,
1666 		  gflags2str(gflags_buf, gl),
1667 		  state2str(gl->gl_target),
1668 		  state2str(gl->gl_demote_state), dtime,
1669 		  atomic_read(&gl->gl_ail_count),
1670 		  atomic_read(&gl->gl_revokes),
1671 		  atomic_read(&gl->gl_ref));
1672 
1673 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1674 		error = dump_holder(seq, gh);
1675 		if (error)
1676 			goto out;
1677 	}
1678 	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1679 		error = glops->go_dump(seq, gl);
1680 out:
1681 	return error;
1682 }
1683 
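/*
 * Example output (illustrative values, matching the format strings in
 * __dump_glock() and dump_holder() above):
 *
 *	G:  s:SH n:2/27bc f:q t:SH d:EX/0 a:0 v:0 r:3
 *	 H: s:SH f:H e:0 p:1223 [gfs2_quotad] gfs2_quotad+0x...
 *
 * Here d:EX/0 means no demote is pending (gl_demote_state idles at
 * LM_ST_EXCLUSIVE) and r:3 is the current reference count.
 */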
1686 
1687 int __init gfs2_glock_init(void)
1688 {
1689 	unsigned i;
1690 	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1691 		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
1692 	}
1693 
1694 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1695 					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
1696 	if (IS_ERR(glock_workqueue))
1697 		return PTR_ERR(glock_workqueue);
1698 	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1699 						WQ_MEM_RECLAIM | WQ_FREEZABLE,
1700 						0);
1701 	if (IS_ERR(gfs2_delete_workqueue)) {
1702 		destroy_workqueue(glock_workqueue);
1703 		return PTR_ERR(gfs2_delete_workqueue);
1704 	}
1705 
1706 	register_shrinker(&glock_shrinker);
1707 
1708 	return 0;
1709 }
1710 
1711 void gfs2_glock_exit(void)
1712 {
1713 	unregister_shrinker(&glock_shrinker);
1714 	destroy_workqueue(glock_workqueue);
1715 	destroy_workqueue(gfs2_delete_workqueue);
1716 }
1717 
1718 static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
1719 {
1720 	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
1721 			      struct gfs2_glock, gl_list);
1722 }
1723 
1724 static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
1725 {
1726 	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
1727 			      struct gfs2_glock, gl_list);
1728 }
1729 
1730 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1731 {
1732 	struct gfs2_glock *gl;
1733 
1734 	do {
1735 		gl = gi->gl;
1736 		if (gl) {
1737 			gi->gl = glock_hash_next(gl);
1738 		} else {
1739 			gi->gl = glock_hash_chain(gi->hash);
1740 		}
1741 		while (gi->gl == NULL) {
1742 			gi->hash++;
1743 			if (gi->hash >= GFS2_GL_HASH_SIZE) {
1744 				rcu_read_unlock();
1745 				return 1;
1746 			}
1747 			gi->gl = glock_hash_chain(gi->hash);
1748 		}
1749 	/* Skip entries for other sb and dead entries */
1750 	} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
1751 
1752 	return 0;
1753 }
1754 
1755 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1756 {
1757 	struct gfs2_glock_iter *gi = seq->private;
1758 	loff_t n = *pos;
1759 
1760 	gi->hash = 0;
1761 	rcu_read_lock();
1762 
1763 	do {
1764 		if (gfs2_glock_iter_next(gi))
1765 			return NULL;
1766 	} while (n--);
1767 
1768 	return gi->gl;
1769 }
1770 
1771 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1772 				 loff_t *pos)
1773 {
1774 	struct gfs2_glock_iter *gi = seq->private;
1775 
1776 	(*pos)++;
1777 
1778 	if (gfs2_glock_iter_next(gi))
1779 		return NULL;
1780 
1781 	return gi->gl;
1782 }
1783 
1784 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1785 {
1786 	struct gfs2_glock_iter *gi = seq->private;
1787 
1788 	if (gi->gl)
1789 		rcu_read_unlock();
1790 	gi->gl = NULL;
1791 }
1792 
1793 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1794 {
1795 	return dump_glock(seq, iter_ptr);
1796 }
1797 
1798 static const struct seq_operations gfs2_glock_seq_ops = {
1799 	.start = gfs2_glock_seq_start,
1800 	.next  = gfs2_glock_seq_next,
1801 	.stop  = gfs2_glock_seq_stop,
1802 	.show  = gfs2_glock_seq_show,
1803 };
1804 
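/*
 * With debugfs mounted at the usual location, the glocks of a mounted
 * filesystem can be inspected via the file created below, e.g.:
 *
 *	cat /sys/kernel/debug/gfs2/<locktable>/glocks
 *
 * where <locktable> is the sd_table_name of the filesystem.  One "G:"
 * line is emitted per glock, followed by a " H:" line per holder.
 */
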
1805 static int gfs2_debugfs_open(struct inode *inode, struct file *file)
1806 {
1807 	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1808 				   sizeof(struct gfs2_glock_iter));
1809 	if (ret == 0) {
1810 		struct seq_file *seq = file->private_data;
1811 		struct gfs2_glock_iter *gi = seq->private;
1812 		gi->sdp = inode->i_private;
1813 	}
1814 	return ret;
1815 }
1816 
1817 static const struct file_operations gfs2_debug_fops = {
1818 	.owner   = THIS_MODULE,
1819 	.open    = gfs2_debugfs_open,
1820 	.read    = seq_read,
1821 	.llseek  = seq_lseek,
1822 	.release = seq_release_private,
1823 };
1824 
1825 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
1826 {
1827 	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
1828 	if (!sdp->debugfs_dir)
1829 		return -ENOMEM;
1830 	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
1831 							 S_IFREG | S_IRUGO,
1832 							 sdp->debugfs_dir, sdp,
1833 							 &gfs2_debug_fops);
1834 	if (!sdp->debugfs_dentry_glocks)
1835 		return -ENOMEM;
1836 
1837 	return 0;
1838 }
1839 
1840 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
1841 {
1842 	if (sdp && sdp->debugfs_dir) {
1843 		if (sdp->debugfs_dentry_glocks) {
1844 			debugfs_remove(sdp->debugfs_dentry_glocks);
1845 			sdp->debugfs_dentry_glocks = NULL;
1846 		}
1847 		debugfs_remove(sdp->debugfs_dir);
1848 		sdp->debugfs_dir = NULL;
1849 	}
1850 }
1851 
1852 int gfs2_register_debugfs(void)
1853 {
1854 	gfs2_root = debugfs_create_dir("gfs2", NULL);
1855 	return gfs2_root ? 0 : -ENOMEM;
1856 }
1857 
1858 void gfs2_unregister_debugfs(void)
1859 {
1860 	debugfs_remove(gfs2_root);
1861 	gfs2_root = NULL;
1862 }
1863