xref: /openbmc/linux/fs/gfs2/glock.c (revision c0e297dc)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/spinlock.h>
15 #include <linux/buffer_head.h>
16 #include <linux/delay.h>
17 #include <linux/sort.h>
18 #include <linux/jhash.h>
19 #include <linux/kallsyms.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/list.h>
22 #include <linux/wait.h>
23 #include <linux/module.h>
24 #include <asm/uaccess.h>
25 #include <linux/seq_file.h>
26 #include <linux/debugfs.h>
27 #include <linux/kthread.h>
28 #include <linux/freezer.h>
29 #include <linux/workqueue.h>
30 #include <linux/jiffies.h>
31 #include <linux/rcupdate.h>
32 #include <linux/rculist_bl.h>
33 #include <linux/bit_spinlock.h>
34 #include <linux/percpu.h>
35 #include <linux/list_sort.h>
36 #include <linux/lockref.h>
37 
38 #include "gfs2.h"
39 #include "incore.h"
40 #include "glock.h"
41 #include "glops.h"
42 #include "inode.h"
43 #include "lops.h"
44 #include "meta_io.h"
45 #include "quota.h"
46 #include "super.h"
47 #include "util.h"
48 #include "bmap.h"
49 #define CREATE_TRACE_POINTS
50 #include "trace_gfs2.h"
51 
52 struct gfs2_glock_iter {
53 	int hash;			/* hash bucket index           */
54 	unsigned nhash;			/* Index within current bucket */
55 	struct gfs2_sbd *sdp;		/* incore superblock           */
56 	struct gfs2_glock *gl;		/* current glock struct        */
57 	loff_t last_pos;		/* last position               */
58 };
59 
60 typedef void (*glock_examiner) (struct gfs2_glock *gl);
61 
62 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
63 
64 static struct dentry *gfs2_root;
65 static struct workqueue_struct *glock_workqueue;
66 struct workqueue_struct *gfs2_delete_workqueue;
67 static LIST_HEAD(lru_list);
68 static atomic_t lru_count = ATOMIC_INIT(0);
69 static DEFINE_SPINLOCK(lru_lock);
70 
71 #define GFS2_GL_HASH_SHIFT      15
72 #define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
73 #define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
74 
75 static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];
76
77 
78 /**
79  * gl_hash() - Turn glock name into hash bucket number
80  * @sdp: The incore superblock
81  * @name: The lock name
82  * Returns: The number of the corresponding hash bucket
83  */
84 
85 static unsigned int gl_hash(const struct gfs2_sbd *sdp,
86 			    const struct lm_lockname *name)
87 {
88 	unsigned int h;
89 
90 	h = jhash(&name->ln_number, sizeof(u64), 0);
91 	h = jhash(&name->ln_type, sizeof(unsigned int), h);
92 	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
93 	h &= GFS2_GL_HASH_MASK;
94 
95 	return h;
96 }
97 
98 static inline void spin_lock_bucket(unsigned int hash)
99 {
100 	hlist_bl_lock(&gl_hash_table[hash]);
101 }
102 
103 static inline void spin_unlock_bucket(unsigned int hash)
104 {
105 	hlist_bl_unlock(&gl_hash_table[hash]);
106 }
107 
108 static void gfs2_glock_dealloc(struct rcu_head *rcu)
109 {
110 	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
111 
112 	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
113 		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
114 	} else {
115 		kfree(gl->gl_lksb.sb_lvbptr);
116 		kmem_cache_free(gfs2_glock_cachep, gl);
117 	}
118 }
119 
120 void gfs2_glock_free(struct gfs2_glock *gl)
121 {
122 	struct gfs2_sbd *sdp = gl->gl_sbd;
123 
124 	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
125 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
126 		wake_up(&sdp->sd_glock_wait);
127 }
128 
129 /**
130  * gfs2_glock_hold() - increment reference count on glock
131  * @gl: The glock to hold
132  *
133  */
134 
135 static void gfs2_glock_hold(struct gfs2_glock *gl)
136 {
137 	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
138 	lockref_get(&gl->gl_lockref);
139 }
140 
141 /**
142  * demote_ok - Check to see if it's ok to unlock a glock
143  * @gl: the glock
144  *
145  * Returns: 1 if it's ok
146  */
147 
148 static int demote_ok(const struct gfs2_glock *gl)
149 {
150 	const struct gfs2_glock_operations *glops = gl->gl_ops;
151 
152 	if (gl->gl_state == LM_ST_UNLOCKED)
153 		return 0;
154 	if (!list_empty(&gl->gl_holders))
155 		return 0;
156 	if (glops->go_demote_ok)
157 		return glops->go_demote_ok(gl);
158 	return 1;
159 }
160 
161 
162 void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
163 {
164 	spin_lock(&lru_lock);
165 
166 	if (!list_empty(&gl->gl_lru))
167 		list_del_init(&gl->gl_lru);
168 	else
169 		atomic_inc(&lru_count);
170 
171 	list_add_tail(&gl->gl_lru, &lru_list);
172 	set_bit(GLF_LRU, &gl->gl_flags);
173 	spin_unlock(&lru_lock);
174 }
175 
176 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
177 {
178 	spin_lock(&lru_lock);
179 	if (!list_empty(&gl->gl_lru)) {
180 		list_del_init(&gl->gl_lru);
181 		atomic_dec(&lru_count);
182 		clear_bit(GLF_LRU, &gl->gl_flags);
183 	}
184 	spin_unlock(&lru_lock);
185 }
186 
187 /**
188  * gfs2_glock_put() - Decrement reference count on glock
189  * @gl: The glock to put
190  *
191  */
192 
193 void gfs2_glock_put(struct gfs2_glock *gl)
194 {
195 	struct gfs2_sbd *sdp = gl->gl_sbd;
196 	struct address_space *mapping = gfs2_glock2aspace(gl);
197 
198 	if (lockref_put_or_lock(&gl->gl_lockref))
199 		return;
200 
201 	lockref_mark_dead(&gl->gl_lockref);
202 
203 	gfs2_glock_remove_from_lru(gl);
204 	spin_unlock(&gl->gl_lockref.lock);
205 	spin_lock_bucket(gl->gl_hash);
206 	hlist_bl_del_rcu(&gl->gl_list);
207 	spin_unlock_bucket(gl->gl_hash);
208 	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
209 	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
210 	trace_gfs2_glock_put(gl);
211 	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
212 }
213 
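/*
 * Editorial note (illustrative, not part of the original file):
 * gfs2_glock_hold() and gfs2_glock_put() pair like any other kernel
 * get/put.  lockref_put_or_lock() drops the count locklessly in the
 * common case; only the final put falls through to the slow path
 * above, which unhashes the glock and hands it back to the lock
 * module.  A sketch of the usual pairing:
 *
 *	gfs2_glock_hold(gl);
 *	... use gl ...
 *	gfs2_glock_put(gl);	(may free gl via RCU; don't touch it after)
 */
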
214 /**
215  * search_bucket() - Find struct gfs2_glock by lock number
216  * @hash: the hash bucket to search
217  * @name: The lock name
218  *
219  * Returns: NULL, or the struct gfs2_glock with the requested number
220  */
221 
222 static struct gfs2_glock *search_bucket(unsigned int hash,
223 					const struct gfs2_sbd *sdp,
224 					const struct lm_lockname *name)
225 {
226 	struct gfs2_glock *gl;
227 	struct hlist_bl_node *h;
228 
229 	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
230 		if (!lm_name_equal(&gl->gl_name, name))
231 			continue;
232 		if (gl->gl_sbd != sdp)
233 			continue;
234 		if (lockref_get_not_dead(&gl->gl_lockref))
235 			return gl;
236 	}
237 
238 	return NULL;
239 }
240 
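/*
 * Editorial note (illustrative, not part of the original file): the
 * superblock pointer is folded into gl_hash() because gl_hash_table
 * is a single global table, so glocks from every mounted filesystem
 * share these buckets.  A lookup therefore supplies both @sdp and
 * @name, along the lines of:
 *
 *	struct lm_lockname name = {
 *		.ln_number = no_addr,		(some block number)
 *		.ln_type   = LM_TYPE_INODE,
 *	};
 *
 *	rcu_read_lock();
 *	gl = search_bucket(gl_hash(sdp, &name), sdp, &name);
 *	rcu_read_unlock();
 *
 * A successful return carries a reference taken via
 * lockref_get_not_dead().
 */
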
241 /**
242  * may_grant - check if it's ok to grant a new lock
243  * @gl: The glock
244  * @gh: The lock request which we wish to grant
245  *
246  * Returns: true if it's ok to grant the lock
247  */
248 
249 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
250 {
251 	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
252 	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
253 	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
254 		return 0;
255 	if (gl->gl_state == gh->gh_state)
256 		return 1;
257 	if (gh->gh_flags & GL_EXACT)
258 		return 0;
259 	if (gl->gl_state == LM_ST_EXCLUSIVE) {
260 		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
261 			return 1;
262 		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
263 			return 1;
264 	}
265 	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
266 		return 1;
267 	return 0;
268 }
269 
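/*
 * Editorial summary (illustrative, not part of the original file) of
 * the grant rules above:
 *
 *	- an EX request, or any request while the queue head wants EX,
 *	  is only granted to the head of the queue;
 *	- a request exactly matching the current state is granted;
 *	- GL_EXACT refuses everything other than an exact match;
 *	- otherwise, under an EX lock an SH request piggybacks on an SH
 *	  head and a DF request on a DF head (EX covers both), and
 *	  LM_FLAG_ANY accepts any currently held (non-UN) state.
 */
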
270 static void gfs2_holder_wake(struct gfs2_holder *gh)
271 {
272 	clear_bit(HIF_WAIT, &gh->gh_iflags);
273 	smp_mb__after_atomic();
274 	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
275 }
276 
277 /**
278  * do_error - Something unexpected has happened during a lock request
279  *
280  */
281 
282 static inline void do_error(struct gfs2_glock *gl, const int ret)
283 {
284 	struct gfs2_holder *gh, *tmp;
285 
286 	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
287 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
288 			continue;
289 		if (ret & LM_OUT_ERROR)
290 			gh->gh_error = -EIO;
291 		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
292 			gh->gh_error = GLR_TRYFAILED;
293 		else
294 			continue;
295 		list_del_init(&gh->gh_list);
296 		trace_gfs2_glock_queue(gh, 0);
297 		gfs2_holder_wake(gh);
298 	}
299 }
300 
301 /**
302  * do_promote - promote as many requests as possible on the current queue
303  * @gl: The glock
304  *
305  * Returns: 1 if there is a blocked holder at the head of the list, or 2
306  *          if a type specific operation is underway.
307  */
308 
309 static int do_promote(struct gfs2_glock *gl)
310 __releases(&gl->gl_spin)
311 __acquires(&gl->gl_spin)
312 {
313 	const struct gfs2_glock_operations *glops = gl->gl_ops;
314 	struct gfs2_holder *gh, *tmp;
315 	int ret;
316 
317 restart:
318 	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
319 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
320 			continue;
321 		if (may_grant(gl, gh)) {
322 			if (gh->gh_list.prev == &gl->gl_holders &&
323 			    glops->go_lock) {
324 				spin_unlock(&gl->gl_spin);
325 				/* FIXME: eliminate this eventually */
326 				ret = glops->go_lock(gh);
327 				spin_lock(&gl->gl_spin);
328 				if (ret) {
329 					if (ret == 1)
330 						return 2;
331 					gh->gh_error = ret;
332 					list_del_init(&gh->gh_list);
333 					trace_gfs2_glock_queue(gh, 0);
334 					gfs2_holder_wake(gh);
335 					goto restart;
336 				}
337 				set_bit(HIF_HOLDER, &gh->gh_iflags);
338 				trace_gfs2_promote(gh, 1);
339 				gfs2_holder_wake(gh);
340 				goto restart;
341 			}
342 			set_bit(HIF_HOLDER, &gh->gh_iflags);
343 			trace_gfs2_promote(gh, 0);
344 			gfs2_holder_wake(gh);
345 			continue;
346 		}
347 		if (gh->gh_list.prev == &gl->gl_holders)
348 			return 1;
349 		do_error(gl, 0);
350 		break;
351 	}
352 	return 0;
353 }
354 
355 /**
356  * find_first_waiter - find the first gh that's waiting for the glock
357  * @gl: the glock
358  */
359 
360 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
361 {
362 	struct gfs2_holder *gh;
363 
364 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
365 		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
366 			return gh;
367 	}
368 	return NULL;
369 }
370 
371 /**
372  * state_change - record that the glock is now in a different state
373  * @gl: the glock
374  * @new_state: the new state
375  *
376  */
377 
378 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
379 {
380 	int held1, held2;
381 
382 	held1 = (gl->gl_state != LM_ST_UNLOCKED);
383 	held2 = (new_state != LM_ST_UNLOCKED);
384 
385 	if (held1 != held2) {
386 		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
387 		if (held2)
388 			gl->gl_lockref.count++;
389 		else
390 			gl->gl_lockref.count--;
391 	}
392 	if (held1 && held2 && list_empty(&gl->gl_holders))
393 		clear_bit(GLF_QUEUED, &gl->gl_flags);
394 
395 	if (new_state != gl->gl_target)
396 		/* shorten our minimum hold time */
397 		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
398 				       GL_GLOCK_MIN_HOLD);
399 	gl->gl_state = new_state;
400 	gl->gl_tchange = jiffies;
401 }
402 
403 static void gfs2_demote_wake(struct gfs2_glock *gl)
404 {
405 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
406 	clear_bit(GLF_DEMOTE, &gl->gl_flags);
407 	smp_mb__after_atomic();
408 	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
409 }
410 
411 /**
412  * finish_xmote - The DLM has replied to one of our lock requests
413  * @gl: The glock
414  * @ret: The status from the DLM
415  *
416  */
417 
418 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
419 {
420 	const struct gfs2_glock_operations *glops = gl->gl_ops;
421 	struct gfs2_holder *gh;
422 	unsigned state = ret & LM_OUT_ST_MASK;
423 	int rv;
424 
425 	spin_lock(&gl->gl_spin);
426 	trace_gfs2_glock_state_change(gl, state);
427 	state_change(gl, state);
428 	gh = find_first_waiter(gl);
429 
430 	/* Demote to UN request arrived during demote to SH or DF */
431 	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
432 	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
433 		gl->gl_target = LM_ST_UNLOCKED;
434 
435 	/* Check for state != intended state */
436 	if (unlikely(state != gl->gl_target)) {
437 		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
438 			/* move to back of queue and try next entry */
439 			if (ret & LM_OUT_CANCELED) {
440 				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
441 					list_move_tail(&gh->gh_list, &gl->gl_holders);
442 				gh = find_first_waiter(gl);
443 				gl->gl_target = gh->gh_state;
444 				goto retry;
445 			}
446 			/* Some error or failed "try lock" - report it */
447 			if ((ret & LM_OUT_ERROR) ||
448 			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
449 				gl->gl_target = gl->gl_state;
450 				do_error(gl, ret);
451 				goto out;
452 			}
453 		}
454 		switch(state) {
455 		/* Unlocked due to conversion deadlock, try again */
456 		case LM_ST_UNLOCKED:
457 retry:
458 			do_xmote(gl, gh, gl->gl_target);
459 			break;
460 		/* Conversion fails, unlock and try again */
461 		case LM_ST_SHARED:
462 		case LM_ST_DEFERRED:
463 			do_xmote(gl, gh, LM_ST_UNLOCKED);
464 			break;
465 		default: /* Everything else */
466 			pr_err("wanted %u got %u\n", gl->gl_target, state);
467 			GLOCK_BUG_ON(gl, 1);
468 		}
469 		spin_unlock(&gl->gl_spin);
470 		return;
471 	}
472 
473 	/* Fast path - we got what we asked for */
474 	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
475 		gfs2_demote_wake(gl);
476 	if (state != LM_ST_UNLOCKED) {
477 		if (glops->go_xmote_bh) {
478 			spin_unlock(&gl->gl_spin);
479 			rv = glops->go_xmote_bh(gl, gh);
480 			spin_lock(&gl->gl_spin);
481 			if (rv) {
482 				do_error(gl, rv);
483 				goto out;
484 			}
485 		}
486 		rv = do_promote(gl);
487 		if (rv == 2)
488 			goto out_locked;
489 	}
490 out:
491 	clear_bit(GLF_LOCK, &gl->gl_flags);
492 out_locked:
493 	spin_unlock(&gl->gl_spin);
494 }
495 
496 /**
497  * do_xmote - Calls the DLM to change the state of a lock
498  * @gl: The lock state
499  * @gh: The holder (only for promotes)
500  * @target: The target lock state
501  *
502  */
503 
504 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
505 __releases(&gl->gl_spin)
506 __acquires(&gl->gl_spin)
507 {
508 	const struct gfs2_glock_operations *glops = gl->gl_ops;
509 	struct gfs2_sbd *sdp = gl->gl_sbd;
510 	unsigned int lck_flags = gh ? gh->gh_flags : 0;
511 	int ret;
512 
513 	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
514 		      LM_FLAG_PRIORITY);
515 	GLOCK_BUG_ON(gl, gl->gl_state == target);
516 	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
517 	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
518 	    glops->go_inval) {
519 		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
520 		do_error(gl, 0); /* Fail queued try locks */
521 	}
522 	gl->gl_req = target;
523 	set_bit(GLF_BLOCKING, &gl->gl_flags);
524 	if ((gl->gl_req == LM_ST_UNLOCKED) ||
525 	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
526 	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
527 		clear_bit(GLF_BLOCKING, &gl->gl_flags);
528 	spin_unlock(&gl->gl_spin);
529 	if (glops->go_sync)
530 		glops->go_sync(gl);
531 	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
532 		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
533 	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
534 
535 	gfs2_glock_hold(gl);
536 	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
537 		/* lock_dlm */
538 		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
539 		if (ret) {
540 			pr_err("lm_lock ret %d\n", ret);
541 			GLOCK_BUG_ON(gl, 1);
542 		}
543 	} else { /* lock_nolock */
544 		finish_xmote(gl, target);
545 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
546 			gfs2_glock_put(gl);
547 	}
548 
549 	spin_lock(&gl->gl_spin);
550 }
551 
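/*
 * Editorial note (illustrative, not part of the original file): a
 * state change round trip under lock_dlm looks like
 *
 *	run_queue() -> do_xmote() -> ls_ops->lm_lock()          (request)
 *	DLM reply -> gfs2_glock_complete() -> glock work queued (async)
 *	glock_work_func() -> finish_xmote()                     (complete)
 *
 * while lock_nolock has no remote side, so do_xmote() simply calls
 * finish_xmote() directly, as above.
 */
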
552 /**
553  * find_first_holder - find the first "holder" gh
554  * @gl: the glock
555  */
556 
557 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
558 {
559 	struct gfs2_holder *gh;
560 
561 	if (!list_empty(&gl->gl_holders)) {
562 		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
563 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
564 			return gh;
565 	}
566 	return NULL;
567 }
568 
569 /**
570  * run_queue - do all outstanding tasks related to a glock
571  * @gl: The glock in question
572  * @nonblock: True if we must not block in run_queue
573  *
574  */
575 
576 static void run_queue(struct gfs2_glock *gl, const int nonblock)
577 __releases(&gl->gl_spin)
578 __acquires(&gl->gl_spin)
579 {
580 	struct gfs2_holder *gh = NULL;
581 	int ret;
582 
583 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
584 		return;
585 
586 	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
587 
588 	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
589 	    gl->gl_demote_state != gl->gl_state) {
590 		if (find_first_holder(gl))
591 			goto out_unlock;
592 		if (nonblock)
593 			goto out_sched;
594 		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
595 		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
596 		gl->gl_target = gl->gl_demote_state;
597 	} else {
598 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
599 			gfs2_demote_wake(gl);
600 		ret = do_promote(gl);
601 		if (ret == 0)
602 			goto out_unlock;
603 		if (ret == 2)
604 			goto out;
605 		gh = find_first_waiter(gl);
606 		gl->gl_target = gh->gh_state;
607 		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
608 			do_error(gl, 0); /* Fail queued try locks */
609 	}
610 	do_xmote(gl, gh, gl->gl_target);
611 out:
612 	return;
613 
614 out_sched:
615 	clear_bit(GLF_LOCK, &gl->gl_flags);
616 	smp_mb__after_atomic();
617 	gl->gl_lockref.count++;
618 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
619 		gl->gl_lockref.count--;
620 	return;
621 
622 out_unlock:
623 	clear_bit(GLF_LOCK, &gl->gl_flags);
624 	smp_mb__after_atomic();
625 	return;
626 }
627 
628 static void delete_work_func(struct work_struct *work)
629 {
630 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
631 	struct gfs2_sbd *sdp = gl->gl_sbd;
632 	struct gfs2_inode *ip;
633 	struct inode *inode;
634 	u64 no_addr = gl->gl_name.ln_number;
635 
636 	ip = gl->gl_object;
637 	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */
638 
639 	if (ip)
640 		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
641 	else
642 		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
643 	if (inode && !IS_ERR(inode)) {
644 		d_prune_aliases(inode);
645 		iput(inode);
646 	}
647 	gfs2_glock_put(gl);
648 }
649 
650 static void glock_work_func(struct work_struct *work)
651 {
652 	unsigned long delay = 0;
653 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
654 	int drop_ref = 0;
655 
656 	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
657 		finish_xmote(gl, gl->gl_reply);
658 		drop_ref = 1;
659 	}
660 	spin_lock(&gl->gl_spin);
661 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
662 	    gl->gl_state != LM_ST_UNLOCKED &&
663 	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
664 		unsigned long holdtime, now = jiffies;
665 
666 		holdtime = gl->gl_tchange + gl->gl_hold_time;
667 		if (time_before(now, holdtime))
668 			delay = holdtime - now;
669 
670 		if (!delay) {
671 			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
672 			set_bit(GLF_DEMOTE, &gl->gl_flags);
673 		}
674 	}
675 	run_queue(gl, 0);
676 	spin_unlock(&gl->gl_spin);
677 	if (!delay)
678 		gfs2_glock_put(gl);
679 	else {
680 		if (gl->gl_name.ln_type != LM_TYPE_INODE)
681 			delay = 0;
682 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
683 			gfs2_glock_put(gl);
684 	}
685 	if (drop_ref)
686 		gfs2_glock_put(gl);
687 }
688 
689 /**
690  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
691  * @sdp: The GFS2 superblock
692  * @number: the lock number
693  * @glops: The glock_operations to use
694  * @create: If 0, don't create the glock if it doesn't exist
695  * @glp: the glock is returned here
696  *
697  * This does not lock a glock, just finds/creates structures for one.
698  *
699  * Returns: errno
700  */
701 
702 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
703 		   const struct gfs2_glock_operations *glops, int create,
704 		   struct gfs2_glock **glp)
705 {
706 	struct super_block *s = sdp->sd_vfs;
707 	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
708 	struct gfs2_glock *gl, *tmp;
709 	unsigned int hash = gl_hash(sdp, &name);
710 	struct address_space *mapping;
711 	struct kmem_cache *cachep;
712 
713 	rcu_read_lock();
714 	gl = search_bucket(hash, sdp, &name);
715 	rcu_read_unlock();
716 
717 	*glp = gl;
718 	if (gl)
719 		return 0;
720 	if (!create)
721 		return -ENOENT;
722 
723 	if (glops->go_flags & GLOF_ASPACE)
724 		cachep = gfs2_glock_aspace_cachep;
725 	else
726 		cachep = gfs2_glock_cachep;
727 	gl = kmem_cache_alloc(cachep, GFP_NOFS);
728 	if (!gl)
729 		return -ENOMEM;
730 
731 	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
732 
733 	if (glops->go_flags & GLOF_LVB) {
734 		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
735 		if (!gl->gl_lksb.sb_lvbptr) {
736 			kmem_cache_free(cachep, gl);
737 			return -ENOMEM;
738 		}
739 	}
740 
741 	atomic_inc(&sdp->sd_glock_disposal);
742 	gl->gl_sbd = sdp;
743 	gl->gl_flags = 0;
744 	gl->gl_name = name;
745 	gl->gl_lockref.count = 1;
746 	gl->gl_state = LM_ST_UNLOCKED;
747 	gl->gl_target = LM_ST_UNLOCKED;
748 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
749 	gl->gl_hash = hash;
750 	gl->gl_ops = glops;
751 	gl->gl_dstamp = ktime_set(0, 0);
752 	preempt_disable();
753 	/* We use the global stats to estimate the initial per-glock stats */
754 	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
755 	preempt_enable();
756 	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
757 	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
758 	gl->gl_tchange = jiffies;
759 	gl->gl_object = NULL;
760 	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
761 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
762 	INIT_WORK(&gl->gl_delete, delete_work_func);
763 
764 	mapping = gfs2_glock2aspace(gl);
765 	if (mapping) {
766 		mapping->a_ops = &gfs2_meta_aops;
767 		mapping->host = s->s_bdev->bd_inode;
768 		mapping->flags = 0;
769 		mapping_set_gfp_mask(mapping, GFP_NOFS);
770 		mapping->private_data = NULL;
771 		mapping->writeback_index = 0;
772 	}
773 
774 	spin_lock_bucket(hash);
775 	tmp = search_bucket(hash, sdp, &name);
776 	if (tmp) {
777 		spin_unlock_bucket(hash);
778 		kfree(gl->gl_lksb.sb_lvbptr);
779 		kmem_cache_free(cachep, gl);
780 		atomic_dec(&sdp->sd_glock_disposal);
781 		gl = tmp;
782 	} else {
783 		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
784 		spin_unlock_bucket(hash);
785 	}
786 
787 	*glp = gl;
788 
789 	return 0;
790 }
791 
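/*
 * Editorial sketch (illustrative, not part of the original file) of
 * typical use, assuming a valid @sdp and a block number no_addr from
 * the caller (gfs2_inode_glops is defined in glops.c):
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops,
 *			       CREATE, &gl);
 *	if (error)
 *		return error;
 *	... lock it via a holder, see gfs2_holder_init() below ...
 *	gfs2_glock_put(gl);	(drops the reference _get() returned)
 */
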
792 /**
793  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
794  * @gl: the glock
795  * @state: the state we're requesting
796  * @flags: the modifier flags
797  * @gh: the holder structure
798  *
799  */
800 
801 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
802 		      struct gfs2_holder *gh)
803 {
804 	INIT_LIST_HEAD(&gh->gh_list);
805 	gh->gh_gl = gl;
806 	gh->gh_ip = _RET_IP_;
807 	gh->gh_owner_pid = get_pid(task_pid(current));
808 	gh->gh_state = state;
809 	gh->gh_flags = flags;
810 	gh->gh_error = 0;
811 	gh->gh_iflags = 0;
812 	gfs2_glock_hold(gl);
813 }
814 
815 /**
816  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
817  * @state: the state we're requesting
818  * @flags: the modifier flags
819  * @gh: the holder structure
820  *
821  * Don't mess with the glock.
822  *
823  */
824 
825 void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
826 {
827 	gh->gh_state = state;
828 	gh->gh_flags = flags;
829 	gh->gh_iflags = 0;
830 	gh->gh_ip = _RET_IP_;
831 	put_pid(gh->gh_owner_pid);
832 	gh->gh_owner_pid = get_pid(task_pid(current));
833 }
834 
835 /**
836  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
837  * @gh: the holder structure
838  *
839  */
840 
841 void gfs2_holder_uninit(struct gfs2_holder *gh)
842 {
843 	put_pid(gh->gh_owner_pid);
844 	gfs2_glock_put(gh->gh_gl);
845 	gh->gh_gl = NULL;
846 	gh->gh_ip = 0;
847 }
848 
849 /**
850  * gfs2_glock_wait - wait on a glock acquisition
851  * @gh: the glock holder
852  *
853  * Returns: 0 on success
854  */
855 
856 int gfs2_glock_wait(struct gfs2_holder *gh)
857 {
858 	unsigned long time1 = jiffies;
859 
860 	might_sleep();
861 	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
862 	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
863 		/* Lengthen the minimum hold time. */
864 		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
865 					      GL_GLOCK_HOLD_INCR,
866 					      GL_GLOCK_MAX_HOLD);
867 	return gh->gh_error;
868 }
869 
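/*
 * Editorial note (illustrative, not part of the original file):
 * gl_hold_time adapts to contention.  Waiting here for more than a
 * second lengthens the minimum hold time (capped at
 * GL_GLOCK_MAX_HOLD), while state_change() above shortens it again
 * whenever the lock lands in a state we did not ask for, so glocks
 * that are fought over are cached a little longer between demotes.
 */
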
870 /**
871  * handle_callback - process a demote request
872  * @gl: the glock
873  * @state: the state the caller wants us to change to
874  *
875  * There are only two requests that we are going to see in actual
876  * practice: LM_ST_SHARED and LM_ST_UNLOCKED
877  */
878 
879 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
880 			    unsigned long delay, bool remote)
881 {
882 	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
883 
884 	set_bit(bit, &gl->gl_flags);
885 	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
886 		gl->gl_demote_state = state;
887 		gl->gl_demote_time = jiffies;
888 	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
889 			gl->gl_demote_state != state) {
890 		gl->gl_demote_state = LM_ST_UNLOCKED;
891 	}
892 	if (gl->gl_ops->go_callback)
893 		gl->gl_ops->go_callback(gl, remote);
894 	trace_gfs2_demote_rq(gl, remote);
895 }
896 
897 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
898 {
899 	struct va_format vaf;
900 	va_list args;
901 
902 	va_start(args, fmt);
903 
904 	if (seq) {
905 		seq_vprintf(seq, fmt, args);
906 	} else {
907 		vaf.fmt = fmt;
908 		vaf.va = &args;
909 
910 		pr_err("%pV", &vaf);
911 	}
912 
913 	va_end(args);
914 }
915 
916 /**
917  * add_to_queue - Add a holder to the wait queue (but look for recursion)
918  * @gh: the holder structure to add
919  *
920  * Eventually we should move the recursive locking trap to a
921  * debugging option or something like that. This is the fast
922  * path and needs to have the minimum number of distractions.
923  *
924  */
925 
926 static inline void add_to_queue(struct gfs2_holder *gh)
927 __releases(&gl->gl_spin)
928 __acquires(&gl->gl_spin)
929 {
930 	struct gfs2_glock *gl = gh->gh_gl;
931 	struct gfs2_sbd *sdp = gl->gl_sbd;
932 	struct list_head *insert_pt = NULL;
933 	struct gfs2_holder *gh2;
934 	int try_futile = 0;
935 
936 	BUG_ON(gh->gh_owner_pid == NULL);
937 	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
938 		BUG();
939 
940 	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
941 		if (test_bit(GLF_LOCK, &gl->gl_flags))
942 			try_futile = !may_grant(gl, gh);
943 		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
944 			goto fail;
945 	}
946 
947 	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
948 		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
949 		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
950 			goto trap_recursive;
951 		if (try_futile &&
952 		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
953 fail:
954 			gh->gh_error = GLR_TRYFAILED;
955 			gfs2_holder_wake(gh);
956 			return;
957 		}
958 		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
959 			continue;
960 		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
961 			insert_pt = &gh2->gh_list;
962 	}
963 	set_bit(GLF_QUEUED, &gl->gl_flags);
964 	trace_gfs2_glock_queue(gh, 1);
965 	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
966 	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
967 	if (likely(insert_pt == NULL)) {
968 		list_add_tail(&gh->gh_list, &gl->gl_holders);
969 		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
970 			goto do_cancel;
971 		return;
972 	}
973 	list_add_tail(&gh->gh_list, insert_pt);
974 do_cancel:
975 	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
976 	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
977 		spin_unlock(&gl->gl_spin);
978 		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
979 			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
980 		spin_lock(&gl->gl_spin);
981 	}
982 	return;
983 
984 trap_recursive:
985 	pr_err("original: %pSR\n", (void *)gh2->gh_ip);
986 	pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
987 	pr_err("lock type: %d req lock state : %d\n",
988 	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
989 	pr_err("new: %pSR\n", (void *)gh->gh_ip);
990 	pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
991 	pr_err("lock type: %d req lock state : %d\n",
992 	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
993 	gfs2_dump_glock(NULL, gl);
994 	BUG();
995 }
996 
997 /**
998  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
999  * @gh: the holder structure
1000  *
1001  * If GL_ASYNC is set in gh->gh_flags, this never returns an error.
1002  *
1003  * Returns: 0, GLR_TRYFAILED, or errno on failure
1004  */
1005 
1006 int gfs2_glock_nq(struct gfs2_holder *gh)
1007 {
1008 	struct gfs2_glock *gl = gh->gh_gl;
1009 	struct gfs2_sbd *sdp = gl->gl_sbd;
1010 	int error = 0;
1011 
1012 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1013 		return -EIO;
1014 
1015 	if (test_bit(GLF_LRU, &gl->gl_flags))
1016 		gfs2_glock_remove_from_lru(gl);
1017 
1018 	spin_lock(&gl->gl_spin);
1019 	add_to_queue(gh);
1020 	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
1021 		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
1022 		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1023 		gl->gl_lockref.count++;
1024 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1025 			gl->gl_lockref.count--;
1026 	}
1027 	run_queue(gl, 1);
1028 	spin_unlock(&gl->gl_spin);
1029 
1030 	if (!(gh->gh_flags & GL_ASYNC))
1031 		error = gfs2_glock_wait(gh);
1032 
1033 	return error;
1034 }
1035 
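/*
 * Editorial sketch (illustrative, not part of the original file) of
 * the full holder life cycle around gfs2_glock_nq():
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);	(blocks unless GL_ASYNC is set)
 *	if (!error) {
 *		... access the object protected by gl ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);	(drops the glock reference)
 *
 * The gfs2_glock_nq_init() wrapper in glock.h combines the first two
 * calls and is used by gfs2_glock_nq_num() below.
 */
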
1036 /**
1037  * gfs2_glock_poll - poll to see if an async request has been completed
1038  * @gh: the holder
1039  *
1040  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1041  */
1042 
1043 int gfs2_glock_poll(struct gfs2_holder *gh)
1044 {
1045 	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1046 }
1047 
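/*
 * Editorial sketch (illustrative, not part of the original file) of
 * asynchronous acquisition with GL_ASYNC and gfs2_glock_poll():
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	error = gfs2_glock_nq(&gh);	(returns 0 without waiting)
 *	... do other work while the lock is granted ...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);	(now returns at once)
 *
 * gfs2_glock_wait() must still be called to collect the real result
 * before the holder is used.
 */
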
1048 /**
1049  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1050  * @gh: the glock holder
1051  *
1052  */
1053 
1054 void gfs2_glock_dq(struct gfs2_holder *gh)
1055 {
1056 	struct gfs2_glock *gl = gh->gh_gl;
1057 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1058 	unsigned delay = 0;
1059 	int fast_path = 0;
1060 
1061 	spin_lock(&gl->gl_spin);
1062 	if (gh->gh_flags & GL_NOCACHE)
1063 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1064 
1065 	list_del_init(&gh->gh_list);
1066 	if (find_first_holder(gl) == NULL) {
1067 		if (glops->go_unlock) {
1068 			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1069 			spin_unlock(&gl->gl_spin);
1070 			glops->go_unlock(gh);
1071 			spin_lock(&gl->gl_spin);
1072 			clear_bit(GLF_LOCK, &gl->gl_flags);
1073 		}
1074 		if (list_empty(&gl->gl_holders) &&
1075 		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1076 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
1077 			fast_path = 1;
1078 	}
1079 	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
1080 	    (glops->go_flags & GLOF_LRU))
1081 		gfs2_glock_add_to_lru(gl);
1082 
1083 	trace_gfs2_glock_queue(gh, 0);
1084 	spin_unlock(&gl->gl_spin);
1085 	if (likely(fast_path))
1086 		return;
1087 
1088 	gfs2_glock_hold(gl);
1089 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1090 	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1091 	    gl->gl_name.ln_type == LM_TYPE_INODE)
1092 		delay = gl->gl_hold_time;
1093 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1094 		gfs2_glock_put(gl);
1095 }
1096 
1097 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1098 {
1099 	struct gfs2_glock *gl = gh->gh_gl;
1100 	gfs2_glock_dq(gh);
1101 	might_sleep();
1102 	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
1103 }
1104 
1105 /**
1106  * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1107  * @gh: the holder structure
1108  *
1109  */
1110 
1111 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1112 {
1113 	gfs2_glock_dq(gh);
1114 	gfs2_holder_uninit(gh);
1115 }
1116 
1117 /**
1118  * gfs2_glock_nq_num - acquire a glock based on lock number
1119  * @sdp: the filesystem
1120  * @number: the lock number
1121  * @glops: the glock operations for the type of glock
1122  * @state: the state to acquire the glock in
1123  * @flags: modifier flags for the acquisition
1124  * @gh: the struct gfs2_holder
1125  *
1126  * Returns: errno
1127  */
1128 
1129 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1130 		      const struct gfs2_glock_operations *glops,
1131 		      unsigned int state, int flags, struct gfs2_holder *gh)
1132 {
1133 	struct gfs2_glock *gl;
1134 	int error;
1135 
1136 	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1137 	if (!error) {
1138 		error = gfs2_glock_nq_init(gl, state, flags, gh);
1139 		gfs2_glock_put(gl);
1140 	}
1141 
1142 	return error;
1143 }
1144 
1145 /**
1146  * glock_compare - Compare two struct gfs2_glock structures for sorting
1147  * @arg_a: the first structure
1148  * @arg_b: the second structure
1149  *
1150  */
1151 
1152 static int glock_compare(const void *arg_a, const void *arg_b)
1153 {
1154 	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1155 	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1156 	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1157 	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1158 
1159 	if (a->ln_number > b->ln_number)
1160 		return 1;
1161 	if (a->ln_number < b->ln_number)
1162 		return -1;
1163 	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1164 	return 0;
1165 }
1166 
1167 /**
1168  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1169  * @num_gh: the number of structures
1170  * @ghs: an array of struct gfs2_holder structures
1171  *
1172  * Returns: 0 on success (all glocks acquired),
1173  *          errno on failure (no glocks acquired)
1174  */
1175 
1176 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1177 		     struct gfs2_holder **p)
1178 {
1179 	unsigned int x;
1180 	int error = 0;
1181 
1182 	for (x = 0; x < num_gh; x++)
1183 		p[x] = &ghs[x];
1184 
1185 	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1186 
1187 	for (x = 0; x < num_gh; x++) {
1188 		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1189 
1190 		error = gfs2_glock_nq(p[x]);
1191 		if (error) {
1192 			while (x--)
1193 				gfs2_glock_dq(p[x]);
1194 			break;
1195 		}
1196 	}
1197 
1198 	return error;
1199 }
1200 
1201 /**
1202  * gfs2_glock_nq_m - acquire multiple glocks
1203  * @num_gh: the number of structures
1204  * @ghs: an array of struct gfs2_holder structures
1205  *
1206  *
1207  * Returns: 0 on success (all glocks acquired),
1208  *          errno on failure (no glocks acquired)
1209  */
1210 
1211 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1212 {
1213 	struct gfs2_holder *tmp[4];
1214 	struct gfs2_holder **pph = tmp;
1215 	int error = 0;
1216 
1217 	switch(num_gh) {
1218 	case 0:
1219 		return 0;
1220 	case 1:
1221 		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1222 		return gfs2_glock_nq(ghs);
1223 	default:
1224 		if (num_gh <= 4)
1225 			break;
1226 		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1227 		if (!pph)
1228 			return -ENOMEM;
1229 	}
1230 
1231 	error = nq_m_sync(num_gh, ghs, pph);
1232 
1233 	if (pph != tmp)
1234 		kfree(pph);
1235 
1236 	return error;
1237 }
1238 
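/*
 * Editorial sketch (illustrative, not part of the original file),
 * assuming two glocks gl_a and gl_b obtained elsewhere:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);	(sorted, deadlock free)
 *	...
 *	gfs2_glock_dq_m(2, ghs);
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */
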
1239 /**
1240  * gfs2_glock_dq_m - release multiple glocks
1241  * @num_gh: the number of structures
1242  * @ghs: an array of struct gfs2_holder structures
1243  *
1244  */
1245 
1246 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1247 {
1248 	while (num_gh--)
1249 		gfs2_glock_dq(&ghs[num_gh]);
1250 }
1251 
1252 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1253 {
1254 	unsigned long delay = 0;
1255 	unsigned long holdtime;
1256 	unsigned long now = jiffies;
1257 
1258 	gfs2_glock_hold(gl);
1259 	holdtime = gl->gl_tchange + gl->gl_hold_time;
1260 	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
1261 	    gl->gl_name.ln_type == LM_TYPE_INODE) {
1262 		if (time_before(now, holdtime))
1263 			delay = holdtime - now;
1264 		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1265 			delay = gl->gl_hold_time;
1266 	}
1267 
1268 	spin_lock(&gl->gl_spin);
1269 	handle_callback(gl, state, delay, true);
1270 	spin_unlock(&gl->gl_spin);
1271 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1272 		gfs2_glock_put(gl);
1273 }
1274 
1275 /**
1276  * gfs2_should_freeze - Figure out if glock should be frozen
1277  * @gl: The glock in question
1278  *
1279  * Glocks are not frozen if (a) the result of the dlm operation is
1280  * an error, (b) the locking operation was an unlock operation or
1281  * (c) if there is a "noexp" flagged request anywhere in the queue
1282  *
1283  * Returns: 1 if freezing should occur, 0 otherwise
1284  */
1285 
1286 static int gfs2_should_freeze(const struct gfs2_glock *gl)
1287 {
1288 	const struct gfs2_holder *gh;
1289 
1290 	if (gl->gl_reply & ~LM_OUT_ST_MASK)
1291 		return 0;
1292 	if (gl->gl_target == LM_ST_UNLOCKED)
1293 		return 0;
1294 
1295 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1296 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1297 			continue;
1298 		if (LM_FLAG_NOEXP & gh->gh_flags)
1299 			return 0;
1300 	}
1301 
1302 	return 1;
1303 }
1304 
1305 /**
1306  * gfs2_glock_complete - Callback used by locking
1307  * @gl: Pointer to the glock
1308  * @ret: The return value from the dlm
1309  *
1310  * The gl_reply field is under the gl_spin lock so that it is ok
1311  * to use a bitfield shared with other glock state fields.
1312  */
1313 
1314 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1315 {
1316 	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
1317 
1318 	spin_lock(&gl->gl_spin);
1319 	gl->gl_reply = ret;
1320 
1321 	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
1322 		if (gfs2_should_freeze(gl)) {
1323 			set_bit(GLF_FROZEN, &gl->gl_flags);
1324 			spin_unlock(&gl->gl_spin);
1325 			return;
1326 		}
1327 	}
1328 
1329 	gl->gl_lockref.count++;
1330 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1331 	spin_unlock(&gl->gl_spin);
1332 
1333 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1334 		gfs2_glock_put(gl);
1335 }
1336 
1337 static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
1338 {
1339 	struct gfs2_glock *gla, *glb;
1340 
1341 	gla = list_entry(a, struct gfs2_glock, gl_lru);
1342 	glb = list_entry(b, struct gfs2_glock, gl_lru);
1343 
1344 	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
1345 		return 1;
1346 	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
1347 		return -1;
1348 
1349 	return 0;
1350 }
1351 
1352 /**
1353  * gfs2_dispose_glock_lru - Demote a list of glocks
1354  * @list: The list to dispose of
1355  *
1356  * Disposing of glocks may involve disk accesses, so here we sort
1357  * the glocks by number (i.e. disk location of the inodes) so that if
1358  * there are any such accesses, they'll be sent in order (mostly).
1359  *
1360  * Must be called under the lru_lock, but may drop and retake this
1361  * lock. While the lru_lock is dropped, entries may vanish from the
1362  * list, but no new entries will appear on the list (since it is
1363  * private)
1364  */
1365 
1366 static void gfs2_dispose_glock_lru(struct list_head *list)
1367 __releases(&lru_lock)
1368 __acquires(&lru_lock)
1369 {
1370 	struct gfs2_glock *gl;
1371 
1372 	list_sort(NULL, list, glock_cmp);
1373 
1374 	while(!list_empty(list)) {
1375 		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1376 		list_del_init(&gl->gl_lru);
1377 		if (!spin_trylock(&gl->gl_spin)) {
1378 add_back_to_lru:
1379 			list_add(&gl->gl_lru, &lru_list);
1380 			atomic_inc(&lru_count);
1381 			continue;
1382 		}
1383 		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1384 			spin_unlock(&gl->gl_spin);
1385 			goto add_back_to_lru;
1386 		}
1387 		clear_bit(GLF_LRU, &gl->gl_flags);
1388 		gl->gl_lockref.count++;
1389 		if (demote_ok(gl))
1390 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1391 		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
1392 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1393 			gl->gl_lockref.count--;
1394 		spin_unlock(&gl->gl_spin);
1395 		cond_resched_lock(&lru_lock);
1396 	}
1397 }
1398 
1399 /**
1400  * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
1401  * @nr: The number of entries to scan
1402  *
1403  * This function selects the entries on the LRU which are able to
1404  * be demoted, and then kicks off the process by calling
1405  * gfs2_dispose_glock_lru() above.
1406  */
1407 
1408 static long gfs2_scan_glock_lru(int nr)
1409 {
1410 	struct gfs2_glock *gl;
1411 	LIST_HEAD(skipped);
1412 	LIST_HEAD(dispose);
1413 	long freed = 0;
1414 
1415 	spin_lock(&lru_lock);
1416 	while ((nr-- >= 0) && !list_empty(&lru_list)) {
1417 		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1418 
1419 		/* Test for being demotable */
1420 		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
1421 			list_move(&gl->gl_lru, &dispose);
1422 			atomic_dec(&lru_count);
1423 			freed++;
1424 			continue;
1425 		}
1426 
1427 		list_move(&gl->gl_lru, &skipped);
1428 	}
1429 	list_splice(&skipped, &lru_list);
1430 	if (!list_empty(&dispose))
1431 		gfs2_dispose_glock_lru(&dispose);
1432 	spin_unlock(&lru_lock);
1433 
1434 	return freed;
1435 }
1436 
1437 static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
1438 					    struct shrink_control *sc)
1439 {
1440 	if (!(sc->gfp_mask & __GFP_FS))
1441 		return SHRINK_STOP;
1442 	return gfs2_scan_glock_lru(sc->nr_to_scan);
1443 }
1444 
1445 static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
1446 					     struct shrink_control *sc)
1447 {
1448 	return vfs_pressure_ratio(atomic_read(&lru_count));
1449 }
1450 
1451 static struct shrinker glock_shrinker = {
1452 	.seeks = DEFAULT_SEEKS,
1453 	.count_objects = gfs2_glock_shrink_count,
1454 	.scan_objects = gfs2_glock_shrink_scan,
1455 };
1456 
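/*
 * Editorial note (illustrative, not part of the original file): this
 * shrinker is registered from gfs2_glock_init() below.  Under memory
 * pressure the VM first calls gfs2_glock_shrink_count() for an
 * estimate based on lru_count, then gfs2_glock_shrink_scan(), which
 * refuses non-__GFP_FS allocations with SHRINK_STOP to avoid
 * recursing into the filesystem and otherwise trims the LRU via
 * gfs2_scan_glock_lru().
 */
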
1457 /**
1458  * examine_bucket - Call a function for each glock in a hash bucket
1459  * @examiner: the function
1460  * @sdp: the filesystem
1461  * @bucket: the bucket
1462  *
1463  */
1464 
1465 static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
1466 			  unsigned int hash)
1467 {
1468 	struct gfs2_glock *gl;
1469 	struct hlist_bl_head *head = &gl_hash_table[hash];
1470 	struct hlist_bl_node *pos;
1471 
1472 	rcu_read_lock();
1473 	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
1474 		if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
1475 			examiner(gl);
1476 	}
1477 	rcu_read_unlock();
1478 	cond_resched();
1479 }
1480 
1481 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1482 {
1483 	unsigned x;
1484 
1485 	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1486 		examine_bucket(examiner, sdp, x);
1487 }
1488 
1489 
1490 /**
1491  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1492  * @gl: The glock to thaw
1493  *
1494  */
1495 
1496 static void thaw_glock(struct gfs2_glock *gl)
1497 {
1498 	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1499 		goto out;
1500 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1501 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
1502 out:
1503 		gfs2_glock_put(gl);
1504 	}
1505 }
1506 
1507 /**
1508  * clear_glock - look at a glock and see if we can free it from glock cache
1509  * @gl: the glock to look at
1510  *
1511  */
1512 
1513 static void clear_glock(struct gfs2_glock *gl)
1514 {
1515 	gfs2_glock_remove_from_lru(gl);
1516 
1517 	spin_lock(&gl->gl_spin);
1518 	if (gl->gl_state != LM_ST_UNLOCKED)
1519 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1520 	spin_unlock(&gl->gl_spin);
1521 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1522 		gfs2_glock_put(gl);
1523 }
1524 
1525 /**
1526  * gfs2_glock_thaw - Thaw any frozen glocks
1527  * @sdp: The super block
1528  *
1529  */
1530 
1531 void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1532 {
1533 	glock_hash_walk(thaw_glock, sdp);
1534 }
1535 
1536 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1537 {
1538 	spin_lock(&gl->gl_spin);
1539 	gfs2_dump_glock(seq, gl);
1540 	spin_unlock(&gl->gl_spin);
1541 }
1542 
1543 static void dump_glock_func(struct gfs2_glock *gl)
1544 {
1545 	dump_glock(NULL, gl);
1546 }
1547 
1548 /**
1549  * gfs2_gl_hash_clear - Empty out the glock hash table
1550  * @sdp: the filesystem
1551  * @wait: wait until it's all gone
1552  *
1553  * Called when unmounting the filesystem.
1554  */
1555 
1556 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1557 {
1558 	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
1559 	flush_workqueue(glock_workqueue);
1560 	glock_hash_walk(clear_glock, sdp);
1561 	flush_workqueue(glock_workqueue);
1562 	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
1563 	glock_hash_walk(dump_glock_func, sdp);
1564 }
1565 
1566 void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1567 {
1568 	struct gfs2_glock *gl = ip->i_gl;
1569 	int ret;
1570 
1571 	ret = gfs2_truncatei_resume(ip);
1572 	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
1573 
1574 	spin_lock(&gl->gl_spin);
1575 	clear_bit(GLF_LOCK, &gl->gl_flags);
1576 	run_queue(gl, 1);
1577 	spin_unlock(&gl->gl_spin);
1578 }
1579 
1580 static const char *state2str(unsigned state)
1581 {
1582 	switch(state) {
1583 	case LM_ST_UNLOCKED:
1584 		return "UN";
1585 	case LM_ST_SHARED:
1586 		return "SH";
1587 	case LM_ST_DEFERRED:
1588 		return "DF";
1589 	case LM_ST_EXCLUSIVE:
1590 		return "EX";
1591 	}
1592 	return "??";
1593 }
1594 
1595 static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1596 {
1597 	char *p = buf;
1598 	if (flags & LM_FLAG_TRY)
1599 		*p++ = 't';
1600 	if (flags & LM_FLAG_TRY_1CB)
1601 		*p++ = 'T';
1602 	if (flags & LM_FLAG_NOEXP)
1603 		*p++ = 'e';
1604 	if (flags & LM_FLAG_ANY)
1605 		*p++ = 'A';
1606 	if (flags & LM_FLAG_PRIORITY)
1607 		*p++ = 'p';
1608 	if (flags & GL_ASYNC)
1609 		*p++ = 'a';
1610 	if (flags & GL_EXACT)
1611 		*p++ = 'E';
1612 	if (flags & GL_NOCACHE)
1613 		*p++ = 'c';
1614 	if (test_bit(HIF_HOLDER, &iflags))
1615 		*p++ = 'H';
1616 	if (test_bit(HIF_WAIT, &iflags))
1617 		*p++ = 'W';
1618 	if (test_bit(HIF_FIRST, &iflags))
1619 		*p++ = 'F';
1620 	*p = 0;
1621 	return buf;
1622 }
1623 
1624 /**
1625  * dump_holder - print information about a glock holder
1626  * @seq: the seq_file struct
1627  * @gh: the glock holder
1628  *
1629  */
1630 
1631 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1632 {
1633 	struct task_struct *gh_owner = NULL;
1634 	char flags_buf[32];
1635 
1636 	rcu_read_lock();
1637 	if (gh->gh_owner_pid)
1638 		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1639 	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1640 		       state2str(gh->gh_state),
1641 		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1642 		       gh->gh_error,
1643 		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1644 		       gh_owner ? gh_owner->comm : "(ended)",
1645 		       (void *)gh->gh_ip);
1646 	rcu_read_unlock();
1647 }
1648 
1649 static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
1650 {
1651 	const unsigned long *gflags = &gl->gl_flags;
1652 	char *p = buf;
1653 
1654 	if (test_bit(GLF_LOCK, gflags))
1655 		*p++ = 'l';
1656 	if (test_bit(GLF_DEMOTE, gflags))
1657 		*p++ = 'D';
1658 	if (test_bit(GLF_PENDING_DEMOTE, gflags))
1659 		*p++ = 'd';
1660 	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1661 		*p++ = 'p';
1662 	if (test_bit(GLF_DIRTY, gflags))
1663 		*p++ = 'y';
1664 	if (test_bit(GLF_LFLUSH, gflags))
1665 		*p++ = 'f';
1666 	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1667 		*p++ = 'i';
1668 	if (test_bit(GLF_REPLY_PENDING, gflags))
1669 		*p++ = 'r';
1670 	if (test_bit(GLF_INITIAL, gflags))
1671 		*p++ = 'I';
1672 	if (test_bit(GLF_FROZEN, gflags))
1673 		*p++ = 'F';
1674 	if (test_bit(GLF_QUEUED, gflags))
1675 		*p++ = 'q';
1676 	if (test_bit(GLF_LRU, gflags))
1677 		*p++ = 'L';
1678 	if (gl->gl_object)
1679 		*p++ = 'o';
1680 	if (test_bit(GLF_BLOCKING, gflags))
1681 		*p++ = 'b';
1682 	*p = 0;
1683 	return buf;
1684 }
1685 
1686 /**
1687  * gfs2_dump_glock - print information about a glock
1688  * @seq: The seq_file struct
1689  * @gl: the glock
1690  *
1691  * The file format is as follows:
1692  * One line per object, capital letters are used to indicate objects
1693  * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1694  * other objects are indented by a single space and follow the glock to
1695  * which they are related. Fields are indicated by lower case letters
1696  * followed by a colon and the field value, except for strings which are in
1697  * [] so that it's possible to see if they are composed of spaces, for
1698  * example. The fields are n = number (id of the object), f = flags,
1699  * t = type, s = state, r = refcount, e = error, p = pid.
1700  *
1701  */
1702 
1703 void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
1704 {
1705 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1706 	unsigned long long dtime;
1707 	const struct gfs2_holder *gh;
1708 	char gflags_buf[32];
1709 
1710 	dtime = jiffies - gl->gl_demote_time;
1711 	dtime *= 1000000/HZ; /* demote time in uSec */
1712 	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1713 		dtime = 0;
1714 	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
1715 		  state2str(gl->gl_state),
1716 		  gl->gl_name.ln_type,
1717 		  (unsigned long long)gl->gl_name.ln_number,
1718 		  gflags2str(gflags_buf, gl),
1719 		  state2str(gl->gl_target),
1720 		  state2str(gl->gl_demote_state), dtime,
1721 		  atomic_read(&gl->gl_ail_count),
1722 		  atomic_read(&gl->gl_revokes),
1723 		  (int)gl->gl_lockref.count, gl->gl_hold_time);
1724 
1725 	list_for_each_entry(gh, &gl->gl_holders, gh_list)
1726 		dump_holder(seq, gh);
1727 
1728 	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1729 		glops->go_dump(seq, gl);
1730 }
1731 
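/*
 * Editorial example (hypothetical output, not from a real system) of
 * the format produced above, as it appears in the debugfs "glocks"
 * file:
 *
 *	G:  s:SH n:2/805f f:lI t:SH d:EX/0 a:0 v:0 r:3 m:200
 *	 H: s:SH f:H e:0 p:1234 [cat] gfs2_inode_lookup+0x.../0x...
 *
 * i.e. a shared inode glock (type 2) on block 0x805f with one
 * granted holder owned by pid 1234.
 */
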
1732 static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1733 {
1734 	struct gfs2_glock *gl = iter_ptr;
1735 
1736 	seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
1737 		   gl->gl_name.ln_type,
1738 		   (unsigned long long)gl->gl_name.ln_number,
1739 		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1740 		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1741 		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1742 		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1743 		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1744 		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1745 		   (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1746 		   (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
1747 	return 0;
1748 }
1749 
1750 static const char *gfs2_gltype[] = {
1751 	"type",
1752 	"reserved",
1753 	"nondisk",
1754 	"inode",
1755 	"rgrp",
1756 	"meta",
1757 	"iopen",
1758 	"flock",
1759 	"plock",
1760 	"quota",
1761 	"journal",
1762 };
1763 
1764 static const char *gfs2_stype[] = {
1765 	[GFS2_LKS_SRTT]		= "srtt",
1766 	[GFS2_LKS_SRTTVAR]	= "srttvar",
1767 	[GFS2_LKS_SRTTB]	= "srttb",
1768 	[GFS2_LKS_SRTTVARB]	= "srttvarb",
1769 	[GFS2_LKS_SIRT]		= "sirt",
1770 	[GFS2_LKS_SIRTVAR]	= "sirtvar",
1771 	[GFS2_LKS_DCOUNT]	= "dlm",
1772 	[GFS2_LKS_QCOUNT]	= "queue",
1773 };
1774 
1775 #define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
1776 
1777 static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1778 {
1779 	struct gfs2_glock_iter *gi = seq->private;
1780 	struct gfs2_sbd *sdp = gi->sdp;
1781 	unsigned index = gi->hash >> 3;
1782 	unsigned subindex = gi->hash & 0x07;
1783 	s64 value;
1784 	int i;
1785 
1786 	if (index == 0 && subindex != 0)
1787 		return 0;
1788 
1789 	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
1790 		   (index == 0) ? "cpu": gfs2_stype[subindex]);
1791 
1792 	for_each_possible_cpu(i) {
1793 		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
1794 		if (index == 0) {
1795 			value = i;
1796 		} else {
1797 			value = lkstats->lkstats[index - 1].stats[subindex];
1798 		}
1799 		seq_printf(seq, " %15lld", (long long)value);
1800 	}
1801 	seq_putc(seq, '\n');
1802 	return 0;
1803 }
1804 
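/*
 * Editorial example (hypothetical output, not from a real system) of
 * the table shape produced above: one header row of cpu numbers,
 * then one row per (glock type, statistic) pair with a column per
 * possible cpu:
 *
 *	type       cpu:               0               1
 *	inode     srtt:            1203             980
 *	inode  srttvar:             ...             ...
 */
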
1805 int __init gfs2_glock_init(void)
1806 {
1807 	unsigned i;
1808 	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
1809 		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
1810 	}
1811 
1812 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1813 					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
1814 	if (!glock_workqueue)
1815 		return -ENOMEM;
1816 	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1817 						WQ_MEM_RECLAIM | WQ_FREEZABLE,
1818 						0);
1819 	if (!gfs2_delete_workqueue) {
1820 		destroy_workqueue(glock_workqueue);
1821 		return -ENOMEM;
1822 	}
1823 
1824 	register_shrinker(&glock_shrinker);
1825 
1826 	return 0;
1827 }
1828 
1829 void gfs2_glock_exit(void)
1830 {
1831 	unregister_shrinker(&glock_shrinker);
1832 	destroy_workqueue(glock_workqueue);
1833 	destroy_workqueue(gfs2_delete_workqueue);
1834 }
1835 
1836 static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
1837 {
1838 	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
1839 			      struct gfs2_glock, gl_list);
1840 }
1841 
1842 static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
1843 {
1844 	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
1845 			      struct gfs2_glock, gl_list);
1846 }
1847 
1848 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
1849 {
1850 	struct gfs2_glock *gl;
1851 
1852 	do {
1853 		gl = gi->gl;
1854 		if (gl) {
1855 			gi->gl = glock_hash_next(gl);
1856 			gi->nhash++;
1857 		} else {
1858 			if (gi->hash >= GFS2_GL_HASH_SIZE) {
1859 				rcu_read_unlock();
1860 				return 1;
1861 			}
1862 			gi->gl = glock_hash_chain(gi->hash);
1863 			gi->nhash = 0;
1864 		}
1865 		while (gi->gl == NULL) {
1866 			gi->hash++;
1867 			if (gi->hash >= GFS2_GL_HASH_SIZE) {
1868 				rcu_read_unlock();
1869 				return 1;
1870 			}
1871 			gi->gl = glock_hash_chain(gi->hash);
1872 			gi->nhash = 0;
1873 		}
1874 	/* Skip entries for other superblocks and dead entries */
1875 	} while (gi->sdp != gi->gl->gl_sbd ||
1876 		 __lockref_is_dead(&gi->gl->gl_lockref));
1877 
1878 	return 0;
1879 }
1880 
1881 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1882 {
1883 	struct gfs2_glock_iter *gi = seq->private;
1884 	loff_t n = *pos;
1885 
1886 	if (gi->last_pos <= *pos)
1887 		n = gi->nhash + (*pos - gi->last_pos);
1888 	else
1889 		gi->hash = 0;
1890 
1891 	gi->nhash = 0;
1892 	rcu_read_lock();
1893 
1894 	do {
1895 		if (gfs2_glock_iter_next(gi))
1896 			return NULL;
1897 	} while (n--);
1898 
1899 	gi->last_pos = *pos;
1900 	return gi->gl;
1901 }
1902 
1903 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1904 				 loff_t *pos)
1905 {
1906 	struct gfs2_glock_iter *gi = seq->private;
1907 
1908 	(*pos)++;
1909 	gi->last_pos = *pos;
1910 	if (gfs2_glock_iter_next(gi))
1911 		return NULL;
1912 
1913 	return gi->gl;
1914 }
1915 
1916 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
1917 {
1918 	struct gfs2_glock_iter *gi = seq->private;
1919 
1920 	if (gi->gl)
1921 		rcu_read_unlock();
1922 	gi->gl = NULL;
1923 }
1924 
1925 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
1926 {
1927 	dump_glock(seq, iter_ptr);
1928 	return 0;
1929 }
1930 
1931 static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
1932 {
1933 	struct gfs2_glock_iter *gi = seq->private;
1934 
1935 	gi->hash = *pos;
1936 	if (*pos >= GFS2_NR_SBSTATS)
1937 		return NULL;
1938 	preempt_disable();
1939 	return SEQ_START_TOKEN;
1940 }
1941 
1942 static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
1943 				   loff_t *pos)
1944 {
1945 	struct gfs2_glock_iter *gi = seq->private;
1946 	(*pos)++;
1947 	gi->hash++;
1948 	if (gi->hash >= GFS2_NR_SBSTATS) {
1949 		preempt_enable();
1950 		return NULL;
1951 	}
1952 	return SEQ_START_TOKEN;
1953 }
1954 
1955 static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
1956 {
1957 	preempt_enable();
1958 }
1959 
1960 static const struct seq_operations gfs2_glock_seq_ops = {
1961 	.start = gfs2_glock_seq_start,
1962 	.next  = gfs2_glock_seq_next,
1963 	.stop  = gfs2_glock_seq_stop,
1964 	.show  = gfs2_glock_seq_show,
1965 };
1966 
1967 static const struct seq_operations gfs2_glstats_seq_ops = {
1968 	.start = gfs2_glock_seq_start,
1969 	.next  = gfs2_glock_seq_next,
1970 	.stop  = gfs2_glock_seq_stop,
1971 	.show  = gfs2_glstats_seq_show,
1972 };
1973 
1974 static const struct seq_operations gfs2_sbstats_seq_ops = {
1975 	.start = gfs2_sbstats_seq_start,
1976 	.next  = gfs2_sbstats_seq_next,
1977 	.stop  = gfs2_sbstats_seq_stop,
1978 	.show  = gfs2_sbstats_seq_show,
1979 };
1980 
1981 #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
1982 
1983 static int gfs2_glocks_open(struct inode *inode, struct file *file)
1984 {
1985 	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1986 				   sizeof(struct gfs2_glock_iter));
1987 	if (ret == 0) {
1988 		struct seq_file *seq = file->private_data;
1989 		struct gfs2_glock_iter *gi = seq->private;
1990 		gi->sdp = inode->i_private;
1991 		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
1992 		if (seq->buf)
1993 			seq->size = GFS2_SEQ_GOODSIZE;
1994 	}
1995 	return ret;
1996 }
1997 
1998 static int gfs2_glstats_open(struct inode *inode, struct file *file)
1999 {
2000 	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
2001 				   sizeof(struct gfs2_glock_iter));
2002 	if (ret == 0) {
2003 		struct seq_file *seq = file->private_data;
2004 		struct gfs2_glock_iter *gi = seq->private;
2005 		gi->sdp = inode->i_private;
2006 		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
2007 		if (seq->buf)
2008 			seq->size = GFS2_SEQ_GOODSIZE;
2009 	}
2010 	return ret;
2011 }
2012 
2013 static int gfs2_sbstats_open(struct inode *inode, struct file *file)
2014 {
2015 	int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
2016 				   sizeof(struct gfs2_glock_iter));
2017 	if (ret == 0) {
2018 		struct seq_file *seq = file->private_data;
2019 		struct gfs2_glock_iter *gi = seq->private;
2020 		gi->sdp = inode->i_private;
2021 	}
2022 	return ret;
2023 }
2024 
2025 static const struct file_operations gfs2_glocks_fops = {
2026 	.owner   = THIS_MODULE,
2027 	.open    = gfs2_glocks_open,
2028 	.read    = seq_read,
2029 	.llseek  = seq_lseek,
2030 	.release = seq_release_private,
2031 };
2032 
2033 static const struct file_operations gfs2_glstats_fops = {
2034 	.owner   = THIS_MODULE,
2035 	.open    = gfs2_glstats_open,
2036 	.read    = seq_read,
2037 	.llseek  = seq_lseek,
2038 	.release = seq_release_private,
2039 };
2040 
2041 static const struct file_operations gfs2_sbstats_fops = {
2042 	.owner   = THIS_MODULE,
2043 	.open	 = gfs2_sbstats_open,
2044 	.read    = seq_read,
2045 	.llseek  = seq_lseek,
2046 	.release = seq_release_private,
2047 };
2048 
2049 int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2050 {
2051 	struct dentry *dent;
2052 
2053 	dent = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2054 	if (IS_ERR_OR_NULL(dent))
2055 		goto fail;
2056 	sdp->debugfs_dir = dent;
2057 
2058 	dent = debugfs_create_file("glocks",
2059 				   S_IFREG | S_IRUGO,
2060 				   sdp->debugfs_dir, sdp,
2061 				   &gfs2_glocks_fops);
2062 	if (IS_ERR_OR_NULL(dent))
2063 		goto fail;
2064 	sdp->debugfs_dentry_glocks = dent;
2065 
2066 	dent = debugfs_create_file("glstats",
2067 				   S_IFREG | S_IRUGO,
2068 				   sdp->debugfs_dir, sdp,
2069 				   &gfs2_glstats_fops);
2070 	if (IS_ERR_OR_NULL(dent))
2071 		goto fail;
2072 	sdp->debugfs_dentry_glstats = dent;
2073 
2074 	dent = debugfs_create_file("sbstats",
2075 				   S_IFREG | S_IRUGO,
2076 				   sdp->debugfs_dir, sdp,
2077 				   &gfs2_sbstats_fops);
2078 	if (IS_ERR_OR_NULL(dent))
2079 		goto fail;
2080 	sdp->debugfs_dentry_sbstats = dent;
2081 
2082 	return 0;
2083 fail:
2084 	gfs2_delete_debugfs_file(sdp);
2085 	return dent ? PTR_ERR(dent) : -ENOMEM;
2086 }
2087 
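/*
 * Editorial note (illustrative, not part of the original file): with
 * debugfs mounted in its usual location, the files created above
 * appear as
 *
 *	/sys/kernel/debug/gfs2/<table_name>/glocks
 *	/sys/kernel/debug/gfs2/<table_name>/glstats
 *	/sys/kernel/debug/gfs2/<table_name>/sbstats
 *
 * each backed by the corresponding seq_file implementation above.
 */
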
2088 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2089 {
2090 	if (sdp->debugfs_dir) {
2091 		if (sdp->debugfs_dentry_glocks) {
2092 			debugfs_remove(sdp->debugfs_dentry_glocks);
2093 			sdp->debugfs_dentry_glocks = NULL;
2094 		}
2095 		if (sdp->debugfs_dentry_glstats) {
2096 			debugfs_remove(sdp->debugfs_dentry_glstats);
2097 			sdp->debugfs_dentry_glstats = NULL;
2098 		}
2099 		if (sdp->debugfs_dentry_sbstats) {
2100 			debugfs_remove(sdp->debugfs_dentry_sbstats);
2101 			sdp->debugfs_dentry_sbstats = NULL;
2102 		}
2103 		debugfs_remove(sdp->debugfs_dir);
2104 		sdp->debugfs_dir = NULL;
2105 	}
2106 }
2107 
2108 int gfs2_register_debugfs(void)
2109 {
2110 	gfs2_root = debugfs_create_dir("gfs2", NULL);
2111 	if (IS_ERR(gfs2_root))
2112 		return PTR_ERR(gfs2_root);
2113 	return gfs2_root ? 0 : -ENOMEM;
2114 }
2115 
2116 void gfs2_unregister_debugfs(void)
2117 {
2118 	debugfs_remove(gfs2_root);
2119 	gfs2_root = NULL;
2120 }
2121