xref: /openbmc/linux/fs/gfs2/glock.c (revision 2874c5fd)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/spinlock.h>
15 #include <linux/buffer_head.h>
16 #include <linux/delay.h>
17 #include <linux/sort.h>
18 #include <linux/hash.h>
19 #include <linux/jhash.h>
20 #include <linux/kallsyms.h>
21 #include <linux/gfs2_ondisk.h>
22 #include <linux/list.h>
23 #include <linux/wait.h>
24 #include <linux/module.h>
25 #include <linux/uaccess.h>
26 #include <linux/seq_file.h>
27 #include <linux/debugfs.h>
28 #include <linux/kthread.h>
29 #include <linux/freezer.h>
30 #include <linux/workqueue.h>
31 #include <linux/jiffies.h>
32 #include <linux/rcupdate.h>
33 #include <linux/rculist_bl.h>
34 #include <linux/bit_spinlock.h>
35 #include <linux/percpu.h>
36 #include <linux/list_sort.h>
37 #include <linux/lockref.h>
38 #include <linux/rhashtable.h>
39 
40 #include "gfs2.h"
41 #include "incore.h"
42 #include "glock.h"
43 #include "glops.h"
44 #include "inode.h"
45 #include "lops.h"
46 #include "meta_io.h"
47 #include "quota.h"
48 #include "super.h"
49 #include "util.h"
50 #include "bmap.h"
51 #define CREATE_TRACE_POINTS
52 #include "trace_gfs2.h"
53 
54 struct gfs2_glock_iter {
55 	struct gfs2_sbd *sdp;		/* incore superblock           */
56 	struct rhashtable_iter hti;	/* rhashtable iterator         */
57 	struct gfs2_glock *gl;		/* current glock struct        */
58 	loff_t last_pos;		/* last position               */
59 };
60 
61 typedef void (*glock_examiner) (struct gfs2_glock * gl);
62 
63 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
64 
65 static struct dentry *gfs2_root;
66 static struct workqueue_struct *glock_workqueue;
67 struct workqueue_struct *gfs2_delete_workqueue;
68 static LIST_HEAD(lru_list);
69 static atomic_t lru_count = ATOMIC_INIT(0);
70 static DEFINE_SPINLOCK(lru_lock);
71 
72 #define GFS2_GL_HASH_SHIFT      15
73 #define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)
74 
75 static const struct rhashtable_params ht_parms = {
76 	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
77 	.key_len = offsetofend(struct lm_lockname, ln_type),
78 	.key_offset = offsetof(struct gfs2_glock, gl_name),
79 	.head_offset = offsetof(struct gfs2_glock, gl_node),
80 };
81 
82 static struct rhashtable gl_hash_table;
83 
84 #define GLOCK_WAIT_TABLE_BITS 12
85 #define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
86 static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;
87 
88 struct wait_glock_queue {
89 	struct lm_lockname *name;
90 	wait_queue_entry_t wait;
91 };
92 
93 static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
94 			       int sync, void *key)
95 {
96 	struct wait_glock_queue *wait_glock =
97 		container_of(wait, struct wait_glock_queue, wait);
98 	struct lm_lockname *wait_name = wait_glock->name;
99 	struct lm_lockname *wake_name = key;
100 
101 	if (wake_name->ln_sbd != wait_name->ln_sbd ||
102 	    wake_name->ln_number != wait_name->ln_number ||
103 	    wake_name->ln_type != wait_name->ln_type)
104 		return 0;
105 	return autoremove_wake_function(wait, mode, sync, key);
106 }
107 
108 static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
109 {
110 	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
111 
112 	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
113 }
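/*
 * A dying glock is removed from the hash table before its memory is
 * freed, so a lookup can race with the final teardown and find a glock
 * whose lockref is already dead.  Rather than embedding a wait queue in
 * every glock, such lookups sleep on one of the shared wait queues
 * above, selected by hashing the lock name; glock_wake_function()
 * re-checks the name, so a hash collision only costs a spurious wakeup.
 * See find_insert_glock() and gfs2_glock_free().
 */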
114 
115 /**
116  * wake_up_glock - Wake up waiters on a glock
117  * @gl: the glock
118  */
119 static void wake_up_glock(struct gfs2_glock *gl)
120 {
121 	wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);
122 
123 	if (waitqueue_active(wq))
124 		__wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);
125 }
126 
127 static void gfs2_glock_dealloc(struct rcu_head *rcu)
128 {
129 	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
130 
131 	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
132 		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
133 	} else {
134 		kfree(gl->gl_lksb.sb_lvbptr);
135 		kmem_cache_free(gfs2_glock_cachep, gl);
136 	}
137 }
138 
139 void gfs2_glock_free(struct gfs2_glock *gl)
140 {
141 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
142 
143 	BUG_ON(test_bit(GLF_REVOKES, &gl->gl_flags));
144 	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
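	/*
	 * Make sure the removal above is visible before wake_up_glock()
	 * tests waitqueue_active(); this appears to pair with the barrier
	 * in prepare_to_wait() on the find_insert_glock() side.
	 */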
145 	smp_mb();
146 	wake_up_glock(gl);
147 	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
148 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
149 		wake_up(&sdp->sd_glock_wait);
150 }
151 
152 /**
153  * gfs2_glock_hold() - increment reference count on glock
154  * @gl: The glock to hold
155  *
156  */
157 
158 void gfs2_glock_hold(struct gfs2_glock *gl)
159 {
160 	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
161 	lockref_get(&gl->gl_lockref);
162 }
163 
164 /**
165  * demote_ok - Check to see if it's ok to unlock a glock
166  * @gl: the glock
167  *
168  * Returns: 1 if it's ok
169  */
170 
171 static int demote_ok(const struct gfs2_glock *gl)
172 {
173 	const struct gfs2_glock_operations *glops = gl->gl_ops;
174 
175 	if (gl->gl_state == LM_ST_UNLOCKED)
176 		return 0;
177 	if (!list_empty(&gl->gl_holders))
178 		return 0;
179 	if (glops->go_demote_ok)
180 		return glops->go_demote_ok(gl);
181 	return 1;
182 }
183 
185 void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
186 {
187 	if (!(gl->gl_ops->go_flags & GLOF_LRU))
188 		return;
189 
190 	spin_lock(&lru_lock);
191 
192 	list_del(&gl->gl_lru);
193 	list_add_tail(&gl->gl_lru, &lru_list);
194 
195 	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
196 		set_bit(GLF_LRU, &gl->gl_flags);
197 		atomic_inc(&lru_count);
198 	}
199 
200 	spin_unlock(&lru_lock);
201 }
202 
203 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
204 {
205 	if (!(gl->gl_ops->go_flags & GLOF_LRU))
206 		return;
207 
208 	spin_lock(&lru_lock);
209 	if (test_bit(GLF_LRU, &gl->gl_flags)) {
210 		list_del_init(&gl->gl_lru);
211 		atomic_dec(&lru_count);
212 		clear_bit(GLF_LRU, &gl->gl_flags);
213 	}
214 	spin_unlock(&lru_lock);
215 }
216 
217 /*
218  * Enqueue the glock on the work queue.  Passes one glock reference on to the
219  * work queue.
220  */
221 static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
{
222 	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
223 		/*
224 		 * We are holding the lockref spinlock, and the work was still
225 		 * queued above.  The queued work (glock_work_func) takes that
226 		 * spinlock before dropping its glock reference(s), so it
227 		 * cannot have dropped them in the meantime.
228 		 */
229 		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
230 		gl->gl_lockref.count--;
231 	}
232 }
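/*
 * Illustrative caller pattern (as used further down in this file, for
 * example in gfs2_glock_nq() and gfs2_glock_complete()): take an extra
 * reference under the lockref spinlock and hand it to the work:
 *
 *	spin_lock(&gl->gl_lockref.lock);
 *	gl->gl_lockref.count++;
 *	__gfs2_glock_queue_work(gl, 0);
 *	spin_unlock(&gl->gl_lockref.lock);
 */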
233 
234 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
{
235 	spin_lock(&gl->gl_lockref.lock);
236 	__gfs2_glock_queue_work(gl, delay);
237 	spin_unlock(&gl->gl_lockref.lock);
238 }
239 
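/*
 * __gfs2_glock_put - final teardown of a glock
 *
 * Called with gl->gl_lockref.lock held, once the reference count has
 * reached zero; marks the lockref dead, drops the spinlock, and hands
 * the glock back to the lock module via lm_put_lock().
 */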
240 static void __gfs2_glock_put(struct gfs2_glock *gl)
241 {
242 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
243 	struct address_space *mapping = gfs2_glock2aspace(gl);
244 
245 	lockref_mark_dead(&gl->gl_lockref);
246 
247 	gfs2_glock_remove_from_lru(gl);
248 	spin_unlock(&gl->gl_lockref.lock);
249 	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
250 	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
251 	trace_gfs2_glock_put(gl);
252 	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
253 }
254 
255 /*
256  * Cause the glock to be put in work queue context.
257  */
258 void gfs2_glock_queue_put(struct gfs2_glock *gl)
259 {
260 	gfs2_glock_queue_work(gl, 0);
261 }
262 
263 /**
264  * gfs2_glock_put() - Decrement reference count on glock
265  * @gl: The glock to put
266  *
267  */
268 
269 void gfs2_glock_put(struct gfs2_glock *gl)
270 {
271 	if (lockref_put_or_lock(&gl->gl_lockref))
272 		return;
273 
274 	__gfs2_glock_put(gl);
275 }
276 
277 /**
278  * may_grant - check if it's ok to grant a new lock
279  * @gl: The glock
280  * @gh: The lock request which we wish to grant
281  *
282  * Returns: true if it's ok to grant the lock
283  */
284 
285 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
286 {
287 	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
288 	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
289 	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
290 		return 0;
291 	if (gl->gl_state == gh->gh_state)
292 		return 1;
293 	if (gh->gh_flags & GL_EXACT)
294 		return 0;
295 	if (gl->gl_state == LM_ST_EXCLUSIVE) {
296 		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
297 			return 1;
298 		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
299 			return 1;
300 	}
301 	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
302 		return 1;
303 	return 0;
304 }
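/*
 * Paraphrasing the checks above (a summary, not normative): EX
 * requests, and any request queued behind a head-of-queue EX request,
 * can only be granted at the head of the queue; a request for the
 * glock's current state is compatible; GL_EXACT rejects everything
 * else; under an EX glock, SH or DF requests are compatible when the
 * queue head asks for the same mode; and LM_FLAG_ANY accepts any
 * locked state.
 */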
305 
306 static void gfs2_holder_wake(struct gfs2_holder *gh)
307 {
308 	clear_bit(HIF_WAIT, &gh->gh_iflags);
309 	smp_mb__after_atomic();
310 	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
311 }
312 
313 /**
314  * do_error - Something unexpected has happened during a lock request
315  * @gl: the glock
 * @ret: the lock module status; zero fails only the queued "try" locks
316  */
317 
318 static void do_error(struct gfs2_glock *gl, const int ret)
319 {
320 	struct gfs2_holder *gh, *tmp;
321 
322 	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
323 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
324 			continue;
325 		if (ret & LM_OUT_ERROR)
326 			gh->gh_error = -EIO;
327 		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
328 			gh->gh_error = GLR_TRYFAILED;
329 		else
330 			continue;
331 		list_del_init(&gh->gh_list);
332 		trace_gfs2_glock_queue(gh, 0);
333 		gfs2_holder_wake(gh);
334 	}
335 }
336 
337 /**
338  * do_promote - promote as many requests as possible on the current queue
339  * @gl: The glock
340  *
341  * Returns: 1 if there is a blocked holder at the head of the list, or 2
342  *          if a type specific operation is underway.
343  */
344 
345 static int do_promote(struct gfs2_glock *gl)
346 __releases(&gl->gl_lockref.lock)
347 __acquires(&gl->gl_lockref.lock)
348 {
349 	const struct gfs2_glock_operations *glops = gl->gl_ops;
350 	struct gfs2_holder *gh, *tmp;
351 	int ret;
352 
353 restart:
354 	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
355 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
356 			continue;
357 		if (may_grant(gl, gh)) {
358 			if (gh->gh_list.prev == &gl->gl_holders &&
359 			    glops->go_lock) {
360 				spin_unlock(&gl->gl_lockref.lock);
361 				/* FIXME: eliminate this eventually */
362 				ret = glops->go_lock(gh);
363 				spin_lock(&gl->gl_lockref.lock);
364 				if (ret) {
365 					if (ret == 1)
366 						return 2;
367 					gh->gh_error = ret;
368 					list_del_init(&gh->gh_list);
369 					trace_gfs2_glock_queue(gh, 0);
370 					gfs2_holder_wake(gh);
371 					goto restart;
372 				}
373 				set_bit(HIF_HOLDER, &gh->gh_iflags);
374 				trace_gfs2_promote(gh, 1);
375 				gfs2_holder_wake(gh);
376 				goto restart;
377 			}
378 			set_bit(HIF_HOLDER, &gh->gh_iflags);
379 			trace_gfs2_promote(gh, 0);
380 			gfs2_holder_wake(gh);
381 			continue;
382 		}
383 		if (gh->gh_list.prev == &gl->gl_holders)
384 			return 1;
385 		do_error(gl, 0);
386 		break;
387 	}
388 	return 0;
389 }
390 
391 /**
392  * find_first_waiter - find the first gh that's waiting for the glock
393  * @gl: the glock
394  */
395 
396 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
397 {
398 	struct gfs2_holder *gh;
399 
400 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
401 		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
402 			return gh;
403 	}
404 	return NULL;
405 }
406 
407 /**
408  * state_change - record that the glock is now in a different state
409  * @gl: the glock
410  * @new_state: the new state
411  *
412  */
413 
414 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
415 {
416 	int held1, held2;
417 
418 	held1 = (gl->gl_state != LM_ST_UNLOCKED);
419 	held2 = (new_state != LM_ST_UNLOCKED);
420 
421 	if (held1 != held2) {
422 		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
423 		if (held2)
424 			gl->gl_lockref.count++;
425 		else
426 			gl->gl_lockref.count--;
427 	}
428 	if (held1 && held2 && list_empty(&gl->gl_holders))
429 		clear_bit(GLF_QUEUED, &gl->gl_flags);
430 
431 	if (new_state != gl->gl_target)
432 		/* shorten our minimum hold time */
433 		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
434 				       GL_GLOCK_MIN_HOLD);
435 	gl->gl_state = new_state;
436 	gl->gl_tchange = jiffies;
437 }
438 
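/*
 * Resetting gl_demote_state to LM_ST_EXCLUSIVE below marks "no demote
 * pending": glocks are never demoted to the exclusive state, so that
 * value doubles as a sentinel (compare handle_callback() and the
 * GLOCK_BUG_ON() in run_queue()).
 */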
439 static void gfs2_demote_wake(struct gfs2_glock *gl)
440 {
441 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
442 	clear_bit(GLF_DEMOTE, &gl->gl_flags);
443 	smp_mb__after_atomic();
444 	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
445 }
446 
447 /**
448  * finish_xmote - The DLM has replied to one of our lock requests
449  * @gl: The glock
450  * @ret: The status from the DLM
451  *
452  */
453 
454 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
455 {
456 	const struct gfs2_glock_operations *glops = gl->gl_ops;
457 	struct gfs2_holder *gh;
458 	unsigned state = ret & LM_OUT_ST_MASK;
459 	int rv;
460 
461 	spin_lock(&gl->gl_lockref.lock);
462 	trace_gfs2_glock_state_change(gl, state);
463 	state_change(gl, state);
464 	gh = find_first_waiter(gl);
465 
466 	/* Demote to UN request arrived during demote to SH or DF */
467 	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
468 	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
469 		gl->gl_target = LM_ST_UNLOCKED;
470 
471 	/* Check for state != intended state */
472 	if (unlikely(state != gl->gl_target)) {
473 		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
474 			/* move to back of queue and try next entry */
475 			if (ret & LM_OUT_CANCELED) {
476 				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
477 					list_move_tail(&gh->gh_list, &gl->gl_holders);
478 				gh = find_first_waiter(gl);
479 				gl->gl_target = gh->gh_state;
480 				goto retry;
481 			}
482 			/* Some error or failed "try lock" - report it */
483 			if ((ret & LM_OUT_ERROR) ||
484 			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
485 				gl->gl_target = gl->gl_state;
486 				do_error(gl, ret);
487 				goto out;
488 			}
489 		}
490 		switch (state) {
491 		/* Unlocked due to conversion deadlock, try again */
492 		case LM_ST_UNLOCKED:
493 retry:
494 			do_xmote(gl, gh, gl->gl_target);
495 			break;
496 		/* Conversion fails, unlock and try again */
497 		case LM_ST_SHARED:
498 		case LM_ST_DEFERRED:
499 			do_xmote(gl, gh, LM_ST_UNLOCKED);
500 			break;
501 		default: /* Everything else */
502 			fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n",
503 			       gl->gl_target, state);
504 			GLOCK_BUG_ON(gl, 1);
505 		}
506 		spin_unlock(&gl->gl_lockref.lock);
507 		return;
508 	}
509 
510 	/* Fast path - we got what we asked for */
511 	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
512 		gfs2_demote_wake(gl);
513 	if (state != LM_ST_UNLOCKED) {
514 		if (glops->go_xmote_bh) {
515 			spin_unlock(&gl->gl_lockref.lock);
516 			rv = glops->go_xmote_bh(gl, gh);
517 			spin_lock(&gl->gl_lockref.lock);
518 			if (rv) {
519 				do_error(gl, rv);
520 				goto out;
521 			}
522 		}
523 		rv = do_promote(gl);
524 		if (rv == 2)
525 			goto out_locked;
526 	}
527 out:
528 	clear_bit(GLF_LOCK, &gl->gl_flags);
529 out_locked:
530 	spin_unlock(&gl->gl_lockref.lock);
531 }
532 
533 /**
534  * do_xmote - Calls the DLM to change the state of a lock
535  * @gl: The lock state
536  * @gh: The holder (only for promotes)
537  * @target: The target lock state
538  *
539  */
540 
541 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
542 __releases(&gl->gl_lockref.lock)
543 __acquires(&gl->gl_lockref.lock)
544 {
545 	const struct gfs2_glock_operations *glops = gl->gl_ops;
546 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
547 	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
548 	int ret;
549 
550 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
551 	    target != LM_ST_UNLOCKED)
552 		return;
553 	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
554 		      LM_FLAG_PRIORITY);
555 	GLOCK_BUG_ON(gl, gl->gl_state == target);
556 	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
557 	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
558 	    glops->go_inval) {
559 		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
560 		do_error(gl, 0); /* Fail queued try locks */
561 	}
562 	gl->gl_req = target;
563 	set_bit(GLF_BLOCKING, &gl->gl_flags);
564 	if ((gl->gl_req == LM_ST_UNLOCKED) ||
565 	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
566 	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
567 		clear_bit(GLF_BLOCKING, &gl->gl_flags);
568 	spin_unlock(&gl->gl_lockref.lock);
569 	if (glops->go_sync)
570 		glops->go_sync(gl);
571 	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
572 		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
573 	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
574 
575 	gfs2_glock_hold(gl);
576 	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
577 		/* lock_dlm */
578 		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
579 		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
580 		    target == LM_ST_UNLOCKED &&
581 		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
582 			finish_xmote(gl, target);
583 			gfs2_glock_queue_work(gl, 0);
584 		}
585 		else if (ret) {
586 			fs_err(sdp, "lm_lock ret %d\n", ret);
587 			GLOCK_BUG_ON(gl, !test_bit(SDF_SHUTDOWN,
588 						   &sdp->sd_flags));
589 		}
590 	} else { /* lock_nolock */
591 		finish_xmote(gl, target);
592 		gfs2_glock_queue_work(gl, 0);
593 	}
594 
595 	spin_lock(&gl->gl_lockref.lock);
596 }
597 
598 /**
599  * find_first_holder - find the first "holder" gh
600  * @gl: the glock
601  */
602 
603 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
604 {
605 	struct gfs2_holder *gh;
606 
607 	if (!list_empty(&gl->gl_holders)) {
608 		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
609 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
610 			return gh;
611 	}
612 	return NULL;
613 }
614 
615 /**
616  * run_queue - do all outstanding tasks related to a glock
617  * @gl: The glock in question
618  * @nonblock: True if we must not block in run_queue
619  *
620  */
621 
622 static void run_queue(struct gfs2_glock *gl, const int nonblock)
623 __releases(&gl->gl_lockref.lock)
624 __acquires(&gl->gl_lockref.lock)
625 {
626 	struct gfs2_holder *gh = NULL;
627 	int ret;
628 
629 	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
630 		return;
631 
632 	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
633 
634 	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
635 	    gl->gl_demote_state != gl->gl_state) {
636 		if (find_first_holder(gl))
637 			goto out_unlock;
638 		if (nonblock)
639 			goto out_sched;
640 		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
641 		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
642 		gl->gl_target = gl->gl_demote_state;
643 	} else {
644 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
645 			gfs2_demote_wake(gl);
646 		ret = do_promote(gl);
647 		if (ret == 0)
648 			goto out_unlock;
649 		if (ret == 2)
650 			goto out;
651 		gh = find_first_waiter(gl);
652 		gl->gl_target = gh->gh_state;
653 		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
654 			do_error(gl, 0); /* Fail queued try locks */
655 	}
656 	do_xmote(gl, gh, gl->gl_target);
657 out:
658 	return;
659 
660 out_sched:
661 	clear_bit(GLF_LOCK, &gl->gl_flags);
662 	smp_mb__after_atomic();
663 	gl->gl_lockref.count++;
664 	__gfs2_glock_queue_work(gl, 0);
665 	return;
666 
667 out_unlock:
668 	clear_bit(GLF_LOCK, &gl->gl_flags);
669 	smp_mb__after_atomic();
670 	return;
671 }
672 
673 static void delete_work_func(struct work_struct *work)
674 {
675 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
676 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
677 	struct inode *inode;
678 	u64 no_addr = gl->gl_name.ln_number;
679 
680 	/* If someone's using this glock to create a new dinode, the block must
681 	   have been freed by another node, then re-used, in which case our
682 	   iopen callback is too late after the fact. Ignore it. */
683 	if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
684 		goto out;
685 
686 	inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
687 	if (inode && !IS_ERR(inode)) {
688 		d_prune_aliases(inode);
689 		iput(inode);
690 	}
691 out:
692 	gfs2_glock_put(gl);
693 }
694 
695 static void glock_work_func(struct work_struct *work)
696 {
697 	unsigned long delay = 0;
698 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
699 	unsigned int drop_refs = 1;
700 
701 	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
702 		finish_xmote(gl, gl->gl_reply);
703 		drop_refs++;
704 	}
705 	spin_lock(&gl->gl_lockref.lock);
706 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
707 	    gl->gl_state != LM_ST_UNLOCKED &&
708 	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
709 		unsigned long holdtime, now = jiffies;
710 
711 		holdtime = gl->gl_tchange + gl->gl_hold_time;
712 		if (time_before(now, holdtime))
713 			delay = holdtime - now;
714 
715 		if (!delay) {
716 			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
717 			set_bit(GLF_DEMOTE, &gl->gl_flags);
718 		}
719 	}
720 	run_queue(gl, 0);
721 	if (delay) {
722 		/* Keep one glock reference for the work we requeue. */
723 		drop_refs--;
724 		if (gl->gl_name.ln_type != LM_TYPE_INODE)
725 			delay = 0;
726 		__gfs2_glock_queue_work(gl, delay);
727 	}
728 
729 	/*
730 	 * Drop the remaining glock references manually here. (Mind that
731 	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
732 	 * here as well.)
733 	 */
734 	gl->gl_lockref.count -= drop_refs;
735 	if (!gl->gl_lockref.count) {
736 		__gfs2_glock_put(gl);
737 		return;
738 	}
739 	spin_unlock(&gl->gl_lockref.lock);
740 }
741 
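/*
 * find_insert_glock - look up a glock by name, optionally inserting @new
 *
 * If the glock that is found is being freed (its lockref is dead), wait
 * on the name-hashed wait queue until gfs2_glock_free() calls
 * wake_up_glock(), then retry the lookup.  Returns the existing glock
 * with a reference held, NULL if none was found (or if @new was
 * inserted), or an ERR_PTR from the rhashtable insert.
 */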
742 static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
743 					    struct gfs2_glock *new)
744 {
745 	struct wait_glock_queue wait;
746 	wait_queue_head_t *wq = glock_waitqueue(name);
747 	struct gfs2_glock *gl;
748 
749 	wait.name = name;
750 	init_wait(&wait.wait);
751 	wait.wait.func = glock_wake_function;
752 
753 again:
754 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
755 	rcu_read_lock();
756 	if (new) {
757 		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
758 			&new->gl_node, ht_parms);
759 		if (IS_ERR(gl))
760 			goto out;
761 	} else {
762 		gl = rhashtable_lookup_fast(&gl_hash_table,
763 			name, ht_parms);
764 	}
765 	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
766 		rcu_read_unlock();
767 		schedule();
768 		goto again;
769 	}
770 out:
771 	rcu_read_unlock();
772 	finish_wait(wq, &wait.wait);
773 	return gl;
774 }
775 
776 /**
777  * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
778  * @sdp: The GFS2 superblock
779  * @number: the lock number
780  * @glops: The glock_operations to use
781  * @create: If 0, don't create the glock if it doesn't exist
782  * @glp: the glock is returned here
783  *
784  * This does not lock a glock, just finds/creates structures for one.
785  *
786  * Returns: errno
787  */
788 
789 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
790 		   const struct gfs2_glock_operations *glops, int create,
791 		   struct gfs2_glock **glp)
792 {
793 	struct super_block *s = sdp->sd_vfs;
794 	struct lm_lockname name = { .ln_number = number,
795 				    .ln_type = glops->go_type,
796 				    .ln_sbd = sdp };
797 	struct gfs2_glock *gl, *tmp;
798 	struct address_space *mapping;
799 	struct kmem_cache *cachep;
800 	int ret = 0;
801 
802 	gl = find_insert_glock(&name, NULL);
803 	if (gl) {
804 		*glp = gl;
805 		return 0;
806 	}
807 	if (!create)
808 		return -ENOENT;
809 
810 	if (glops->go_flags & GLOF_ASPACE)
811 		cachep = gfs2_glock_aspace_cachep;
812 	else
813 		cachep = gfs2_glock_cachep;
814 	gl = kmem_cache_alloc(cachep, GFP_NOFS);
815 	if (!gl)
816 		return -ENOMEM;
817 
818 	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
819 
820 	if (glops->go_flags & GLOF_LVB) {
821 		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
822 		if (!gl->gl_lksb.sb_lvbptr) {
823 			kmem_cache_free(cachep, gl);
824 			return -ENOMEM;
825 		}
826 	}
827 
828 	atomic_inc(&sdp->sd_glock_disposal);
829 	gl->gl_node.next = NULL;
830 	gl->gl_flags = 0;
831 	gl->gl_name = name;
832 	gl->gl_lockref.count = 1;
833 	gl->gl_state = LM_ST_UNLOCKED;
834 	gl->gl_target = LM_ST_UNLOCKED;
835 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
836 	gl->gl_ops = glops;
837 	gl->gl_dstamp = 0;
838 	preempt_disable();
839 	/* We use the global stats to estimate the initial per-glock stats */
840 	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
841 	preempt_enable();
842 	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
843 	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
844 	gl->gl_tchange = jiffies;
845 	gl->gl_object = NULL;
846 	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
847 	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
848 	INIT_WORK(&gl->gl_delete, delete_work_func);
849 
850 	mapping = gfs2_glock2aspace(gl);
851 	if (mapping) {
852 		mapping->a_ops = &gfs2_meta_aops;
853 		mapping->host = s->s_bdev->bd_inode;
854 		mapping->flags = 0;
855 		mapping_set_gfp_mask(mapping, GFP_NOFS);
856 		mapping->private_data = NULL;
857 		mapping->writeback_index = 0;
858 	}
859 
860 	tmp = find_insert_glock(&name, gl);
861 	if (!tmp) {
862 		*glp = gl;
863 		goto out;
864 	}
865 	if (IS_ERR(tmp)) {
866 		ret = PTR_ERR(tmp);
867 		goto out_free;
868 	}
869 	*glp = tmp;
870 
871 out_free:
872 	kfree(gl->gl_lksb.sb_lvbptr);
873 	kmem_cache_free(cachep, gl);
874 	atomic_dec(&sdp->sd_glock_disposal);
875 
876 out:
877 	return ret;
878 }
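/*
 * Illustrative use only (real callers live elsewhere in gfs2; the glops
 * table, no_addr and the error handling below are placeholders):
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_put(gl);
 */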
879 
880 /**
881  * gfs2_holder_init - initialize a struct gfs2_holder in the default way
882  * @gl: the glock
883  * @state: the state we're requesting
884  * @flags: the modifier flags
885  * @gh: the holder structure
886  *
887  */
888 
889 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
890 		      struct gfs2_holder *gh)
891 {
892 	INIT_LIST_HEAD(&gh->gh_list);
893 	gh->gh_gl = gl;
894 	gh->gh_ip = _RET_IP_;
895 	gh->gh_owner_pid = get_pid(task_pid(current));
896 	gh->gh_state = state;
897 	gh->gh_flags = flags;
898 	gh->gh_error = 0;
899 	gh->gh_iflags = 0;
900 	gfs2_glock_hold(gl);
901 }
902 
903 /**
904  * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
905  * @state: the state we're requesting
906  * @flags: the modifier flags
907  * @gh: the holder structure
908  *
909  * Don't mess with the glock.
910  *
911  */
912 
913 void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
914 {
915 	gh->gh_state = state;
916 	gh->gh_flags = flags;
917 	gh->gh_iflags = 0;
918 	gh->gh_ip = _RET_IP_;
919 	put_pid(gh->gh_owner_pid);
920 	gh->gh_owner_pid = get_pid(task_pid(current));
921 }
922 
923 /**
924  * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
925  * @gh: the holder structure
926  *
927  */
928 
929 void gfs2_holder_uninit(struct gfs2_holder *gh)
930 {
931 	put_pid(gh->gh_owner_pid);
932 	gfs2_glock_put(gh->gh_gl);
933 	gfs2_holder_mark_uninitialized(gh);
934 	gh->gh_ip = 0;
935 }
936 
937 /**
938  * gfs2_glock_wait - wait on a glock acquisition
939  * @gh: the glock holder
940  *
941  * Returns: 0 on success
942  */
943 
944 int gfs2_glock_wait(struct gfs2_holder *gh)
945 {
946 	unsigned long time1 = jiffies;
947 
948 	might_sleep();
949 	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
950 	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
951 		/* Lengthen the minimum hold time. */
952 		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
953 					      GL_GLOCK_HOLD_INCR,
954 					      GL_GLOCK_MAX_HOLD);
955 	return gh->gh_error;
956 }
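/*
 * Note: together with state_change(), this forms a simple adaptive
 * scheme for gl_hold_time: waiting here for more than a second bumps
 * the minimum hold time up (capped at GL_GLOCK_MAX_HOLD), while ending
 * up in a state other than the requested target shortens it again
 * (floored at GL_GLOCK_MIN_HOLD).
 */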
957 
958 /**
959  * handle_callback - process a demote request
960  * @gl: the glock
961  * @state: the state the caller wants us to change to
 * @delay: if nonzero, mark the demote as pending (GLF_PENDING_DEMOTE)
 * @remote: true if the request came from another cluster node
962  *
963  * There are only two requests that we are going to see in actual
964  * practice: LM_ST_SHARED and LM_ST_UNLOCKED
965  */
966 
967 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
968 			    unsigned long delay, bool remote)
969 {
970 	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
971 
972 	set_bit(bit, &gl->gl_flags);
973 	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
974 		gl->gl_demote_state = state;
975 		gl->gl_demote_time = jiffies;
976 	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
977 			gl->gl_demote_state != state) {
978 		gl->gl_demote_state = LM_ST_UNLOCKED;
979 	}
980 	if (gl->gl_ops->go_callback)
981 		gl->gl_ops->go_callback(gl, remote);
982 	trace_gfs2_demote_rq(gl, remote);
983 }
984 
985 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
986 {
987 	struct va_format vaf;
988 	va_list args;
989 
990 	va_start(args, fmt);
991 
992 	if (seq) {
993 		seq_vprintf(seq, fmt, args);
994 	} else {
995 		vaf.fmt = fmt;
996 		vaf.va = &args;
997 
998 		pr_err("%pV", &vaf);
999 	}
1000 
1001 	va_end(args);
1002 }
1003 
1004 /**
1005  * add_to_queue - Add a holder to the wait queue (but look for recursion)
1006  * @gh: the holder structure to add
1007  *
1008  * Eventually we should move the recursive locking trap to a
1009  * debugging option or something like that. This is the fast
1010  * path and needs to have the minimum number of distractions.
1011  *
1012  */
1013 
1014 static inline void add_to_queue(struct gfs2_holder *gh)
1015 __releases(&gl->gl_lockref.lock)
1016 __acquires(&gl->gl_lockref.lock)
1017 {
1018 	struct gfs2_glock *gl = gh->gh_gl;
1019 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1020 	struct list_head *insert_pt = NULL;
1021 	struct gfs2_holder *gh2;
1022 	int try_futile = 0;
1023 
1024 	BUG_ON(gh->gh_owner_pid == NULL);
1025 	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1026 		BUG();
1027 
1028 	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1029 		if (test_bit(GLF_LOCK, &gl->gl_flags))
1030 			try_futile = !may_grant(gl, gh);
1031 		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
1032 			goto fail;
1033 	}
1034 
1035 	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
1036 		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
1037 		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
1038 			goto trap_recursive;
1039 		if (try_futile &&
1040 		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
1041 fail:
1042 			gh->gh_error = GLR_TRYFAILED;
1043 			gfs2_holder_wake(gh);
1044 			return;
1045 		}
1046 		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
1047 			continue;
1048 		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
1049 			insert_pt = &gh2->gh_list;
1050 	}
1051 	set_bit(GLF_QUEUED, &gl->gl_flags);
1052 	trace_gfs2_glock_queue(gh, 1);
1053 	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
1054 	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
1055 	if (likely(insert_pt == NULL)) {
1056 		list_add_tail(&gh->gh_list, &gl->gl_holders);
1057 		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
1058 			goto do_cancel;
1059 		return;
1060 	}
1061 	list_add_tail(&gh->gh_list, insert_pt);
1062 do_cancel:
1063 	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
1064 	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
1065 		spin_unlock(&gl->gl_lockref.lock);
1066 		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
1067 			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
1068 		spin_lock(&gl->gl_lockref.lock);
1069 	}
1070 	return;
1071 
1072 trap_recursive:
1073 	fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
1074 	fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
1075 	fs_err(sdp, "lock type: %d req lock state : %d\n",
1076 	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
1077 	fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
1078 	fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
1079 	fs_err(sdp, "lock type: %d req lock state : %d\n",
1080 	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
1081 	gfs2_dump_glock(NULL, gl);
1082 	BUG();
1083 }
1084 
1085 /**
1086  * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1087  * @gh: the holder structure
1088  *
1089  * if (gh->gh_flags & GL_ASYNC), this never returns an error
1090  *
1091  * Returns: 0, GLR_TRYFAILED, or errno on failure
1092  */
1093 
1094 int gfs2_glock_nq(struct gfs2_holder *gh)
1095 {
1096 	struct gfs2_glock *gl = gh->gh_gl;
1097 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
1098 	int error = 0;
1099 
1100 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
1101 		return -EIO;
1102 
1103 	if (test_bit(GLF_LRU, &gl->gl_flags))
1104 		gfs2_glock_remove_from_lru(gl);
1105 
1106 	spin_lock(&gl->gl_lockref.lock);
1107 	add_to_queue(gh);
1108 	if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
1109 		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
1110 		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1111 		gl->gl_lockref.count++;
1112 		__gfs2_glock_queue_work(gl, 0);
1113 	}
1114 	run_queue(gl, 1);
1115 	spin_unlock(&gl->gl_lockref.lock);
1116 
1117 	if (!(gh->gh_flags & GL_ASYNC))
1118 		error = gfs2_glock_wait(gh);
1119 
1120 	return error;
1121 }
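/*
 * Typical holder life cycle (a sketch; real callers are spread across
 * gfs2 and usually use the gfs2_glock_nq_init() wrapper):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... access the protected object ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */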
1122 
1123 /**
1124  * gfs2_glock_poll - poll to see if an async request has been completed
1125  * @gh: the holder
1126  *
1127  * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1128  */
1129 
1130 int gfs2_glock_poll(struct gfs2_holder *gh)
1131 {
1132 	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
1133 }
1134 
1135 /**
1136  * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1137  * @gh: the glock holder
1138  *
1139  */
1140 
1141 void gfs2_glock_dq(struct gfs2_holder *gh)
1142 {
1143 	struct gfs2_glock *gl = gh->gh_gl;
1144 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1145 	unsigned delay = 0;
1146 	int fast_path = 0;
1147 
1148 	spin_lock(&gl->gl_lockref.lock);
1149 	if (gh->gh_flags & GL_NOCACHE)
1150 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1151 
1152 	list_del_init(&gh->gh_list);
1153 	clear_bit(HIF_HOLDER, &gh->gh_iflags);
1154 	if (find_first_holder(gl) == NULL) {
1155 		if (glops->go_unlock) {
1156 			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
1157 			spin_unlock(&gl->gl_lockref.lock);
1158 			glops->go_unlock(gh);
1159 			spin_lock(&gl->gl_lockref.lock);
1160 			clear_bit(GLF_LOCK, &gl->gl_flags);
1161 		}
1162 		if (list_empty(&gl->gl_holders) &&
1163 		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1164 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
1165 			fast_path = 1;
1166 	}
1167 	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
1168 		gfs2_glock_add_to_lru(gl);
1169 
1170 	trace_gfs2_glock_queue(gh, 0);
1171 	if (unlikely(!fast_path)) {
1172 		gl->gl_lockref.count++;
1173 		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1174 		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
1175 		    gl->gl_name.ln_type == LM_TYPE_INODE)
1176 			delay = gl->gl_hold_time;
1177 		__gfs2_glock_queue_work(gl, delay);
1178 	}
1179 	spin_unlock(&gl->gl_lockref.lock);
1180 }
1181 
1182 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1183 {
1184 	struct gfs2_glock *gl = gh->gh_gl;
1185 	gfs2_glock_dq(gh);
1186 	might_sleep();
1187 	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
1188 }
1189 
1190 /**
1191  * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
1192  * @gh: the holder structure
1193  *
1194  */
1195 
1196 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1197 {
1198 	gfs2_glock_dq(gh);
1199 	gfs2_holder_uninit(gh);
1200 }
1201 
1202 /**
1203  * gfs2_glock_nq_num - acquire a glock based on lock number
1204  * @sdp: the filesystem
1205  * @number: the lock number
1206  * @glops: the glock operations for the type of glock
1207  * @state: the state to acquire the glock in
1208  * @flags: modifier flags for the acquisition
1209  * @gh: the struct gfs2_holder
1210  *
1211  * Returns: errno
1212  */
1213 
1214 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
1215 		      const struct gfs2_glock_operations *glops,
1216 		      unsigned int state, u16 flags, struct gfs2_holder *gh)
1217 {
1218 	struct gfs2_glock *gl;
1219 	int error;
1220 
1221 	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1222 	if (!error) {
1223 		error = gfs2_glock_nq_init(gl, state, flags, gh);
1224 		gfs2_glock_put(gl);
1225 	}
1226 
1227 	return error;
1228 }
1229 
1230 /**
1231  * glock_compare - Compare two struct gfs2_glock structures for sorting
1232  * @arg_a: the first structure
1233  * @arg_b: the second structure
1234  *
1235  */
1236 
1237 static int glock_compare(const void *arg_a, const void *arg_b)
1238 {
1239 	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1240 	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1241 	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1242 	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
1243 
1244 	if (a->ln_number > b->ln_number)
1245 		return 1;
1246 	if (a->ln_number < b->ln_number)
1247 		return -1;
1248 	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
1249 	return 0;
1250 }
1251 
1252 /**
1253  * nq_m_sync - synchronously acquire more than one glock in deadlock-free order
1254  * @num_gh: the number of structures
1255  * @ghs: an array of struct gfs2_holder structures
1256  *
1257  * Returns: 0 on success (all glocks acquired),
1258  *          errno on failure (no glocks acquired)
1259  */
1260 
1261 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1262 		     struct gfs2_holder **p)
1263 {
1264 	unsigned int x;
1265 	int error = 0;
1266 
1267 	for (x = 0; x < num_gh; x++)
1268 		p[x] = &ghs[x];
1269 
1270 	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1271 
1272 	for (x = 0; x < num_gh; x++) {
1273 		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1274 
1275 		error = gfs2_glock_nq(p[x]);
1276 		if (error) {
1277 			while (x--)
1278 				gfs2_glock_dq(p[x]);
1279 			break;
1280 		}
1281 	}
1282 
1283 	return error;
1284 }
1285 
1286 /**
1287  * gfs2_glock_nq_m - acquire multiple glocks
1288  * @num_gh: the number of structures
1289  * @ghs: an array of struct gfs2_holder structures
1290  *
1291  *
1292  * Returns: 0 on success (all glocks acquired),
1293  *          errno on failure (no glocks acquired)
1294  */
1295 
1296 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1297 {
1298 	struct gfs2_holder *tmp[4];
1299 	struct gfs2_holder **pph = tmp;
1300 	int error = 0;
1301 
1302 	switch (num_gh) {
1303 	case 0:
1304 		return 0;
1305 	case 1:
1306 		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1307 		return gfs2_glock_nq(ghs);
1308 	default:
1309 		if (num_gh <= 4)
1310 			break;
1311 		pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
1312 				    GFP_NOFS);
1313 		if (!pph)
1314 			return -ENOMEM;
1315 	}
1316 
1317 	error = nq_m_sync(num_gh, ghs, pph);
1318 
1319 	if (pph != tmp)
1320 		kfree(pph);
1321 
1322 	return error;
1323 }
1324 
1325 /**
1326  * gfs2_glock_dq_m - release multiple glocks
1327  * @num_gh: the number of structures
1328  * @ghs: an array of struct gfs2_holder structures
1329  *
1330  */
1331 
1332 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1333 {
1334 	while (num_gh--)
1335 		gfs2_glock_dq(&ghs[num_gh]);
1336 }
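/*
 * Sketch of paired use (illustrative only; gl1 and gl2 are placeholder
 * glocks):
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl2, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error)
 *		gfs2_glock_dq_m(2, ghs);
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */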
1337 
1338 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
1339 {
1340 	unsigned long delay = 0;
1341 	unsigned long holdtime;
1342 	unsigned long now = jiffies;
1343 
1344 	gfs2_glock_hold(gl);
1345 	holdtime = gl->gl_tchange + gl->gl_hold_time;
1346 	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
1347 	    gl->gl_name.ln_type == LM_TYPE_INODE) {
1348 		if (time_before(now, holdtime))
1349 			delay = holdtime - now;
1350 		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1351 			delay = gl->gl_hold_time;
1352 	}
1353 
1354 	spin_lock(&gl->gl_lockref.lock);
1355 	handle_callback(gl, state, delay, true);
1356 	__gfs2_glock_queue_work(gl, delay);
1357 	spin_unlock(&gl->gl_lockref.lock);
1358 }
1359 
1360 /**
1361  * gfs2_should_freeze - Figure out if glock should be frozen
1362  * @gl: The glock in question
1363  *
1364  * Glocks are not frozen if (a) the result of the dlm operation is
1365  * an error, (b) the locking operation was an unlock operation, or
1366  * (c) there is a "noexp" flagged request anywhere in the queue
1367  *
1368  * Returns: 1 if freezing should occur, 0 otherwise
1369  */
1370 
1371 static int gfs2_should_freeze(const struct gfs2_glock *gl)
1372 {
1373 	const struct gfs2_holder *gh;
1374 
1375 	if (gl->gl_reply & ~LM_OUT_ST_MASK)
1376 		return 0;
1377 	if (gl->gl_target == LM_ST_UNLOCKED)
1378 		return 0;
1379 
1380 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1381 		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1382 			continue;
1383 		if (LM_FLAG_NOEXP & gh->gh_flags)
1384 			return 0;
1385 	}
1386 
1387 	return 1;
1388 }
1389 
1390 /**
1391  * gfs2_glock_complete - Callback used by locking
1392  * @gl: Pointer to the glock
1393  * @ret: The return value from the dlm
1394  *
1395  * The gl_reply field is protected by gl_lockref.lock so that it is ok
1396  * to use a bitfield shared with other glock state fields.
1397  */
1398 
1399 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
1400 {
1401 	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
1402 
1403 	spin_lock(&gl->gl_lockref.lock);
1404 	gl->gl_reply = ret;
1405 
1406 	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
1407 		if (gfs2_should_freeze(gl)) {
1408 			set_bit(GLF_FROZEN, &gl->gl_flags);
1409 			spin_unlock(&gl->gl_lockref.lock);
1410 			return;
1411 		}
1412 	}
1413 
1414 	gl->gl_lockref.count++;
1415 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1416 	__gfs2_glock_queue_work(gl, 0);
1417 	spin_unlock(&gl->gl_lockref.lock);
1418 }
1419 
1420 static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
1421 {
1422 	struct gfs2_glock *gla, *glb;
1423 
1424 	gla = list_entry(a, struct gfs2_glock, gl_lru);
1425 	glb = list_entry(b, struct gfs2_glock, gl_lru);
1426 
1427 	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
1428 		return 1;
1429 	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
1430 		return -1;
1431 
1432 	return 0;
1433 }
1434 
1435 /**
1436  * gfs2_dispose_glock_lru - Demote a list of glocks
1437  * @list: The list to dispose of
1438  *
1439  * Disposing of glocks may involve disk accesses, so that here we sort
1440  * the glocks by number (i.e. disk location of the inodes) so that if
1441  * there are any such accesses, they'll be sent in order (mostly).
1442  *
1443  * Must be called under the lru_lock, but may drop and retake this
1444  * lock. While the lru_lock is dropped, entries may vanish from the
1445  * list, but no new entries will appear on the list (since it is
1446  * private)
1447  */
1448 
1449 static void gfs2_dispose_glock_lru(struct list_head *list)
1450 __releases(&lru_lock)
1451 __acquires(&lru_lock)
1452 {
1453 	struct gfs2_glock *gl;
1454 
1455 	list_sort(NULL, list, glock_cmp);
1456 
1457 	while (!list_empty(list)) {
1458 		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1459 		list_del_init(&gl->gl_lru);
1460 		if (!spin_trylock(&gl->gl_lockref.lock)) {
1461 add_back_to_lru:
1462 			list_add(&gl->gl_lru, &lru_list);
1463 			set_bit(GLF_LRU, &gl->gl_flags);
1464 			atomic_inc(&lru_count);
1465 			continue;
1466 		}
1467 		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1468 			spin_unlock(&gl->gl_lockref.lock);
1469 			goto add_back_to_lru;
1470 		}
1471 		gl->gl_lockref.count++;
1472 		if (demote_ok(gl))
1473 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1474 		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
1475 		__gfs2_glock_queue_work(gl, 0);
1476 		spin_unlock(&gl->gl_lockref.lock);
1477 		cond_resched_lock(&lru_lock);
1478 	}
1479 }
1480 
1481 /**
1482  * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
1483  * @nr: The number of entries to scan
1484  *
1485  * This function selects the entries on the LRU which are able to
1486  * be demoted, and then kicks off the process by calling
1487  * gfs2_dispose_glock_lru() above.
1488  */
1489 
1490 static long gfs2_scan_glock_lru(int nr)
1491 {
1492 	struct gfs2_glock *gl;
1493 	LIST_HEAD(skipped);
1494 	LIST_HEAD(dispose);
1495 	long freed = 0;
1496 
1497 	spin_lock(&lru_lock);
1498 	while ((nr-- >= 0) && !list_empty(&lru_list)) {
1499 		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1500 
1501 		/* Test for being demotable */
1502 		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
1503 			list_move(&gl->gl_lru, &dispose);
1504 			atomic_dec(&lru_count);
1505 			clear_bit(GLF_LRU, &gl->gl_flags);
1506 			freed++;
1507 			continue;
1508 		}
1509 
1510 		list_move(&gl->gl_lru, &skipped);
1511 	}
1512 	list_splice(&skipped, &lru_list);
1513 	if (!list_empty(&dispose))
1514 		gfs2_dispose_glock_lru(&dispose);
1515 	spin_unlock(&lru_lock);
1516 
1517 	return freed;
1518 }
1519 
1520 static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
1521 					    struct shrink_control *sc)
1522 {
1523 	if (!(sc->gfp_mask & __GFP_FS))
1524 		return SHRINK_STOP;
1525 	return gfs2_scan_glock_lru(sc->nr_to_scan);
1526 }
1527 
1528 static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
1529 					     struct shrink_control *sc)
1530 {
1531 	return vfs_pressure_ratio(atomic_read(&lru_count));
1532 }
1533 
1534 static struct shrinker glock_shrinker = {
1535 	.seeks = DEFAULT_SEEKS,
1536 	.count_objects = gfs2_glock_shrink_count,
1537 	.scan_objects = gfs2_glock_shrink_scan,
1538 };
1539 
1540 /**
1541  * glock_hash_walk - Call a function for each glock in the hash table
1542  * @examiner: the function
1543  * @sdp: the filesystem
1544  *
1546  * Note that the function can be called multiple times on the same
1547  * object.  So the user must ensure that the function can cope with
1548  * that.
1549  */
1550 
1551 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1552 {
1553 	struct gfs2_glock *gl;
1554 	struct rhashtable_iter iter;
1555 
1556 	rhashtable_walk_enter(&gl_hash_table, &iter);
1557 
1558 	do {
1559 		rhashtable_walk_start(&iter);
1560 
1561 		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
1562 			if (gl->gl_name.ln_sbd == sdp &&
1563 			    lockref_get_not_dead(&gl->gl_lockref))
1564 				examiner(gl);
1565 
1566 		rhashtable_walk_stop(&iter);
1567 	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
1568 
1569 	rhashtable_walk_exit(&iter);
1570 }
1571 
1572 /**
1573  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1574  * @gl: The glock to thaw
1575  *
1576  */
1577 
1578 static void thaw_glock(struct gfs2_glock *gl)
1579 {
1580 	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
1581 		gfs2_glock_put(gl);
1582 		return;
1583 	}
1584 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1585 	gfs2_glock_queue_work(gl, 0);
1586 }
1587 
1588 /**
1589  * clear_glock - look at a glock and see if we can free it from glock cache
1590  * @gl: the glock to look at
1591  *
1592  */
1593 
1594 static void clear_glock(struct gfs2_glock *gl)
1595 {
1596 	gfs2_glock_remove_from_lru(gl);
1597 
1598 	spin_lock(&gl->gl_lockref.lock);
1599 	if (gl->gl_state != LM_ST_UNLOCKED)
1600 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
1601 	__gfs2_glock_queue_work(gl, 0);
1602 	spin_unlock(&gl->gl_lockref.lock);
1603 }
1604 
1605 /**
1606  * gfs2_glock_thaw - Thaw any frozen glocks
1607  * @sdp: The super block
1608  *
1609  */
1610 
1611 void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1612 {
1613 	glock_hash_walk(thaw_glock, sdp);
1614 }
1615 
1616 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1617 {
1618 	spin_lock(&gl->gl_lockref.lock);
1619 	gfs2_dump_glock(seq, gl);
1620 	spin_unlock(&gl->gl_lockref.lock);
1621 }
1622 
1623 static void dump_glock_func(struct gfs2_glock *gl)
1624 {
1625 	dump_glock(NULL, gl);
1626 }
1627 
1628 /**
1629  * gfs2_gl_hash_clear - Empty out the glock hash table
1630  * @sdp: the filesystem
1632  *
1633  * Called when unmounting the filesystem.
1634  */
1635 
1636 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
1637 {
1638 	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
1639 	flush_workqueue(glock_workqueue);
1640 	glock_hash_walk(clear_glock, sdp);
1641 	flush_workqueue(glock_workqueue);
1642 	wait_event_timeout(sdp->sd_glock_wait,
1643 			   atomic_read(&sdp->sd_glock_disposal) == 0,
1644 			   HZ * 600);
1645 	glock_hash_walk(dump_glock_func, sdp);
1646 }
1647 
1648 void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1649 {
1650 	struct gfs2_glock *gl = ip->i_gl;
1651 	int ret;
1652 
1653 	ret = gfs2_truncatei_resume(ip);
1654 	gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
1655 
1656 	spin_lock(&gl->gl_lockref.lock);
1657 	clear_bit(GLF_LOCK, &gl->gl_flags);
1658 	run_queue(gl, 1);
1659 	spin_unlock(&gl->gl_lockref.lock);
1660 }
1661 
1662 static const char *state2str(unsigned state)
1663 {
1664 	switch (state) {
1665 	case LM_ST_UNLOCKED:
1666 		return "UN";
1667 	case LM_ST_SHARED:
1668 		return "SH";
1669 	case LM_ST_DEFERRED:
1670 		return "DF";
1671 	case LM_ST_EXCLUSIVE:
1672 		return "EX";
1673 	}
1674 	return "??";
1675 }
1676 
1677 static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
1678 {
1679 	char *p = buf;
1680 	if (flags & LM_FLAG_TRY)
1681 		*p++ = 't';
1682 	if (flags & LM_FLAG_TRY_1CB)
1683 		*p++ = 'T';
1684 	if (flags & LM_FLAG_NOEXP)
1685 		*p++ = 'e';
1686 	if (flags & LM_FLAG_ANY)
1687 		*p++ = 'A';
1688 	if (flags & LM_FLAG_PRIORITY)
1689 		*p++ = 'p';
1690 	if (flags & GL_ASYNC)
1691 		*p++ = 'a';
1692 	if (flags & GL_EXACT)
1693 		*p++ = 'E';
1694 	if (flags & GL_NOCACHE)
1695 		*p++ = 'c';
1696 	if (test_bit(HIF_HOLDER, &iflags))
1697 		*p++ = 'H';
1698 	if (test_bit(HIF_WAIT, &iflags))
1699 		*p++ = 'W';
1700 	if (test_bit(HIF_FIRST, &iflags))
1701 		*p++ = 'F';
1702 	*p = 0;
1703 	return buf;
1704 }
1705 
1706 /**
1707  * dump_holder - print information about a glock holder
1708  * @seq: the seq_file struct
1709  * @gh: the glock holder
1710  *
1711  */
1712 
1713 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
1714 {
1715 	struct task_struct *gh_owner = NULL;
1716 	char flags_buf[32];
1717 
1718 	rcu_read_lock();
1719 	if (gh->gh_owner_pid)
1720 		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
1721 	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1722 		       state2str(gh->gh_state),
1723 		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1724 		       gh->gh_error,
1725 		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1726 		       gh_owner ? gh_owner->comm : "(ended)",
1727 		       (void *)gh->gh_ip);
1728 	rcu_read_unlock();
1729 }
1730 
1731 static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
1732 {
1733 	const unsigned long *gflags = &gl->gl_flags;
1734 	char *p = buf;
1735 
1736 	if (test_bit(GLF_LOCK, gflags))
1737 		*p++ = 'l';
1738 	if (test_bit(GLF_DEMOTE, gflags))
1739 		*p++ = 'D';
1740 	if (test_bit(GLF_PENDING_DEMOTE, gflags))
1741 		*p++ = 'd';
1742 	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1743 		*p++ = 'p';
1744 	if (test_bit(GLF_DIRTY, gflags))
1745 		*p++ = 'y';
1746 	if (test_bit(GLF_LFLUSH, gflags))
1747 		*p++ = 'f';
1748 	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1749 		*p++ = 'i';
1750 	if (test_bit(GLF_REPLY_PENDING, gflags))
1751 		*p++ = 'r';
1752 	if (test_bit(GLF_INITIAL, gflags))
1753 		*p++ = 'I';
1754 	if (test_bit(GLF_FROZEN, gflags))
1755 		*p++ = 'F';
1756 	if (test_bit(GLF_QUEUED, gflags))
1757 		*p++ = 'q';
1758 	if (test_bit(GLF_LRU, gflags))
1759 		*p++ = 'L';
1760 	if (gl->gl_object)
1761 		*p++ = 'o';
1762 	if (test_bit(GLF_BLOCKING, gflags))
1763 		*p++ = 'b';
1764 	*p = 0;
1765 	return buf;
1766 }
1767 
1768 /**
1769  * gfs2_dump_glock - print information about a glock
1770  * @seq: The seq_file struct
1771  * @gl: the glock
1772  *
1773  * The file format is as follows:
1774  * One line per object, capital letters are used to indicate objects
1775  * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1776  * other objects are indented by a single space and follow the glock to
1777  * which they are related. Fields are indicated by lower case letters
1778  * followed by a colon and the field value, except for strings which are in
1779  * [] so that it's possible to see if they are composed of spaces for
1780  * example. The fields are n = number (id of the object), f = flags,
1781  * t = type, s = state, r = refcount, e = error, p = pid.
1782  *
1783  */
1784 
1785 void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1786 {
1787 	const struct gfs2_glock_operations *glops = gl->gl_ops;
1788 	unsigned long long dtime;
1789 	const struct gfs2_holder *gh;
1790 	char gflags_buf[32];
1791 
1792 	dtime = jiffies - gl->gl_demote_time;
1793 	dtime *= 1000000/HZ; /* demote time in uSec */
1794 	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1795 		dtime = 0;
1796 	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
1797 		  state2str(gl->gl_state),
1798 		  gl->gl_name.ln_type,
1799 		  (unsigned long long)gl->gl_name.ln_number,
1800 		  gflags2str(gflags_buf, gl),
1801 		  state2str(gl->gl_target),
1802 		  state2str(gl->gl_demote_state), dtime,
1803 		  atomic_read(&gl->gl_ail_count),
1804 		  test_bit(GLF_REVOKES, &gl->gl_flags) ? 1 : 0,
1805 		  (int)gl->gl_lockref.count, gl->gl_hold_time);
1806 
1807 	list_for_each_entry(gh, &gl->gl_holders, gh_list)
1808 		dump_holder(seq, gh);
1809 
1810 	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1811 		glops->go_dump(seq, gl);
1812 }
1813 
1814 static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
1815 {
1816 	struct gfs2_glock *gl = iter_ptr;
1817 
1818 	seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
1819 		   gl->gl_name.ln_type,
1820 		   (unsigned long long)gl->gl_name.ln_number,
1821 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
1822 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
1823 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
1824 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
1825 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
1826 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
1827 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
1828 		   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
1829 	return 0;
1830 }
1831 
1832 static const char *gfs2_gltype[] = {
1833 	"type",
1834 	"reserved",
1835 	"nondisk",
1836 	"inode",
1837 	"rgrp",
1838 	"meta",
1839 	"iopen",
1840 	"flock",
1841 	"plock",
1842 	"quota",
1843 	"journal",
1844 };
1845 
1846 static const char *gfs2_stype[] = {
1847 	[GFS2_LKS_SRTT]		= "srtt",
1848 	[GFS2_LKS_SRTTVAR]	= "srttvar",
1849 	[GFS2_LKS_SRTTB]	= "srttb",
1850 	[GFS2_LKS_SRTTVARB]	= "srttvarb",
1851 	[GFS2_LKS_SIRT]		= "sirt",
1852 	[GFS2_LKS_SIRTVAR]	= "sirtvar",
1853 	[GFS2_LKS_DCOUNT]	= "dlm",
1854 	[GFS2_LKS_QCOUNT]	= "queue",
1855 };
1856 
1857 #define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
1858 
1859 static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
1860 {
1861 	struct gfs2_sbd *sdp = seq->private;
1862 	loff_t pos = *(loff_t *)iter_ptr;
1863 	unsigned index = pos >> 3;
1864 	unsigned subindex = pos & 0x07;
1865 	int i;
1866 
1867 	if (index == 0 && subindex != 0)
1868 		return 0;
1869 
1870 	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
1871 		   (index == 0) ? "cpu": gfs2_stype[subindex]);
1872 
1873 	for_each_possible_cpu(i) {
1874 		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
1875 
1876 		if (index == 0)
1877 			seq_printf(seq, " %15u", i);
1878 		else
1879 			seq_printf(seq, " %15llu", (unsigned long long)lkstats->
1880 				   lkstats[index - 1].stats[subindex]);
1881 	}
1882 	seq_putc(seq, '\n');
1883 	return 0;
1884 }
1885 
1886 int __init gfs2_glock_init(void)
1887 {
1888 	int i, ret;
1889 
1890 	ret = rhashtable_init(&gl_hash_table, &ht_parms);
1891 	if (ret < 0)
1892 		return ret;
1893 
1894 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1895 					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
1896 	if (!glock_workqueue) {
1897 		rhashtable_destroy(&gl_hash_table);
1898 		return -ENOMEM;
1899 	}
1900 	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1901 						WQ_MEM_RECLAIM | WQ_FREEZABLE,
1902 						0);
1903 	if (!gfs2_delete_workqueue) {
1904 		destroy_workqueue(glock_workqueue);
1905 		rhashtable_destroy(&gl_hash_table);
1906 		return -ENOMEM;
1907 	}
1908 
1909 	ret = register_shrinker(&glock_shrinker);
1910 	if (ret) {
1911 		destroy_workqueue(gfs2_delete_workqueue);
1912 		destroy_workqueue(glock_workqueue);
1913 		rhashtable_destroy(&gl_hash_table);
1914 		return ret;
1915 	}
1916 
1917 	for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
1918 		init_waitqueue_head(glock_wait_table + i);
1919 
1920 	return 0;
1921 }
1922 
1923 void gfs2_glock_exit(void)
1924 {
1925 	unregister_shrinker(&glock_shrinker);
1926 	rhashtable_destroy(&gl_hash_table);
1927 	destroy_workqueue(glock_workqueue);
1928 	destroy_workqueue(gfs2_delete_workqueue);
1929 }
1930 
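/*
 * Advance the iterator by n live glocks belonging to this superblock
 * (n == 0: stay on the current glock) and take a reference on the
 * result; gi->gl is set to NULL at the end of the walk.  If the walk
 * has to restart because the hash table was resized (-EAGAIN), the
 * remaining skip count is abandoned and the next live entry is used.
 */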
1931 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
1932 {
1933 	struct gfs2_glock *gl = gi->gl;
1934 
1935 	if (gl) {
1936 		if (n == 0)
1937 			return;
1938 		if (!lockref_put_not_zero(&gl->gl_lockref))
1939 			gfs2_glock_queue_put(gl);
1940 	}
1941 	for (;;) {
1942 		gl = rhashtable_walk_next(&gi->hti);
1943 		if (IS_ERR_OR_NULL(gl)) {
1944 			if (gl == ERR_PTR(-EAGAIN)) {
1945 				n = 1;
1946 				continue;
1947 			}
1948 			gl = NULL;
1949 			break;
1950 		}
1951 		if (gl->gl_name.ln_sbd != gi->sdp)
1952 			continue;
1953 		if (n <= 1) {
1954 			if (!lockref_get_not_dead(&gl->gl_lockref))
1955 				continue;
1956 			break;
1957 		} else {
1958 			if (__lockref_is_dead(&gl->gl_lockref))
1959 				continue;
1960 			n--;
1961 		}
1962 	}
1963 	gi->gl = gl;
1964 }
1965 
1966 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
1967 	__acquires(RCU)
1968 {
1969 	struct gfs2_glock_iter *gi = seq->private;
1970 	loff_t n;
1971 
1972 	/*
1973 	 * We can either stay where we are (n == 0), skip n entries ahead, or
1974 	 * restart from the beginning: *pos + 1 steps after re-entering the walk.
1975 	 */
1976 	if (*pos < gi->last_pos) {
1977 		rhashtable_walk_exit(&gi->hti);
1978 		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
1979 		n = *pos + 1;
1980 	} else {
1981 		n = *pos - gi->last_pos;
1982 	}
1983 
1984 	rhashtable_walk_start(&gi->hti);
1985 
1986 	gfs2_glock_iter_next(gi, n);
1987 	gi->last_pos = *pos;
1988 	return gi->gl;
1989 }
1990 
1991 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
1992 				 loff_t *pos)
1993 {
1994 	struct gfs2_glock_iter *gi = seq->private;
1995 
1996 	(*pos)++;
1997 	gi->last_pos = *pos;
1998 	gfs2_glock_iter_next(gi, 1);
1999 	return gi->gl;
2000 }
2001 
2002 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
2003 	__releases(RCU)
2004 {
2005 	struct gfs2_glock_iter *gi = seq->private;
2006 
2007 	rhashtable_walk_stop(&gi->hti);
2008 }
2009 
2010 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
2011 {
2012 	dump_glock(seq, iter_ptr);
2013 	return 0;
2014 }
2015 
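/*
 * The sbstats walk runs with preemption disabled from ->start to
 * ->stop; the per-cpu counters are read without further locking.
 */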
2016 static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
2017 {
2018 	preempt_disable();
2019 	if (*pos >= GFS2_NR_SBSTATS)
2020 		return NULL;
2021 	return pos;
2022 }
2023 
2024 static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
2025 				   loff_t *pos)
2026 {
2027 	(*pos)++;
2028 	if (*pos >= GFS2_NR_SBSTATS)
2029 		return NULL;
2030 	return pos;
2031 }
2032 
2033 static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
2034 {
2035 	preempt_enable();
2036 }
2037 
2038 static const struct seq_operations gfs2_glock_seq_ops = {
2039 	.start = gfs2_glock_seq_start,
2040 	.next  = gfs2_glock_seq_next,
2041 	.stop  = gfs2_glock_seq_stop,
2042 	.show  = gfs2_glock_seq_show,
2043 };
2044 
2045 static const struct seq_operations gfs2_glstats_seq_ops = {
2046 	.start = gfs2_glock_seq_start,
2047 	.next  = gfs2_glock_seq_next,
2048 	.stop  = gfs2_glock_seq_stop,
2049 	.show  = gfs2_glstats_seq_show,
2050 };
2051 
2052 static const struct seq_operations gfs2_sbstats_seq_ops = {
2053 	.start = gfs2_sbstats_seq_start,
2054 	.next  = gfs2_sbstats_seq_next,
2055 	.stop  = gfs2_sbstats_seq_stop,
2056 	.show  = gfs2_sbstats_seq_show,
2057 };
2058 
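/*
 * seq_file starts with a single-page buffer and doubles it whenever an
 * entry overflows.  Glock dumps are big, so preallocate a large buffer
 * (capped at 64k) up front to avoid the repeated reallocations.
 */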
2059 #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
2060 
2061 static int __gfs2_glocks_open(struct inode *inode, struct file *file,
2062 			      const struct seq_operations *ops)
2063 {
2064 	int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
2065 	if (ret == 0) {
2066 		struct seq_file *seq = file->private_data;
2067 		struct gfs2_glock_iter *gi = seq->private;
2068 
2069 		gi->sdp = inode->i_private;
2070 		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
2071 		if (seq->buf)
2072 			seq->size = GFS2_SEQ_GOODSIZE;
2073 		/*
2074 		 * Initially, we are "before" the first hash table entry; the
2075 		 * first call to rhashtable_walk_next gets us the first entry.
2076 		 */
2077 		gi->last_pos = -1;
2078 		gi->gl = NULL;
2079 		rhashtable_walk_enter(&gl_hash_table, &gi->hti);
2080 	}
2081 	return ret;
2082 }
2083 
2084 static int gfs2_glocks_open(struct inode *inode, struct file *file)
2085 {
2086 	return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
2087 }
2088 
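/* Drop any reference the iterator still holds before ending the walk */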
2089 static int gfs2_glocks_release(struct inode *inode, struct file *file)
2090 {
2091 	struct seq_file *seq = file->private_data;
2092 	struct gfs2_glock_iter *gi = seq->private;
2093 
2094 	if (gi->gl)
2095 		gfs2_glock_put(gi->gl);
2096 	rhashtable_walk_exit(&gi->hti);
2097 	return seq_release_private(inode, file);
2098 }
2099 
2100 static int gfs2_glstats_open(struct inode *inode, struct file *file)
2101 {
2102 	return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
2103 }
2104 
2105 static int gfs2_sbstats_open(struct inode *inode, struct file *file)
2106 {
2107 	int ret = seq_open(file, &gfs2_sbstats_seq_ops);
2108 	if (ret == 0) {
2109 		struct seq_file *seq = file->private_data;
2110 		seq->private = inode->i_private;  /* sdp */
2111 	}
2112 	return ret;
2113 }
2114 
2115 static const struct file_operations gfs2_glocks_fops = {
2116 	.owner   = THIS_MODULE,
2117 	.open    = gfs2_glocks_open,
2118 	.read    = seq_read,
2119 	.llseek  = seq_lseek,
2120 	.release = gfs2_glocks_release,
2121 };
2122 
2123 static const struct file_operations gfs2_glstats_fops = {
2124 	.owner   = THIS_MODULE,
2125 	.open    = gfs2_glstats_open,
2126 	.read    = seq_read,
2127 	.llseek  = seq_lseek,
2128 	.release = gfs2_glocks_release,
2129 };
2130 
2131 static const struct file_operations gfs2_sbstats_fops = {
2132 	.owner   = THIS_MODULE,
2133 	.open	 = gfs2_sbstats_open,
2134 	.read    = seq_read,
2135 	.llseek  = seq_lseek,
2136 	.release = seq_release,
2137 };
2138 
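/**
 * gfs2_create_debugfs_file - create per-filesystem debugfs files
 * @sdp: the incore superblock
 *
 * Creates the glocks, glstats and sbstats files in the per-filesystem
 * debugfs directory.  Failures are ignored: the files are diagnostic
 * only and the filesystem is fully functional without them.
 */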
2139 void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
2140 {
2141 	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
2142 
2143 	debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2144 			    &gfs2_glocks_fops);
2145 
2146 	debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2147 			    &gfs2_glstats_fops);
2148 
2149 	debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
2150 			    &gfs2_sbstats_fops);
2151 }
2152 
2153 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
2154 {
2155 	debugfs_remove_recursive(sdp->debugfs_dir);
2156 	sdp->debugfs_dir = NULL;
2157 }
2158 
2159 void gfs2_register_debugfs(void)
2160 {
2161 	gfs2_root = debugfs_create_dir("gfs2", NULL);
2162 }
2163 
2164 void gfs2_unregister_debugfs(void)
2165 {
2166 	debugfs_remove(gfs2_root);
2167 	gfs2_root = NULL;
2168 }
2169