xref: /openbmc/linux/fs/gfs2/glock.c (revision 4e5e4705)
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index           */
	unsigned nhash;			/* Index within current bucket */
	struct gfs2_sbd *sdp;		/* incore superblock           */
	struct gfs2_glock *gl;		/* current glock struct        */
	loff_t last_pos;		/* last position               */
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The incore superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}

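/**
 * gfs2_glock_dealloc - free a glock once its RCU grace period has expired
 * @rcu: The rcu_head embedded in the glock
 */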
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

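/**
 * gfs2_glock_free - Schedule a glock for freeing after an RCU grace period
 * @gl: The glock to free
 *
 * Wakes up anyone waiting in gfs2_gl_hash_clear() once the last glock
 * awaiting disposal has been queued for freeing.
 */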
void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

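/**
 * gfs2_glock_add_to_lru - Add a glock to the tail of the LRU list
 * @gl: The glock
 *
 * If the glock is already on the list, it is moved to the tail.
 */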
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	__gfs2_glock_remove_from_lru(gl);
	spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	lockref_mark_dead(&gl->gl_lockref);

	spin_lock(&lru_lock);
	__gfs2_glock_remove_from_lru(gl);
	spin_unlock(&lru_lock);
	spin_unlock(&gl->gl_lockref.lock);
	spin_lock_bucket(gl->gl_hash);
	hlist_bl_del_rcu(&gl->gl_list);
	spin_unlock_bucket(gl->gl_hash);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&gl->gl_lockref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

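/**
 * gfs2_holder_wake - Signal completion of a lock request to a waiting holder
 * @gh: The holder
 */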
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM, or 0 to fail any queued "try" locks
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

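/**
 * gfs2_demote_wake - Clear a pending demote request and wake up any waiters
 * @gl: The glock
 */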
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The glock
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret) {
			printk(KERN_ERR "GFS2: lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, 1);
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gl->gl_lockref.count++;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gl->gl_lockref.count--;
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

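/**
 * delete_work_func - Look up the inode for a glock and prune it from the dcache
 * @work: The delete work (embedded in the glock)
 *
 * This allows an inode which has been unlinked to be evicted, so that
 * its blocks can be deallocated.
 */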
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

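/**
 * glock_work_func - Perform deferred processing for a glock
 * @work: The (delayed) work item embedded in the glock
 *
 * Handles replies from the DLM as well as pending demote requests
 * whose minimum hold time has expired.
 */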
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay)
		gfs2_glock_put(gl);
	else {
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
			gfs2_glock_put(gl);
	}
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_sbd = sdp;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	gl->gl_dstamp = ktime_set(0, 0);
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
	return gh->gh_error;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately; otherwise mark the demote as pending
 * @remote: true if this came from a different cluster node
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	printk(KERN_ERR "original: %pSR\n", (void *)gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	printk(KERN_ERR "new: %pSR\n", (void *)gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

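/*
 * Typical usage of the holder interface (a sketch, not taken from any
 * particular caller):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... access the object protected by the glock ...
 *	gfs2_glock_dq_uninit(&gh);
 *
 * gfs2_glock_nq_init() (in glock.h) combines gfs2_holder_init() with
 * gfs2_glock_nq(); gfs2_glock_dq_uninit() below undoes both.
 */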
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
		gfs2_glock_add_to_lru(gl);

	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE)
		delay = gl->gl_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare the glocks of two holder structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: caller-supplied array of holder pointers, used for sorting
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

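/**
 * gfs2_glock_cb - Callback used by the locking module to request a demote
 * @gl: Pointer to the glock
 * @state: the state the caller wants us to change to
 */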
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay, true);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation, or
 * (c) there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_spin lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
	}

	gl->gl_lockref.count++;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

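/* Sort comparator: order glocks by lock number (i.e. disk location) */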
static int glock_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_glock *gla, *glb;

	gla = list_entry(a, struct gfs2_glock, gl_lru);
	glb = list_entry(b, struct gfs2_glock, gl_lru);

	if (gla->gl_name.ln_number > glb->gl_name.ln_number)
		return 1;
	if (gla->gl_name.ln_number < glb->gl_name.ln_number)
		return -1;

	return 0;
}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */

static void gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{
	struct gfs2_glock *gl;

	list_sort(NULL, list, glock_cmp);

	while(!list_empty(list)) {
		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		if (!spin_trylock(&gl->gl_spin)) {
			list_add(&gl->gl_lru, &lru_list);
			atomic_inc(&lru_count);
			continue;
		}
		clear_bit(GLF_LRU, &gl->gl_flags);
		spin_unlock(&lru_lock);
		gl->gl_lockref.count++;
		if (demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gl->gl_lockref.count--;
		spin_unlock(&gl->gl_spin);
		spin_lock(&lru_lock);
	}
}

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static long gfs2_scan_glock_lru(int nr)
{
	struct gfs2_glock *gl;
	LIST_HEAD(skipped);
	LIST_HEAD(dispose);
	long freed = 0;

	spin_lock(&lru_lock);
	while ((nr-- >= 0) && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			list_move(&gl->gl_lru, &dispose);
			atomic_dec(&lru_count);
			freed++;
			continue;
		}

		list_move(&gl->gl_lru, &skipped);
	}
	list_splice(&skipped, &lru_list);
	if (!list_empty(&dispose))
		gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

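/*
 * Shrinker callbacks: report the number of glocks on the LRU to the VM
 * and scan/demote them when asked to free memory.
 */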
static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
	.seeks = DEFAULT_SEEKS,
	.count_objects = gfs2_glock_shrink_count,
	.scan_objects = gfs2_glock_shrink_scan,
};

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket
 *
 */

static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
			examiner(gl);
	}
	rcu_read_unlock();
	cond_resched();
}

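/* Call the examiner for each glock in the hash table belonging to @sdp */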
static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		goto out;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
out:
		gfs2_glock_put(gl);
	}
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = gfs2_dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
	flush_workqueue(glock_workqueue);
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	return 0;
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
		  state2str(gl->gl_state),
		  gl->gl_name.ln_type,
		  (unsigned long long)gl->gl_name.ln_number,
		  gflags2str(gflags_buf, gl),
		  state2str(gl->gl_target),
		  state2str(gl->gl_demote_state), dtime,
		  atomic_read(&gl->gl_ail_count),
		  atomic_read(&gl->gl_revokes),
		  (int)gl->gl_lockref.count, gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;
	struct gfs2_sbd *sdp = gi->sdp;
	unsigned index = gi->hash >> 3;
	unsigned subindex = gi->hash & 0x07;
	s64 value;
	int i;

	if (index == 0 && subindex != 0)
		return 0;

	seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
		   (index == 0) ? "cpu": gfs2_stype[subindex]);

	for_each_possible_cpu(i) {
		const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);
		if (index == 0) {
			value = i;
		} else {
			value = lkstats->lkstats[index - 1].stats[subindex];
		}
		seq_printf(seq, " %15lld", (long long)value);
	}
	seq_putc(seq, '\n');
	return 0;
}

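/**
 * gfs2_glock_init - Initialise the glock subsystem at module load time
 *
 * Sets up the glock hash table, the two glock workqueues and the
 * LRU shrinker.
 */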
int __init gfs2_glock_init(void)
{
	unsigned i;
	for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
	}

	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
	if (!glock_workqueue)
		return -ENOMEM;
	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
	destroy_workqueue(gfs2_delete_workqueue);
}

static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
{
	return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
			      struct gfs2_glock, gl_list);
}

static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
{
	return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
			      struct gfs2_glock, gl_list);
}

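/*
 * gfs2_glock_iter_next - Advance the iterator to the next glock
 *
 * Skips entries belonging to other superblocks and dead entries;
 * returns 1 (and drops the RCU read lock) once the table is exhausted.
 */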
static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

	do {
		gl = gi->gl;
		if (gl) {
			gi->gl = glock_hash_next(gl);
			gi->nhash++;
		} else {
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
			gi->nhash = 0;
		}
		while (gi->gl == NULL) {
			gi->hash++;
			if (gi->hash >= GFS2_GL_HASH_SIZE) {
				rcu_read_unlock();
				return 1;
			}
			gi->gl = glock_hash_chain(gi->hash);
			gi->nhash = 0;
		}
	/* Skip entries for other sb and dead entries */
	} while (gi->sdp != gi->gl->gl_sbd ||
		 __lockref_is_dead(&gi->gl->gl_lockref));

	return 0;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	if (gi->last_pos <= *pos)
		n = gi->nhash + (*pos - gi->last_pos);
	else
		gi->hash = 0;

	gi->nhash = 0;
	rcu_read_lock();

	do {
		if (gfs2_glock_iter_next(gi))
			return NULL;
	} while (n--);

	gi->last_pos = *pos;
	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;
	gi->last_pos = *pos;
	if (gfs2_glock_iter_next(gi))
		return NULL;

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;

	if (gi->gl)
		rcu_read_unlock();
	gi->gl = NULL;
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	gi->hash = *pos;
	if (*pos >= GFS2_NR_SBSTATS)
		return NULL;
	preempt_disable();
	return SEQ_START_TOKEN;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	(*pos)++;
	gi->hash++;
	if (gi->hash >= GFS2_NR_SBSTATS) {
		preempt_enable();
		return NULL;
	}
	return SEQ_START_TOKEN;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

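/*
 * The seq_file buffer is preallocated at GFS2_SEQ_GOODSIZE below so that
 * dumping a glock with many holders does not have to start from the
 * default single-page buffer and repeatedly retry with larger ones.
 */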
static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
	}
	return ret;
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
		seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
		if (seq->buf)
			seq->size = GFS2_SEQ_GOODSIZE;
	}
	return ret;
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner   = THIS_MODULE,
	.open	 = gfs2_sbstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_glocks_fops);
	if (!sdp->debugfs_dentry_glocks)
		goto fail;

	sdp->debugfs_dentry_glstats = debugfs_create_file("glstats",
							S_IFREG | S_IRUGO,
							sdp->debugfs_dir, sdp,
							&gfs2_glstats_fops);
	if (!sdp->debugfs_dentry_glstats)
		goto fail;

	sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats",
							S_IFREG | S_IRUGO,
							sdp->debugfs_dir, sdp,
							&gfs2_sbstats_fops);
	if (!sdp->debugfs_dentry_sbstats)
		goto fail;

	return 0;
fail:
	gfs2_delete_debugfs_file(sdp);
	return -ENOMEM;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		if (sdp->debugfs_dentry_glstats) {
			debugfs_remove(sdp->debugfs_dentry_glstats);
			sdp->debugfs_dentry_glstats = NULL;
		}
		if (sdp->debugfs_dentry_sbstats) {
			debugfs_remove(sdp->debugfs_dentry_sbstats);
			sdp->debugfs_dentry_sbstats = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}