/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"

struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

struct gfs2_glock_iter {
	int hash;			/* hash bucket index         */
	struct gfs2_sbd *sdp;		/* incore superblock         */
	struct gfs2_glock *gl;		/* current glock struct      */
	char string[512];		/* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)
static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif
/**
 * gl_hash() - Turn glock name into hash bucket number
 * @sdp: The superblock the glock belongs to
 * @name: The lock name (type and number)
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

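/*
 * Illustrative example (a sketch, not used by this file): mapping a lock
 * name to a hash bucket. The lm_lockname fields are those from incore.h;
 * the number below is made up.
 *
 *	struct lm_lockname name = {
 *		.ln_number = 12345,
 *		.ln_type = LM_TYPE_INODE,
 *	};
 *	unsigned int bucket = gl_hash(sdp, &name);
 *
 * The same hash value, masked down by gl_lock_addr(), also selects the
 * rwlock protecting that bucket's chain.
 */
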
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	if (aspace)
		gfs2_aspace_put(aspace);

	sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) {
		list_add_tail(&gl->gl_lru, &lru_list);
		atomic_inc(&lru_count);
	}
	spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_test(&gl->gl_ref)) {
		hlist_del(&gl->gl_list);
		write_unlock(gl_lock_addr(gl->gl_hash));
		spin_lock(&lru_lock);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	/* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */
	if (atomic_read(&gl->gl_ref) == 2)
		gfs2_glock_schedule_for_reclaim(gl);
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock name
 * @hash: the index of the bucket to search
 * @sdp: the filesystem
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested name
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * may_grant - check whether it's OK to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's OK to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

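/*
 * Worked example (illustrative): if the head of the queue is an EX
 * holder, any later request fails the first test above and must wait.
 * If the glock is already held SH and a new SH request arrives, the
 * gl_state == gh_state test grants it immediately. An LM_FLAG_ANY
 * request, subject to the head-of-queue rule, is satisfied by any
 * state other than unlocked.
 */
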
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, 2
 *          if a type-specific operation is underway, or 0 otherwise.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		break;
	}
	return 0;
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the lower layer (0, or a mask including LM_OUT_ERROR)
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		gfs2_holder_wake(gh);
	}
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		gfs2_glock_put(gl);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			if (rv == -EAGAIN)
				return;
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
	gfs2_glock_put(gl);
}

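/**
 * gfs2_lm_lock - pass a lock request down to the lock module
 * @sdp: The GFS2 superblock
 * @lock: The glock being requested (passed through to the lock module)
 * @req_state: The requested LM_ST_* state
 * @flags: The LM_FLAG_* modifier flags
 *
 * With no lock module (lock_nolock), the request trivially "succeeds":
 * the requested state is echoed straight back, so the glock is granted
 * locally without any cluster traffic.
 */
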
static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
				 unsigned int req_state,
				 unsigned int flags)
{
	int ret = LM_OUT_ERROR;

	if (!sdp->sd_lockstruct.ls_ops->lm_lock)
		return req_state == LM_ST_UNLOCKED ? 0 : req_state;

	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock,
							 req_state, flags);
	return ret;
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The glock
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	BUG_ON(gl->gl_state == target);
	BUG_ON(gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;
	ret = gfs2_lm_lock(sdp, gl, target, lck_flags);

	if (!(ret & LM_OUT_ASYNC)) {
		finish_xmote(gl, ret);
		gfs2_glock_hold(gl);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	} else {
		GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
	}
	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	goto out;
}

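/**
 * glock_work_func - work item handler for a glock
 * @work: The delayed work, embedded in the glock as gl_work
 *
 * Processes a pending lock-module reply (if any), turns an expired
 * pending demote into a real demote request, and then runs the queue.
 * Drops the reference taken when the work was queued, unless the work
 * successfully re-arms itself for a later demote.
 */
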
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
		finish_xmote(gl, gl->gl_reply);
	down_read(&gfs2_umount_flush_sem);
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;
		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	up_read(&gfs2_umount_flush_sem);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}

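/*
 * Typical usage (a sketch only; error handling is abbreviated and
 * "number" stands for a real disk address):
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	... enqueue holders against the glock ...
 *	gfs2_glock_put(gl);
 *
 * Note the race handling above: if two callers create the same glock
 * concurrently, the loser frees its copy and adopts the winner's.
 */
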
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: if nonzero, the demote is deferred rather than immediate
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

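/*
 * The canonical holder life cycle, as the higher layers use it (a
 * sketch; "ip" stands for some struct gfs2_inode, and
 * gfs2_glock_nq_init() is the init-plus-enqueue helper from glock.h):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... access the object protected by the glock ...
 *	gfs2_glock_dq_uninit(&gh);
 *
 * With GL_ASYNC set, gfs2_glock_nq() returns immediately and the caller
 * polls with gfs2_glock_poll() before calling gfs2_glock_wait().
 */
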
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		/* Bound the output to the scratch buffer and avoid treating
		   the formatted result as a format string itself. */
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_printf(seq, "%s", gi->string);
	} else {
		printk(KERN_ERR " ");
		vprintk(fmt, args);
	}
	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch space for sorting pointers to the holders
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

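/*
 * Example of taking two glocks at once (a sketch; gl1/gl2 stand for any
 * two glocks the caller already holds references on). nq_m_sync() sorts
 * the requests by lock number, so the acquisition order is deadlock free
 * no matter how the caller orders the array:
 *
 *	struct gfs2_holder ghs[2];
 *	int error;
 *
 *	gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl2, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error) {
 *		... both glocks held here ...
 *		gfs2_glock_dq_m(2, ghs);
 *	}
 *	gfs2_holder_uninit(&ghs[0]);
 *	gfs2_holder_uninit(&ghs[1]);
 */
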
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release and uninitialize multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

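/**
 * gfs2_glock_cb - Process a demote callback from the lock module
 * @gl: The glock
 * @state: The state the remote node wants us to drop to
 *
 * Queues the demote, but defers it until the minimum hold time has
 * elapsed, so that a busy glock is not bounced between nodes.
 */
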
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (time_before(now, holdtime))
		delay = holdtime - now;
	if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
	gl->gl_reply = ret;
	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
		struct gfs2_holder *gh;
		spin_lock(&gl->gl_spin);
		gh = find_first_waiter(gl);
		if ((!(gh && (gh->gh_flags & LM_FLAG_NOEXP)) &&
		     (gl->gl_target != LM_ST_UNLOCKED)) ||
		    ((ret & ~LM_OUT_ST_MASK) != 0))
			set_bit(GLF_FROZEN, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
		if (test_bit(GLF_FROZEN, &gl->gl_flags))
			return;
	}
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

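/**
 * gfs2_shrink_glock_memory - Shrinker callback for the glock LRU
 * @nr: Number of glocks the VM asks us to try to free (0 = just count)
 * @gfp_mask: The allocation context
 *
 * Walks the LRU list, demoting glocks that demote_ok() approves of, and
 * returns the usual shrinker estimate of remaining reclaimable objects,
 * scaled by sysctl_vfs_cache_pressure.
 */
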
1289 {
1290 	struct gfs2_glock *gl;
1291 	int may_demote;
1292 	int nr_skipped = 0;
1293 	int got_ref = 0;
1294 	LIST_HEAD(skipped);
1295 
1296 	if (nr == 0)
1297 		goto out;
1298 
1299 	if (!(gfp_mask & __GFP_FS))
1300 		return -1;
1301 
1302 	spin_lock(&lru_lock);
1303 	while(nr && !list_empty(&lru_list)) {
1304 		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1305 		list_del_init(&gl->gl_lru);
1306 		atomic_dec(&lru_count);
1307 
1308 		/* Test for being demotable */
1309 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1310 			gfs2_glock_hold(gl);
1311 			got_ref = 1;
1312 			spin_unlock(&lru_lock);
1313 			spin_lock(&gl->gl_spin);
1314 			may_demote = demote_ok(gl);
1315 			spin_unlock(&gl->gl_spin);
1316 			clear_bit(GLF_LOCK, &gl->gl_flags);
1317 			if (may_demote) {
1318 				handle_callback(gl, LM_ST_UNLOCKED, 0);
1319 				nr--;
1320 				if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1321 					gfs2_glock_put(gl);
1322 				got_ref = 0;
1323 			}
1324 			spin_lock(&lru_lock);
1325 			if (may_demote)
1326 				continue;
1327 		}
1328 		if (list_empty(&gl->gl_lru) &&
1329 		    (atomic_read(&gl->gl_ref) <= (2 + got_ref))) {
1330 			nr_skipped++;
1331 			list_add(&gl->gl_lru, &skipped);
1332 		}
1333 		if (got_ref) {
1334 			spin_unlock(&lru_lock);
1335 			gfs2_glock_put(gl);
1336 			spin_lock(&lru_lock);
1337 			got_ref = 0;
1338 		}
1339 	}
1340 	list_splice(&skipped, &lru_list);
1341 	atomic_add(nr_skipped, &lru_count);
1342 	spin_unlock(&lru_lock);
1343 out:
1344 	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
1345 }
1346 
1347 static struct shrinker glock_shrinker = {
1348 	.shrink = gfs2_shrink_glock_memory,
1349 	.seeks = DEFAULT_SEEKS,
1350 };
1351 
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the index of the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while(1) {
		if (!sdp || gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	cond_resched();
	return has_entries;
}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
	}
	spin_unlock(&lru_lock);

	spin_lock(&gl->gl_spin);
	if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(thaw_glock, sdp, x);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem; loops until every glock
 * belonging to @sdp has been demoted and freed.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;
		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
			if (examine_bucket(clear_glock, sdp, x))
				cont = 1;
		}

		if (!cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		down_write(&gfs2_umount_flush_sem);
		invalidate_inodes(sdp->sd_vfs);
		up_write(&gfs2_umount_flush_sem);
		msleep(10);
	}
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

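/*
 * Example: a holder line showing f:AW decodes as an LM_FLAG_ANY request
 * that is still waiting (HIF_WAIT), while f:H marks a granted holder.
 */
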
/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char buffer[KSYM_SYMBOL_LEN];
	char flags_buf[32];

	sprint_symbol(buffer, gh->gh_ip);
	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
		  state2str(gh->gh_state),
		  hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		  gh->gh_error,
		  gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		  gh_owner ? gh_owner->comm : "(ended)", buffer);
	return 0;
}

static const char *gflags2str(char *buf, const unsigned long *gflags)
{
	char *p = buf;
	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}

/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings, which
 * are enclosed in [] so that it's possible to see if they are composed
 * of spaces, for example. The fields are: n = number (id of the object),
 * f = flags, t = type, s = state, r = refcount, e = error, p = pid.
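 *
 * Example of a "G:" line as produced by the format string below (the
 * values are illustrative):
 *
 *   G:  s:SH n:2/12345 f:l t:SH d:EX/0 a:0 r:4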
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G:  s:%s n:%u/%llu f:%s t:%s d:%s/%llu a:%d r:%d\n",
		  state2str(gl->gl_state),
		  gl->gl_name.ln_type,
		  (unsigned long long)gl->gl_name.ln_number,
		  gflags2str(gflags_buf, &gl->gl_flags),
		  state2str(gl->gl_target),
		  state2str(gl->gl_demote_state), dtime,
		  atomic_read(&gl->gl_ail_count),
		  atomic_read(&gl->gl_ref));

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps every glock belonging to @sdp to the console.
 *
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

		read_lock(gl_lock_addr(x));

		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
			if (gl->gl_sbd != sdp)
				continue;

			error = dump_glock(NULL, gl);
			if (error)
				break;
		}

		read_unlock(gl_lock_addr(x));

		if (error)
			break;
	}

	return error;
}

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
	}
#ifdef GL_HASH_LOCK_SZ
	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
		rwlock_init(&gl_hash_locks[i]);
	}
#endif

	/* create_workqueue() returns NULL on failure, not an ERR_PTR */
	glock_workqueue = create_workqueue("glock_workqueue");
	if (!glock_workqueue)
		return -ENOMEM;

	register_shrinker(&glock_shrinker);

	return 0;
}

void gfs2_glock_exit(void)
{
	unregister_shrinker(&glock_shrinker);
	destroy_workqueue(glock_workqueue);
}

static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
	struct gfs2_glock *gl;

restart:
	read_lock(gl_lock_addr(gi->hash));
	gl = gi->gl;
	if (gl) {
		gi->gl = hlist_entry(gl->gl_list.next,
				     struct gfs2_glock, gl_list);
	} else {
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
	}
	if (gi->gl)
		gfs2_glock_hold(gi->gl);
	read_unlock(gl_lock_addr(gi->hash));
	if (gl)
		gfs2_glock_put(gl);
	while (gi->gl == NULL) {
		gi->hash++;
		if (gi->hash >= GFS2_GL_HASH_SIZE)
			return 1;
		read_lock(gl_lock_addr(gi->hash));
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
		if (gi->gl)
			gfs2_glock_hold(gi->gl);
		read_unlock(gl_lock_addr(gi->hash));
	}

	if (gi->sdp != gi->gl->gl_sbd)
		goto restart;

	return 0;
}

static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
{
	if (gi->gl)
		gfs2_glock_put(gi->gl);
	gi->gl = NULL;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	loff_t n = *pos;

	gi->hash = 0;

	do {
		if (gfs2_glock_iter_next(gi)) {
			gfs2_glock_iter_free(gi);
			return NULL;
		}
	} while (n--);

	return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;

	(*pos)++;

	if (gfs2_glock_iter_next(gi)) {
		gfs2_glock_iter_free(gi);
		return NULL;
	}

	return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock_iter *gi = seq->private;
	gfs2_glock_iter_free(gi);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
	return dump_glock(seq, iter_ptr);
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}