Lines Matching refs:gl
57 struct gfs2_glock *gl; /* current glock struct */ member
61 typedef void (*glock_examiner) (struct gfs2_glock * gl);
63 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
65 static void handle_callback(struct gfs2_glock *gl, unsigned int state,
121 static void wake_up_glock(struct gfs2_glock *gl) in wake_up_glock() argument
123 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); in wake_up_glock()
126 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); in wake_up_glock()
131 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); in gfs2_glock_dealloc() local
133 kfree(gl->gl_lksb.sb_lvbptr); in gfs2_glock_dealloc()
134 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_glock_dealloc()
136 container_of(gl, struct gfs2_glock_aspace, glock); in gfs2_glock_dealloc()
139 kmem_cache_free(gfs2_glock_cachep, gl); in gfs2_glock_dealloc()
155 static bool glock_blocked_by_withdraw(struct gfs2_glock *gl) in glock_blocked_by_withdraw() argument
157 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in glock_blocked_by_withdraw()
161 if (gl->gl_ops->go_flags & GLOF_NONDISK) in glock_blocked_by_withdraw()
164 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr) in glock_blocked_by_withdraw()
169 static void __gfs2_glock_free(struct gfs2_glock *gl) in __gfs2_glock_free() argument
171 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); in __gfs2_glock_free()
173 wake_up_glock(gl); in __gfs2_glock_free()
174 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); in __gfs2_glock_free()
177 void gfs2_glock_free(struct gfs2_glock *gl) { in gfs2_glock_free() argument
178 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_free()
180 __gfs2_glock_free(gl); in gfs2_glock_free()
185 void gfs2_glock_free_later(struct gfs2_glock *gl) { in gfs2_glock_free_later() argument
186 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_free_later()
189 list_add(&gl->gl_lru, &sdp->sd_dead_glocks); in gfs2_glock_free_later()
200 struct gfs2_glock *gl; in gfs2_free_dead_glocks() local
202 gl = list_first_entry(list, struct gfs2_glock, gl_lru); in gfs2_free_dead_glocks()
203 list_del_init(&gl->gl_lru); in gfs2_free_dead_glocks()
204 __gfs2_glock_free(gl); in gfs2_free_dead_glocks()
214 struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl) in gfs2_glock_hold() argument
216 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in gfs2_glock_hold()
217 lockref_get(&gl->gl_lockref); in gfs2_glock_hold()
218 return gl; in gfs2_glock_hold()
228 static int demote_ok(const struct gfs2_glock *gl) in demote_ok() argument
230 const struct gfs2_glock_operations *glops = gl->gl_ops; in demote_ok()
232 if (gl->gl_state == LM_ST_UNLOCKED) in demote_ok()
234 if (!list_empty(&gl->gl_holders)) in demote_ok()
237 return glops->go_demote_ok(gl); in demote_ok()
242 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) in gfs2_glock_add_to_lru() argument
244 if (!(gl->gl_ops->go_flags & GLOF_LRU)) in gfs2_glock_add_to_lru()
249 list_move_tail(&gl->gl_lru, &lru_list); in gfs2_glock_add_to_lru()
251 if (!test_bit(GLF_LRU, &gl->gl_flags)) { in gfs2_glock_add_to_lru()
252 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_add_to_lru()
259 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) in gfs2_glock_remove_from_lru() argument
261 if (!(gl->gl_ops->go_flags & GLOF_LRU)) in gfs2_glock_remove_from_lru()
265 if (test_bit(GLF_LRU, &gl->gl_flags)) { in gfs2_glock_remove_from_lru()
266 list_del_init(&gl->gl_lru); in gfs2_glock_remove_from_lru()
268 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_glock_remove_from_lru()
277 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { in gfs2_glock_queue_work() argument
278 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) { in gfs2_glock_queue_work()
285 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2); in gfs2_glock_queue_work()
286 gl->gl_lockref.count--; in gfs2_glock_queue_work()
290 static void __gfs2_glock_put(struct gfs2_glock *gl) in __gfs2_glock_put() argument
292 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in __gfs2_glock_put()
293 struct address_space *mapping = gfs2_glock2aspace(gl); in __gfs2_glock_put()
295 lockref_mark_dead(&gl->gl_lockref); in __gfs2_glock_put()
296 spin_unlock(&gl->gl_lockref.lock); in __gfs2_glock_put()
297 gfs2_glock_remove_from_lru(gl); in __gfs2_glock_put()
298 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); in __gfs2_glock_put()
302 GLOCK_BUG_ON(gl, !mapping_empty(mapping)); in __gfs2_glock_put()
304 trace_gfs2_glock_put(gl); in __gfs2_glock_put()
305 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); in __gfs2_glock_put()
314 void gfs2_glock_put(struct gfs2_glock *gl) in gfs2_glock_put() argument
316 if (lockref_put_or_lock(&gl->gl_lockref)) in gfs2_glock_put()
319 __gfs2_glock_put(gl); in gfs2_glock_put()
329 void gfs2_glock_put_async(struct gfs2_glock *gl) in gfs2_glock_put_async() argument
331 if (lockref_put_or_lock(&gl->gl_lockref)) in gfs2_glock_put_async()
334 GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1); in gfs2_glock_put_async()
335 gfs2_glock_queue_work(gl, 0); in gfs2_glock_put_async()
336 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_put_async()
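Aside (illustrative, not part of the listing): the hold/put entries above pair up as a plain reference-count discipline, with gfs2_glock_put_async() deferring the final teardown to the glock workqueue (see the gfs2_glock_queue_work() call at line 335). A minimal hypothetical sketch of that pairing, assuming the usual fs/gfs2 "glock.h" declarations:

/* Hypothetical helper; only shows the assumed hold/put pairing. */
static void example_pin_glock(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);	/* take an extra reference on gl->gl_lockref */
	/* ... use the glock while the extra reference is held ... */
	gfs2_glock_put(gl);	/* drop it; the final put tears the glock down */
}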
353 static inline bool may_grant(struct gfs2_glock *gl, in may_grant() argument
358 GLOCK_BUG_ON(gl, !test_bit(HIF_HOLDER, &current_gh->gh_iflags)); in may_grant()
382 if (gl->gl_state == gh->gh_state) in may_grant()
386 if (gl->gl_state == LM_ST_EXCLUSIVE) { in may_grant()
391 return gl->gl_state != LM_ST_UNLOCKED; in may_grant()
413 static void do_error(struct gfs2_glock *gl, const int ret) in do_error() argument
417 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { in do_error()
437 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) in find_first_holder() argument
441 if (!list_empty(&gl->gl_holders)) { in find_first_holder()
442 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, in find_first_holder()
458 struct gfs2_glock *gl = gh->gh_gl; in gfs2_instantiate() local
459 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_instantiate()
463 if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags)) in gfs2_instantiate()
470 if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) { in gfs2_instantiate()
471 wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG, in gfs2_instantiate()
483 ret = glops->go_instantiate(gl); in gfs2_instantiate()
485 clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags); in gfs2_instantiate()
486 clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags); in gfs2_instantiate()
503 static bool do_promote(struct gfs2_glock *gl) in do_promote() argument
507 current_gh = find_first_holder(gl); in do_promote()
508 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in do_promote()
511 if (!may_grant(gl, current_gh, gh)) { in do_promote()
518 if (list_is_first(&gh->gh_list, &gl->gl_holders)) in do_promote()
520 do_error(gl, 0); in do_promote()
537 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) in find_first_waiter() argument
541 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in find_first_waiter()
554 static void state_change(struct gfs2_glock *gl, unsigned int new_state) in state_change() argument
558 held1 = (gl->gl_state != LM_ST_UNLOCKED); in state_change()
562 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); in state_change()
564 gl->gl_lockref.count++; in state_change()
566 gl->gl_lockref.count--; in state_change()
568 if (new_state != gl->gl_target) in state_change()
570 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, in state_change()
572 gl->gl_state = new_state; in state_change()
573 gl->gl_tchange = jiffies; in state_change()
576 static void gfs2_set_demote(struct gfs2_glock *gl) in gfs2_set_demote() argument
578 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_set_demote()
580 set_bit(GLF_DEMOTE, &gl->gl_flags); in gfs2_set_demote()
585 static void gfs2_demote_wake(struct gfs2_glock *gl) in gfs2_demote_wake() argument
587 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_demote_wake()
588 clear_bit(GLF_DEMOTE, &gl->gl_flags); in gfs2_demote_wake()
590 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); in gfs2_demote_wake()
600 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) in finish_xmote() argument
602 const struct gfs2_glock_operations *glops = gl->gl_ops; in finish_xmote()
606 trace_gfs2_glock_state_change(gl, state); in finish_xmote()
607 state_change(gl, state); in finish_xmote()
608 gh = find_first_waiter(gl); in finish_xmote()
611 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && in finish_xmote()
612 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) in finish_xmote()
613 gl->gl_target = LM_ST_UNLOCKED; in finish_xmote()
616 if (unlikely(state != gl->gl_target)) { in finish_xmote()
619 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { in finish_xmote()
622 list_move_tail(&gh->gh_list, &gl->gl_holders); in finish_xmote()
623 gh = find_first_waiter(gl); in finish_xmote()
624 gl->gl_target = gh->gh_state; in finish_xmote()
625 if (do_promote(gl)) in finish_xmote()
632 gl->gl_target = gl->gl_state; in finish_xmote()
633 do_error(gl, ret); in finish_xmote()
641 do_xmote(gl, gh, gl->gl_target); in finish_xmote()
646 do_xmote(gl, gh, LM_ST_UNLOCKED); in finish_xmote()
649 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", in finish_xmote()
650 gl->gl_target, state); in finish_xmote()
651 GLOCK_BUG_ON(gl, 1); in finish_xmote()
657 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) in finish_xmote()
658 gfs2_demote_wake(gl); in finish_xmote()
663 spin_unlock(&gl->gl_lockref.lock); in finish_xmote()
664 rv = glops->go_xmote_bh(gl); in finish_xmote()
665 spin_lock(&gl->gl_lockref.lock); in finish_xmote()
667 do_error(gl, rv); in finish_xmote()
671 do_promote(gl); in finish_xmote()
674 clear_bit(GLF_LOCK, &gl->gl_flags); in finish_xmote()
677 static bool is_system_glock(struct gfs2_glock *gl) in is_system_glock() argument
679 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in is_system_glock()
682 if (gl == m_ip->i_gl) in is_system_glock()
695 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, in do_xmote() argument
697 __releases(&gl->gl_lockref.lock) in do_xmote()
698 __acquires(&gl->gl_lockref.lock) in do_xmote()
700 const struct gfs2_glock_operations *glops = gl->gl_ops; in do_xmote()
701 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in do_xmote()
706 if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) && in do_xmote()
711 GLOCK_BUG_ON(gl, gl->gl_state == target); in do_xmote()
712 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); in do_xmote()
721 &gl->gl_flags)) in do_xmote()
723 do_error(gl, 0); /* Fail queued try locks */ in do_xmote()
725 gl->gl_req = target; in do_xmote()
726 set_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
727 if ((gl->gl_req == LM_ST_UNLOCKED) || in do_xmote()
728 (gl->gl_state == LM_ST_EXCLUSIVE) || in do_xmote()
730 clear_bit(GLF_BLOCKING, &gl->gl_flags); in do_xmote()
734 spin_unlock(&gl->gl_lockref.lock); in do_xmote()
736 ret = glops->go_sync(gl); in do_xmote()
744 gfs2_dump_glock(NULL, gl, true); in do_xmote()
746 spin_lock(&gl->gl_lockref.lock); in do_xmote()
750 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) { in do_xmote()
758 if ((atomic_read(&gl->gl_ail_count) != 0) && in do_xmote()
760 gfs2_glock_assert_warn(gl, in do_xmote()
761 !atomic_read(&gl->gl_ail_count)); in do_xmote()
762 gfs2_dump_glock(NULL, gl, true); in do_xmote()
764 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); in do_xmote()
765 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
767 spin_lock(&gl->gl_lockref.lock); in do_xmote()
770 gl->gl_lockref.count++; in do_xmote()
794 if (glock_blocked_by_withdraw(gl) && in do_xmote()
797 if (!is_system_glock(gl)) { in do_xmote()
798 handle_callback(gl, LM_ST_UNLOCKED, 0, false); /* sets demote */ in do_xmote()
805 state_change(gl, LM_ST_UNLOCKED); in do_xmote()
810 clear_bit(GLF_LOCK, &gl->gl_flags); in do_xmote()
811 clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
812 gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); in do_xmote()
815 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); in do_xmote()
820 spin_unlock(&gl->gl_lockref.lock); in do_xmote()
821 ret = ls->ls_ops->lm_lock(gl, target, lck_flags); in do_xmote()
822 spin_lock(&gl->gl_lockref.lock); in do_xmote()
824 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && in do_xmote()
833 target = gl->gl_state | LM_OUT_ERROR; in do_xmote()
841 finish_xmote(gl, target); in do_xmote()
842 gfs2_glock_queue_work(gl, 0); in do_xmote()
852 static void run_queue(struct gfs2_glock *gl, const int nonblock) in run_queue() argument
853 __releases(&gl->gl_lockref.lock) in run_queue()
854 __acquires(&gl->gl_lockref.lock) in run_queue()
858 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) in run_queue()
861 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); in run_queue()
863 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && in run_queue()
864 gl->gl_demote_state != gl->gl_state) { in run_queue()
865 if (find_first_holder(gl)) in run_queue()
869 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); in run_queue()
870 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); in run_queue()
871 gl->gl_target = gl->gl_demote_state; in run_queue()
873 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) in run_queue()
874 gfs2_demote_wake(gl); in run_queue()
875 if (do_promote(gl)) in run_queue()
877 gh = find_first_waiter(gl); in run_queue()
878 gl->gl_target = gh->gh_state; in run_queue()
880 do_error(gl, 0); /* Fail queued try locks */ in run_queue()
882 do_xmote(gl, gh, gl->gl_target); in run_queue()
886 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
888 gl->gl_lockref.count++; in run_queue()
889 gfs2_glock_queue_work(gl, 0); in run_queue()
893 clear_bit(GLF_LOCK, &gl->gl_flags); in run_queue()
903 void glock_set_object(struct gfs2_glock *gl, void *object) in glock_set_object() argument
907 spin_lock(&gl->gl_lockref.lock); in glock_set_object()
908 prev_object = gl->gl_object; in glock_set_object()
909 gl->gl_object = object; in glock_set_object()
910 spin_unlock(&gl->gl_lockref.lock); in glock_set_object()
911 if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) { in glock_set_object()
913 gl->gl_name.ln_type, in glock_set_object()
914 (unsigned long long)gl->gl_name.ln_number); in glock_set_object()
915 gfs2_dump_glock(NULL, gl, true); in glock_set_object()
924 void glock_clear_object(struct gfs2_glock *gl, void *object) in glock_clear_object() argument
928 spin_lock(&gl->gl_lockref.lock); in glock_clear_object()
929 prev_object = gl->gl_object; in glock_clear_object()
930 gl->gl_object = NULL; in glock_clear_object()
931 spin_unlock(&gl->gl_lockref.lock); in glock_clear_object()
932 if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) { in glock_clear_object()
934 gl->gl_name.ln_type, in glock_clear_object()
935 (unsigned long long)gl->gl_name.ln_number); in glock_clear_object()
936 gfs2_dump_glock(NULL, gl, true); in glock_clear_object()
940 void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation) in gfs2_inode_remember_delete() argument
942 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; in gfs2_inode_remember_delete()
950 bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation) in gfs2_inode_already_deleted() argument
952 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; in gfs2_inode_already_deleted()
959 static void gfs2_glock_poke(struct gfs2_glock *gl) in gfs2_glock_poke() argument
965 __gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh, _RET_IP_); in gfs2_glock_poke()
972 static bool gfs2_try_evict(struct gfs2_glock *gl) in gfs2_try_evict() argument
987 spin_lock(&gl->gl_lockref.lock); in gfs2_try_evict()
988 ip = gl->gl_object; in gfs2_try_evict()
991 spin_unlock(&gl->gl_lockref.lock); in gfs2_try_evict()
993 gl->gl_no_formal_ino = ip->i_no_formal_ino; in gfs2_try_evict()
999 spin_lock(&gl->gl_lockref.lock); in gfs2_try_evict()
1000 ip = gl->gl_object; in gfs2_try_evict()
1006 spin_unlock(&gl->gl_lockref.lock); in gfs2_try_evict()
1016 bool gfs2_queue_try_to_evict(struct gfs2_glock *gl) in gfs2_queue_try_to_evict() argument
1018 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_queue_try_to_evict()
1020 if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) in gfs2_queue_try_to_evict()
1023 &gl->gl_delete, 0); in gfs2_queue_try_to_evict()
1026 bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later) in gfs2_queue_verify_delete() argument
1028 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_queue_verify_delete()
1031 if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags)) in gfs2_queue_verify_delete()
1034 return queue_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, delay); in gfs2_queue_verify_delete()
1040 struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete); in delete_work_func() local
1041 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in delete_work_func()
1043 u64 no_addr = gl->gl_name.ln_number; in delete_work_func()
1045 if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) { in delete_work_func()
1063 if (gfs2_try_evict(gl)) { in delete_work_func()
1066 if (gfs2_queue_verify_delete(gl, true)) in delete_work_func()
1072 if (test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags)) { in delete_work_func()
1073 inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino, in delete_work_func()
1078 gfs2_queue_verify_delete(gl, true)) in delete_work_func()
1087 gfs2_glock_put(gl); in delete_work_func()
1093 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); in glock_work_func() local
1096 spin_lock(&gl->gl_lockref.lock); in glock_work_func()
1097 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { in glock_work_func()
1098 clear_bit(GLF_REPLY_PENDING, &gl->gl_flags); in glock_work_func()
1099 finish_xmote(gl, gl->gl_reply); in glock_work_func()
1102 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in glock_work_func()
1103 gl->gl_state != LM_ST_UNLOCKED && in glock_work_func()
1104 gl->gl_demote_state != LM_ST_EXCLUSIVE) { in glock_work_func()
1107 holdtime = gl->gl_tchange + gl->gl_hold_time; in glock_work_func()
1112 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in glock_work_func()
1113 gfs2_set_demote(gl); in glock_work_func()
1116 run_queue(gl, 0); in glock_work_func()
1120 if (gl->gl_name.ln_type != LM_TYPE_INODE) in glock_work_func()
1122 gfs2_glock_queue_work(gl, delay); in glock_work_func()
1130 gl->gl_lockref.count -= drop_refs; in glock_work_func()
1131 if (!gl->gl_lockref.count) { in glock_work_func()
1132 __gfs2_glock_put(gl); in glock_work_func()
1135 spin_unlock(&gl->gl_lockref.lock); in glock_work_func()
1143 struct gfs2_glock *gl; in find_insert_glock() local
1153 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table, in find_insert_glock()
1155 if (IS_ERR(gl)) in find_insert_glock()
1158 gl = rhashtable_lookup_fast(&gl_hash_table, in find_insert_glock()
1161 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) { in find_insert_glock()
1169 return gl; in find_insert_glock()
1193 struct gfs2_glock *gl, *tmp; in gfs2_glock_get() local
1197 gl = find_insert_glock(&name, NULL); in gfs2_glock_get()
1198 if (gl) { in gfs2_glock_get()
1199 *glp = gl; in gfs2_glock_get()
1210 gl = &gla->glock; in gfs2_glock_get()
1212 gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS); in gfs2_glock_get()
1213 if (!gl) in gfs2_glock_get()
1216 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); in gfs2_glock_get()
1217 gl->gl_ops = glops; in gfs2_glock_get()
1220 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); in gfs2_glock_get()
1221 if (!gl->gl_lksb.sb_lvbptr) { in gfs2_glock_get()
1222 gfs2_glock_dealloc(&gl->gl_rcu); in gfs2_glock_get()
1228 gl->gl_node.next = NULL; in gfs2_glock_get()
1229 gl->gl_flags = glops->go_instantiate ? BIT(GLF_INSTANTIATE_NEEDED) : 0; in gfs2_glock_get()
1230 gl->gl_name = name; in gfs2_glock_get()
1231 lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass); in gfs2_glock_get()
1232 gl->gl_lockref.count = 1; in gfs2_glock_get()
1233 gl->gl_state = LM_ST_UNLOCKED; in gfs2_glock_get()
1234 gl->gl_target = LM_ST_UNLOCKED; in gfs2_glock_get()
1235 gl->gl_demote_state = LM_ST_EXCLUSIVE; in gfs2_glock_get()
1236 gl->gl_dstamp = 0; in gfs2_glock_get()
1239 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; in gfs2_glock_get()
1241 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; in gfs2_glock_get()
1242 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; in gfs2_glock_get()
1243 gl->gl_tchange = jiffies; in gfs2_glock_get()
1244 gl->gl_object = NULL; in gfs2_glock_get()
1245 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; in gfs2_glock_get()
1246 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); in gfs2_glock_get()
1247 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) in gfs2_glock_get()
1248 INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func); in gfs2_glock_get()
1250 mapping = gfs2_glock2aspace(gl); in gfs2_glock_get()
1260 tmp = find_insert_glock(&name, gl); in gfs2_glock_get()
1262 *glp = gl; in gfs2_glock_get()
1272 gfs2_glock_dealloc(&gl->gl_rcu); in gfs2_glock_get()
1289 void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, in __gfs2_holder_init() argument
1293 gh->gh_gl = gfs2_glock_hold(gl); in __gfs2_holder_init()
1335 static void gfs2_glock_update_hold_time(struct gfs2_glock *gl, in gfs2_glock_update_hold_time() argument
1341 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR, in gfs2_glock_update_hold_time()
1457 static void handle_callback(struct gfs2_glock *gl, unsigned int state, in handle_callback() argument
1461 set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); in handle_callback()
1463 gfs2_set_demote(gl); in handle_callback()
1464 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { in handle_callback()
1465 gl->gl_demote_state = state; in handle_callback()
1466 gl->gl_demote_time = jiffies; in handle_callback()
1467 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && in handle_callback()
1468 gl->gl_demote_state != state) { in handle_callback()
1469 gl->gl_demote_state = LM_ST_UNLOCKED; in handle_callback()
1471 if (gl->gl_ops->go_callback) in handle_callback()
1472 gl->gl_ops->go_callback(gl, remote); in handle_callback()
1473 trace_gfs2_demote_rq(gl, remote); in handle_callback()
1515 __releases(&gl->gl_lockref.lock) in add_to_queue()
1516 __acquires(&gl->gl_lockref.lock) in add_to_queue()
1518 struct gfs2_glock *gl = gh->gh_gl; in add_to_queue() local
1519 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in add_to_queue()
1524 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); in add_to_queue()
1526 GLOCK_BUG_ON(gl, true); in add_to_queue()
1529 if (test_bit(GLF_LOCK, &gl->gl_flags)) { in add_to_queue()
1532 current_gh = find_first_holder(gl); in add_to_queue()
1533 try_futile = !may_grant(gl, current_gh, gh); in add_to_queue()
1535 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) in add_to_queue()
1539 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { in add_to_queue()
1548 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { in add_to_queue()
1560 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
1561 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); in add_to_queue()
1563 list_add_tail(&gh->gh_list, &gl->gl_holders); in add_to_queue()
1567 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); in add_to_queue()
1568 spin_unlock(&gl->gl_lockref.lock); in add_to_queue()
1570 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); in add_to_queue()
1571 spin_lock(&gl->gl_lockref.lock); in add_to_queue()
1583 gfs2_dump_glock(NULL, gl, true); in add_to_queue()
1598 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_nq() local
1601 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) in gfs2_glock_nq()
1604 if (test_bit(GLF_LRU, &gl->gl_flags)) in gfs2_glock_nq()
1605 gfs2_glock_remove_from_lru(gl); in gfs2_glock_nq()
1608 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1611 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { in gfs2_glock_nq()
1612 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_nq()
1613 gl->gl_lockref.count++; in gfs2_glock_nq()
1614 gfs2_glock_queue_work(gl, 0); in gfs2_glock_nq()
1616 run_queue(gl, 1); in gfs2_glock_nq()
1617 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_nq()
1637 static inline bool needs_demote(struct gfs2_glock *gl) in needs_demote() argument
1639 return (test_bit(GLF_DEMOTE, &gl->gl_flags) || in needs_demote()
1640 test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)); in needs_demote()
1645 struct gfs2_glock *gl = gh->gh_gl; in __gfs2_glock_dq() local
1655 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in __gfs2_glock_dq()
1665 if (!needs_demote(gl)) { in __gfs2_glock_dq()
1666 if (list_empty(&gl->gl_holders)) in __gfs2_glock_dq()
1670 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) in __gfs2_glock_dq()
1671 gfs2_glock_add_to_lru(gl); in __gfs2_glock_dq()
1674 gl->gl_lockref.count++; in __gfs2_glock_dq()
1675 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && in __gfs2_glock_dq()
1676 !test_bit(GLF_DEMOTE, &gl->gl_flags) && in __gfs2_glock_dq()
1677 gl->gl_name.ln_type == LM_TYPE_INODE) in __gfs2_glock_dq()
1678 delay = gl->gl_hold_time; in __gfs2_glock_dq()
1679 gfs2_glock_queue_work(gl, delay); in __gfs2_glock_dq()
1690 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq() local
1691 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_glock_dq()
1693 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1702 if (list_is_first(&gh->gh_list, &gl->gl_holders) && in gfs2_glock_dq()
1704 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1705 gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl); in gfs2_glock_dq()
1707 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1718 glock_blocked_by_withdraw(gl) && in gfs2_glock_dq()
1721 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1725 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1730 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_dq()
1735 struct gfs2_glock *gl = gh->gh_gl; in gfs2_glock_dq_wait() local
1738 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); in gfs2_glock_dq_wait()
1769 struct gfs2_glock *gl; in gfs2_glock_nq_num() local
1772 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); in gfs2_glock_nq_num()
1774 error = gfs2_glock_nq_init(gl, state, flags, gh); in gfs2_glock_nq_num()
1775 gfs2_glock_put(gl); in gfs2_glock_nq_num()
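Aside (illustrative, not part of the listing): the gfs2_glock_nq_num() lines above show the usual caller-side lifecycle: look the glock up, queue a holder on it, drop the lookup reference (the holder keeps its own, per __gfs2_holder_init() at line 1293), and later dequeue. An open-coded sketch of the same pattern follows; gfs2_glock_dq_uninit() is assumed here as the matching release helper and is not itself part of this listing.

/* Illustrative only; assumes the fs/gfs2 headers and a valid superblock. */
static int example_lock_and_unlock(struct gfs2_sbd *sdp, u64 number,
				   const struct gfs2_glock_operations *glops)
{
	struct gfs2_glock *gl;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (error)
		return error;
	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);	/* holder init + nq */
	gfs2_glock_put(gl);	/* the holder holds its own reference to gl */
	if (error)
		return error;
	/* ... data protected by this glock may be accessed here ... */
	gfs2_glock_dq_uninit(&gh);	/* dq and drop the holder's reference */
	return 0;
}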
1886 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) in gfs2_glock_cb() argument
1892 gfs2_glock_hold(gl); in gfs2_glock_cb()
1893 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1894 holdtime = gl->gl_tchange + gl->gl_hold_time; in gfs2_glock_cb()
1895 if (!list_empty(&gl->gl_holders) && in gfs2_glock_cb()
1896 gl->gl_name.ln_type == LM_TYPE_INODE) { in gfs2_glock_cb()
1899 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) in gfs2_glock_cb()
1900 delay = gl->gl_hold_time; in gfs2_glock_cb()
1902 handle_callback(gl, state, delay, true); in gfs2_glock_cb()
1903 gfs2_glock_queue_work(gl, delay); in gfs2_glock_cb()
1904 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_cb()
1918 static int gfs2_should_freeze(const struct gfs2_glock *gl) in gfs2_should_freeze() argument
1922 if (gl->gl_reply & ~LM_OUT_ST_MASK) in gfs2_should_freeze()
1924 if (gl->gl_target == LM_ST_UNLOCKED) in gfs2_should_freeze()
1927 list_for_each_entry(gh, &gl->gl_holders, gh_list) { in gfs2_should_freeze()
1946 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) in gfs2_glock_complete() argument
1948 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; in gfs2_glock_complete()
1950 spin_lock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1951 gl->gl_reply = ret; in gfs2_glock_complete()
1954 if (gfs2_should_freeze(gl)) { in gfs2_glock_complete()
1955 set_bit(GLF_FROZEN, &gl->gl_flags); in gfs2_glock_complete()
1956 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
1961 gl->gl_lockref.count++; in gfs2_glock_complete()
1962 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in gfs2_glock_complete()
1963 gfs2_glock_queue_work(gl, 0); in gfs2_glock_complete()
1964 spin_unlock(&gl->gl_lockref.lock); in gfs2_glock_complete()
2001 struct gfs2_glock *gl; in gfs2_dispose_glock_lru() local
2006 gl = list_first_entry(list, struct gfs2_glock, gl_lru); in gfs2_dispose_glock_lru()
2007 list_del_init(&gl->gl_lru); in gfs2_dispose_glock_lru()
2008 clear_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
2009 if (!spin_trylock(&gl->gl_lockref.lock)) { in gfs2_dispose_glock_lru()
2011 list_add(&gl->gl_lru, &lru_list); in gfs2_dispose_glock_lru()
2012 set_bit(GLF_LRU, &gl->gl_flags); in gfs2_dispose_glock_lru()
2016 if (test_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_dispose_glock_lru()
2017 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
2020 gl->gl_lockref.count++; in gfs2_dispose_glock_lru()
2021 if (demote_ok(gl)) in gfs2_dispose_glock_lru()
2022 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in gfs2_dispose_glock_lru()
2023 gfs2_glock_queue_work(gl, 0); in gfs2_dispose_glock_lru()
2024 spin_unlock(&gl->gl_lockref.lock); in gfs2_dispose_glock_lru()
2040 struct gfs2_glock *gl, *next; in gfs2_scan_glock_lru() local
2045 list_for_each_entry_safe(gl, next, &lru_list, gl_lru) { in gfs2_scan_glock_lru()
2049 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { in gfs2_scan_glock_lru()
2050 if (!spin_trylock(&gl->gl_lockref.lock)) in gfs2_scan_glock_lru()
2052 if (gl->gl_lockref.count <= 1 && in gfs2_scan_glock_lru()
2053 (gl->gl_state == LM_ST_UNLOCKED || in gfs2_scan_glock_lru()
2054 demote_ok(gl))) { in gfs2_scan_glock_lru()
2055 list_move(&gl->gl_lru, &dispose); in gfs2_scan_glock_lru()
2059 spin_unlock(&gl->gl_lockref.lock); in gfs2_scan_glock_lru()
2101 struct gfs2_glock *gl; in glock_hash_walk() local
2109 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) { in glock_hash_walk()
2110 if (gl->gl_name.ln_sbd == sdp) in glock_hash_walk()
2111 examiner(gl); in glock_hash_walk()
2115 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); in glock_hash_walk()
2120 void gfs2_cancel_delete_work(struct gfs2_glock *gl) in gfs2_cancel_delete_work() argument
2122 clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags); in gfs2_cancel_delete_work()
2123 clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags); in gfs2_cancel_delete_work()
2124 if (cancel_delayed_work(&gl->gl_delete)) in gfs2_cancel_delete_work()
2125 gfs2_glock_put(gl); in gfs2_cancel_delete_work()
2128 static void flush_delete_work(struct gfs2_glock *gl) in flush_delete_work() argument
2130 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) { in flush_delete_work()
2131 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in flush_delete_work()
2133 if (cancel_delayed_work(&gl->gl_delete)) { in flush_delete_work()
2135 &gl->gl_delete, 0); in flush_delete_work()
2152 static void thaw_glock(struct gfs2_glock *gl) in thaw_glock() argument
2154 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) in thaw_glock()
2156 if (!lockref_get_not_dead(&gl->gl_lockref)) in thaw_glock()
2159 spin_lock(&gl->gl_lockref.lock); in thaw_glock()
2160 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); in thaw_glock()
2161 gfs2_glock_queue_work(gl, 0); in thaw_glock()
2162 spin_unlock(&gl->gl_lockref.lock); in thaw_glock()
2171 static void clear_glock(struct gfs2_glock *gl) in clear_glock() argument
2173 gfs2_glock_remove_from_lru(gl); in clear_glock()
2175 spin_lock(&gl->gl_lockref.lock); in clear_glock()
2176 if (!__lockref_is_dead(&gl->gl_lockref)) { in clear_glock()
2177 gl->gl_lockref.count++; in clear_glock()
2178 if (gl->gl_state != LM_ST_UNLOCKED) in clear_glock()
2179 handle_callback(gl, LM_ST_UNLOCKED, 0, false); in clear_glock()
2180 gfs2_glock_queue_work(gl, 0); in clear_glock()
2182 spin_unlock(&gl->gl_lockref.lock); in clear_glock()
2196 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) in dump_glock() argument
2198 spin_lock(&gl->gl_lockref.lock); in dump_glock()
2199 gfs2_dump_glock(seq, gl, fsid); in dump_glock()
2200 spin_unlock(&gl->gl_lockref.lock); in dump_glock()
2203 static void dump_glock_func(struct gfs2_glock *gl) in dump_glock_func() argument
2205 dump_glock(NULL, gl, true); in dump_glock_func()
2208 static void withdraw_dq(struct gfs2_glock *gl) in withdraw_dq() argument
2210 spin_lock(&gl->gl_lockref.lock); in withdraw_dq()
2211 if (!__lockref_is_dead(&gl->gl_lockref) && in withdraw_dq()
2212 glock_blocked_by_withdraw(gl)) in withdraw_dq()
2213 do_error(gl, LM_OUT_ERROR); /* remove pending waiters */ in withdraw_dq()
2214 spin_unlock(&gl->gl_lockref.lock); in withdraw_dq()
2319 static const char *gflags2str(char *buf, const struct gfs2_glock *gl) in gflags2str() argument
2321 const unsigned long *gflags = &gl->gl_flags; in gflags2str()
2344 if (!list_empty(&gl->gl_holders)) in gflags2str()
2348 if (gl->gl_object) in gflags2str()
2384 void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) in gfs2_dump_glock() argument
2386 const struct gfs2_glock_operations *glops = gl->gl_ops; in gfs2_dump_glock()
2390 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_dump_glock()
2394 if (gl->gl_ops->go_flags & GLOF_ASPACE) { in gfs2_dump_glock()
2395 struct address_space *mapping = gfs2_glock2aspace(gl); in gfs2_dump_glock()
2402 dtime = jiffies - gl->gl_demote_time; in gfs2_dump_glock()
2404 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) in gfs2_dump_glock()
2408 fs_id_buf, state2str(gl->gl_state), in gfs2_dump_glock()
2409 gl->gl_name.ln_type, in gfs2_dump_glock()
2410 (unsigned long long)gl->gl_name.ln_number, in gfs2_dump_glock()
2411 gflags2str(gflags_buf, gl), in gfs2_dump_glock()
2412 state2str(gl->gl_target), in gfs2_dump_glock()
2413 state2str(gl->gl_demote_state), dtime, in gfs2_dump_glock()
2414 atomic_read(&gl->gl_ail_count), in gfs2_dump_glock()
2415 atomic_read(&gl->gl_revokes), in gfs2_dump_glock()
2416 (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages); in gfs2_dump_glock()
2418 list_for_each_entry(gh, &gl->gl_holders, gh_list) in gfs2_dump_glock()
2421 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) in gfs2_dump_glock()
2422 glops->go_dump(seq, gl, fs_id_buf); in gfs2_dump_glock()
2427 struct gfs2_glock *gl = iter_ptr; in gfs2_glstats_seq_show() local
2430 gl->gl_name.ln_type, in gfs2_glstats_seq_show()
2431 (unsigned long long)gl->gl_name.ln_number, in gfs2_glstats_seq_show()
2432 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT], in gfs2_glstats_seq_show()
2433 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], in gfs2_glstats_seq_show()
2434 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], in gfs2_glstats_seq_show()
2435 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], in gfs2_glstats_seq_show()
2436 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT], in gfs2_glstats_seq_show()
2437 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], in gfs2_glstats_seq_show()
2438 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], in gfs2_glstats_seq_show()
2439 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); in gfs2_glstats_seq_show()
2534 struct gfs2_glock *gl = gi->gl; in gfs2_glock_iter_next() local
2536 if (gl) { in gfs2_glock_iter_next()
2539 gfs2_glock_put_async(gl); in gfs2_glock_iter_next()
2542 gl = rhashtable_walk_next(&gi->hti); in gfs2_glock_iter_next()
2543 if (IS_ERR_OR_NULL(gl)) { in gfs2_glock_iter_next()
2544 if (gl == ERR_PTR(-EAGAIN)) { in gfs2_glock_iter_next()
2548 gl = NULL; in gfs2_glock_iter_next()
2551 if (gl->gl_name.ln_sbd != gi->sdp) in gfs2_glock_iter_next()
2554 if (!lockref_get_not_dead(&gl->gl_lockref)) in gfs2_glock_iter_next()
2558 if (__lockref_is_dead(&gl->gl_lockref)) in gfs2_glock_iter_next()
2563 gi->gl = gl; in gfs2_glock_iter_next()
2588 return gi->gl; in gfs2_glock_seq_start()
2599 return gi->gl; in gfs2_glock_seq_next()
2678 gi->gl = NULL; in __gfs2_glocks_open()
2694 if (gi->gl) in gfs2_glocks_release()
2695 gfs2_glock_put(gi->gl); in gfs2_glocks_release()
2845 struct gfs2_glock *gl; in gfs2_glockfd_seq_show() local
2848 gl = GFS2_I(inode)->i_iopen_gh.gh_gl; in gfs2_glockfd_seq_show()
2849 if (gl) { in gfs2_glockfd_seq_show()
2851 i->tgid, i->fd, gl->gl_name.ln_type, in gfs2_glockfd_seq_show()
2852 (unsigned long long)gl->gl_name.ln_number); in gfs2_glockfd_seq_show()