Lines matching refs:gl — references to the glock pointer gl in the GFS2 glock header (fs/gfs2/glock.h)

/* Lock-module hooks (struct lm_lockops members) that take a glock: */
	void (*lm_put_lock) (struct gfs2_glock *gl);
	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
			unsigned int flags);
	void (*lm_cancel) (struct gfs2_glock *gl);
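/*
 * Sketch (hypothetical, not the in-tree lock_nolock/lock_dlm tables): a
 * minimal lock module wires these hooks into a struct lm_lockops.  The
 * "lock_example" protocol name is made up; gfs2_glock_free() and
 * gfs2_glock_complete() are real helpers declared later in this header.
 */
static const struct lm_lockops example_ops = {
	.lm_proto_name	= "lock_example",
	.lm_put_lock	= gfs2_glock_free,	/* last reference gone: free the glock */
	/*
	 * .lm_lock is left NULL: without a cluster lock manager the core
	 * grants state changes locally.  A real module starts an async
	 * request here and reports the result via gfs2_glock_complete().
	 */
};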
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	/* Look through the holder list for one owned by the current task. */
	spin_lock(&gl->gl_lockref.lock);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			break;		/* only granted holders qualify */
		if (gh->gh_owner_pid == task_pid(current))
			goto out;
	}
	gh = NULL;
out:
	spin_unlock(&gl->gl_lockref.lock);
	return gh;
}
static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_SHARED;
}
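/*
 * Usage sketch (hypothetical helper): the gfs2_glock_is_held_*() tests
 * only read the cached gl_state, so they serve assertions rather than
 * locking decisions, typically via GLOCK_BUG_ON() defined below:
 */
static void example_assert_excl(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, !gfs2_glock_is_held_excl(gl));
	/* ... modify state that requires the glock held exclusively ... */
}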
static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		struct gfs2_glock_aspace *gla =
			container_of(gl, struct gfs2_glock_aspace, glock);
		return &gla->mapping;
	}
	return NULL;
}
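/*
 * Usage sketch (hypothetical): glock types created with GLOF_ASPACE, such
 * as inode glocks, embed an address_space for cached metadata pages; for
 * all other types gfs2_glock2aspace() returns NULL, so callers must check:
 */
static void example_start_writeback(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (mapping)
		filemap_fdatawrite(mapping);	/* write back cached pages */
}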
struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
void gfs2_glock_put(struct gfs2_glock *gl);
void gfs2_glock_queue_put(struct gfs2_glock *gl);

void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
			u16 flags, struct gfs2_holder *gh,
			unsigned long ip);

static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
				    u16 flags, struct gfs2_holder *gh)
{
	/* _RET_IP_ records the call site for glock tracing/debugging. */
	__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
}
void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
		     bool fsid);

#define GLOCK_BUG_ON(gl, x) do { if (unlikely(x)) {			\
			gfs2_dump_glock(NULL, gl, true);		\
			BUG(); } } while (0)
#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } }	\
	while (0)
#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
	while (0)
static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, u16 flags,
				     struct gfs2_holder *gh)
{
	int error;

	__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}
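/*
 * Usage sketch (hypothetical caller) of the usual enqueue/dequeue pattern.
 * On failure gfs2_glock_nq_init() has already uninitialized the holder, so
 * only the success path needs the dq/uninit pair (gfs2_glock_dq_uninit()
 * combines the two calls):
 */
static int example_read_under_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
	if (error)
		return error;

	/* ... read data protected by the shared glock ... */

	gfs2_glock_dq(&gh);		/* release the hold */
	gfs2_holder_uninit(&gh);	/* drop the holder's glock reference */
	return 0;
}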
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
void gfs2_cancel_delete_work(struct gfs2_glock *gl);
void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
void gfs2_glock_free(struct gfs2_glock *gl);
void gfs2_glock_free_later(struct gfs2_glock *gl);
void glock_set_object(struct gfs2_glock *gl, void *object);
void glock_clear_object(struct gfs2_glock *gl, void *object);
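/*
 * Sketch (hypothetical helpers): the object pointer ties a glock back to
 * the structure it protects, e.g. a struct gfs2_inode via its i_gl glock,
 * so dump and eviction code can reach it.  It must be cleared again
 * before that structure is freed:
 */
static void example_attach(struct gfs2_inode *ip)
{
	glock_set_object(ip->i_gl, ip);		/* glock now points at ip */
}

static void example_detach(struct gfs2_inode *ip)
{
	glock_clear_object(ip->i_gl, ip);	/* required before freeing ip */
}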
void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
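/*
 * Sketch (hypothetical caller): these helpers record, in the inode glock,
 * the generation of an inode one node has deleted, so that other nodes
 * can skip re-trying the delete for that same generation:
 */
static void example_mark_deleted(struct gfs2_glock *gl, u64 generation)
{
	if (!gfs2_inode_already_deleted(gl, generation))
		gfs2_inode_remember_delete(gl, generation);
}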