// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock */
	struct rhashtable_iter hti;	/* rhashtable iterator */
	struct gfs2_glock *gl;		/* current glock struct */
	loff_t last_pos;		/* last position */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT 15
#define GFS2_GL_HASH_SIZE BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {
	struct lm_lockname *name;
	wait_queue_entry_t wait;
};

static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{
	struct wait_glock_queue *wait_glock =
		container_of(wait, struct wait_glock_queue, wait);
	struct lm_lockname *wait_name = wait_glock->name;
	struct lm_lockname *wake_name = key;

	if (wake_name->ln_sbd != wait_name->ln_sbd ||
	    wake_name->ln_number != wait_name->ln_number ||
	    wake_name->ln_type != wait_name->ln_type)
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
	u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);

	return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}

/**
 * wake_up_glock - Wake up waiters on a glock
 * @gl: the glock
 */
static void
wake_up_glock(struct gfs2_glock *gl) 117 { 118 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); 119 120 if (waitqueue_active(wq)) 121 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); 122 } 123 124 static void gfs2_glock_dealloc(struct rcu_head *rcu) 125 { 126 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); 127 128 kfree(gl->gl_lksb.sb_lvbptr); 129 if (gl->gl_ops->go_flags & GLOF_ASPACE) 130 kmem_cache_free(gfs2_glock_aspace_cachep, gl); 131 else 132 kmem_cache_free(gfs2_glock_cachep, gl); 133 } 134 135 /** 136 * glock_blocked_by_withdraw - determine if we can still use a glock 137 * @gl: the glock 138 * 139 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted 140 * when we're withdrawn. For example, to maintain metadata integrity, we should 141 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like 142 * iopen or the transaction glocks may be safely used because none of their 143 * metadata goes through the journal. So in general, we should disallow all 144 * glocks that are journaled, and allow all the others. One exception is: 145 * we need to allow our active journal to be promoted and demoted so others 146 * may recover it and we can reacquire it when they're done. 147 */ 148 static bool glock_blocked_by_withdraw(struct gfs2_glock *gl) 149 { 150 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 151 152 if (likely(!gfs2_withdrawn(sdp))) 153 return false; 154 if (gl->gl_ops->go_flags & GLOF_NONDISK) 155 return false; 156 if (!sdp->sd_jdesc || 157 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr) 158 return false; 159 return true; 160 } 161 162 void gfs2_glock_free(struct gfs2_glock *gl) 163 { 164 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 165 166 gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0); 167 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); 168 smp_mb(); 169 wake_up_glock(gl); 170 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); 171 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 172 wake_up(&sdp->sd_glock_wait); 173 } 174 175 /** 176 * gfs2_glock_hold() - increment reference count on glock 177 * @gl: The glock to hold 178 * 179 */ 180 181 void gfs2_glock_hold(struct gfs2_glock *gl) 182 { 183 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 184 lockref_get(&gl->gl_lockref); 185 } 186 187 /** 188 * demote_ok - Check to see if it's ok to unlock a glock 189 * @gl: the glock 190 * 191 * Returns: 1 if it's ok 192 */ 193 194 static int demote_ok(const struct gfs2_glock *gl) 195 { 196 const struct gfs2_glock_operations *glops = gl->gl_ops; 197 198 if (gl->gl_state == LM_ST_UNLOCKED) 199 return 0; 200 if (!list_empty(&gl->gl_holders)) 201 return 0; 202 if (glops->go_demote_ok) 203 return glops->go_demote_ok(gl); 204 return 1; 205 } 206 207 208 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) 209 { 210 if (!(gl->gl_ops->go_flags & GLOF_LRU)) 211 return; 212 213 spin_lock(&lru_lock); 214 215 list_del(&gl->gl_lru); 216 list_add_tail(&gl->gl_lru, &lru_list); 217 218 if (!test_bit(GLF_LRU, &gl->gl_flags)) { 219 set_bit(GLF_LRU, &gl->gl_flags); 220 atomic_inc(&lru_count); 221 } 222 223 spin_unlock(&lru_lock); 224 } 225 226 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) 227 { 228 if (!(gl->gl_ops->go_flags & GLOF_LRU)) 229 return; 230 231 spin_lock(&lru_lock); 232 if (test_bit(GLF_LRU, &gl->gl_flags)) { 233 list_del_init(&gl->gl_lru); 234 atomic_dec(&lru_count); 235 clear_bit(GLF_LRU, &gl->gl_flags); 236 } 237 spin_unlock(&lru_lock); 238 } 239 240 /* 241 * Enqueue the glock on 
the work queue. Passes one glock reference on to the
 * work queue.
 */
static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
		/*
		 * We are holding the lockref spinlock, and the work was still
		 * queued above. The queued work (glock_work_func) takes that
		 * spinlock before dropping its glock reference(s), so it
		 * cannot have dropped them in the meantime.
		 */
		GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2);
		gl->gl_lockref.count--;
	}
}

static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
	spin_lock(&gl->gl_lockref.lock);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

static void __gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	if (mapping) {
		truncate_inode_pages_final(mapping);
		if (!gfs2_withdrawn(sdp))
			GLOCK_BUG_ON(gl, !mapping_empty(mapping));
	}
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/*
 * Cause the glock to be put in work queue context.
 */
void gfs2_glock_queue_put(struct gfs2_glock *gl)
{
	gfs2_glock_queue_work(gl, 0);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	__gfs2_glock_put(gl);
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);

	if (gh != gh_head) {
		/*
		 * Here we make a special exception to grant holders who agree
		 * to share the EX lock with other holders who also have the
		 * bit set. If the original holder has the LM_FLAG_NODE_SCOPE
		 * bit set, we grant more holders with the bit set.
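		 *
		 * For example, resource group glocks are typically acquired
		 * in LM_ST_EXCLUSIVE with LM_FLAG_NODE_SCOPE set, which lets
		 * several local holders share one cluster-wide EX lock.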
322 */ 323 if (gh_head->gh_state == LM_ST_EXCLUSIVE && 324 (gh_head->gh_flags & LM_FLAG_NODE_SCOPE) && 325 gh->gh_state == LM_ST_EXCLUSIVE && 326 (gh->gh_flags & LM_FLAG_NODE_SCOPE)) 327 return 1; 328 if ((gh->gh_state == LM_ST_EXCLUSIVE || 329 gh_head->gh_state == LM_ST_EXCLUSIVE)) 330 return 0; 331 } 332 if (gl->gl_state == gh->gh_state) 333 return 1; 334 if (gh->gh_flags & GL_EXACT) 335 return 0; 336 if (gl->gl_state == LM_ST_EXCLUSIVE) { 337 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED) 338 return 1; 339 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED) 340 return 1; 341 } 342 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) 343 return 1; 344 return 0; 345 } 346 347 static void gfs2_holder_wake(struct gfs2_holder *gh) 348 { 349 clear_bit(HIF_WAIT, &gh->gh_iflags); 350 smp_mb__after_atomic(); 351 wake_up_bit(&gh->gh_iflags, HIF_WAIT); 352 if (gh->gh_flags & GL_ASYNC) { 353 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd; 354 355 wake_up(&sdp->sd_async_glock_wait); 356 } 357 } 358 359 /** 360 * do_error - Something unexpected has happened during a lock request 361 * @gl: The glock 362 * @ret: The status from the DLM 363 */ 364 365 static void do_error(struct gfs2_glock *gl, const int ret) 366 { 367 struct gfs2_holder *gh, *tmp; 368 369 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 370 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 371 continue; 372 if (ret & LM_OUT_ERROR) 373 gh->gh_error = -EIO; 374 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) 375 gh->gh_error = GLR_TRYFAILED; 376 else 377 continue; 378 list_del_init(&gh->gh_list); 379 trace_gfs2_glock_queue(gh, 0); 380 gfs2_holder_wake(gh); 381 } 382 } 383 384 /** 385 * do_promote - promote as many requests as possible on the current queue 386 * @gl: The glock 387 * 388 * Returns: 1 if there is a blocked holder at the head of the list, or 2 389 * if a type specific operation is underway. 
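 * Returns 0 otherwise (no blocked holder remains at the head of the list).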
390 */ 391 392 static int do_promote(struct gfs2_glock *gl) 393 __releases(&gl->gl_lockref.lock) 394 __acquires(&gl->gl_lockref.lock) 395 { 396 const struct gfs2_glock_operations *glops = gl->gl_ops; 397 struct gfs2_holder *gh, *tmp; 398 int ret; 399 400 restart: 401 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 402 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 403 continue; 404 if (may_grant(gl, gh)) { 405 if (gh->gh_list.prev == &gl->gl_holders && 406 glops->go_lock) { 407 spin_unlock(&gl->gl_lockref.lock); 408 /* FIXME: eliminate this eventually */ 409 ret = glops->go_lock(gh); 410 spin_lock(&gl->gl_lockref.lock); 411 if (ret) { 412 if (ret == 1) 413 return 2; 414 gh->gh_error = ret; 415 list_del_init(&gh->gh_list); 416 trace_gfs2_glock_queue(gh, 0); 417 gfs2_holder_wake(gh); 418 goto restart; 419 } 420 set_bit(HIF_HOLDER, &gh->gh_iflags); 421 trace_gfs2_promote(gh, 1); 422 gfs2_holder_wake(gh); 423 goto restart; 424 } 425 set_bit(HIF_HOLDER, &gh->gh_iflags); 426 trace_gfs2_promote(gh, 0); 427 gfs2_holder_wake(gh); 428 continue; 429 } 430 if (gh->gh_list.prev == &gl->gl_holders) 431 return 1; 432 do_error(gl, 0); 433 break; 434 } 435 return 0; 436 } 437 438 /** 439 * find_first_waiter - find the first gh that's waiting for the glock 440 * @gl: the glock 441 */ 442 443 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) 444 { 445 struct gfs2_holder *gh; 446 447 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 448 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) 449 return gh; 450 } 451 return NULL; 452 } 453 454 /** 455 * state_change - record that the glock is now in a different state 456 * @gl: the glock 457 * @new_state: the new state 458 */ 459 460 static void state_change(struct gfs2_glock *gl, unsigned int new_state) 461 { 462 int held1, held2; 463 464 held1 = (gl->gl_state != LM_ST_UNLOCKED); 465 held2 = (new_state != LM_ST_UNLOCKED); 466 467 if (held1 != held2) { 468 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 469 if (held2) 470 gl->gl_lockref.count++; 471 else 472 gl->gl_lockref.count--; 473 } 474 if (new_state != gl->gl_target) 475 /* shorten our minimum hold time */ 476 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, 477 GL_GLOCK_MIN_HOLD); 478 gl->gl_state = new_state; 479 gl->gl_tchange = jiffies; 480 } 481 482 static void gfs2_set_demote(struct gfs2_glock *gl) 483 { 484 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 485 486 set_bit(GLF_DEMOTE, &gl->gl_flags); 487 smp_mb(); 488 wake_up(&sdp->sd_async_glock_wait); 489 } 490 491 static void gfs2_demote_wake(struct gfs2_glock *gl) 492 { 493 gl->gl_demote_state = LM_ST_EXCLUSIVE; 494 clear_bit(GLF_DEMOTE, &gl->gl_flags); 495 smp_mb__after_atomic(); 496 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); 497 } 498 499 /** 500 * finish_xmote - The DLM has replied to one of our lock requests 501 * @gl: The glock 502 * @ret: The status from the DLM 503 * 504 */ 505 506 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) 507 { 508 const struct gfs2_glock_operations *glops = gl->gl_ops; 509 struct gfs2_holder *gh; 510 unsigned state = ret & LM_OUT_ST_MASK; 511 int rv; 512 513 spin_lock(&gl->gl_lockref.lock); 514 trace_gfs2_glock_state_change(gl, state); 515 state_change(gl, state); 516 gh = find_first_waiter(gl); 517 518 /* Demote to UN request arrived during demote to SH or DF */ 519 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && 520 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) 521 gl->gl_target = LM_ST_UNLOCKED; 522 523 /* Check for state != 
intended state */ 524 if (unlikely(state != gl->gl_target)) { 525 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { 526 /* move to back of queue and try next entry */ 527 if (ret & LM_OUT_CANCELED) { 528 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) 529 list_move_tail(&gh->gh_list, &gl->gl_holders); 530 gh = find_first_waiter(gl); 531 gl->gl_target = gh->gh_state; 532 goto retry; 533 } 534 /* Some error or failed "try lock" - report it */ 535 if ((ret & LM_OUT_ERROR) || 536 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 537 gl->gl_target = gl->gl_state; 538 do_error(gl, ret); 539 goto out; 540 } 541 } 542 switch(state) { 543 /* Unlocked due to conversion deadlock, try again */ 544 case LM_ST_UNLOCKED: 545 retry: 546 do_xmote(gl, gh, gl->gl_target); 547 break; 548 /* Conversion fails, unlock and try again */ 549 case LM_ST_SHARED: 550 case LM_ST_DEFERRED: 551 do_xmote(gl, gh, LM_ST_UNLOCKED); 552 break; 553 default: /* Everything else */ 554 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", 555 gl->gl_target, state); 556 GLOCK_BUG_ON(gl, 1); 557 } 558 spin_unlock(&gl->gl_lockref.lock); 559 return; 560 } 561 562 /* Fast path - we got what we asked for */ 563 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) 564 gfs2_demote_wake(gl); 565 if (state != LM_ST_UNLOCKED) { 566 if (glops->go_xmote_bh) { 567 spin_unlock(&gl->gl_lockref.lock); 568 rv = glops->go_xmote_bh(gl); 569 spin_lock(&gl->gl_lockref.lock); 570 if (rv) { 571 do_error(gl, rv); 572 goto out; 573 } 574 } 575 rv = do_promote(gl); 576 if (rv == 2) 577 goto out_locked; 578 } 579 out: 580 clear_bit(GLF_LOCK, &gl->gl_flags); 581 out_locked: 582 spin_unlock(&gl->gl_lockref.lock); 583 } 584 585 static bool is_system_glock(struct gfs2_glock *gl) 586 { 587 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 588 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 589 590 if (gl == m_ip->i_gl) 591 return true; 592 return false; 593 } 594 595 /** 596 * do_xmote - Calls the DLM to change the state of a lock 597 * @gl: The lock state 598 * @gh: The holder (only for promotes) 599 * @target: The target lock state 600 * 601 */ 602 603 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) 604 __releases(&gl->gl_lockref.lock) 605 __acquires(&gl->gl_lockref.lock) 606 { 607 const struct gfs2_glock_operations *glops = gl->gl_ops; 608 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 609 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0); 610 int ret; 611 612 if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) && 613 gh && !(gh->gh_flags & LM_FLAG_NOEXP)) 614 return; 615 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | 616 LM_FLAG_PRIORITY); 617 GLOCK_BUG_ON(gl, gl->gl_state == target); 618 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); 619 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) && 620 glops->go_inval) { 621 /* 622 * If another process is already doing the invalidate, let that 623 * finish first. The glock state machine will get back to this 624 * holder again later. 
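		 * (The winner of the test_and_set_bit below clears
		 * GLF_INVALIDATE_IN_PROGRESS again once go_inval has run.)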
		 */
		if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
				     &gl->gl_flags))
			return;
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync) {
		ret = glops->go_sync(gl);
		/* If we had a problem syncing (due to io errors or whatever),
		 * we should not invalidate the metadata or tell dlm to
		 * release the glock to other nodes.
		 */
		if (ret) {
			if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
				fs_err(sdp, "Error %d syncing glock\n", ret);
				gfs2_dump_glock(NULL, gl, true);
			}
			goto skip_inval;
		}
	}
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
		/*
		 * The call to go_sync should have cleared out the ail list.
		 * If there are still items, we have a problem. We ought to
		 * withdraw, but we can't because the withdraw code also uses
		 * glocks. Warn about the error, dump the glock, then fall
		 * through and wait for logd to do the withdraw for us.
		 */
		if ((atomic_read(&gl->gl_ail_count) != 0) &&
		    (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
			gfs2_glock_assert_warn(gl,
					       !atomic_read(&gl->gl_ail_count));
			gfs2_dump_glock(NULL, gl, true);
		}
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
	}

skip_inval:
	gfs2_glock_hold(gl);
	/*
	 * Check for an error encountered since we called go_sync and go_inval.
	 * If so, we can't withdraw from the glock code because the withdraw
	 * code itself uses glocks (see function signal_our_withdraw) to
	 * change the mount to read-only. Most importantly, we must not call
	 * dlm to unlock the glock until the journal is in a known good state
	 * (after journal replay) otherwise other nodes may use the object
	 * (rgrp or dinode) and then later, journal replay will corrupt the
	 * file system. The best we can do here is wait for the logd daemon
	 * to see sd_log_error and withdraw, and in the meantime, requeue the
	 * work for later.
	 *
	 * We make a special exception for some system glocks, such as the
	 * system statfs inode glock, which needs to be granted before the
	 * gfs2_quotad daemon can exit, and that exit needs to finish before
	 * we can unmount the withdrawn file system.
	 *
	 * However, if we're just unlocking the lock (say, for unmount, when
	 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
	 * then it's okay to tell dlm to unlock it.
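	 * (In that case the checks below fall through to the lm_lock call
	 * and the unlock request is sent to dlm as usual.)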
692 */ 693 if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp))) 694 gfs2_withdraw_delayed(sdp); 695 if (glock_blocked_by_withdraw(gl) && 696 (target != LM_ST_UNLOCKED || 697 test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) { 698 if (!is_system_glock(gl)) { 699 gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); 700 goto out; 701 } else { 702 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); 703 } 704 } 705 706 if (sdp->sd_lockstruct.ls_ops->lm_lock) { 707 /* lock_dlm */ 708 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); 709 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && 710 target == LM_ST_UNLOCKED && 711 test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) { 712 finish_xmote(gl, target); 713 gfs2_glock_queue_work(gl, 0); 714 } else if (ret) { 715 fs_err(sdp, "lm_lock ret %d\n", ret); 716 GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp)); 717 } 718 } else { /* lock_nolock */ 719 finish_xmote(gl, target); 720 gfs2_glock_queue_work(gl, 0); 721 } 722 out: 723 spin_lock(&gl->gl_lockref.lock); 724 } 725 726 /** 727 * find_first_holder - find the first "holder" gh 728 * @gl: the glock 729 */ 730 731 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) 732 { 733 struct gfs2_holder *gh; 734 735 if (!list_empty(&gl->gl_holders)) { 736 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 737 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 738 return gh; 739 } 740 return NULL; 741 } 742 743 /** 744 * run_queue - do all outstanding tasks related to a glock 745 * @gl: The glock in question 746 * @nonblock: True if we must not block in run_queue 747 * 748 */ 749 750 static void run_queue(struct gfs2_glock *gl, const int nonblock) 751 __releases(&gl->gl_lockref.lock) 752 __acquires(&gl->gl_lockref.lock) 753 { 754 struct gfs2_holder *gh = NULL; 755 int ret; 756 757 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) 758 return; 759 760 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); 761 762 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && 763 gl->gl_demote_state != gl->gl_state) { 764 if (find_first_holder(gl)) 765 goto out_unlock; 766 if (nonblock) 767 goto out_sched; 768 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); 769 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); 770 gl->gl_target = gl->gl_demote_state; 771 } else { 772 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) 773 gfs2_demote_wake(gl); 774 ret = do_promote(gl); 775 if (ret == 0) 776 goto out_unlock; 777 if (ret == 2) 778 goto out; 779 gh = find_first_waiter(gl); 780 gl->gl_target = gh->gh_state; 781 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) 782 do_error(gl, 0); /* Fail queued try locks */ 783 } 784 do_xmote(gl, gh, gl->gl_target); 785 out: 786 return; 787 788 out_sched: 789 clear_bit(GLF_LOCK, &gl->gl_flags); 790 smp_mb__after_atomic(); 791 gl->gl_lockref.count++; 792 __gfs2_glock_queue_work(gl, 0); 793 return; 794 795 out_unlock: 796 clear_bit(GLF_LOCK, &gl->gl_flags); 797 smp_mb__after_atomic(); 798 return; 799 } 800 801 void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation) 802 { 803 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; 804 805 if (ri->ri_magic == 0) 806 ri->ri_magic = cpu_to_be32(GFS2_MAGIC); 807 if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC)) 808 ri->ri_generation_deleted = cpu_to_be64(generation); 809 } 810 811 bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation) 812 { 813 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; 814 815 if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC)) 816 return false; 817 
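	/*
	 * ri_generation_deleted records the most recent inode generation at
	 * this block that is known to have been deleted; anything at or
	 * below it is already gone.
	 */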
return generation <= be64_to_cpu(ri->ri_generation_deleted); 818 } 819 820 static void gfs2_glock_poke(struct gfs2_glock *gl) 821 { 822 int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP; 823 struct gfs2_holder gh; 824 int error; 825 826 gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh); 827 error = gfs2_glock_nq(&gh); 828 if (!error) 829 gfs2_glock_dq(&gh); 830 gfs2_holder_uninit(&gh); 831 } 832 833 static bool gfs2_try_evict(struct gfs2_glock *gl) 834 { 835 struct gfs2_inode *ip; 836 bool evicted = false; 837 838 /* 839 * If there is contention on the iopen glock and we have an inode, try 840 * to grab and release the inode so that it can be evicted. This will 841 * allow the remote node to go ahead and delete the inode without us 842 * having to do it, which will avoid rgrp glock thrashing. 843 * 844 * The remote node is likely still holding the corresponding inode 845 * glock, so it will run before we get to verify that the delete has 846 * happened below. 847 */ 848 spin_lock(&gl->gl_lockref.lock); 849 ip = gl->gl_object; 850 if (ip && !igrab(&ip->i_inode)) 851 ip = NULL; 852 spin_unlock(&gl->gl_lockref.lock); 853 if (ip) { 854 struct gfs2_glock *inode_gl = NULL; 855 856 gl->gl_no_formal_ino = ip->i_no_formal_ino; 857 set_bit(GIF_DEFERRED_DELETE, &ip->i_flags); 858 d_prune_aliases(&ip->i_inode); 859 iput(&ip->i_inode); 860 861 /* If the inode was evicted, gl->gl_object will now be NULL. */ 862 spin_lock(&gl->gl_lockref.lock); 863 ip = gl->gl_object; 864 if (ip) { 865 inode_gl = ip->i_gl; 866 lockref_get(&inode_gl->gl_lockref); 867 clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags); 868 } 869 spin_unlock(&gl->gl_lockref.lock); 870 if (inode_gl) { 871 gfs2_glock_poke(inode_gl); 872 gfs2_glock_put(inode_gl); 873 } 874 evicted = !ip; 875 } 876 return evicted; 877 } 878 879 static void delete_work_func(struct work_struct *work) 880 { 881 struct delayed_work *dwork = to_delayed_work(work); 882 struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete); 883 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 884 struct inode *inode; 885 u64 no_addr = gl->gl_name.ln_number; 886 887 spin_lock(&gl->gl_lockref.lock); 888 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); 889 spin_unlock(&gl->gl_lockref.lock); 890 891 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { 892 /* 893 * If we can evict the inode, give the remote node trying to 894 * delete the inode some time before verifying that the delete 895 * has happened. Otherwise, if we cause contention on the inode glock 896 * immediately, the remote node will think that we still have 897 * the inode in use, and so it will give up waiting. 898 * 899 * If we can't evict the inode, signal to the remote node that 900 * the inode is still in use. We'll later try to delete the 901 * inode locally in gfs2_evict_inode. 902 * 903 * FIXME: We only need to verify that the remote node has 904 * deleted the inode because nodes before this remote delete 905 * rework won't cooperate. At a later time, when we no longer 906 * care about compatibility with such nodes, we can skip this 907 * step entirely. 
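		 * (The 5 * HZ requeue below gives the remote node roughly
		 * five seconds to make progress before this work runs again
		 * and verifies the delete.)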
908 */ 909 if (gfs2_try_evict(gl)) { 910 if (gfs2_queue_delete_work(gl, 5 * HZ)) 911 return; 912 } 913 goto out; 914 } 915 916 inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino, 917 GFS2_BLKST_UNLINKED); 918 if (!IS_ERR_OR_NULL(inode)) { 919 d_prune_aliases(inode); 920 iput(inode); 921 } 922 out: 923 gfs2_glock_put(gl); 924 } 925 926 static void glock_work_func(struct work_struct *work) 927 { 928 unsigned long delay = 0; 929 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 930 unsigned int drop_refs = 1; 931 932 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { 933 finish_xmote(gl, gl->gl_reply); 934 drop_refs++; 935 } 936 spin_lock(&gl->gl_lockref.lock); 937 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 938 gl->gl_state != LM_ST_UNLOCKED && 939 gl->gl_demote_state != LM_ST_EXCLUSIVE) { 940 unsigned long holdtime, now = jiffies; 941 942 holdtime = gl->gl_tchange + gl->gl_hold_time; 943 if (time_before(now, holdtime)) 944 delay = holdtime - now; 945 946 if (!delay) { 947 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 948 gfs2_set_demote(gl); 949 } 950 } 951 run_queue(gl, 0); 952 if (delay) { 953 /* Keep one glock reference for the work we requeue. */ 954 drop_refs--; 955 if (gl->gl_name.ln_type != LM_TYPE_INODE) 956 delay = 0; 957 __gfs2_glock_queue_work(gl, delay); 958 } 959 960 /* 961 * Drop the remaining glock references manually here. (Mind that 962 * __gfs2_glock_queue_work depends on the lockref spinlock begin held 963 * here as well.) 964 */ 965 gl->gl_lockref.count -= drop_refs; 966 if (!gl->gl_lockref.count) { 967 __gfs2_glock_put(gl); 968 return; 969 } 970 spin_unlock(&gl->gl_lockref.lock); 971 } 972 973 static struct gfs2_glock *find_insert_glock(struct lm_lockname *name, 974 struct gfs2_glock *new) 975 { 976 struct wait_glock_queue wait; 977 wait_queue_head_t *wq = glock_waitqueue(name); 978 struct gfs2_glock *gl; 979 980 wait.name = name; 981 init_wait(&wait.wait); 982 wait.wait.func = glock_wake_function; 983 984 again: 985 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); 986 rcu_read_lock(); 987 if (new) { 988 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table, 989 &new->gl_node, ht_parms); 990 if (IS_ERR(gl)) 991 goto out; 992 } else { 993 gl = rhashtable_lookup_fast(&gl_hash_table, 994 name, ht_parms); 995 } 996 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) { 997 rcu_read_unlock(); 998 schedule(); 999 goto again; 1000 } 1001 out: 1002 rcu_read_unlock(); 1003 finish_wait(wq, &wait.wait); 1004 return gl; 1005 } 1006 1007 /** 1008 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist 1009 * @sdp: The GFS2 superblock 1010 * @number: the lock number 1011 * @glops: The glock_operations to use 1012 * @create: If 0, don't create the glock if it doesn't exist 1013 * @glp: the glock is returned here 1014 * 1015 * This does not lock a glock, just finds/creates structures for one. 
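 *
 * A typical caller looks roughly like this (sketch only, with made-up
 * locals):
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (!error)
 *		gfs2_glock_put(gl);
 *
 * The glock returned in @glp holds a reference that the caller must
 * eventually drop with gfs2_glock_put().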
1016 * 1017 * Returns: errno 1018 */ 1019 1020 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, 1021 const struct gfs2_glock_operations *glops, int create, 1022 struct gfs2_glock **glp) 1023 { 1024 struct super_block *s = sdp->sd_vfs; 1025 struct lm_lockname name = { .ln_number = number, 1026 .ln_type = glops->go_type, 1027 .ln_sbd = sdp }; 1028 struct gfs2_glock *gl, *tmp; 1029 struct address_space *mapping; 1030 struct kmem_cache *cachep; 1031 int ret = 0; 1032 1033 gl = find_insert_glock(&name, NULL); 1034 if (gl) { 1035 *glp = gl; 1036 return 0; 1037 } 1038 if (!create) 1039 return -ENOENT; 1040 1041 if (glops->go_flags & GLOF_ASPACE) 1042 cachep = gfs2_glock_aspace_cachep; 1043 else 1044 cachep = gfs2_glock_cachep; 1045 gl = kmem_cache_alloc(cachep, GFP_NOFS); 1046 if (!gl) 1047 return -ENOMEM; 1048 1049 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 1050 1051 if (glops->go_flags & GLOF_LVB) { 1052 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); 1053 if (!gl->gl_lksb.sb_lvbptr) { 1054 kmem_cache_free(cachep, gl); 1055 return -ENOMEM; 1056 } 1057 } 1058 1059 atomic_inc(&sdp->sd_glock_disposal); 1060 gl->gl_node.next = NULL; 1061 gl->gl_flags = 0; 1062 gl->gl_name = name; 1063 lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass); 1064 gl->gl_lockref.count = 1; 1065 gl->gl_state = LM_ST_UNLOCKED; 1066 gl->gl_target = LM_ST_UNLOCKED; 1067 gl->gl_demote_state = LM_ST_EXCLUSIVE; 1068 gl->gl_ops = glops; 1069 gl->gl_dstamp = 0; 1070 preempt_disable(); 1071 /* We use the global stats to estimate the initial per-glock stats */ 1072 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; 1073 preempt_enable(); 1074 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; 1075 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; 1076 gl->gl_tchange = jiffies; 1077 gl->gl_object = NULL; 1078 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; 1079 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); 1080 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) 1081 INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func); 1082 1083 mapping = gfs2_glock2aspace(gl); 1084 if (mapping) { 1085 mapping->a_ops = &gfs2_meta_aops; 1086 mapping->host = s->s_bdev->bd_inode; 1087 mapping->flags = 0; 1088 mapping_set_gfp_mask(mapping, GFP_NOFS); 1089 mapping->private_data = NULL; 1090 mapping->writeback_index = 0; 1091 } 1092 1093 tmp = find_insert_glock(&name, gl); 1094 if (!tmp) { 1095 *glp = gl; 1096 goto out; 1097 } 1098 if (IS_ERR(tmp)) { 1099 ret = PTR_ERR(tmp); 1100 goto out_free; 1101 } 1102 *glp = tmp; 1103 1104 out_free: 1105 kfree(gl->gl_lksb.sb_lvbptr); 1106 kmem_cache_free(cachep, gl); 1107 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 1108 wake_up(&sdp->sd_glock_wait); 1109 1110 out: 1111 return ret; 1112 } 1113 1114 /** 1115 * gfs2_holder_init - initialize a struct gfs2_holder in the default way 1116 * @gl: the glock 1117 * @state: the state we're requesting 1118 * @flags: the modifier flags 1119 * @gh: the holder structure 1120 * 1121 */ 1122 1123 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, 1124 struct gfs2_holder *gh) 1125 { 1126 INIT_LIST_HEAD(&gh->gh_list); 1127 gh->gh_gl = gl; 1128 gh->gh_ip = _RET_IP_; 1129 gh->gh_owner_pid = get_pid(task_pid(current)); 1130 gh->gh_state = state; 1131 gh->gh_flags = flags; 1132 gh->gh_error = 0; 1133 gh->gh_iflags = 0; 1134 gfs2_glock_hold(gl); 1135 } 1136 1137 /** 1138 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it 1139 * @state: the state we're requesting 1140 * @flags: the modifier flags 1141 * @gh: the 
holder structure 1142 * 1143 * Don't mess with the glock. 1144 * 1145 */ 1146 1147 void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh) 1148 { 1149 gh->gh_state = state; 1150 gh->gh_flags = flags; 1151 gh->gh_iflags = 0; 1152 gh->gh_ip = _RET_IP_; 1153 put_pid(gh->gh_owner_pid); 1154 gh->gh_owner_pid = get_pid(task_pid(current)); 1155 } 1156 1157 /** 1158 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference) 1159 * @gh: the holder structure 1160 * 1161 */ 1162 1163 void gfs2_holder_uninit(struct gfs2_holder *gh) 1164 { 1165 put_pid(gh->gh_owner_pid); 1166 gfs2_glock_put(gh->gh_gl); 1167 gfs2_holder_mark_uninitialized(gh); 1168 gh->gh_ip = 0; 1169 } 1170 1171 static void gfs2_glock_update_hold_time(struct gfs2_glock *gl, 1172 unsigned long start_time) 1173 { 1174 /* Have we waited longer that a second? */ 1175 if (time_after(jiffies, start_time + HZ)) { 1176 /* Lengthen the minimum hold time. */ 1177 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR, 1178 GL_GLOCK_MAX_HOLD); 1179 } 1180 } 1181 1182 /** 1183 * gfs2_glock_wait - wait on a glock acquisition 1184 * @gh: the glock holder 1185 * 1186 * Returns: 0 on success 1187 */ 1188 1189 int gfs2_glock_wait(struct gfs2_holder *gh) 1190 { 1191 unsigned long start_time = jiffies; 1192 1193 might_sleep(); 1194 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); 1195 gfs2_glock_update_hold_time(gh->gh_gl, start_time); 1196 return gh->gh_error; 1197 } 1198 1199 static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs) 1200 { 1201 int i; 1202 1203 for (i = 0; i < num_gh; i++) 1204 if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) 1205 return 1; 1206 return 0; 1207 } 1208 1209 /** 1210 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions 1211 * @num_gh: the number of holders in the array 1212 * @ghs: the glock holder array 1213 * 1214 * Returns: 0 on success, meaning all glocks have been granted and are held. 1215 * -ESTALE if the request timed out, meaning all glocks were released, 1216 * and the caller should retry the operation. 1217 */ 1218 1219 int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs) 1220 { 1221 struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd; 1222 int i, ret = 0, timeout = 0; 1223 unsigned long start_time = jiffies; 1224 bool keep_waiting; 1225 1226 might_sleep(); 1227 /* 1228 * Total up the (minimum hold time * 2) of all glocks and use that to 1229 * determine the max amount of time we should wait. 1230 */ 1231 for (i = 0; i < num_gh; i++) 1232 timeout += ghs[i].gh_gl->gl_hold_time << 1; 1233 1234 wait_for_dlm: 1235 if (!wait_event_timeout(sdp->sd_async_glock_wait, 1236 !glocks_pending(num_gh, ghs), timeout)) 1237 ret = -ESTALE; /* request timed out. */ 1238 1239 /* 1240 * If dlm granted all our requests, we need to adjust the glock 1241 * minimum hold time values according to how long we waited. 1242 * 1243 * If our request timed out, we need to repeatedly release any held 1244 * glocks we acquired thus far to allow dlm to acquire the remaining 1245 * glocks without deadlocking. We cannot currently cancel outstanding 1246 * glock acquisitions. 1247 * 1248 * The HIF_WAIT bit tells us which requests still need a response from 1249 * dlm. 1250 * 1251 * If dlm sent us any errors, we return the first error we find. 1252 */ 1253 keep_waiting = false; 1254 for (i = 0; i < num_gh; i++) { 1255 /* Skip holders we have already dequeued below. 
*/ 1256 if (!gfs2_holder_queued(&ghs[i])) 1257 continue; 1258 /* Skip holders with a pending DLM response. */ 1259 if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) { 1260 keep_waiting = true; 1261 continue; 1262 } 1263 1264 if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) { 1265 if (ret == -ESTALE) 1266 gfs2_glock_dq(&ghs[i]); 1267 else 1268 gfs2_glock_update_hold_time(ghs[i].gh_gl, 1269 start_time); 1270 } 1271 if (!ret) 1272 ret = ghs[i].gh_error; 1273 } 1274 1275 if (keep_waiting) 1276 goto wait_for_dlm; 1277 1278 /* 1279 * At this point, we've either acquired all locks or released them all. 1280 */ 1281 return ret; 1282 } 1283 1284 /** 1285 * handle_callback - process a demote request 1286 * @gl: the glock 1287 * @state: the state the caller wants us to change to 1288 * @delay: zero to demote immediately; otherwise pending demote 1289 * @remote: true if this came from a different cluster node 1290 * 1291 * There are only two requests that we are going to see in actual 1292 * practise: LM_ST_SHARED and LM_ST_UNLOCKED 1293 */ 1294 1295 static void handle_callback(struct gfs2_glock *gl, unsigned int state, 1296 unsigned long delay, bool remote) 1297 { 1298 if (delay) 1299 set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 1300 else 1301 gfs2_set_demote(gl); 1302 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { 1303 gl->gl_demote_state = state; 1304 gl->gl_demote_time = jiffies; 1305 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && 1306 gl->gl_demote_state != state) { 1307 gl->gl_demote_state = LM_ST_UNLOCKED; 1308 } 1309 if (gl->gl_ops->go_callback) 1310 gl->gl_ops->go_callback(gl, remote); 1311 trace_gfs2_demote_rq(gl, remote); 1312 } 1313 1314 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...) 1315 { 1316 struct va_format vaf; 1317 va_list args; 1318 1319 va_start(args, fmt); 1320 1321 if (seq) { 1322 seq_vprintf(seq, fmt, args); 1323 } else { 1324 vaf.fmt = fmt; 1325 vaf.va = &args; 1326 1327 pr_err("%pV", &vaf); 1328 } 1329 1330 va_end(args); 1331 } 1332 1333 /** 1334 * add_to_queue - Add a holder to the wait queue (but look for recursion) 1335 * @gh: the holder structure to add 1336 * 1337 * Eventually we should move the recursive locking trap to a 1338 * debugging option or something like that. This is the fast 1339 * path and needs to have the minimum number of distractions. 
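 * ("Recursive locking" means a process queueing a second holder for a
 * glock it has already queued; except for flock glocks this is treated as
 * a bug, and the trap_recursive path below dumps the glock and calls BUG().)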
1340 * 1341 */ 1342 1343 static inline void add_to_queue(struct gfs2_holder *gh) 1344 __releases(&gl->gl_lockref.lock) 1345 __acquires(&gl->gl_lockref.lock) 1346 { 1347 struct gfs2_glock *gl = gh->gh_gl; 1348 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1349 struct list_head *insert_pt = NULL; 1350 struct gfs2_holder *gh2; 1351 int try_futile = 0; 1352 1353 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); 1354 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) 1355 GLOCK_BUG_ON(gl, true); 1356 1357 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { 1358 if (test_bit(GLF_LOCK, &gl->gl_flags)) 1359 try_futile = !may_grant(gl, gh); 1360 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) 1361 goto fail; 1362 } 1363 1364 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { 1365 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid && 1366 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK))) 1367 goto trap_recursive; 1368 if (try_futile && 1369 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 1370 fail: 1371 gh->gh_error = GLR_TRYFAILED; 1372 gfs2_holder_wake(gh); 1373 return; 1374 } 1375 if (test_bit(HIF_HOLDER, &gh2->gh_iflags)) 1376 continue; 1377 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) 1378 insert_pt = &gh2->gh_list; 1379 } 1380 trace_gfs2_glock_queue(gh, 1); 1381 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); 1382 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); 1383 if (likely(insert_pt == NULL)) { 1384 list_add_tail(&gh->gh_list, &gl->gl_holders); 1385 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) 1386 goto do_cancel; 1387 return; 1388 } 1389 list_add_tail(&gh->gh_list, insert_pt); 1390 do_cancel: 1391 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 1392 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { 1393 spin_unlock(&gl->gl_lockref.lock); 1394 if (sdp->sd_lockstruct.ls_ops->lm_cancel) 1395 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); 1396 spin_lock(&gl->gl_lockref.lock); 1397 } 1398 return; 1399 1400 trap_recursive: 1401 fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip); 1402 fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid)); 1403 fs_err(sdp, "lock type: %d req lock state : %d\n", 1404 gh2->gh_gl->gl_name.ln_type, gh2->gh_state); 1405 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip); 1406 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid)); 1407 fs_err(sdp, "lock type: %d req lock state : %d\n", 1408 gh->gh_gl->gl_name.ln_type, gh->gh_state); 1409 gfs2_dump_glock(NULL, gl, true); 1410 BUG(); 1411 } 1412 1413 /** 1414 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock) 1415 * @gh: the holder structure 1416 * 1417 * if (gh->gh_flags & GL_ASYNC), this never returns an error 1418 * 1419 * Returns: 0, GLR_TRYFAILED, or errno on failure 1420 */ 1421 1422 int gfs2_glock_nq(struct gfs2_holder *gh) 1423 { 1424 struct gfs2_glock *gl = gh->gh_gl; 1425 int error = 0; 1426 1427 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) 1428 return -EIO; 1429 1430 if (test_bit(GLF_LRU, &gl->gl_flags)) 1431 gfs2_glock_remove_from_lru(gl); 1432 1433 spin_lock(&gl->gl_lockref.lock); 1434 add_to_queue(gh); 1435 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && 1436 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { 1437 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1438 gl->gl_lockref.count++; 1439 __gfs2_glock_queue_work(gl, 0); 1440 } 1441 run_queue(gl, 1); 1442 spin_unlock(&gl->gl_lockref.lock); 1443 1444 if (!(gh->gh_flags & GL_ASYNC)) 1445 error = gfs2_glock_wait(gh); 1446 1447 return error; 1448 } 1449 1450 /** 1451 * gfs2_glock_poll - poll to see if an 
async request has been completed 1452 * @gh: the holder 1453 * 1454 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on 1455 */ 1456 1457 int gfs2_glock_poll(struct gfs2_holder *gh) 1458 { 1459 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; 1460 } 1461 1462 /** 1463 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock) 1464 * @gh: the glock holder 1465 * 1466 */ 1467 1468 void gfs2_glock_dq(struct gfs2_holder *gh) 1469 { 1470 struct gfs2_glock *gl = gh->gh_gl; 1471 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1472 unsigned delay = 0; 1473 int fast_path = 0; 1474 1475 spin_lock(&gl->gl_lockref.lock); 1476 /* 1477 * If we're in the process of file system withdraw, we cannot just 1478 * dequeue any glocks until our journal is recovered, lest we 1479 * introduce file system corruption. We need two exceptions to this 1480 * rule: We need to allow unlocking of nondisk glocks and the glock 1481 * for our own journal that needs recovery. 1482 */ 1483 if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) && 1484 glock_blocked_by_withdraw(gl) && 1485 gh->gh_gl != sdp->sd_jinode_gl) { 1486 sdp->sd_glock_dqs_held++; 1487 spin_unlock(&gl->gl_lockref.lock); 1488 might_sleep(); 1489 wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY, 1490 TASK_UNINTERRUPTIBLE); 1491 spin_lock(&gl->gl_lockref.lock); 1492 } 1493 if (gh->gh_flags & GL_NOCACHE) 1494 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1495 1496 list_del_init(&gh->gh_list); 1497 clear_bit(HIF_HOLDER, &gh->gh_iflags); 1498 if (find_first_holder(gl) == NULL) { 1499 if (list_empty(&gl->gl_holders) && 1500 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1501 !test_bit(GLF_DEMOTE, &gl->gl_flags)) 1502 fast_path = 1; 1503 } 1504 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) 1505 gfs2_glock_add_to_lru(gl); 1506 1507 trace_gfs2_glock_queue(gh, 0); 1508 if (unlikely(!fast_path)) { 1509 gl->gl_lockref.count++; 1510 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1511 !test_bit(GLF_DEMOTE, &gl->gl_flags) && 1512 gl->gl_name.ln_type == LM_TYPE_INODE) 1513 delay = gl->gl_hold_time; 1514 __gfs2_glock_queue_work(gl, delay); 1515 } 1516 spin_unlock(&gl->gl_lockref.lock); 1517 } 1518 1519 void gfs2_glock_dq_wait(struct gfs2_holder *gh) 1520 { 1521 struct gfs2_glock *gl = gh->gh_gl; 1522 gfs2_glock_dq(gh); 1523 might_sleep(); 1524 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); 1525 } 1526 1527 /** 1528 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it 1529 * @gh: the holder structure 1530 * 1531 */ 1532 1533 void gfs2_glock_dq_uninit(struct gfs2_holder *gh) 1534 { 1535 gfs2_glock_dq(gh); 1536 gfs2_holder_uninit(gh); 1537 } 1538 1539 /** 1540 * gfs2_glock_nq_num - acquire a glock based on lock number 1541 * @sdp: the filesystem 1542 * @number: the lock number 1543 * @glops: the glock operations for the type of glock 1544 * @state: the state to acquire the glock in 1545 * @flags: modifier flags for the acquisition 1546 * @gh: the struct gfs2_holder 1547 * 1548 * Returns: errno 1549 */ 1550 1551 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number, 1552 const struct gfs2_glock_operations *glops, 1553 unsigned int state, u16 flags, struct gfs2_holder *gh) 1554 { 1555 struct gfs2_glock *gl; 1556 int error; 1557 1558 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); 1559 if (!error) { 1560 error = gfs2_glock_nq_init(gl, state, flags, gh); 1561 gfs2_glock_put(gl); 1562 } 1563 1564 return error; 1565 } 1566 1567 /** 1568 * glock_compare - Compare two struct gfs2_glock 
structures for sorting 1569 * @arg_a: the first structure 1570 * @arg_b: the second structure 1571 * 1572 */ 1573 1574 static int glock_compare(const void *arg_a, const void *arg_b) 1575 { 1576 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a; 1577 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b; 1578 const struct lm_lockname *a = &gh_a->gh_gl->gl_name; 1579 const struct lm_lockname *b = &gh_b->gh_gl->gl_name; 1580 1581 if (a->ln_number > b->ln_number) 1582 return 1; 1583 if (a->ln_number < b->ln_number) 1584 return -1; 1585 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type); 1586 return 0; 1587 } 1588 1589 /** 1590 * nq_m_sync - synchonously acquire more than one glock in deadlock free order 1591 * @num_gh: the number of structures 1592 * @ghs: an array of struct gfs2_holder structures 1593 * @p: placeholder for the holder structure to pass back 1594 * 1595 * Returns: 0 on success (all glocks acquired), 1596 * errno on failure (no glocks acquired) 1597 */ 1598 1599 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs, 1600 struct gfs2_holder **p) 1601 { 1602 unsigned int x; 1603 int error = 0; 1604 1605 for (x = 0; x < num_gh; x++) 1606 p[x] = &ghs[x]; 1607 1608 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL); 1609 1610 for (x = 0; x < num_gh; x++) { 1611 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); 1612 1613 error = gfs2_glock_nq(p[x]); 1614 if (error) { 1615 while (x--) 1616 gfs2_glock_dq(p[x]); 1617 break; 1618 } 1619 } 1620 1621 return error; 1622 } 1623 1624 /** 1625 * gfs2_glock_nq_m - acquire multiple glocks 1626 * @num_gh: the number of structures 1627 * @ghs: an array of struct gfs2_holder structures 1628 * 1629 * 1630 * Returns: 0 on success (all glocks acquired), 1631 * errno on failure (no glocks acquired) 1632 */ 1633 1634 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1635 { 1636 struct gfs2_holder *tmp[4]; 1637 struct gfs2_holder **pph = tmp; 1638 int error = 0; 1639 1640 switch(num_gh) { 1641 case 0: 1642 return 0; 1643 case 1: 1644 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); 1645 return gfs2_glock_nq(ghs); 1646 default: 1647 if (num_gh <= 4) 1648 break; 1649 pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *), 1650 GFP_NOFS); 1651 if (!pph) 1652 return -ENOMEM; 1653 } 1654 1655 error = nq_m_sync(num_gh, ghs, pph); 1656 1657 if (pph != tmp) 1658 kfree(pph); 1659 1660 return error; 1661 } 1662 1663 /** 1664 * gfs2_glock_dq_m - release multiple glocks 1665 * @num_gh: the number of structures 1666 * @ghs: an array of struct gfs2_holder structures 1667 * 1668 */ 1669 1670 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1671 { 1672 while (num_gh--) 1673 gfs2_glock_dq(&ghs[num_gh]); 1674 } 1675 1676 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) 1677 { 1678 unsigned long delay = 0; 1679 unsigned long holdtime; 1680 unsigned long now = jiffies; 1681 1682 gfs2_glock_hold(gl); 1683 spin_lock(&gl->gl_lockref.lock); 1684 holdtime = gl->gl_tchange + gl->gl_hold_time; 1685 if (!list_empty(&gl->gl_holders) && 1686 gl->gl_name.ln_type == LM_TYPE_INODE) { 1687 if (time_before(now, holdtime)) 1688 delay = holdtime - now; 1689 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 1690 delay = gl->gl_hold_time; 1691 } 1692 handle_callback(gl, state, delay, true); 1693 __gfs2_glock_queue_work(gl, delay); 1694 spin_unlock(&gl->gl_lockref.lock); 1695 } 1696 1697 /** 1698 * gfs2_should_freeze - Figure out if glock should be frozen 1699 * @gl: The glock in 
question 1700 * 1701 * Glocks are not frozen if (a) the result of the dlm operation is 1702 * an error, (b) the locking operation was an unlock operation or 1703 * (c) if there is a "noexp" flagged request anywhere in the queue 1704 * 1705 * Returns: 1 if freezing should occur, 0 otherwise 1706 */ 1707 1708 static int gfs2_should_freeze(const struct gfs2_glock *gl) 1709 { 1710 const struct gfs2_holder *gh; 1711 1712 if (gl->gl_reply & ~LM_OUT_ST_MASK) 1713 return 0; 1714 if (gl->gl_target == LM_ST_UNLOCKED) 1715 return 0; 1716 1717 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1718 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 1719 continue; 1720 if (LM_FLAG_NOEXP & gh->gh_flags) 1721 return 0; 1722 } 1723 1724 return 1; 1725 } 1726 1727 /** 1728 * gfs2_glock_complete - Callback used by locking 1729 * @gl: Pointer to the glock 1730 * @ret: The return value from the dlm 1731 * 1732 * The gl_reply field is under the gl_lockref.lock lock so that it is ok 1733 * to use a bitfield shared with other glock state fields. 1734 */ 1735 1736 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1737 { 1738 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; 1739 1740 spin_lock(&gl->gl_lockref.lock); 1741 gl->gl_reply = ret; 1742 1743 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { 1744 if (gfs2_should_freeze(gl)) { 1745 set_bit(GLF_FROZEN, &gl->gl_flags); 1746 spin_unlock(&gl->gl_lockref.lock); 1747 return; 1748 } 1749 } 1750 1751 gl->gl_lockref.count++; 1752 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1753 __gfs2_glock_queue_work(gl, 0); 1754 spin_unlock(&gl->gl_lockref.lock); 1755 } 1756 1757 static int glock_cmp(void *priv, const struct list_head *a, 1758 const struct list_head *b) 1759 { 1760 struct gfs2_glock *gla, *glb; 1761 1762 gla = list_entry(a, struct gfs2_glock, gl_lru); 1763 glb = list_entry(b, struct gfs2_glock, gl_lru); 1764 1765 if (gla->gl_name.ln_number > glb->gl_name.ln_number) 1766 return 1; 1767 if (gla->gl_name.ln_number < glb->gl_name.ln_number) 1768 return -1; 1769 1770 return 0; 1771 } 1772 1773 /** 1774 * gfs2_dispose_glock_lru - Demote a list of glocks 1775 * @list: The list to dispose of 1776 * 1777 * Disposing of glocks may involve disk accesses, so that here we sort 1778 * the glocks by number (i.e. disk location of the inodes) so that if 1779 * there are any such accesses, they'll be sent in order (mostly). 1780 * 1781 * Must be called under the lru_lock, but may drop and retake this 1782 * lock. 
While the lru_lock is dropped, entries may vanish from the 1783 * list, but no new entries will appear on the list (since it is 1784 * private) 1785 */ 1786 1787 static void gfs2_dispose_glock_lru(struct list_head *list) 1788 __releases(&lru_lock) 1789 __acquires(&lru_lock) 1790 { 1791 struct gfs2_glock *gl; 1792 1793 list_sort(NULL, list, glock_cmp); 1794 1795 while(!list_empty(list)) { 1796 gl = list_first_entry(list, struct gfs2_glock, gl_lru); 1797 list_del_init(&gl->gl_lru); 1798 clear_bit(GLF_LRU, &gl->gl_flags); 1799 if (!spin_trylock(&gl->gl_lockref.lock)) { 1800 add_back_to_lru: 1801 list_add(&gl->gl_lru, &lru_list); 1802 set_bit(GLF_LRU, &gl->gl_flags); 1803 atomic_inc(&lru_count); 1804 continue; 1805 } 1806 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1807 spin_unlock(&gl->gl_lockref.lock); 1808 goto add_back_to_lru; 1809 } 1810 gl->gl_lockref.count++; 1811 if (demote_ok(gl)) 1812 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1813 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); 1814 __gfs2_glock_queue_work(gl, 0); 1815 spin_unlock(&gl->gl_lockref.lock); 1816 cond_resched_lock(&lru_lock); 1817 } 1818 } 1819 1820 /** 1821 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote 1822 * @nr: The number of entries to scan 1823 * 1824 * This function selects the entries on the LRU which are able to 1825 * be demoted, and then kicks off the process by calling 1826 * gfs2_dispose_glock_lru() above. 1827 */ 1828 1829 static long gfs2_scan_glock_lru(int nr) 1830 { 1831 struct gfs2_glock *gl; 1832 LIST_HEAD(skipped); 1833 LIST_HEAD(dispose); 1834 long freed = 0; 1835 1836 spin_lock(&lru_lock); 1837 while ((nr-- >= 0) && !list_empty(&lru_list)) { 1838 gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru); 1839 1840 /* Test for being demotable */ 1841 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { 1842 list_move(&gl->gl_lru, &dispose); 1843 atomic_dec(&lru_count); 1844 freed++; 1845 continue; 1846 } 1847 1848 list_move(&gl->gl_lru, &skipped); 1849 } 1850 list_splice(&skipped, &lru_list); 1851 if (!list_empty(&dispose)) 1852 gfs2_dispose_glock_lru(&dispose); 1853 spin_unlock(&lru_lock); 1854 1855 return freed; 1856 } 1857 1858 static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink, 1859 struct shrink_control *sc) 1860 { 1861 if (!(sc->gfp_mask & __GFP_FS)) 1862 return SHRINK_STOP; 1863 return gfs2_scan_glock_lru(sc->nr_to_scan); 1864 } 1865 1866 static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink, 1867 struct shrink_control *sc) 1868 { 1869 return vfs_pressure_ratio(atomic_read(&lru_count)); 1870 } 1871 1872 static struct shrinker glock_shrinker = { 1873 .seeks = DEFAULT_SEEKS, 1874 .count_objects = gfs2_glock_shrink_count, 1875 .scan_objects = gfs2_glock_shrink_scan, 1876 }; 1877 1878 /** 1879 * glock_hash_walk - Call a function for glock in a hash bucket 1880 * @examiner: the function 1881 * @sdp: the filesystem 1882 * 1883 * Note that the function can be called multiple times on the same 1884 * object. So the user must ensure that the function can cope with 1885 * that. 
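 * (The rhashtable walk below restarts when it returns -EAGAIN, so a glock
 * seen before the restart may be passed to the examiner again.)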
1886 */ 1887 1888 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) 1889 { 1890 struct gfs2_glock *gl; 1891 struct rhashtable_iter iter; 1892 1893 rhashtable_walk_enter(&gl_hash_table, &iter); 1894 1895 do { 1896 rhashtable_walk_start(&iter); 1897 1898 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) 1899 if (gl->gl_name.ln_sbd == sdp && 1900 lockref_get_not_dead(&gl->gl_lockref)) 1901 examiner(gl); 1902 1903 rhashtable_walk_stop(&iter); 1904 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); 1905 1906 rhashtable_walk_exit(&iter); 1907 } 1908 1909 bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay) 1910 { 1911 bool queued; 1912 1913 spin_lock(&gl->gl_lockref.lock); 1914 queued = queue_delayed_work(gfs2_delete_workqueue, 1915 &gl->gl_delete, delay); 1916 if (queued) 1917 set_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1918 spin_unlock(&gl->gl_lockref.lock); 1919 return queued; 1920 } 1921 1922 void gfs2_cancel_delete_work(struct gfs2_glock *gl) 1923 { 1924 if (cancel_delayed_work_sync(&gl->gl_delete)) { 1925 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1926 gfs2_glock_put(gl); 1927 } 1928 } 1929 1930 bool gfs2_delete_work_queued(const struct gfs2_glock *gl) 1931 { 1932 return test_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1933 } 1934 1935 static void flush_delete_work(struct gfs2_glock *gl) 1936 { 1937 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) { 1938 if (cancel_delayed_work(&gl->gl_delete)) { 1939 queue_delayed_work(gfs2_delete_workqueue, 1940 &gl->gl_delete, 0); 1941 } 1942 } 1943 gfs2_glock_queue_work(gl, 0); 1944 } 1945 1946 void gfs2_flush_delete_work(struct gfs2_sbd *sdp) 1947 { 1948 glock_hash_walk(flush_delete_work, sdp); 1949 flush_workqueue(gfs2_delete_workqueue); 1950 } 1951 1952 /** 1953 * thaw_glock - thaw out a glock which has an unprocessed reply waiting 1954 * @gl: The glock to thaw 1955 * 1956 */ 1957 1958 static void thaw_glock(struct gfs2_glock *gl) 1959 { 1960 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) { 1961 gfs2_glock_put(gl); 1962 return; 1963 } 1964 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1965 gfs2_glock_queue_work(gl, 0); 1966 } 1967 1968 /** 1969 * clear_glock - look at a glock and see if we can free it from glock cache 1970 * @gl: the glock to look at 1971 * 1972 */ 1973 1974 static void clear_glock(struct gfs2_glock *gl) 1975 { 1976 gfs2_glock_remove_from_lru(gl); 1977 1978 spin_lock(&gl->gl_lockref.lock); 1979 if (gl->gl_state != LM_ST_UNLOCKED) 1980 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1981 __gfs2_glock_queue_work(gl, 0); 1982 spin_unlock(&gl->gl_lockref.lock); 1983 } 1984 1985 /** 1986 * gfs2_glock_thaw - Thaw any frozen glocks 1987 * @sdp: The super block 1988 * 1989 */ 1990 1991 void gfs2_glock_thaw(struct gfs2_sbd *sdp) 1992 { 1993 glock_hash_walk(thaw_glock, sdp); 1994 } 1995 1996 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) 1997 { 1998 spin_lock(&gl->gl_lockref.lock); 1999 gfs2_dump_glock(seq, gl, fsid); 2000 spin_unlock(&gl->gl_lockref.lock); 2001 } 2002 2003 static void dump_glock_func(struct gfs2_glock *gl) 2004 { 2005 dump_glock(NULL, gl, true); 2006 } 2007 2008 /** 2009 * gfs2_gl_hash_clear - Empty out the glock hash table 2010 * @sdp: the filesystem 2011 * 2012 * Called when unmounting the filesystem. 
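 * Waits up to ten minutes (HZ * 600) for all glock disposals to complete,
 * then dumps any glocks that still remain.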
2013 */ 2014 2015 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) 2016 { 2017 set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags); 2018 flush_workqueue(glock_workqueue); 2019 glock_hash_walk(clear_glock, sdp); 2020 flush_workqueue(glock_workqueue); 2021 wait_event_timeout(sdp->sd_glock_wait, 2022 atomic_read(&sdp->sd_glock_disposal) == 0, 2023 HZ * 600); 2024 glock_hash_walk(dump_glock_func, sdp); 2025 } 2026 2027 void gfs2_glock_finish_truncate(struct gfs2_inode *ip) 2028 { 2029 struct gfs2_glock *gl = ip->i_gl; 2030 int ret; 2031 2032 ret = gfs2_truncatei_resume(ip); 2033 gfs2_glock_assert_withdraw(gl, ret == 0); 2034 2035 spin_lock(&gl->gl_lockref.lock); 2036 clear_bit(GLF_LOCK, &gl->gl_flags); 2037 run_queue(gl, 1); 2038 spin_unlock(&gl->gl_lockref.lock); 2039 } 2040 2041 static const char *state2str(unsigned state) 2042 { 2043 switch(state) { 2044 case LM_ST_UNLOCKED: 2045 return "UN"; 2046 case LM_ST_SHARED: 2047 return "SH"; 2048 case LM_ST_DEFERRED: 2049 return "DF"; 2050 case LM_ST_EXCLUSIVE: 2051 return "EX"; 2052 } 2053 return "??"; 2054 } 2055 2056 static const char *hflags2str(char *buf, u16 flags, unsigned long iflags) 2057 { 2058 char *p = buf; 2059 if (flags & LM_FLAG_TRY) 2060 *p++ = 't'; 2061 if (flags & LM_FLAG_TRY_1CB) 2062 *p++ = 'T'; 2063 if (flags & LM_FLAG_NOEXP) 2064 *p++ = 'e'; 2065 if (flags & LM_FLAG_ANY) 2066 *p++ = 'A'; 2067 if (flags & LM_FLAG_PRIORITY) 2068 *p++ = 'p'; 2069 if (flags & LM_FLAG_NODE_SCOPE) 2070 *p++ = 'n'; 2071 if (flags & GL_ASYNC) 2072 *p++ = 'a'; 2073 if (flags & GL_EXACT) 2074 *p++ = 'E'; 2075 if (flags & GL_NOCACHE) 2076 *p++ = 'c'; 2077 if (test_bit(HIF_HOLDER, &iflags)) 2078 *p++ = 'H'; 2079 if (test_bit(HIF_WAIT, &iflags)) 2080 *p++ = 'W'; 2081 if (test_bit(HIF_FIRST, &iflags)) 2082 *p++ = 'F'; 2083 *p = 0; 2084 return buf; 2085 } 2086 2087 /** 2088 * dump_holder - print information about a glock holder 2089 * @seq: the seq_file struct 2090 * @gh: the glock holder 2091 * @fs_id_buf: pointer to file system id (if requested) 2092 * 2093 */ 2094 2095 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh, 2096 const char *fs_id_buf) 2097 { 2098 struct task_struct *gh_owner = NULL; 2099 char flags_buf[32]; 2100 2101 rcu_read_lock(); 2102 if (gh->gh_owner_pid) 2103 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); 2104 gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n", 2105 fs_id_buf, state2str(gh->gh_state), 2106 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), 2107 gh->gh_error, 2108 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, 2109 gh_owner ? 
gh_owner->comm : "(ended)", 2110 (void *)gh->gh_ip); 2111 rcu_read_unlock(); 2112 } 2113 2114 static const char *gflags2str(char *buf, const struct gfs2_glock *gl) 2115 { 2116 const unsigned long *gflags = &gl->gl_flags; 2117 char *p = buf; 2118 2119 if (test_bit(GLF_LOCK, gflags)) 2120 *p++ = 'l'; 2121 if (test_bit(GLF_DEMOTE, gflags)) 2122 *p++ = 'D'; 2123 if (test_bit(GLF_PENDING_DEMOTE, gflags)) 2124 *p++ = 'd'; 2125 if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags)) 2126 *p++ = 'p'; 2127 if (test_bit(GLF_DIRTY, gflags)) 2128 *p++ = 'y'; 2129 if (test_bit(GLF_LFLUSH, gflags)) 2130 *p++ = 'f'; 2131 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags)) 2132 *p++ = 'i'; 2133 if (test_bit(GLF_REPLY_PENDING, gflags)) 2134 *p++ = 'r'; 2135 if (test_bit(GLF_INITIAL, gflags)) 2136 *p++ = 'I'; 2137 if (test_bit(GLF_FROZEN, gflags)) 2138 *p++ = 'F'; 2139 if (!list_empty(&gl->gl_holders)) 2140 *p++ = 'q'; 2141 if (test_bit(GLF_LRU, gflags)) 2142 *p++ = 'L'; 2143 if (gl->gl_object) 2144 *p++ = 'o'; 2145 if (test_bit(GLF_BLOCKING, gflags)) 2146 *p++ = 'b'; 2147 if (test_bit(GLF_PENDING_DELETE, gflags)) 2148 *p++ = 'P'; 2149 if (test_bit(GLF_FREEING, gflags)) 2150 *p++ = 'x'; 2151 *p = 0; 2152 return buf; 2153 } 2154 2155 /** 2156 * gfs2_dump_glock - print information about a glock 2157 * @seq: The seq_file struct 2158 * @gl: the glock 2159 * @fsid: If true, also dump the file system id 2160 * 2161 * The file format is as follows: 2162 * One line per object; capital letters are used to indicate objects: 2163 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented, 2164 * other objects are indented by a single space and follow the glock to 2165 * which they are related. Fields are indicated by lower case letters 2166 * followed by a colon and the field value, except for strings which are in 2167 * [] so that it's possible to see if they are composed of spaces for 2168 * example. The fields are n = number (id of the object), f = flags, 2169 * t = type, s = state, r = refcount, e = error, p = pid.
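 * On the G line, d = demote state/time, a = ail count, v = revoke
 * count, m = hold time and p = number of cached pages are also printed
 * (see the format string below). An illustrative example of the output
 * (the values here are invented):
 *
 *   G: s:SH n:2/2bf1 f:Iqo t:SH d:EX/0 a:0 v:0 r:3 m:200 p:1
 *    H: s:SH f:H e:0 p:1234 [cat] gfs2_inode_lookup+0x.../0x...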
2170 * 2171 */ 2172 2173 void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) 2174 { 2175 const struct gfs2_glock_operations *glops = gl->gl_ops; 2176 unsigned long long dtime; 2177 const struct gfs2_holder *gh; 2178 char gflags_buf[32]; 2179 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 2180 char fs_id_buf[sizeof(sdp->sd_fsname) + 7]; 2181 unsigned long nrpages = 0; 2182 2183 if (gl->gl_ops->go_flags & GLOF_ASPACE) { 2184 struct address_space *mapping = gfs2_glock2aspace(gl); 2185 2186 nrpages = mapping->nrpages; 2187 } 2188 memset(fs_id_buf, 0, sizeof(fs_id_buf)); 2189 if (fsid && sdp) /* safety precaution */ 2190 sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname); 2191 dtime = jiffies - gl->gl_demote_time; 2192 dtime *= 1000000/HZ; /* demote time in uSec */ 2193 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) 2194 dtime = 0; 2195 gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d " 2196 "v:%d r:%d m:%ld p:%lu\n", 2197 fs_id_buf, state2str(gl->gl_state), 2198 gl->gl_name.ln_type, 2199 (unsigned long long)gl->gl_name.ln_number, 2200 gflags2str(gflags_buf, gl), 2201 state2str(gl->gl_target), 2202 state2str(gl->gl_demote_state), dtime, 2203 atomic_read(&gl->gl_ail_count), 2204 atomic_read(&gl->gl_revokes), 2205 (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages); 2206 2207 list_for_each_entry(gh, &gl->gl_holders, gh_list) 2208 dump_holder(seq, gh, fs_id_buf); 2209 2210 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) 2211 glops->go_dump(seq, gl, fs_id_buf); 2212 } 2213 2214 static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr) 2215 { 2216 struct gfs2_glock *gl = iter_ptr; 2217 2218 seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n", 2219 gl->gl_name.ln_type, 2220 (unsigned long long)gl->gl_name.ln_number, 2221 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT], 2222 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], 2223 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], 2224 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], 2225 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT], 2226 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], 2227 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], 2228 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); 2229 return 0; 2230 } 2231 2232 static const char *gfs2_gltype[] = { 2233 "type", 2234 "reserved", 2235 "nondisk", 2236 "inode", 2237 "rgrp", 2238 "meta", 2239 "iopen", 2240 "flock", 2241 "plock", 2242 "quota", 2243 "journal", 2244 }; 2245 2246 static const char *gfs2_stype[] = { 2247 [GFS2_LKS_SRTT] = "srtt", 2248 [GFS2_LKS_SRTTVAR] = "srttvar", 2249 [GFS2_LKS_SRTTB] = "srttb", 2250 [GFS2_LKS_SRTTVARB] = "srttvarb", 2251 [GFS2_LKS_SIRT] = "sirt", 2252 [GFS2_LKS_SIRTVAR] = "sirtvar", 2253 [GFS2_LKS_DCOUNT] = "dlm", 2254 [GFS2_LKS_QCOUNT] = "queue", 2255 }; 2256 2257 #define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype)) 2258 2259 static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr) 2260 { 2261 struct gfs2_sbd *sdp = seq->private; 2262 loff_t pos = *(loff_t *)iter_ptr; 2263 unsigned index = pos >> 3; 2264 unsigned subindex = pos & 0x07; 2265 int i; 2266 2267 if (index == 0 && subindex != 0) 2268 return 0; 2269 2270 seq_printf(seq, "%-10s %8s:", gfs2_gltype[index], 2271 (index == 0) ? 
"cpu": gfs2_stype[subindex]); 2272 2273 for_each_possible_cpu(i) { 2274 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); 2275 2276 if (index == 0) 2277 seq_printf(seq, " %15u", i); 2278 else 2279 seq_printf(seq, " %15llu", (unsigned long long)lkstats-> 2280 lkstats[index - 1].stats[subindex]); 2281 } 2282 seq_putc(seq, '\n'); 2283 return 0; 2284 } 2285 2286 int __init gfs2_glock_init(void) 2287 { 2288 int i, ret; 2289 2290 ret = rhashtable_init(&gl_hash_table, &ht_parms); 2291 if (ret < 0) 2292 return ret; 2293 2294 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | 2295 WQ_HIGHPRI | WQ_FREEZABLE, 0); 2296 if (!glock_workqueue) { 2297 rhashtable_destroy(&gl_hash_table); 2298 return -ENOMEM; 2299 } 2300 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", 2301 WQ_MEM_RECLAIM | WQ_FREEZABLE, 2302 0); 2303 if (!gfs2_delete_workqueue) { 2304 destroy_workqueue(glock_workqueue); 2305 rhashtable_destroy(&gl_hash_table); 2306 return -ENOMEM; 2307 } 2308 2309 ret = register_shrinker(&glock_shrinker); 2310 if (ret) { 2311 destroy_workqueue(gfs2_delete_workqueue); 2312 destroy_workqueue(glock_workqueue); 2313 rhashtable_destroy(&gl_hash_table); 2314 return ret; 2315 } 2316 2317 for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++) 2318 init_waitqueue_head(glock_wait_table + i); 2319 2320 return 0; 2321 } 2322 2323 void gfs2_glock_exit(void) 2324 { 2325 unregister_shrinker(&glock_shrinker); 2326 rhashtable_destroy(&gl_hash_table); 2327 destroy_workqueue(glock_workqueue); 2328 destroy_workqueue(gfs2_delete_workqueue); 2329 } 2330 2331 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n) 2332 { 2333 struct gfs2_glock *gl = gi->gl; 2334 2335 if (gl) { 2336 if (n == 0) 2337 return; 2338 if (!lockref_put_not_zero(&gl->gl_lockref)) 2339 gfs2_glock_queue_put(gl); 2340 } 2341 for (;;) { 2342 gl = rhashtable_walk_next(&gi->hti); 2343 if (IS_ERR_OR_NULL(gl)) { 2344 if (gl == ERR_PTR(-EAGAIN)) { 2345 n = 1; 2346 continue; 2347 } 2348 gl = NULL; 2349 break; 2350 } 2351 if (gl->gl_name.ln_sbd != gi->sdp) 2352 continue; 2353 if (n <= 1) { 2354 if (!lockref_get_not_dead(&gl->gl_lockref)) 2355 continue; 2356 break; 2357 } else { 2358 if (__lockref_is_dead(&gl->gl_lockref)) 2359 continue; 2360 n--; 2361 } 2362 } 2363 gi->gl = gl; 2364 } 2365 2366 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) 2367 __acquires(RCU) 2368 { 2369 struct gfs2_glock_iter *gi = seq->private; 2370 loff_t n; 2371 2372 /* 2373 * We can either stay where we are, skip to the next hash table 2374 * entry, or start from the beginning. 
2375 */ 2376 if (*pos < gi->last_pos) { 2377 rhashtable_walk_exit(&gi->hti); 2378 rhashtable_walk_enter(&gl_hash_table, &gi->hti); 2379 n = *pos + 1; 2380 } else { 2381 n = *pos - gi->last_pos; 2382 } 2383 2384 rhashtable_walk_start(&gi->hti); 2385 2386 gfs2_glock_iter_next(gi, n); 2387 gi->last_pos = *pos; 2388 return gi->gl; 2389 } 2390 2391 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr, 2392 loff_t *pos) 2393 { 2394 struct gfs2_glock_iter *gi = seq->private; 2395 2396 (*pos)++; 2397 gi->last_pos = *pos; 2398 gfs2_glock_iter_next(gi, 1); 2399 return gi->gl; 2400 } 2401 2402 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr) 2403 __releases(RCU) 2404 { 2405 struct gfs2_glock_iter *gi = seq->private; 2406 2407 rhashtable_walk_stop(&gi->hti); 2408 } 2409 2410 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) 2411 { 2412 dump_glock(seq, iter_ptr, false); 2413 return 0; 2414 } 2415 2416 static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos) 2417 { 2418 preempt_disable(); 2419 if (*pos >= GFS2_NR_SBSTATS) 2420 return NULL; 2421 return pos; 2422 } 2423 2424 static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr, 2425 loff_t *pos) 2426 { 2427 (*pos)++; 2428 if (*pos >= GFS2_NR_SBSTATS) 2429 return NULL; 2430 return pos; 2431 } 2432 2433 static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr) 2434 { 2435 preempt_enable(); 2436 } 2437 2438 static const struct seq_operations gfs2_glock_seq_ops = { 2439 .start = gfs2_glock_seq_start, 2440 .next = gfs2_glock_seq_next, 2441 .stop = gfs2_glock_seq_stop, 2442 .show = gfs2_glock_seq_show, 2443 }; 2444 2445 static const struct seq_operations gfs2_glstats_seq_ops = { 2446 .start = gfs2_glock_seq_start, 2447 .next = gfs2_glock_seq_next, 2448 .stop = gfs2_glock_seq_stop, 2449 .show = gfs2_glstats_seq_show, 2450 }; 2451 2452 static const struct seq_operations gfs2_sbstats_sops = { 2453 .start = gfs2_sbstats_seq_start, 2454 .next = gfs2_sbstats_seq_next, 2455 .stop = gfs2_sbstats_seq_stop, 2456 .show = gfs2_sbstats_seq_show, 2457 }; 2458 2459 #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL) 2460 2461 static int __gfs2_glocks_open(struct inode *inode, struct file *file, 2462 const struct seq_operations *ops) 2463 { 2464 int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter)); 2465 if (ret == 0) { 2466 struct seq_file *seq = file->private_data; 2467 struct gfs2_glock_iter *gi = seq->private; 2468 2469 gi->sdp = inode->i_private; 2470 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); 2471 if (seq->buf) 2472 seq->size = GFS2_SEQ_GOODSIZE; 2473 /* 2474 * Initially, we are "before" the first hash table entry; the 2475 * first call to rhashtable_walk_next gets us the first entry. 
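		 * Starting last_pos at -1 makes the first ->start() call (with
		 * *pos == 0) compute a skip count of *pos - last_pos == 1 in
		 * gfs2_glock_seq_start(), which advances the walk by exactly
		 * one entry and so yields that first glock.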
2476 */ 2477 gi->last_pos = -1; 2478 gi->gl = NULL; 2479 rhashtable_walk_enter(&gl_hash_table, &gi->hti); 2480 } 2481 return ret; 2482 } 2483 2484 static int gfs2_glocks_open(struct inode *inode, struct file *file) 2485 { 2486 return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops); 2487 } 2488 2489 static int gfs2_glocks_release(struct inode *inode, struct file *file) 2490 { 2491 struct seq_file *seq = file->private_data; 2492 struct gfs2_glock_iter *gi = seq->private; 2493 2494 if (gi->gl) 2495 gfs2_glock_put(gi->gl); 2496 rhashtable_walk_exit(&gi->hti); 2497 return seq_release_private(inode, file); 2498 } 2499 2500 static int gfs2_glstats_open(struct inode *inode, struct file *file) 2501 { 2502 return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops); 2503 } 2504 2505 static const struct file_operations gfs2_glocks_fops = { 2506 .owner = THIS_MODULE, 2507 .open = gfs2_glocks_open, 2508 .read = seq_read, 2509 .llseek = seq_lseek, 2510 .release = gfs2_glocks_release, 2511 }; 2512 2513 static const struct file_operations gfs2_glstats_fops = { 2514 .owner = THIS_MODULE, 2515 .open = gfs2_glstats_open, 2516 .read = seq_read, 2517 .llseek = seq_lseek, 2518 .release = gfs2_glocks_release, 2519 }; 2520 2521 DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats); 2522 2523 void gfs2_create_debugfs_file(struct gfs2_sbd *sdp) 2524 { 2525 sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root); 2526 2527 debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2528 &gfs2_glocks_fops); 2529 2530 debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2531 &gfs2_glstats_fops); 2532 2533 debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2534 &gfs2_sbstats_fops); 2535 } 2536 2537 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp) 2538 { 2539 debugfs_remove_recursive(sdp->debugfs_dir); 2540 sdp->debugfs_dir = NULL; 2541 } 2542 2543 void gfs2_register_debugfs(void) 2544 { 2545 gfs2_root = debugfs_create_dir("gfs2", NULL); 2546 } 2547 2548 void gfs2_unregister_debugfs(void) 2549 { 2550 debugfs_remove(gfs2_root); 2551 gfs2_root = NULL; 2552 } 2553
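/*
 * Debugfs layout, for reference (a sketch; it assumes debugfs is mounted
 * at the conventional /sys/kernel/debug). gfs2_register_debugfs() creates
 * the top-level "gfs2" directory, and gfs2_create_debugfs_file() adds one
 * directory per mounted filesystem, named after sdp->sd_table_name,
 * holding the read-only seq_files created above:
 *
 *   /sys/kernel/debug/gfs2/<table name>/glocks    one line per glock plus holders
 *   /sys/kernel/debug/gfs2/<table name>/glstats   per-glock latency statistics
 *   /sys/kernel/debug/gfs2/<table name>/sbstats   per-CPU statistics per glock type
 */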