1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 5 */ 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <linux/sched.h> 10 #include <linux/slab.h> 11 #include <linux/spinlock.h> 12 #include <linux/buffer_head.h> 13 #include <linux/delay.h> 14 #include <linux/sort.h> 15 #include <linux/hash.h> 16 #include <linux/jhash.h> 17 #include <linux/kallsyms.h> 18 #include <linux/gfs2_ondisk.h> 19 #include <linux/list.h> 20 #include <linux/wait.h> 21 #include <linux/module.h> 22 #include <linux/uaccess.h> 23 #include <linux/seq_file.h> 24 #include <linux/debugfs.h> 25 #include <linux/kthread.h> 26 #include <linux/freezer.h> 27 #include <linux/workqueue.h> 28 #include <linux/jiffies.h> 29 #include <linux/rcupdate.h> 30 #include <linux/rculist_bl.h> 31 #include <linux/bit_spinlock.h> 32 #include <linux/percpu.h> 33 #include <linux/list_sort.h> 34 #include <linux/lockref.h> 35 #include <linux/rhashtable.h> 36 37 #include "gfs2.h" 38 #include "incore.h" 39 #include "glock.h" 40 #include "glops.h" 41 #include "inode.h" 42 #include "lops.h" 43 #include "meta_io.h" 44 #include "quota.h" 45 #include "super.h" 46 #include "util.h" 47 #include "bmap.h" 48 #define CREATE_TRACE_POINTS 49 #include "trace_gfs2.h" 50 51 struct gfs2_glock_iter { 52 struct gfs2_sbd *sdp; /* incore superblock */ 53 struct rhashtable_iter hti; /* rhashtable iterator */ 54 struct gfs2_glock *gl; /* current glock struct */ 55 loff_t last_pos; /* last position */ 56 }; 57 58 typedef void (*glock_examiner) (struct gfs2_glock * gl); 59 60 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); 61 62 static struct dentry *gfs2_root; 63 static struct workqueue_struct *glock_workqueue; 64 struct workqueue_struct *gfs2_delete_workqueue; 65 static LIST_HEAD(lru_list); 66 static atomic_t lru_count = ATOMIC_INIT(0); 67 static DEFINE_SPINLOCK(lru_lock); 68 69 #define GFS2_GL_HASH_SHIFT 15 70 #define GFS2_GL_HASH_SIZE BIT(GFS2_GL_HASH_SHIFT) 71 72 static const struct rhashtable_params ht_parms = { 73 .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4, 74 .key_len = offsetofend(struct lm_lockname, ln_type), 75 .key_offset = offsetof(struct gfs2_glock, gl_name), 76 .head_offset = offsetof(struct gfs2_glock, gl_node), 77 }; 78 79 static struct rhashtable gl_hash_table; 80 81 #define GLOCK_WAIT_TABLE_BITS 12 82 #define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS) 83 static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned; 84 85 struct wait_glock_queue { 86 struct lm_lockname *name; 87 wait_queue_entry_t wait; 88 }; 89 90 static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode, 91 int sync, void *key) 92 { 93 struct wait_glock_queue *wait_glock = 94 container_of(wait, struct wait_glock_queue, wait); 95 struct lm_lockname *wait_name = wait_glock->name; 96 struct lm_lockname *wake_name = key; 97 98 if (wake_name->ln_sbd != wait_name->ln_sbd || 99 wake_name->ln_number != wait_name->ln_number || 100 wake_name->ln_type != wait_name->ln_type) 101 return 0; 102 return autoremove_wake_function(wait, mode, sync, key); 103 } 104 105 static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name) 106 { 107 u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0); 108 109 return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS); 110 } 111 112 /** 113 * wake_up_glock - Wake up waiters on a glock 114 * @gl: the glock 115 */ 116 static void 
wake_up_glock(struct gfs2_glock *gl) 117 { 118 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); 119 120 if (waitqueue_active(wq)) 121 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); 122 } 123 124 static void gfs2_glock_dealloc(struct rcu_head *rcu) 125 { 126 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); 127 128 kfree(gl->gl_lksb.sb_lvbptr); 129 if (gl->gl_ops->go_flags & GLOF_ASPACE) 130 kmem_cache_free(gfs2_glock_aspace_cachep, gl); 131 else 132 kmem_cache_free(gfs2_glock_cachep, gl); 133 } 134 135 /** 136 * glock_blocked_by_withdraw - determine if we can still use a glock 137 * @gl: the glock 138 * 139 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted 140 * when we're withdrawn. For example, to maintain metadata integrity, we should 141 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like 142 * iopen or the transaction glocks may be safely used because none of their 143 * metadata goes through the journal. So in general, we should disallow all 144 * glocks that are journaled, and allow all the others. One exception is: 145 * we need to allow our active journal to be promoted and demoted so others 146 * may recover it and we can reacquire it when they're done. 147 */ 148 static bool glock_blocked_by_withdraw(struct gfs2_glock *gl) 149 { 150 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 151 152 if (likely(!gfs2_withdrawn(sdp))) 153 return false; 154 if (gl->gl_ops->go_flags & GLOF_NONDISK) 155 return false; 156 if (!sdp->sd_jdesc || 157 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr) 158 return false; 159 return true; 160 } 161 162 void gfs2_glock_free(struct gfs2_glock *gl) 163 { 164 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 165 166 gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0); 167 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); 168 smp_mb(); 169 wake_up_glock(gl); 170 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); 171 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 172 wake_up(&sdp->sd_glock_wait); 173 } 174 175 /** 176 * gfs2_glock_hold() - increment reference count on glock 177 * @gl: The glock to hold 178 * 179 */ 180 181 void gfs2_glock_hold(struct gfs2_glock *gl) 182 { 183 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 184 lockref_get(&gl->gl_lockref); 185 } 186 187 /** 188 * demote_ok - Check to see if it's ok to unlock a glock 189 * @gl: the glock 190 * 191 * Returns: 1 if it's ok 192 */ 193 194 static int demote_ok(const struct gfs2_glock *gl) 195 { 196 const struct gfs2_glock_operations *glops = gl->gl_ops; 197 198 if (gl->gl_state == LM_ST_UNLOCKED) 199 return 0; 200 if (!list_empty(&gl->gl_holders)) 201 return 0; 202 if (glops->go_demote_ok) 203 return glops->go_demote_ok(gl); 204 return 1; 205 } 206 207 208 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) 209 { 210 if (!(gl->gl_ops->go_flags & GLOF_LRU)) 211 return; 212 213 spin_lock(&lru_lock); 214 215 list_del(&gl->gl_lru); 216 list_add_tail(&gl->gl_lru, &lru_list); 217 218 if (!test_bit(GLF_LRU, &gl->gl_flags)) { 219 set_bit(GLF_LRU, &gl->gl_flags); 220 atomic_inc(&lru_count); 221 } 222 223 spin_unlock(&lru_lock); 224 } 225 226 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) 227 { 228 if (!(gl->gl_ops->go_flags & GLOF_LRU)) 229 return; 230 231 spin_lock(&lru_lock); 232 if (test_bit(GLF_LRU, &gl->gl_flags)) { 233 list_del_init(&gl->gl_lru); 234 atomic_dec(&lru_count); 235 clear_bit(GLF_LRU, &gl->gl_flags); 236 } 237 spin_unlock(&lru_lock); 238 } 239 240 /* 241 * Enqueue the glock on 
the work queue. Passes one glock reference on to the 242 * work queue. 243 */ 244 static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { 245 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) { 246 /* 247 * We are holding the lockref spinlock, and the work was still 248 * queued above. The queued work (glock_work_func) takes that 249 * spinlock before dropping its glock reference(s), so it 250 * cannot have dropped them in the meantime. 251 */ 252 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2); 253 gl->gl_lockref.count--; 254 } 255 } 256 257 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { 258 spin_lock(&gl->gl_lockref.lock); 259 __gfs2_glock_queue_work(gl, delay); 260 spin_unlock(&gl->gl_lockref.lock); 261 } 262 263 static void __gfs2_glock_put(struct gfs2_glock *gl) 264 { 265 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 266 struct address_space *mapping = gfs2_glock2aspace(gl); 267 268 lockref_mark_dead(&gl->gl_lockref); 269 270 gfs2_glock_remove_from_lru(gl); 271 spin_unlock(&gl->gl_lockref.lock); 272 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 273 if (mapping) { 274 truncate_inode_pages_final(mapping); 275 if (!gfs2_withdrawn(sdp)) 276 GLOCK_BUG_ON(gl, mapping->nrpages || 277 mapping->nrexceptional); 278 } 279 trace_gfs2_glock_put(gl); 280 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); 281 } 282 283 /* 284 * Cause the glock to be put in work queue context. 285 */ 286 void gfs2_glock_queue_put(struct gfs2_glock *gl) 287 { 288 gfs2_glock_queue_work(gl, 0); 289 } 290 291 /** 292 * gfs2_glock_put() - Decrement reference count on glock 293 * @gl: The glock to put 294 * 295 */ 296 297 void gfs2_glock_put(struct gfs2_glock *gl) 298 { 299 if (lockref_put_or_lock(&gl->gl_lockref)) 300 return; 301 302 __gfs2_glock_put(gl); 303 } 304 305 /** 306 * may_grant - check if its ok to grant a new lock 307 * @gl: The glock 308 * @gh: The lock request which we wish to grant 309 * 310 * Returns: true if its ok to grant the lock 311 */ 312 313 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) 314 { 315 const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list); 316 if ((gh->gh_state == LM_ST_EXCLUSIVE || 317 gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head) 318 return 0; 319 if (gl->gl_state == gh->gh_state) 320 return 1; 321 if (gh->gh_flags & GL_EXACT) 322 return 0; 323 if (gl->gl_state == LM_ST_EXCLUSIVE) { 324 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED) 325 return 1; 326 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED) 327 return 1; 328 } 329 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) 330 return 1; 331 return 0; 332 } 333 334 static void gfs2_holder_wake(struct gfs2_holder *gh) 335 { 336 clear_bit(HIF_WAIT, &gh->gh_iflags); 337 smp_mb__after_atomic(); 338 wake_up_bit(&gh->gh_iflags, HIF_WAIT); 339 if (gh->gh_flags & GL_ASYNC) { 340 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd; 341 342 wake_up(&sdp->sd_async_glock_wait); 343 } 344 } 345 346 /** 347 * do_error - Something unexpected has happened during a lock request 348 * 349 */ 350 351 static void do_error(struct gfs2_glock *gl, const int ret) 352 { 353 struct gfs2_holder *gh, *tmp; 354 355 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 356 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 357 continue; 358 if (ret & LM_OUT_ERROR) 359 gh->gh_error = -EIO; 360 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) 
361 gh->gh_error = GLR_TRYFAILED; 362 else 363 continue; 364 list_del_init(&gh->gh_list); 365 trace_gfs2_glock_queue(gh, 0); 366 gfs2_holder_wake(gh); 367 } 368 } 369 370 /** 371 * do_promote - promote as many requests as possible on the current queue 372 * @gl: The glock 373 * 374 * Returns: 1 if there is a blocked holder at the head of the list, or 2 375 * if a type specific operation is underway. 376 */ 377 378 static int do_promote(struct gfs2_glock *gl) 379 __releases(&gl->gl_lockref.lock) 380 __acquires(&gl->gl_lockref.lock) 381 { 382 const struct gfs2_glock_operations *glops = gl->gl_ops; 383 struct gfs2_holder *gh, *tmp; 384 int ret; 385 386 restart: 387 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 388 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 389 continue; 390 if (may_grant(gl, gh)) { 391 if (gh->gh_list.prev == &gl->gl_holders && 392 glops->go_lock) { 393 spin_unlock(&gl->gl_lockref.lock); 394 /* FIXME: eliminate this eventually */ 395 ret = glops->go_lock(gh); 396 spin_lock(&gl->gl_lockref.lock); 397 if (ret) { 398 if (ret == 1) 399 return 2; 400 gh->gh_error = ret; 401 list_del_init(&gh->gh_list); 402 trace_gfs2_glock_queue(gh, 0); 403 gfs2_holder_wake(gh); 404 goto restart; 405 } 406 set_bit(HIF_HOLDER, &gh->gh_iflags); 407 trace_gfs2_promote(gh, 1); 408 gfs2_holder_wake(gh); 409 goto restart; 410 } 411 set_bit(HIF_HOLDER, &gh->gh_iflags); 412 trace_gfs2_promote(gh, 0); 413 gfs2_holder_wake(gh); 414 continue; 415 } 416 if (gh->gh_list.prev == &gl->gl_holders) 417 return 1; 418 do_error(gl, 0); 419 break; 420 } 421 return 0; 422 } 423 424 /** 425 * find_first_waiter - find the first gh that's waiting for the glock 426 * @gl: the glock 427 */ 428 429 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) 430 { 431 struct gfs2_holder *gh; 432 433 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 434 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) 435 return gh; 436 } 437 return NULL; 438 } 439 440 /** 441 * state_change - record that the glock is now in a different state 442 * @gl: the glock 443 * @new_state the new state 444 * 445 */ 446 447 static void state_change(struct gfs2_glock *gl, unsigned int new_state) 448 { 449 int held1, held2; 450 451 held1 = (gl->gl_state != LM_ST_UNLOCKED); 452 held2 = (new_state != LM_ST_UNLOCKED); 453 454 if (held1 != held2) { 455 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 456 if (held2) 457 gl->gl_lockref.count++; 458 else 459 gl->gl_lockref.count--; 460 } 461 if (new_state != gl->gl_target) 462 /* shorten our minimum hold time */ 463 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, 464 GL_GLOCK_MIN_HOLD); 465 gl->gl_state = new_state; 466 gl->gl_tchange = jiffies; 467 } 468 469 static void gfs2_set_demote(struct gfs2_glock *gl) 470 { 471 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 472 473 set_bit(GLF_DEMOTE, &gl->gl_flags); 474 smp_mb(); 475 wake_up(&sdp->sd_async_glock_wait); 476 } 477 478 static void gfs2_demote_wake(struct gfs2_glock *gl) 479 { 480 gl->gl_demote_state = LM_ST_EXCLUSIVE; 481 clear_bit(GLF_DEMOTE, &gl->gl_flags); 482 smp_mb__after_atomic(); 483 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); 484 } 485 486 /** 487 * finish_xmote - The DLM has replied to one of our lock requests 488 * @gl: The glock 489 * @ret: The status from the DLM 490 * 491 */ 492 493 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) 494 { 495 const struct gfs2_glock_operations *glops = gl->gl_ops; 496 struct gfs2_holder *gh; 497 unsigned state = ret & LM_OUT_ST_MASK; 498 int rv; 499 500 
spin_lock(&gl->gl_lockref.lock); 501 trace_gfs2_glock_state_change(gl, state); 502 state_change(gl, state); 503 gh = find_first_waiter(gl); 504 505 /* Demote to UN request arrived during demote to SH or DF */ 506 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && 507 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) 508 gl->gl_target = LM_ST_UNLOCKED; 509 510 /* Check for state != intended state */ 511 if (unlikely(state != gl->gl_target)) { 512 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { 513 /* move to back of queue and try next entry */ 514 if (ret & LM_OUT_CANCELED) { 515 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) 516 list_move_tail(&gh->gh_list, &gl->gl_holders); 517 gh = find_first_waiter(gl); 518 gl->gl_target = gh->gh_state; 519 goto retry; 520 } 521 /* Some error or failed "try lock" - report it */ 522 if ((ret & LM_OUT_ERROR) || 523 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 524 gl->gl_target = gl->gl_state; 525 do_error(gl, ret); 526 goto out; 527 } 528 } 529 switch(state) { 530 /* Unlocked due to conversion deadlock, try again */ 531 case LM_ST_UNLOCKED: 532 retry: 533 do_xmote(gl, gh, gl->gl_target); 534 break; 535 /* Conversion fails, unlock and try again */ 536 case LM_ST_SHARED: 537 case LM_ST_DEFERRED: 538 do_xmote(gl, gh, LM_ST_UNLOCKED); 539 break; 540 default: /* Everything else */ 541 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", 542 gl->gl_target, state); 543 GLOCK_BUG_ON(gl, 1); 544 } 545 spin_unlock(&gl->gl_lockref.lock); 546 return; 547 } 548 549 /* Fast path - we got what we asked for */ 550 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) 551 gfs2_demote_wake(gl); 552 if (state != LM_ST_UNLOCKED) { 553 if (glops->go_xmote_bh) { 554 spin_unlock(&gl->gl_lockref.lock); 555 rv = glops->go_xmote_bh(gl, gh); 556 spin_lock(&gl->gl_lockref.lock); 557 if (rv) { 558 do_error(gl, rv); 559 goto out; 560 } 561 } 562 rv = do_promote(gl); 563 if (rv == 2) 564 goto out_locked; 565 } 566 out: 567 clear_bit(GLF_LOCK, &gl->gl_flags); 568 out_locked: 569 spin_unlock(&gl->gl_lockref.lock); 570 } 571 572 /** 573 * do_xmote - Calls the DLM to change the state of a lock 574 * @gl: The lock state 575 * @gh: The holder (only for promotes) 576 * @target: The target lock state 577 * 578 */ 579 580 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) 581 __releases(&gl->gl_lockref.lock) 582 __acquires(&gl->gl_lockref.lock) 583 { 584 const struct gfs2_glock_operations *glops = gl->gl_ops; 585 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 586 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0); 587 int ret; 588 589 if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) && 590 gh && !(gh->gh_flags & LM_FLAG_NOEXP)) 591 return; 592 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | 593 LM_FLAG_PRIORITY); 594 GLOCK_BUG_ON(gl, gl->gl_state == target); 595 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); 596 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) && 597 glops->go_inval) { 598 /* 599 * If another process is already doing the invalidate, let that 600 * finish first. The glock state machine will get back to this 601 * holder again later. 
602 */ 603 if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS, 604 &gl->gl_flags)) 605 return; 606 do_error(gl, 0); /* Fail queued try locks */ 607 } 608 gl->gl_req = target; 609 set_bit(GLF_BLOCKING, &gl->gl_flags); 610 if ((gl->gl_req == LM_ST_UNLOCKED) || 611 (gl->gl_state == LM_ST_EXCLUSIVE) || 612 (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB))) 613 clear_bit(GLF_BLOCKING, &gl->gl_flags); 614 spin_unlock(&gl->gl_lockref.lock); 615 if (glops->go_sync) { 616 ret = glops->go_sync(gl); 617 /* If we had a problem syncing (due to io errors or whatever, 618 * we should not invalidate the metadata or tell dlm to 619 * release the glock to other nodes. 620 */ 621 if (ret) { 622 if (cmpxchg(&sdp->sd_log_error, 0, ret)) { 623 fs_err(sdp, "Error %d syncing glock \n", ret); 624 gfs2_dump_glock(NULL, gl, true); 625 } 626 goto skip_inval; 627 } 628 } 629 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) { 630 /* 631 * The call to go_sync should have cleared out the ail list. 632 * If there are still items, we have a problem. We ought to 633 * withdraw, but we can't because the withdraw code also uses 634 * glocks. Warn about the error, dump the glock, then fall 635 * through and wait for logd to do the withdraw for us. 636 */ 637 if ((atomic_read(&gl->gl_ail_count) != 0) && 638 (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) { 639 gfs2_glock_assert_warn(gl, 640 !atomic_read(&gl->gl_ail_count)); 641 gfs2_dump_glock(NULL, gl, true); 642 } 643 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); 644 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); 645 } 646 647 skip_inval: 648 gfs2_glock_hold(gl); 649 /* 650 * Check for an error encountered since we called go_sync and go_inval. 651 * If so, we can't withdraw from the glock code because the withdraw 652 * code itself uses glocks (see function signal_our_withdraw) to 653 * change the mount to read-only. Most importantly, we must not call 654 * dlm to unlock the glock until the journal is in a known good state 655 * (after journal replay) otherwise other nodes may use the object 656 * (rgrp or dinode) and then later, journal replay will corrupt the 657 * file system. The best we can do here is wait for the logd daemon 658 * to see sd_log_error and withdraw, and in the meantime, requeue the 659 * work for later. 660 * 661 * However, if we're just unlocking the lock (say, for unmount, when 662 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete 663 * then it's okay to tell dlm to unlock it. 
664 */ 665 if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp))) 666 gfs2_withdraw_delayed(sdp); 667 if (glock_blocked_by_withdraw(gl)) { 668 if (target != LM_ST_UNLOCKED || 669 test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags)) { 670 gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); 671 goto out; 672 } 673 } 674 675 if (sdp->sd_lockstruct.ls_ops->lm_lock) { 676 /* lock_dlm */ 677 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); 678 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && 679 target == LM_ST_UNLOCKED && 680 test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) { 681 finish_xmote(gl, target); 682 gfs2_glock_queue_work(gl, 0); 683 } else if (ret) { 684 fs_err(sdp, "lm_lock ret %d\n", ret); 685 GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp)); 686 } 687 } else { /* lock_nolock */ 688 finish_xmote(gl, target); 689 gfs2_glock_queue_work(gl, 0); 690 } 691 out: 692 spin_lock(&gl->gl_lockref.lock); 693 } 694 695 /** 696 * find_first_holder - find the first "holder" gh 697 * @gl: the glock 698 */ 699 700 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) 701 { 702 struct gfs2_holder *gh; 703 704 if (!list_empty(&gl->gl_holders)) { 705 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 706 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 707 return gh; 708 } 709 return NULL; 710 } 711 712 /** 713 * run_queue - do all outstanding tasks related to a glock 714 * @gl: The glock in question 715 * @nonblock: True if we must not block in run_queue 716 * 717 */ 718 719 static void run_queue(struct gfs2_glock *gl, const int nonblock) 720 __releases(&gl->gl_lockref.lock) 721 __acquires(&gl->gl_lockref.lock) 722 { 723 struct gfs2_holder *gh = NULL; 724 int ret; 725 726 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) 727 return; 728 729 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); 730 731 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && 732 gl->gl_demote_state != gl->gl_state) { 733 if (find_first_holder(gl)) 734 goto out_unlock; 735 if (nonblock) 736 goto out_sched; 737 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); 738 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); 739 gl->gl_target = gl->gl_demote_state; 740 } else { 741 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) 742 gfs2_demote_wake(gl); 743 ret = do_promote(gl); 744 if (ret == 0) 745 goto out_unlock; 746 if (ret == 2) 747 goto out; 748 gh = find_first_waiter(gl); 749 gl->gl_target = gh->gh_state; 750 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) 751 do_error(gl, 0); /* Fail queued try locks */ 752 } 753 do_xmote(gl, gh, gl->gl_target); 754 out: 755 return; 756 757 out_sched: 758 clear_bit(GLF_LOCK, &gl->gl_flags); 759 smp_mb__after_atomic(); 760 gl->gl_lockref.count++; 761 __gfs2_glock_queue_work(gl, 0); 762 return; 763 764 out_unlock: 765 clear_bit(GLF_LOCK, &gl->gl_flags); 766 smp_mb__after_atomic(); 767 return; 768 } 769 770 void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation) 771 { 772 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; 773 774 if (ri->ri_magic == 0) 775 ri->ri_magic = cpu_to_be32(GFS2_MAGIC); 776 if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC)) 777 ri->ri_generation_deleted = cpu_to_be64(generation); 778 } 779 780 bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation) 781 { 782 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; 783 784 if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC)) 785 return false; 786 return generation <= be64_to_cpu(ri->ri_generation_deleted); 787 } 788 789 static void 
gfs2_glock_poke(struct gfs2_glock *gl) 790 { 791 int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP; 792 struct gfs2_holder gh; 793 int error; 794 795 gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh); 796 error = gfs2_glock_nq(&gh); 797 if (!error) 798 gfs2_glock_dq(&gh); 799 gfs2_holder_uninit(&gh); 800 } 801 802 static bool gfs2_try_evict(struct gfs2_glock *gl) 803 { 804 struct gfs2_inode *ip; 805 bool evicted = false; 806 807 /* 808 * If there is contention on the iopen glock and we have an inode, try 809 * to grab and release the inode so that it can be evicted. This will 810 * allow the remote node to go ahead and delete the inode without us 811 * having to do it, which will avoid rgrp glock thrashing. 812 * 813 * The remote node is likely still holding the corresponding inode 814 * glock, so it will run before we get to verify that the delete has 815 * happened below. 816 */ 817 spin_lock(&gl->gl_lockref.lock); 818 ip = gl->gl_object; 819 if (ip && !igrab(&ip->i_inode)) 820 ip = NULL; 821 spin_unlock(&gl->gl_lockref.lock); 822 if (ip) { 823 struct gfs2_glock *inode_gl = NULL; 824 825 gl->gl_no_formal_ino = ip->i_no_formal_ino; 826 set_bit(GIF_DEFERRED_DELETE, &ip->i_flags); 827 d_prune_aliases(&ip->i_inode); 828 iput(&ip->i_inode); 829 830 /* If the inode was evicted, gl->gl_object will now be NULL. */ 831 spin_lock(&gl->gl_lockref.lock); 832 ip = gl->gl_object; 833 if (ip) { 834 inode_gl = ip->i_gl; 835 lockref_get(&inode_gl->gl_lockref); 836 clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags); 837 } 838 spin_unlock(&gl->gl_lockref.lock); 839 if (inode_gl) { 840 gfs2_glock_poke(inode_gl); 841 gfs2_glock_put(inode_gl); 842 } 843 evicted = !ip; 844 } 845 return evicted; 846 } 847 848 static void delete_work_func(struct work_struct *work) 849 { 850 struct delayed_work *dwork = to_delayed_work(work); 851 struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete); 852 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 853 struct inode *inode; 854 u64 no_addr = gl->gl_name.ln_number; 855 856 spin_lock(&gl->gl_lockref.lock); 857 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); 858 spin_unlock(&gl->gl_lockref.lock); 859 860 /* If someone's using this glock to create a new dinode, the block must 861 have been freed by another node, then re-used, in which case our 862 iopen callback is too late after the fact. Ignore it. */ 863 if (test_bit(GLF_INODE_CREATING, &gl->gl_flags)) 864 goto out; 865 866 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { 867 /* 868 * If we can evict the inode, give the remote node trying to 869 * delete the inode some time before verifying that the delete 870 * has happened. Otherwise, if we cause contention on the inode glock 871 * immediately, the remote node will think that we still have 872 * the inode in use, and so it will give up waiting. 873 * 874 * If we can't evict the inode, signal to the remote node that 875 * the inode is still in use. We'll later try to delete the 876 * inode locally in gfs2_evict_inode. 877 * 878 * FIXME: We only need to verify that the remote node has 879 * deleted the inode because nodes before this remote delete 880 * rework won't cooperate. At a later time, when we no longer 881 * care about compatibility with such nodes, we can skip this 882 * step entirely. 
883 */ 884 if (gfs2_try_evict(gl)) { 885 if (gfs2_queue_delete_work(gl, 5 * HZ)) 886 return; 887 } 888 goto out; 889 } 890 891 inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino, 892 GFS2_BLKST_UNLINKED); 893 if (!IS_ERR_OR_NULL(inode)) { 894 d_prune_aliases(inode); 895 iput(inode); 896 } 897 out: 898 gfs2_glock_put(gl); 899 } 900 901 static void glock_work_func(struct work_struct *work) 902 { 903 unsigned long delay = 0; 904 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); 905 unsigned int drop_refs = 1; 906 907 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { 908 finish_xmote(gl, gl->gl_reply); 909 drop_refs++; 910 } 911 spin_lock(&gl->gl_lockref.lock); 912 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 913 gl->gl_state != LM_ST_UNLOCKED && 914 gl->gl_demote_state != LM_ST_EXCLUSIVE) { 915 unsigned long holdtime, now = jiffies; 916 917 holdtime = gl->gl_tchange + gl->gl_hold_time; 918 if (time_before(now, holdtime)) 919 delay = holdtime - now; 920 921 if (!delay) { 922 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 923 gfs2_set_demote(gl); 924 } 925 } 926 run_queue(gl, 0); 927 if (delay) { 928 /* Keep one glock reference for the work we requeue. */ 929 drop_refs--; 930 if (gl->gl_name.ln_type != LM_TYPE_INODE) 931 delay = 0; 932 __gfs2_glock_queue_work(gl, delay); 933 } 934 935 /* 936 * Drop the remaining glock references manually here. (Mind that 937 * __gfs2_glock_queue_work depends on the lockref spinlock begin held 938 * here as well.) 939 */ 940 gl->gl_lockref.count -= drop_refs; 941 if (!gl->gl_lockref.count) { 942 __gfs2_glock_put(gl); 943 return; 944 } 945 spin_unlock(&gl->gl_lockref.lock); 946 } 947 948 static struct gfs2_glock *find_insert_glock(struct lm_lockname *name, 949 struct gfs2_glock *new) 950 { 951 struct wait_glock_queue wait; 952 wait_queue_head_t *wq = glock_waitqueue(name); 953 struct gfs2_glock *gl; 954 955 wait.name = name; 956 init_wait(&wait.wait); 957 wait.wait.func = glock_wake_function; 958 959 again: 960 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); 961 rcu_read_lock(); 962 if (new) { 963 gl = rhashtable_lookup_get_insert_fast(&gl_hash_table, 964 &new->gl_node, ht_parms); 965 if (IS_ERR(gl)) 966 goto out; 967 } else { 968 gl = rhashtable_lookup_fast(&gl_hash_table, 969 name, ht_parms); 970 } 971 if (gl && !lockref_get_not_dead(&gl->gl_lockref)) { 972 rcu_read_unlock(); 973 schedule(); 974 goto again; 975 } 976 out: 977 rcu_read_unlock(); 978 finish_wait(wq, &wait.wait); 979 return gl; 980 } 981 982 /** 983 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist 984 * @sdp: The GFS2 superblock 985 * @number: the lock number 986 * @glops: The glock_operations to use 987 * @create: If 0, don't create the glock if it doesn't exist 988 * @glp: the glock is returned here 989 * 990 * This does not lock a glock, just finds/creates structures for one. 
991 * 992 * Returns: errno 993 */ 994 995 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, 996 const struct gfs2_glock_operations *glops, int create, 997 struct gfs2_glock **glp) 998 { 999 struct super_block *s = sdp->sd_vfs; 1000 struct lm_lockname name = { .ln_number = number, 1001 .ln_type = glops->go_type, 1002 .ln_sbd = sdp }; 1003 struct gfs2_glock *gl, *tmp; 1004 struct address_space *mapping; 1005 struct kmem_cache *cachep; 1006 int ret = 0; 1007 1008 gl = find_insert_glock(&name, NULL); 1009 if (gl) { 1010 *glp = gl; 1011 return 0; 1012 } 1013 if (!create) 1014 return -ENOENT; 1015 1016 if (glops->go_flags & GLOF_ASPACE) 1017 cachep = gfs2_glock_aspace_cachep; 1018 else 1019 cachep = gfs2_glock_cachep; 1020 gl = kmem_cache_alloc(cachep, GFP_NOFS); 1021 if (!gl) 1022 return -ENOMEM; 1023 1024 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 1025 1026 if (glops->go_flags & GLOF_LVB) { 1027 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); 1028 if (!gl->gl_lksb.sb_lvbptr) { 1029 kmem_cache_free(cachep, gl); 1030 return -ENOMEM; 1031 } 1032 } 1033 1034 atomic_inc(&sdp->sd_glock_disposal); 1035 gl->gl_node.next = NULL; 1036 gl->gl_flags = 0; 1037 gl->gl_name = name; 1038 gl->gl_lockref.count = 1; 1039 gl->gl_state = LM_ST_UNLOCKED; 1040 gl->gl_target = LM_ST_UNLOCKED; 1041 gl->gl_demote_state = LM_ST_EXCLUSIVE; 1042 gl->gl_ops = glops; 1043 gl->gl_dstamp = 0; 1044 preempt_disable(); 1045 /* We use the global stats to estimate the initial per-glock stats */ 1046 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; 1047 preempt_enable(); 1048 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; 1049 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; 1050 gl->gl_tchange = jiffies; 1051 gl->gl_object = NULL; 1052 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; 1053 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); 1054 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) 1055 INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func); 1056 1057 mapping = gfs2_glock2aspace(gl); 1058 if (mapping) { 1059 mapping->a_ops = &gfs2_meta_aops; 1060 mapping->host = s->s_bdev->bd_inode; 1061 mapping->flags = 0; 1062 mapping_set_gfp_mask(mapping, GFP_NOFS); 1063 mapping->private_data = NULL; 1064 mapping->writeback_index = 0; 1065 } 1066 1067 tmp = find_insert_glock(&name, gl); 1068 if (!tmp) { 1069 *glp = gl; 1070 goto out; 1071 } 1072 if (IS_ERR(tmp)) { 1073 ret = PTR_ERR(tmp); 1074 goto out_free; 1075 } 1076 *glp = tmp; 1077 1078 out_free: 1079 kfree(gl->gl_lksb.sb_lvbptr); 1080 kmem_cache_free(cachep, gl); 1081 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 1082 wake_up(&sdp->sd_glock_wait); 1083 1084 out: 1085 return ret; 1086 } 1087 1088 /** 1089 * gfs2_holder_init - initialize a struct gfs2_holder in the default way 1090 * @gl: the glock 1091 * @state: the state we're requesting 1092 * @flags: the modifier flags 1093 * @gh: the holder structure 1094 * 1095 */ 1096 1097 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, 1098 struct gfs2_holder *gh) 1099 { 1100 INIT_LIST_HEAD(&gh->gh_list); 1101 gh->gh_gl = gl; 1102 gh->gh_ip = _RET_IP_; 1103 gh->gh_owner_pid = get_pid(task_pid(current)); 1104 gh->gh_state = state; 1105 gh->gh_flags = flags; 1106 gh->gh_error = 0; 1107 gh->gh_iflags = 0; 1108 gfs2_glock_hold(gl); 1109 } 1110 1111 /** 1112 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it 1113 * @state: the state we're requesting 1114 * @flags: the modifier flags 1115 * @gh: the holder structure 1116 * 1117 * Don't mess with the glock. 
1118 * 1119 */ 1120 1121 void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh) 1122 { 1123 gh->gh_state = state; 1124 gh->gh_flags = flags; 1125 gh->gh_iflags = 0; 1126 gh->gh_ip = _RET_IP_; 1127 put_pid(gh->gh_owner_pid); 1128 gh->gh_owner_pid = get_pid(task_pid(current)); 1129 } 1130 1131 /** 1132 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference) 1133 * @gh: the holder structure 1134 * 1135 */ 1136 1137 void gfs2_holder_uninit(struct gfs2_holder *gh) 1138 { 1139 put_pid(gh->gh_owner_pid); 1140 gfs2_glock_put(gh->gh_gl); 1141 gfs2_holder_mark_uninitialized(gh); 1142 gh->gh_ip = 0; 1143 } 1144 1145 static void gfs2_glock_update_hold_time(struct gfs2_glock *gl, 1146 unsigned long start_time) 1147 { 1148 /* Have we waited longer that a second? */ 1149 if (time_after(jiffies, start_time + HZ)) { 1150 /* Lengthen the minimum hold time. */ 1151 gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR, 1152 GL_GLOCK_MAX_HOLD); 1153 } 1154 } 1155 1156 /** 1157 * gfs2_glock_wait - wait on a glock acquisition 1158 * @gh: the glock holder 1159 * 1160 * Returns: 0 on success 1161 */ 1162 1163 int gfs2_glock_wait(struct gfs2_holder *gh) 1164 { 1165 unsigned long start_time = jiffies; 1166 1167 might_sleep(); 1168 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); 1169 gfs2_glock_update_hold_time(gh->gh_gl, start_time); 1170 return gh->gh_error; 1171 } 1172 1173 static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs) 1174 { 1175 int i; 1176 1177 for (i = 0; i < num_gh; i++) 1178 if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) 1179 return 1; 1180 return 0; 1181 } 1182 1183 /** 1184 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions 1185 * @num_gh: the number of holders in the array 1186 * @ghs: the glock holder array 1187 * 1188 * Returns: 0 on success, meaning all glocks have been granted and are held. 1189 * -ESTALE if the request timed out, meaning all glocks were released, 1190 * and the caller should retry the operation. 1191 */ 1192 1193 int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs) 1194 { 1195 struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd; 1196 int i, ret = 0, timeout = 0; 1197 unsigned long start_time = jiffies; 1198 bool keep_waiting; 1199 1200 might_sleep(); 1201 /* 1202 * Total up the (minimum hold time * 2) of all glocks and use that to 1203 * determine the max amount of time we should wait. 1204 */ 1205 for (i = 0; i < num_gh; i++) 1206 timeout += ghs[i].gh_gl->gl_hold_time << 1; 1207 1208 wait_for_dlm: 1209 if (!wait_event_timeout(sdp->sd_async_glock_wait, 1210 !glocks_pending(num_gh, ghs), timeout)) 1211 ret = -ESTALE; /* request timed out. */ 1212 1213 /* 1214 * If dlm granted all our requests, we need to adjust the glock 1215 * minimum hold time values according to how long we waited. 1216 * 1217 * If our request timed out, we need to repeatedly release any held 1218 * glocks we acquired thus far to allow dlm to acquire the remaining 1219 * glocks without deadlocking. We cannot currently cancel outstanding 1220 * glock acquisitions. 1221 * 1222 * The HIF_WAIT bit tells us which requests still need a response from 1223 * dlm. 1224 * 1225 * If dlm sent us any errors, we return the first error we find. 1226 */ 1227 keep_waiting = false; 1228 for (i = 0; i < num_gh; i++) { 1229 /* Skip holders we have already dequeued below. */ 1230 if (!gfs2_holder_queued(&ghs[i])) 1231 continue; 1232 /* Skip holders with a pending DLM response. 
*/ 1233 if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) { 1234 keep_waiting = true; 1235 continue; 1236 } 1237 1238 if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) { 1239 if (ret == -ESTALE) 1240 gfs2_glock_dq(&ghs[i]); 1241 else 1242 gfs2_glock_update_hold_time(ghs[i].gh_gl, 1243 start_time); 1244 } 1245 if (!ret) 1246 ret = ghs[i].gh_error; 1247 } 1248 1249 if (keep_waiting) 1250 goto wait_for_dlm; 1251 1252 /* 1253 * At this point, we've either acquired all locks or released them all. 1254 */ 1255 return ret; 1256 } 1257 1258 /** 1259 * handle_callback - process a demote request 1260 * @gl: the glock 1261 * @state: the state the caller wants us to change to 1262 * 1263 * There are only two requests that we are going to see in actual 1264 * practise: LM_ST_SHARED and LM_ST_UNLOCKED 1265 */ 1266 1267 static void handle_callback(struct gfs2_glock *gl, unsigned int state, 1268 unsigned long delay, bool remote) 1269 { 1270 if (delay) 1271 set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 1272 else 1273 gfs2_set_demote(gl); 1274 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { 1275 gl->gl_demote_state = state; 1276 gl->gl_demote_time = jiffies; 1277 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && 1278 gl->gl_demote_state != state) { 1279 gl->gl_demote_state = LM_ST_UNLOCKED; 1280 } 1281 if (gl->gl_ops->go_callback) 1282 gl->gl_ops->go_callback(gl, remote); 1283 trace_gfs2_demote_rq(gl, remote); 1284 } 1285 1286 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...) 1287 { 1288 struct va_format vaf; 1289 va_list args; 1290 1291 va_start(args, fmt); 1292 1293 if (seq) { 1294 seq_vprintf(seq, fmt, args); 1295 } else { 1296 vaf.fmt = fmt; 1297 vaf.va = &args; 1298 1299 pr_err("%pV", &vaf); 1300 } 1301 1302 va_end(args); 1303 } 1304 1305 /** 1306 * add_to_queue - Add a holder to the wait queue (but look for recursion) 1307 * @gh: the holder structure to add 1308 * 1309 * Eventually we should move the recursive locking trap to a 1310 * debugging option or something like that. This is the fast 1311 * path and needs to have the minimum number of distractions. 
1312 * 1313 */ 1314 1315 static inline void add_to_queue(struct gfs2_holder *gh) 1316 __releases(&gl->gl_lockref.lock) 1317 __acquires(&gl->gl_lockref.lock) 1318 { 1319 struct gfs2_glock *gl = gh->gh_gl; 1320 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1321 struct list_head *insert_pt = NULL; 1322 struct gfs2_holder *gh2; 1323 int try_futile = 0; 1324 1325 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); 1326 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) 1327 GLOCK_BUG_ON(gl, true); 1328 1329 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { 1330 if (test_bit(GLF_LOCK, &gl->gl_flags)) 1331 try_futile = !may_grant(gl, gh); 1332 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) 1333 goto fail; 1334 } 1335 1336 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { 1337 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid && 1338 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK))) 1339 goto trap_recursive; 1340 if (try_futile && 1341 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 1342 fail: 1343 gh->gh_error = GLR_TRYFAILED; 1344 gfs2_holder_wake(gh); 1345 return; 1346 } 1347 if (test_bit(HIF_HOLDER, &gh2->gh_iflags)) 1348 continue; 1349 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) 1350 insert_pt = &gh2->gh_list; 1351 } 1352 trace_gfs2_glock_queue(gh, 1); 1353 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); 1354 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); 1355 if (likely(insert_pt == NULL)) { 1356 list_add_tail(&gh->gh_list, &gl->gl_holders); 1357 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) 1358 goto do_cancel; 1359 return; 1360 } 1361 list_add_tail(&gh->gh_list, insert_pt); 1362 do_cancel: 1363 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 1364 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { 1365 spin_unlock(&gl->gl_lockref.lock); 1366 if (sdp->sd_lockstruct.ls_ops->lm_cancel) 1367 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); 1368 spin_lock(&gl->gl_lockref.lock); 1369 } 1370 return; 1371 1372 trap_recursive: 1373 fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip); 1374 fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid)); 1375 fs_err(sdp, "lock type: %d req lock state : %d\n", 1376 gh2->gh_gl->gl_name.ln_type, gh2->gh_state); 1377 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip); 1378 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid)); 1379 fs_err(sdp, "lock type: %d req lock state : %d\n", 1380 gh->gh_gl->gl_name.ln_type, gh->gh_state); 1381 gfs2_dump_glock(NULL, gl, true); 1382 BUG(); 1383 } 1384 1385 /** 1386 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock) 1387 * @gh: the holder structure 1388 * 1389 * if (gh->gh_flags & GL_ASYNC), this never returns an error 1390 * 1391 * Returns: 0, GLR_TRYFAILED, or errno on failure 1392 */ 1393 1394 int gfs2_glock_nq(struct gfs2_holder *gh) 1395 { 1396 struct gfs2_glock *gl = gh->gh_gl; 1397 int error = 0; 1398 1399 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) 1400 return -EIO; 1401 1402 if (test_bit(GLF_LRU, &gl->gl_flags)) 1403 gfs2_glock_remove_from_lru(gl); 1404 1405 spin_lock(&gl->gl_lockref.lock); 1406 add_to_queue(gh); 1407 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && 1408 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { 1409 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1410 gl->gl_lockref.count++; 1411 __gfs2_glock_queue_work(gl, 0); 1412 } 1413 run_queue(gl, 1); 1414 spin_unlock(&gl->gl_lockref.lock); 1415 1416 if (!(gh->gh_flags & GL_ASYNC)) 1417 error = gfs2_glock_wait(gh); 1418 1419 return error; 1420 } 1421 1422 /** 1423 * gfs2_glock_poll - poll to see if an 
async request has been completed 1424 * @gh: the holder 1425 * 1426 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on 1427 */ 1428 1429 int gfs2_glock_poll(struct gfs2_holder *gh) 1430 { 1431 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; 1432 } 1433 1434 /** 1435 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock) 1436 * @gh: the glock holder 1437 * 1438 */ 1439 1440 void gfs2_glock_dq(struct gfs2_holder *gh) 1441 { 1442 struct gfs2_glock *gl = gh->gh_gl; 1443 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1444 unsigned delay = 0; 1445 int fast_path = 0; 1446 1447 spin_lock(&gl->gl_lockref.lock); 1448 /* 1449 * If we're in the process of file system withdraw, we cannot just 1450 * dequeue any glocks until our journal is recovered, lest we 1451 * introduce file system corruption. We need two exceptions to this 1452 * rule: We need to allow unlocking of nondisk glocks and the glock 1453 * for our own journal that needs recovery. 1454 */ 1455 if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) && 1456 glock_blocked_by_withdraw(gl) && 1457 gh->gh_gl != sdp->sd_jinode_gl) { 1458 sdp->sd_glock_dqs_held++; 1459 might_sleep(); 1460 wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY, 1461 TASK_UNINTERRUPTIBLE); 1462 } 1463 if (gh->gh_flags & GL_NOCACHE) 1464 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1465 1466 list_del_init(&gh->gh_list); 1467 clear_bit(HIF_HOLDER, &gh->gh_iflags); 1468 if (find_first_holder(gl) == NULL) { 1469 if (list_empty(&gl->gl_holders) && 1470 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1471 !test_bit(GLF_DEMOTE, &gl->gl_flags)) 1472 fast_path = 1; 1473 } 1474 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) 1475 gfs2_glock_add_to_lru(gl); 1476 1477 trace_gfs2_glock_queue(gh, 0); 1478 if (unlikely(!fast_path)) { 1479 gl->gl_lockref.count++; 1480 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1481 !test_bit(GLF_DEMOTE, &gl->gl_flags) && 1482 gl->gl_name.ln_type == LM_TYPE_INODE) 1483 delay = gl->gl_hold_time; 1484 __gfs2_glock_queue_work(gl, delay); 1485 } 1486 spin_unlock(&gl->gl_lockref.lock); 1487 } 1488 1489 void gfs2_glock_dq_wait(struct gfs2_holder *gh) 1490 { 1491 struct gfs2_glock *gl = gh->gh_gl; 1492 gfs2_glock_dq(gh); 1493 might_sleep(); 1494 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); 1495 } 1496 1497 /** 1498 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it 1499 * @gh: the holder structure 1500 * 1501 */ 1502 1503 void gfs2_glock_dq_uninit(struct gfs2_holder *gh) 1504 { 1505 gfs2_glock_dq(gh); 1506 gfs2_holder_uninit(gh); 1507 } 1508 1509 /** 1510 * gfs2_glock_nq_num - acquire a glock based on lock number 1511 * @sdp: the filesystem 1512 * @number: the lock number 1513 * @glops: the glock operations for the type of glock 1514 * @state: the state to acquire the glock in 1515 * @flags: modifier flags for the acquisition 1516 * @gh: the struct gfs2_holder 1517 * 1518 * Returns: errno 1519 */ 1520 1521 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number, 1522 const struct gfs2_glock_operations *glops, 1523 unsigned int state, u16 flags, struct gfs2_holder *gh) 1524 { 1525 struct gfs2_glock *gl; 1526 int error; 1527 1528 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); 1529 if (!error) { 1530 error = gfs2_glock_nq_init(gl, state, flags, gh); 1531 gfs2_glock_put(gl); 1532 } 1533 1534 return error; 1535 } 1536 1537 /** 1538 * glock_compare - Compare two struct gfs2_glock structures for sorting 1539 * @arg_a: the first structure 1540 * @arg_b: the second 
structure 1541 * 1542 */ 1543 1544 static int glock_compare(const void *arg_a, const void *arg_b) 1545 { 1546 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a; 1547 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b; 1548 const struct lm_lockname *a = &gh_a->gh_gl->gl_name; 1549 const struct lm_lockname *b = &gh_b->gh_gl->gl_name; 1550 1551 if (a->ln_number > b->ln_number) 1552 return 1; 1553 if (a->ln_number < b->ln_number) 1554 return -1; 1555 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type); 1556 return 0; 1557 } 1558 1559 /** 1560 * nq_m_sync - synchonously acquire more than one glock in deadlock free order 1561 * @num_gh: the number of structures 1562 * @ghs: an array of struct gfs2_holder structures 1563 * 1564 * Returns: 0 on success (all glocks acquired), 1565 * errno on failure (no glocks acquired) 1566 */ 1567 1568 static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs, 1569 struct gfs2_holder **p) 1570 { 1571 unsigned int x; 1572 int error = 0; 1573 1574 for (x = 0; x < num_gh; x++) 1575 p[x] = &ghs[x]; 1576 1577 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL); 1578 1579 for (x = 0; x < num_gh; x++) { 1580 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); 1581 1582 error = gfs2_glock_nq(p[x]); 1583 if (error) { 1584 while (x--) 1585 gfs2_glock_dq(p[x]); 1586 break; 1587 } 1588 } 1589 1590 return error; 1591 } 1592 1593 /** 1594 * gfs2_glock_nq_m - acquire multiple glocks 1595 * @num_gh: the number of structures 1596 * @ghs: an array of struct gfs2_holder structures 1597 * 1598 * 1599 * Returns: 0 on success (all glocks acquired), 1600 * errno on failure (no glocks acquired) 1601 */ 1602 1603 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1604 { 1605 struct gfs2_holder *tmp[4]; 1606 struct gfs2_holder **pph = tmp; 1607 int error = 0; 1608 1609 switch(num_gh) { 1610 case 0: 1611 return 0; 1612 case 1: 1613 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC); 1614 return gfs2_glock_nq(ghs); 1615 default: 1616 if (num_gh <= 4) 1617 break; 1618 pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *), 1619 GFP_NOFS); 1620 if (!pph) 1621 return -ENOMEM; 1622 } 1623 1624 error = nq_m_sync(num_gh, ghs, pph); 1625 1626 if (pph != tmp) 1627 kfree(pph); 1628 1629 return error; 1630 } 1631 1632 /** 1633 * gfs2_glock_dq_m - release multiple glocks 1634 * @num_gh: the number of structures 1635 * @ghs: an array of struct gfs2_holder structures 1636 * 1637 */ 1638 1639 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) 1640 { 1641 while (num_gh--) 1642 gfs2_glock_dq(&ghs[num_gh]); 1643 } 1644 1645 void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) 1646 { 1647 unsigned long delay = 0; 1648 unsigned long holdtime; 1649 unsigned long now = jiffies; 1650 1651 gfs2_glock_hold(gl); 1652 spin_lock(&gl->gl_lockref.lock); 1653 holdtime = gl->gl_tchange + gl->gl_hold_time; 1654 if (!list_empty(&gl->gl_holders) && 1655 gl->gl_name.ln_type == LM_TYPE_INODE) { 1656 if (time_before(now, holdtime)) 1657 delay = holdtime - now; 1658 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) 1659 delay = gl->gl_hold_time; 1660 } 1661 handle_callback(gl, state, delay, true); 1662 __gfs2_glock_queue_work(gl, delay); 1663 spin_unlock(&gl->gl_lockref.lock); 1664 } 1665 1666 /** 1667 * gfs2_should_freeze - Figure out if glock should be frozen 1668 * @gl: The glock in question 1669 * 1670 * Glocks are not frozen if (a) the result of the dlm operation is 1671 * an error, (b) the locking operation was an unlock 
operation or 1672 * (c) if there is a "noexp" flagged request anywhere in the queue 1673 * 1674 * Returns: 1 if freezing should occur, 0 otherwise 1675 */ 1676 1677 static int gfs2_should_freeze(const struct gfs2_glock *gl) 1678 { 1679 const struct gfs2_holder *gh; 1680 1681 if (gl->gl_reply & ~LM_OUT_ST_MASK) 1682 return 0; 1683 if (gl->gl_target == LM_ST_UNLOCKED) 1684 return 0; 1685 1686 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1687 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 1688 continue; 1689 if (LM_FLAG_NOEXP & gh->gh_flags) 1690 return 0; 1691 } 1692 1693 return 1; 1694 } 1695 1696 /** 1697 * gfs2_glock_complete - Callback used by locking 1698 * @gl: Pointer to the glock 1699 * @ret: The return value from the dlm 1700 * 1701 * The gl_reply field is under the gl_lockref.lock lock so that it is ok 1702 * to use a bitfield shared with other glock state fields. 1703 */ 1704 1705 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1706 { 1707 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; 1708 1709 spin_lock(&gl->gl_lockref.lock); 1710 gl->gl_reply = ret; 1711 1712 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { 1713 if (gfs2_should_freeze(gl)) { 1714 set_bit(GLF_FROZEN, &gl->gl_flags); 1715 spin_unlock(&gl->gl_lockref.lock); 1716 return; 1717 } 1718 } 1719 1720 gl->gl_lockref.count++; 1721 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1722 __gfs2_glock_queue_work(gl, 0); 1723 spin_unlock(&gl->gl_lockref.lock); 1724 } 1725 1726 static int glock_cmp(void *priv, struct list_head *a, struct list_head *b) 1727 { 1728 struct gfs2_glock *gla, *glb; 1729 1730 gla = list_entry(a, struct gfs2_glock, gl_lru); 1731 glb = list_entry(b, struct gfs2_glock, gl_lru); 1732 1733 if (gla->gl_name.ln_number > glb->gl_name.ln_number) 1734 return 1; 1735 if (gla->gl_name.ln_number < glb->gl_name.ln_number) 1736 return -1; 1737 1738 return 0; 1739 } 1740 1741 /** 1742 * gfs2_dispose_glock_lru - Demote a list of glocks 1743 * @list: The list to dispose of 1744 * 1745 * Disposing of glocks may involve disk accesses, so that here we sort 1746 * the glocks by number (i.e. disk location of the inodes) so that if 1747 * there are any such accesses, they'll be sent in order (mostly). 1748 * 1749 * Must be called under the lru_lock, but may drop and retake this 1750 * lock. 
While the lru_lock is dropped, entries may vanish from the 1751 * list, but no new entries will appear on the list (since it is 1752 * private) 1753 */ 1754 1755 static void gfs2_dispose_glock_lru(struct list_head *list) 1756 __releases(&lru_lock) 1757 __acquires(&lru_lock) 1758 { 1759 struct gfs2_glock *gl; 1760 1761 list_sort(NULL, list, glock_cmp); 1762 1763 while(!list_empty(list)) { 1764 gl = list_first_entry(list, struct gfs2_glock, gl_lru); 1765 list_del_init(&gl->gl_lru); 1766 if (!spin_trylock(&gl->gl_lockref.lock)) { 1767 add_back_to_lru: 1768 list_add(&gl->gl_lru, &lru_list); 1769 set_bit(GLF_LRU, &gl->gl_flags); 1770 atomic_inc(&lru_count); 1771 continue; 1772 } 1773 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1774 spin_unlock(&gl->gl_lockref.lock); 1775 goto add_back_to_lru; 1776 } 1777 gl->gl_lockref.count++; 1778 if (demote_ok(gl)) 1779 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1780 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); 1781 __gfs2_glock_queue_work(gl, 0); 1782 spin_unlock(&gl->gl_lockref.lock); 1783 cond_resched_lock(&lru_lock); 1784 } 1785 } 1786 1787 /** 1788 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote 1789 * @nr: The number of entries to scan 1790 * 1791 * This function selects the entries on the LRU which are able to 1792 * be demoted, and then kicks off the process by calling 1793 * gfs2_dispose_glock_lru() above. 1794 */ 1795 1796 static long gfs2_scan_glock_lru(int nr) 1797 { 1798 struct gfs2_glock *gl; 1799 LIST_HEAD(skipped); 1800 LIST_HEAD(dispose); 1801 long freed = 0; 1802 1803 spin_lock(&lru_lock); 1804 while ((nr-- >= 0) && !list_empty(&lru_list)) { 1805 gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru); 1806 1807 /* Test for being demotable */ 1808 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { 1809 list_move(&gl->gl_lru, &dispose); 1810 atomic_dec(&lru_count); 1811 clear_bit(GLF_LRU, &gl->gl_flags); 1812 freed++; 1813 continue; 1814 } 1815 1816 list_move(&gl->gl_lru, &skipped); 1817 } 1818 list_splice(&skipped, &lru_list); 1819 if (!list_empty(&dispose)) 1820 gfs2_dispose_glock_lru(&dispose); 1821 spin_unlock(&lru_lock); 1822 1823 return freed; 1824 } 1825 1826 static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink, 1827 struct shrink_control *sc) 1828 { 1829 if (!(sc->gfp_mask & __GFP_FS)) 1830 return SHRINK_STOP; 1831 return gfs2_scan_glock_lru(sc->nr_to_scan); 1832 } 1833 1834 static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink, 1835 struct shrink_control *sc) 1836 { 1837 return vfs_pressure_ratio(atomic_read(&lru_count)); 1838 } 1839 1840 static struct shrinker glock_shrinker = { 1841 .seeks = DEFAULT_SEEKS, 1842 .count_objects = gfs2_glock_shrink_count, 1843 .scan_objects = gfs2_glock_shrink_scan, 1844 }; 1845 1846 /** 1847 * glock_hash_walk - Call a function for glock in a hash bucket 1848 * @examiner: the function 1849 * @sdp: the filesystem 1850 * 1851 * Note that the function can be called multiple times on the same 1852 * object. So the user must ensure that the function can cope with 1853 * that. 
1854 */ 1855 1856 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) 1857 { 1858 struct gfs2_glock *gl; 1859 struct rhashtable_iter iter; 1860 1861 rhashtable_walk_enter(&gl_hash_table, &iter); 1862 1863 do { 1864 rhashtable_walk_start(&iter); 1865 1866 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) 1867 if (gl->gl_name.ln_sbd == sdp && 1868 lockref_get_not_dead(&gl->gl_lockref)) 1869 examiner(gl); 1870 1871 rhashtable_walk_stop(&iter); 1872 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); 1873 1874 rhashtable_walk_exit(&iter); 1875 } 1876 1877 bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay) 1878 { 1879 bool queued; 1880 1881 spin_lock(&gl->gl_lockref.lock); 1882 queued = queue_delayed_work(gfs2_delete_workqueue, 1883 &gl->gl_delete, delay); 1884 if (queued) 1885 set_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1886 spin_unlock(&gl->gl_lockref.lock); 1887 return queued; 1888 } 1889 1890 void gfs2_cancel_delete_work(struct gfs2_glock *gl) 1891 { 1892 if (cancel_delayed_work_sync(&gl->gl_delete)) { 1893 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1894 gfs2_glock_put(gl); 1895 } 1896 } 1897 1898 bool gfs2_delete_work_queued(const struct gfs2_glock *gl) 1899 { 1900 return test_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1901 } 1902 1903 static void flush_delete_work(struct gfs2_glock *gl) 1904 { 1905 if (gl->gl_name.ln_type == LM_TYPE_IOPEN) { 1906 if (cancel_delayed_work(&gl->gl_delete)) { 1907 queue_delayed_work(gfs2_delete_workqueue, 1908 &gl->gl_delete, 0); 1909 } 1910 } 1911 gfs2_glock_queue_work(gl, 0); 1912 } 1913 1914 void gfs2_flush_delete_work(struct gfs2_sbd *sdp) 1915 { 1916 glock_hash_walk(flush_delete_work, sdp); 1917 flush_workqueue(gfs2_delete_workqueue); 1918 } 1919 1920 /** 1921 * thaw_glock - thaw out a glock which has an unprocessed reply waiting 1922 * @gl: The glock to thaw 1923 * 1924 */ 1925 1926 static void thaw_glock(struct gfs2_glock *gl) 1927 { 1928 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) { 1929 gfs2_glock_put(gl); 1930 return; 1931 } 1932 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1933 gfs2_glock_queue_work(gl, 0); 1934 } 1935 1936 /** 1937 * clear_glock - look at a glock and see if we can free it from glock cache 1938 * @gl: the glock to look at 1939 * 1940 */ 1941 1942 static void clear_glock(struct gfs2_glock *gl) 1943 { 1944 gfs2_glock_remove_from_lru(gl); 1945 1946 spin_lock(&gl->gl_lockref.lock); 1947 if (gl->gl_state != LM_ST_UNLOCKED) 1948 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1949 __gfs2_glock_queue_work(gl, 0); 1950 spin_unlock(&gl->gl_lockref.lock); 1951 } 1952 1953 /** 1954 * gfs2_glock_thaw - Thaw any frozen glocks 1955 * @sdp: The super block 1956 * 1957 */ 1958 1959 void gfs2_glock_thaw(struct gfs2_sbd *sdp) 1960 { 1961 glock_hash_walk(thaw_glock, sdp); 1962 } 1963 1964 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) 1965 { 1966 spin_lock(&gl->gl_lockref.lock); 1967 gfs2_dump_glock(seq, gl, fsid); 1968 spin_unlock(&gl->gl_lockref.lock); 1969 } 1970 1971 static void dump_glock_func(struct gfs2_glock *gl) 1972 { 1973 dump_glock(NULL, gl, true); 1974 } 1975 1976 /** 1977 * gfs2_gl_hash_clear - Empty out the glock hash table 1978 * @sdp: the filesystem 1979 * @wait: wait until it's all gone 1980 * 1981 * Called when unmounting the filesystem. 
1982 */ 1983 1984 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) 1985 { 1986 set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags); 1987 flush_workqueue(glock_workqueue); 1988 glock_hash_walk(clear_glock, sdp); 1989 flush_workqueue(glock_workqueue); 1990 wait_event_timeout(sdp->sd_glock_wait, 1991 atomic_read(&sdp->sd_glock_disposal) == 0, 1992 HZ * 600); 1993 glock_hash_walk(dump_glock_func, sdp); 1994 } 1995 1996 void gfs2_glock_finish_truncate(struct gfs2_inode *ip) 1997 { 1998 struct gfs2_glock *gl = ip->i_gl; 1999 int ret; 2000 2001 ret = gfs2_truncatei_resume(ip); 2002 gfs2_glock_assert_withdraw(gl, ret == 0); 2003 2004 spin_lock(&gl->gl_lockref.lock); 2005 clear_bit(GLF_LOCK, &gl->gl_flags); 2006 run_queue(gl, 1); 2007 spin_unlock(&gl->gl_lockref.lock); 2008 } 2009 2010 static const char *state2str(unsigned state) 2011 { 2012 switch(state) { 2013 case LM_ST_UNLOCKED: 2014 return "UN"; 2015 case LM_ST_SHARED: 2016 return "SH"; 2017 case LM_ST_DEFERRED: 2018 return "DF"; 2019 case LM_ST_EXCLUSIVE: 2020 return "EX"; 2021 } 2022 return "??"; 2023 } 2024 2025 static const char *hflags2str(char *buf, u16 flags, unsigned long iflags) 2026 { 2027 char *p = buf; 2028 if (flags & LM_FLAG_TRY) 2029 *p++ = 't'; 2030 if (flags & LM_FLAG_TRY_1CB) 2031 *p++ = 'T'; 2032 if (flags & LM_FLAG_NOEXP) 2033 *p++ = 'e'; 2034 if (flags & LM_FLAG_ANY) 2035 *p++ = 'A'; 2036 if (flags & LM_FLAG_PRIORITY) 2037 *p++ = 'p'; 2038 if (flags & GL_ASYNC) 2039 *p++ = 'a'; 2040 if (flags & GL_EXACT) 2041 *p++ = 'E'; 2042 if (flags & GL_NOCACHE) 2043 *p++ = 'c'; 2044 if (test_bit(HIF_HOLDER, &iflags)) 2045 *p++ = 'H'; 2046 if (test_bit(HIF_WAIT, &iflags)) 2047 *p++ = 'W'; 2048 if (test_bit(HIF_FIRST, &iflags)) 2049 *p++ = 'F'; 2050 *p = 0; 2051 return buf; 2052 } 2053 2054 /** 2055 * dump_holder - print information about a glock holder 2056 * @seq: the seq_file struct 2057 * @gh: the glock holder 2058 * @fs_id_buf: pointer to file system id (if requested) 2059 * 2060 */ 2061 2062 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh, 2063 const char *fs_id_buf) 2064 { 2065 struct task_struct *gh_owner = NULL; 2066 char flags_buf[32]; 2067 2068 rcu_read_lock(); 2069 if (gh->gh_owner_pid) 2070 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); 2071 gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n", 2072 fs_id_buf, state2str(gh->gh_state), 2073 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), 2074 gh->gh_error, 2075 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, 2076 gh_owner ? 
gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	rcu_read_unlock();
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (!list_empty(&gl->gl_holders))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	if (test_bit(GLF_INODE_CREATING, gflags))
		*p++ = 'c';
	if (test_bit(GLF_PENDING_DELETE, gflags))
		*p++ = 'P';
	if (test_bit(GLF_FREEING, gflags))
		*p++ = 'x';
	*p = 0;
	return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 * @fsid: If true, also dump the file system id
 *
 * The file format is as follows:
 * One line per object; capital letters are used to indicate objects:
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings, which are
 * enclosed in [] so that it's possible to see if they are composed of
 * spaces, for example. The fields are: n = number (id of the object),
 * f = flags, t = type, s = state, r = refcount, e = error, p = pid.
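 *
 * For example, an exclusively held inode glock with a single holder might
 * be dumped roughly as follows (all values are illustrative):
 *
 *	G: s:EX n:2/805f f:yIq t:EX d:UN/0 a:0 v:0 r:3 m:200 p:1
 *	 H: s:EX f:H e:0 p:1397 [bash] gfs2_inode_lookup+0x11c/0x240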
2139 * 2140 */ 2141 2142 void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) 2143 { 2144 const struct gfs2_glock_operations *glops = gl->gl_ops; 2145 unsigned long long dtime; 2146 const struct gfs2_holder *gh; 2147 char gflags_buf[32]; 2148 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 2149 char fs_id_buf[sizeof(sdp->sd_fsname) + 7]; 2150 unsigned long nrpages = 0; 2151 2152 if (gl->gl_ops->go_flags & GLOF_ASPACE) { 2153 struct address_space *mapping = gfs2_glock2aspace(gl); 2154 2155 nrpages = mapping->nrpages; 2156 } 2157 memset(fs_id_buf, 0, sizeof(fs_id_buf)); 2158 if (fsid && sdp) /* safety precaution */ 2159 sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname); 2160 dtime = jiffies - gl->gl_demote_time; 2161 dtime *= 1000000/HZ; /* demote time in uSec */ 2162 if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) 2163 dtime = 0; 2164 gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d " 2165 "v:%d r:%d m:%ld p:%lu\n", 2166 fs_id_buf, state2str(gl->gl_state), 2167 gl->gl_name.ln_type, 2168 (unsigned long long)gl->gl_name.ln_number, 2169 gflags2str(gflags_buf, gl), 2170 state2str(gl->gl_target), 2171 state2str(gl->gl_demote_state), dtime, 2172 atomic_read(&gl->gl_ail_count), 2173 atomic_read(&gl->gl_revokes), 2174 (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages); 2175 2176 list_for_each_entry(gh, &gl->gl_holders, gh_list) 2177 dump_holder(seq, gh, fs_id_buf); 2178 2179 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) 2180 glops->go_dump(seq, gl, fs_id_buf); 2181 } 2182 2183 static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr) 2184 { 2185 struct gfs2_glock *gl = iter_ptr; 2186 2187 seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n", 2188 gl->gl_name.ln_type, 2189 (unsigned long long)gl->gl_name.ln_number, 2190 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT], 2191 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR], 2192 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB], 2193 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB], 2194 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT], 2195 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR], 2196 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT], 2197 (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]); 2198 return 0; 2199 } 2200 2201 static const char *gfs2_gltype[] = { 2202 "type", 2203 "reserved", 2204 "nondisk", 2205 "inode", 2206 "rgrp", 2207 "meta", 2208 "iopen", 2209 "flock", 2210 "plock", 2211 "quota", 2212 "journal", 2213 }; 2214 2215 static const char *gfs2_stype[] = { 2216 [GFS2_LKS_SRTT] = "srtt", 2217 [GFS2_LKS_SRTTVAR] = "srttvar", 2218 [GFS2_LKS_SRTTB] = "srttb", 2219 [GFS2_LKS_SRTTVARB] = "srttvarb", 2220 [GFS2_LKS_SIRT] = "sirt", 2221 [GFS2_LKS_SIRTVAR] = "sirtvar", 2222 [GFS2_LKS_DCOUNT] = "dlm", 2223 [GFS2_LKS_QCOUNT] = "queue", 2224 }; 2225 2226 #define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype)) 2227 2228 static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr) 2229 { 2230 struct gfs2_sbd *sdp = seq->private; 2231 loff_t pos = *(loff_t *)iter_ptr; 2232 unsigned index = pos >> 3; 2233 unsigned subindex = pos & 0x07; 2234 int i; 2235 2236 if (index == 0 && subindex != 0) 2237 return 0; 2238 2239 seq_printf(seq, "%-10s %8s:", gfs2_gltype[index], 2240 (index == 0) ? 
"cpu": gfs2_stype[subindex]); 2241 2242 for_each_possible_cpu(i) { 2243 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); 2244 2245 if (index == 0) 2246 seq_printf(seq, " %15u", i); 2247 else 2248 seq_printf(seq, " %15llu", (unsigned long long)lkstats-> 2249 lkstats[index - 1].stats[subindex]); 2250 } 2251 seq_putc(seq, '\n'); 2252 return 0; 2253 } 2254 2255 int __init gfs2_glock_init(void) 2256 { 2257 int i, ret; 2258 2259 ret = rhashtable_init(&gl_hash_table, &ht_parms); 2260 if (ret < 0) 2261 return ret; 2262 2263 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | 2264 WQ_HIGHPRI | WQ_FREEZABLE, 0); 2265 if (!glock_workqueue) { 2266 rhashtable_destroy(&gl_hash_table); 2267 return -ENOMEM; 2268 } 2269 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", 2270 WQ_MEM_RECLAIM | WQ_FREEZABLE, 2271 0); 2272 if (!gfs2_delete_workqueue) { 2273 destroy_workqueue(glock_workqueue); 2274 rhashtable_destroy(&gl_hash_table); 2275 return -ENOMEM; 2276 } 2277 2278 ret = register_shrinker(&glock_shrinker); 2279 if (ret) { 2280 destroy_workqueue(gfs2_delete_workqueue); 2281 destroy_workqueue(glock_workqueue); 2282 rhashtable_destroy(&gl_hash_table); 2283 return ret; 2284 } 2285 2286 for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++) 2287 init_waitqueue_head(glock_wait_table + i); 2288 2289 return 0; 2290 } 2291 2292 void gfs2_glock_exit(void) 2293 { 2294 unregister_shrinker(&glock_shrinker); 2295 rhashtable_destroy(&gl_hash_table); 2296 destroy_workqueue(glock_workqueue); 2297 destroy_workqueue(gfs2_delete_workqueue); 2298 } 2299 2300 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n) 2301 { 2302 struct gfs2_glock *gl = gi->gl; 2303 2304 if (gl) { 2305 if (n == 0) 2306 return; 2307 if (!lockref_put_not_zero(&gl->gl_lockref)) 2308 gfs2_glock_queue_put(gl); 2309 } 2310 for (;;) { 2311 gl = rhashtable_walk_next(&gi->hti); 2312 if (IS_ERR_OR_NULL(gl)) { 2313 if (gl == ERR_PTR(-EAGAIN)) { 2314 n = 1; 2315 continue; 2316 } 2317 gl = NULL; 2318 break; 2319 } 2320 if (gl->gl_name.ln_sbd != gi->sdp) 2321 continue; 2322 if (n <= 1) { 2323 if (!lockref_get_not_dead(&gl->gl_lockref)) 2324 continue; 2325 break; 2326 } else { 2327 if (__lockref_is_dead(&gl->gl_lockref)) 2328 continue; 2329 n--; 2330 } 2331 } 2332 gi->gl = gl; 2333 } 2334 2335 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) 2336 __acquires(RCU) 2337 { 2338 struct gfs2_glock_iter *gi = seq->private; 2339 loff_t n; 2340 2341 /* 2342 * We can either stay where we are, skip to the next hash table 2343 * entry, or start from the beginning. 
2344 */ 2345 if (*pos < gi->last_pos) { 2346 rhashtable_walk_exit(&gi->hti); 2347 rhashtable_walk_enter(&gl_hash_table, &gi->hti); 2348 n = *pos + 1; 2349 } else { 2350 n = *pos - gi->last_pos; 2351 } 2352 2353 rhashtable_walk_start(&gi->hti); 2354 2355 gfs2_glock_iter_next(gi, n); 2356 gi->last_pos = *pos; 2357 return gi->gl; 2358 } 2359 2360 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr, 2361 loff_t *pos) 2362 { 2363 struct gfs2_glock_iter *gi = seq->private; 2364 2365 (*pos)++; 2366 gi->last_pos = *pos; 2367 gfs2_glock_iter_next(gi, 1); 2368 return gi->gl; 2369 } 2370 2371 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr) 2372 __releases(RCU) 2373 { 2374 struct gfs2_glock_iter *gi = seq->private; 2375 2376 rhashtable_walk_stop(&gi->hti); 2377 } 2378 2379 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) 2380 { 2381 dump_glock(seq, iter_ptr, false); 2382 return 0; 2383 } 2384 2385 static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos) 2386 { 2387 preempt_disable(); 2388 if (*pos >= GFS2_NR_SBSTATS) 2389 return NULL; 2390 return pos; 2391 } 2392 2393 static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr, 2394 loff_t *pos) 2395 { 2396 (*pos)++; 2397 if (*pos >= GFS2_NR_SBSTATS) 2398 return NULL; 2399 return pos; 2400 } 2401 2402 static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr) 2403 { 2404 preempt_enable(); 2405 } 2406 2407 static const struct seq_operations gfs2_glock_seq_ops = { 2408 .start = gfs2_glock_seq_start, 2409 .next = gfs2_glock_seq_next, 2410 .stop = gfs2_glock_seq_stop, 2411 .show = gfs2_glock_seq_show, 2412 }; 2413 2414 static const struct seq_operations gfs2_glstats_seq_ops = { 2415 .start = gfs2_glock_seq_start, 2416 .next = gfs2_glock_seq_next, 2417 .stop = gfs2_glock_seq_stop, 2418 .show = gfs2_glstats_seq_show, 2419 }; 2420 2421 static const struct seq_operations gfs2_sbstats_sops = { 2422 .start = gfs2_sbstats_seq_start, 2423 .next = gfs2_sbstats_seq_next, 2424 .stop = gfs2_sbstats_seq_stop, 2425 .show = gfs2_sbstats_seq_show, 2426 }; 2427 2428 #define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL) 2429 2430 static int __gfs2_glocks_open(struct inode *inode, struct file *file, 2431 const struct seq_operations *ops) 2432 { 2433 int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter)); 2434 if (ret == 0) { 2435 struct seq_file *seq = file->private_data; 2436 struct gfs2_glock_iter *gi = seq->private; 2437 2438 gi->sdp = inode->i_private; 2439 seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN); 2440 if (seq->buf) 2441 seq->size = GFS2_SEQ_GOODSIZE; 2442 /* 2443 * Initially, we are "before" the first hash table entry; the 2444 * first call to rhashtable_walk_next gets us the first entry. 
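		 *
		 * Setting last_pos to -1 below means the first call to
		 * gfs2_glock_seq_start() with *pos == 0 takes the "else"
		 * branch and advances by n = 0 - (-1) = 1 entry, i.e. from
		 * "before the first entry" to the first entry itself.
		 * From user space these iterators back the debugfs files,
		 * e.g. (path depends on where debugfs is mounted):
		 *
		 *	cat /sys/kernel/debug/gfs2/<fsname>/glocks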
2445 */ 2446 gi->last_pos = -1; 2447 gi->gl = NULL; 2448 rhashtable_walk_enter(&gl_hash_table, &gi->hti); 2449 } 2450 return ret; 2451 } 2452 2453 static int gfs2_glocks_open(struct inode *inode, struct file *file) 2454 { 2455 return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops); 2456 } 2457 2458 static int gfs2_glocks_release(struct inode *inode, struct file *file) 2459 { 2460 struct seq_file *seq = file->private_data; 2461 struct gfs2_glock_iter *gi = seq->private; 2462 2463 if (gi->gl) 2464 gfs2_glock_put(gi->gl); 2465 rhashtable_walk_exit(&gi->hti); 2466 return seq_release_private(inode, file); 2467 } 2468 2469 static int gfs2_glstats_open(struct inode *inode, struct file *file) 2470 { 2471 return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops); 2472 } 2473 2474 static const struct file_operations gfs2_glocks_fops = { 2475 .owner = THIS_MODULE, 2476 .open = gfs2_glocks_open, 2477 .read = seq_read, 2478 .llseek = seq_lseek, 2479 .release = gfs2_glocks_release, 2480 }; 2481 2482 static const struct file_operations gfs2_glstats_fops = { 2483 .owner = THIS_MODULE, 2484 .open = gfs2_glstats_open, 2485 .read = seq_read, 2486 .llseek = seq_lseek, 2487 .release = gfs2_glocks_release, 2488 }; 2489 2490 DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats); 2491 2492 void gfs2_create_debugfs_file(struct gfs2_sbd *sdp) 2493 { 2494 sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root); 2495 2496 debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2497 &gfs2_glocks_fops); 2498 2499 debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2500 &gfs2_glstats_fops); 2501 2502 debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp, 2503 &gfs2_sbstats_fops); 2504 } 2505 2506 void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp) 2507 { 2508 debugfs_remove_recursive(sdp->debugfs_dir); 2509 sdp->debugfs_dir = NULL; 2510 } 2511 2512 void gfs2_register_debugfs(void) 2513 { 2514 gfs2_root = debugfs_create_dir("gfs2", NULL); 2515 } 2516 2517 void gfs2_unregister_debugfs(void) 2518 { 2519 debugfs_remove(gfs2_root); 2520 gfs2_root = NULL; 2521 } 2522