1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 5 */ 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <linux/sched.h> 10 #include <linux/slab.h> 11 #include <linux/spinlock.h> 12 #include <linux/buffer_head.h> 13 #include <linux/delay.h> 14 #include <linux/sort.h> 15 #include <linux/hash.h> 16 #include <linux/jhash.h> 17 #include <linux/kallsyms.h> 18 #include <linux/gfs2_ondisk.h> 19 #include <linux/list.h> 20 #include <linux/wait.h> 21 #include <linux/module.h> 22 #include <linux/uaccess.h> 23 #include <linux/seq_file.h> 24 #include <linux/debugfs.h> 25 #include <linux/kthread.h> 26 #include <linux/freezer.h> 27 #include <linux/workqueue.h> 28 #include <linux/jiffies.h> 29 #include <linux/rcupdate.h> 30 #include <linux/rculist_bl.h> 31 #include <linux/bit_spinlock.h> 32 #include <linux/percpu.h> 33 #include <linux/list_sort.h> 34 #include <linux/lockref.h> 35 #include <linux/rhashtable.h> 36 37 #include "gfs2.h" 38 #include "incore.h" 39 #include "glock.h" 40 #include "glops.h" 41 #include "inode.h" 42 #include "lops.h" 43 #include "meta_io.h" 44 #include "quota.h" 45 #include "super.h" 46 #include "util.h" 47 #include "bmap.h" 48 #define CREATE_TRACE_POINTS 49 #include "trace_gfs2.h" 50 51 struct gfs2_glock_iter { 52 struct gfs2_sbd *sdp; /* incore superblock */ 53 struct rhashtable_iter hti; /* rhashtable iterator */ 54 struct gfs2_glock *gl; /* current glock struct */ 55 loff_t last_pos; /* last position */ 56 }; 57 58 typedef void (*glock_examiner) (struct gfs2_glock * gl); 59 60 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); 61 62 static struct dentry *gfs2_root; 63 static struct workqueue_struct *glock_workqueue; 64 struct workqueue_struct *gfs2_delete_workqueue; 65 static LIST_HEAD(lru_list); 66 static atomic_t lru_count = ATOMIC_INIT(0); 67 static DEFINE_SPINLOCK(lru_lock); 68 69 #define GFS2_GL_HASH_SHIFT 15 70 #define GFS2_GL_HASH_SIZE BIT(GFS2_GL_HASH_SHIFT) 71 72 static const struct rhashtable_params ht_parms = { 73 .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4, 74 .key_len = offsetofend(struct lm_lockname, ln_type), 75 .key_offset = offsetof(struct gfs2_glock, gl_name), 76 .head_offset = offsetof(struct gfs2_glock, gl_node), 77 }; 78 79 static struct rhashtable gl_hash_table; 80 81 #define GLOCK_WAIT_TABLE_BITS 12 82 #define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS) 83 static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned; 84 85 struct wait_glock_queue { 86 struct lm_lockname *name; 87 wait_queue_entry_t wait; 88 }; 89 90 static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode, 91 int sync, void *key) 92 { 93 struct wait_glock_queue *wait_glock = 94 container_of(wait, struct wait_glock_queue, wait); 95 struct lm_lockname *wait_name = wait_glock->name; 96 struct lm_lockname *wake_name = key; 97 98 if (wake_name->ln_sbd != wait_name->ln_sbd || 99 wake_name->ln_number != wait_name->ln_number || 100 wake_name->ln_type != wait_name->ln_type) 101 return 0; 102 return autoremove_wake_function(wait, mode, sync, key); 103 } 104 105 static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name) 106 { 107 u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0); 108 109 return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS); 110 } 111 112 /** 113 * wake_up_glock - Wake up waiters on a glock 114 * @gl: the glock 115 */ 116 static void 
wake_up_glock(struct gfs2_glock *gl) 117 { 118 wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); 119 120 if (waitqueue_active(wq)) 121 __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); 122 } 123 124 static void gfs2_glock_dealloc(struct rcu_head *rcu) 125 { 126 struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu); 127 128 kfree(gl->gl_lksb.sb_lvbptr); 129 if (gl->gl_ops->go_flags & GLOF_ASPACE) 130 kmem_cache_free(gfs2_glock_aspace_cachep, gl); 131 else 132 kmem_cache_free(gfs2_glock_cachep, gl); 133 } 134 135 /** 136 * glock_blocked_by_withdraw - determine if we can still use a glock 137 * @gl: the glock 138 * 139 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted 140 * when we're withdrawn. For example, to maintain metadata integrity, we should 141 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like 142 * iopen or the transaction glocks may be safely used because none of their 143 * metadata goes through the journal. So in general, we should disallow all 144 * glocks that are journaled, and allow all the others. One exception is: 145 * we need to allow our active journal to be promoted and demoted so others 146 * may recover it and we can reacquire it when they're done. 147 */ 148 static bool glock_blocked_by_withdraw(struct gfs2_glock *gl) 149 { 150 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 151 152 if (likely(!gfs2_withdrawn(sdp))) 153 return false; 154 if (gl->gl_ops->go_flags & GLOF_NONDISK) 155 return false; 156 if (!sdp->sd_jdesc || 157 gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr) 158 return false; 159 return true; 160 } 161 162 void gfs2_glock_free(struct gfs2_glock *gl) 163 { 164 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 165 166 gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0); 167 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); 168 smp_mb(); 169 wake_up_glock(gl); 170 call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); 171 if (atomic_dec_and_test(&sdp->sd_glock_disposal)) 172 wake_up(&sdp->sd_glock_wait); 173 } 174 175 /** 176 * gfs2_glock_hold() - increment reference count on glock 177 * @gl: The glock to hold 178 * 179 */ 180 181 void gfs2_glock_hold(struct gfs2_glock *gl) 182 { 183 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 184 lockref_get(&gl->gl_lockref); 185 } 186 187 /** 188 * demote_ok - Check to see if it's ok to unlock a glock 189 * @gl: the glock 190 * 191 * Returns: 1 if it's ok 192 */ 193 194 static int demote_ok(const struct gfs2_glock *gl) 195 { 196 const struct gfs2_glock_operations *glops = gl->gl_ops; 197 198 if (gl->gl_state == LM_ST_UNLOCKED) 199 return 0; 200 if (!list_empty(&gl->gl_holders)) 201 return 0; 202 if (glops->go_demote_ok) 203 return glops->go_demote_ok(gl); 204 return 1; 205 } 206 207 208 void gfs2_glock_add_to_lru(struct gfs2_glock *gl) 209 { 210 if (!(gl->gl_ops->go_flags & GLOF_LRU)) 211 return; 212 213 spin_lock(&lru_lock); 214 215 list_del(&gl->gl_lru); 216 list_add_tail(&gl->gl_lru, &lru_list); 217 218 if (!test_bit(GLF_LRU, &gl->gl_flags)) { 219 set_bit(GLF_LRU, &gl->gl_flags); 220 atomic_inc(&lru_count); 221 } 222 223 spin_unlock(&lru_lock); 224 } 225 226 static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) 227 { 228 if (!(gl->gl_ops->go_flags & GLOF_LRU)) 229 return; 230 231 spin_lock(&lru_lock); 232 if (test_bit(GLF_LRU, &gl->gl_flags)) { 233 list_del_init(&gl->gl_lru); 234 atomic_dec(&lru_count); 235 clear_bit(GLF_LRU, &gl->gl_flags); 236 } 237 spin_unlock(&lru_lock); 238 } 239 240 /* 241 * Enqueue the glock on 
the work queue. Passes one glock reference on to the 242 * work queue. 243 */ 244 static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { 245 if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) { 246 /* 247 * We are holding the lockref spinlock, and the work was still 248 * queued above. The queued work (glock_work_func) takes that 249 * spinlock before dropping its glock reference(s), so it 250 * cannot have dropped them in the meantime. 251 */ 252 GLOCK_BUG_ON(gl, gl->gl_lockref.count < 2); 253 gl->gl_lockref.count--; 254 } 255 } 256 257 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { 258 spin_lock(&gl->gl_lockref.lock); 259 __gfs2_glock_queue_work(gl, delay); 260 spin_unlock(&gl->gl_lockref.lock); 261 } 262 263 static void __gfs2_glock_put(struct gfs2_glock *gl) 264 { 265 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 266 struct address_space *mapping = gfs2_glock2aspace(gl); 267 268 lockref_mark_dead(&gl->gl_lockref); 269 270 gfs2_glock_remove_from_lru(gl); 271 spin_unlock(&gl->gl_lockref.lock); 272 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 273 GLOCK_BUG_ON(gl, mapping && mapping->nrpages && !gfs2_withdrawn(sdp)); 274 trace_gfs2_glock_put(gl); 275 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); 276 } 277 278 /* 279 * Cause the glock to be put in work queue context. 280 */ 281 void gfs2_glock_queue_put(struct gfs2_glock *gl) 282 { 283 gfs2_glock_queue_work(gl, 0); 284 } 285 286 /** 287 * gfs2_glock_put() - Decrement reference count on glock 288 * @gl: The glock to put 289 * 290 */ 291 292 void gfs2_glock_put(struct gfs2_glock *gl) 293 { 294 if (lockref_put_or_lock(&gl->gl_lockref)) 295 return; 296 297 __gfs2_glock_put(gl); 298 } 299 300 /** 301 * may_grant - check if its ok to grant a new lock 302 * @gl: The glock 303 * @gh: The lock request which we wish to grant 304 * 305 * Returns: true if its ok to grant the lock 306 */ 307 308 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) 309 { 310 const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list); 311 if ((gh->gh_state == LM_ST_EXCLUSIVE || 312 gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head) 313 return 0; 314 if (gl->gl_state == gh->gh_state) 315 return 1; 316 if (gh->gh_flags & GL_EXACT) 317 return 0; 318 if (gl->gl_state == LM_ST_EXCLUSIVE) { 319 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED) 320 return 1; 321 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED) 322 return 1; 323 } 324 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) 325 return 1; 326 return 0; 327 } 328 329 static void gfs2_holder_wake(struct gfs2_holder *gh) 330 { 331 clear_bit(HIF_WAIT, &gh->gh_iflags); 332 smp_mb__after_atomic(); 333 wake_up_bit(&gh->gh_iflags, HIF_WAIT); 334 if (gh->gh_flags & GL_ASYNC) { 335 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd; 336 337 wake_up(&sdp->sd_async_glock_wait); 338 } 339 } 340 341 /** 342 * do_error - Something unexpected has happened during a lock request 343 * 344 */ 345 346 static void do_error(struct gfs2_glock *gl, const int ret) 347 { 348 struct gfs2_holder *gh, *tmp; 349 350 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 351 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 352 continue; 353 if (ret & LM_OUT_ERROR) 354 gh->gh_error = -EIO; 355 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) 356 gh->gh_error = GLR_TRYFAILED; 357 else 358 continue; 359 list_del_init(&gh->gh_list); 
360 trace_gfs2_glock_queue(gh, 0); 361 gfs2_holder_wake(gh); 362 } 363 } 364 365 /** 366 * do_promote - promote as many requests as possible on the current queue 367 * @gl: The glock 368 * 369 * Returns: 1 if there is a blocked holder at the head of the list, or 2 370 * if a type specific operation is underway. 371 */ 372 373 static int do_promote(struct gfs2_glock *gl) 374 __releases(&gl->gl_lockref.lock) 375 __acquires(&gl->gl_lockref.lock) 376 { 377 const struct gfs2_glock_operations *glops = gl->gl_ops; 378 struct gfs2_holder *gh, *tmp; 379 int ret; 380 381 restart: 382 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { 383 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 384 continue; 385 if (may_grant(gl, gh)) { 386 if (gh->gh_list.prev == &gl->gl_holders && 387 glops->go_lock) { 388 spin_unlock(&gl->gl_lockref.lock); 389 /* FIXME: eliminate this eventually */ 390 ret = glops->go_lock(gh); 391 spin_lock(&gl->gl_lockref.lock); 392 if (ret) { 393 if (ret == 1) 394 return 2; 395 gh->gh_error = ret; 396 list_del_init(&gh->gh_list); 397 trace_gfs2_glock_queue(gh, 0); 398 gfs2_holder_wake(gh); 399 goto restart; 400 } 401 set_bit(HIF_HOLDER, &gh->gh_iflags); 402 trace_gfs2_promote(gh, 1); 403 gfs2_holder_wake(gh); 404 goto restart; 405 } 406 set_bit(HIF_HOLDER, &gh->gh_iflags); 407 trace_gfs2_promote(gh, 0); 408 gfs2_holder_wake(gh); 409 continue; 410 } 411 if (gh->gh_list.prev == &gl->gl_holders) 412 return 1; 413 do_error(gl, 0); 414 break; 415 } 416 return 0; 417 } 418 419 /** 420 * find_first_waiter - find the first gh that's waiting for the glock 421 * @gl: the glock 422 */ 423 424 static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) 425 { 426 struct gfs2_holder *gh; 427 428 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 429 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) 430 return gh; 431 } 432 return NULL; 433 } 434 435 /** 436 * state_change - record that the glock is now in a different state 437 * @gl: the glock 438 * @new_state the new state 439 * 440 */ 441 442 static void state_change(struct gfs2_glock *gl, unsigned int new_state) 443 { 444 int held1, held2; 445 446 held1 = (gl->gl_state != LM_ST_UNLOCKED); 447 held2 = (new_state != LM_ST_UNLOCKED); 448 449 if (held1 != held2) { 450 GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); 451 if (held2) 452 gl->gl_lockref.count++; 453 else 454 gl->gl_lockref.count--; 455 } 456 if (held1 && held2 && list_empty(&gl->gl_holders)) 457 clear_bit(GLF_QUEUED, &gl->gl_flags); 458 459 if (new_state != gl->gl_target) 460 /* shorten our minimum hold time */ 461 gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, 462 GL_GLOCK_MIN_HOLD); 463 gl->gl_state = new_state; 464 gl->gl_tchange = jiffies; 465 } 466 467 static void gfs2_set_demote(struct gfs2_glock *gl) 468 { 469 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 470 471 set_bit(GLF_DEMOTE, &gl->gl_flags); 472 smp_mb(); 473 wake_up(&sdp->sd_async_glock_wait); 474 } 475 476 static void gfs2_demote_wake(struct gfs2_glock *gl) 477 { 478 gl->gl_demote_state = LM_ST_EXCLUSIVE; 479 clear_bit(GLF_DEMOTE, &gl->gl_flags); 480 smp_mb__after_atomic(); 481 wake_up_bit(&gl->gl_flags, GLF_DEMOTE); 482 } 483 484 /** 485 * finish_xmote - The DLM has replied to one of our lock requests 486 * @gl: The glock 487 * @ret: The status from the DLM 488 * 489 */ 490 491 static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) 492 { 493 const struct gfs2_glock_operations *glops = gl->gl_ops; 494 struct gfs2_holder *gh; 495 unsigned state = ret & LM_OUT_ST_MASK; 496 int 
rv; 497 498 spin_lock(&gl->gl_lockref.lock); 499 trace_gfs2_glock_state_change(gl, state); 500 state_change(gl, state); 501 gh = find_first_waiter(gl); 502 503 /* Demote to UN request arrived during demote to SH or DF */ 504 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && 505 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) 506 gl->gl_target = LM_ST_UNLOCKED; 507 508 /* Check for state != intended state */ 509 if (unlikely(state != gl->gl_target)) { 510 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { 511 /* move to back of queue and try next entry */ 512 if (ret & LM_OUT_CANCELED) { 513 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) 514 list_move_tail(&gh->gh_list, &gl->gl_holders); 515 gh = find_first_waiter(gl); 516 gl->gl_target = gh->gh_state; 517 goto retry; 518 } 519 /* Some error or failed "try lock" - report it */ 520 if ((ret & LM_OUT_ERROR) || 521 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 522 gl->gl_target = gl->gl_state; 523 do_error(gl, ret); 524 goto out; 525 } 526 } 527 switch(state) { 528 /* Unlocked due to conversion deadlock, try again */ 529 case LM_ST_UNLOCKED: 530 retry: 531 do_xmote(gl, gh, gl->gl_target); 532 break; 533 /* Conversion fails, unlock and try again */ 534 case LM_ST_SHARED: 535 case LM_ST_DEFERRED: 536 do_xmote(gl, gh, LM_ST_UNLOCKED); 537 break; 538 default: /* Everything else */ 539 fs_err(gl->gl_name.ln_sbd, "wanted %u got %u\n", 540 gl->gl_target, state); 541 GLOCK_BUG_ON(gl, 1); 542 } 543 spin_unlock(&gl->gl_lockref.lock); 544 return; 545 } 546 547 /* Fast path - we got what we asked for */ 548 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) 549 gfs2_demote_wake(gl); 550 if (state != LM_ST_UNLOCKED) { 551 if (glops->go_xmote_bh) { 552 spin_unlock(&gl->gl_lockref.lock); 553 rv = glops->go_xmote_bh(gl, gh); 554 spin_lock(&gl->gl_lockref.lock); 555 if (rv) { 556 do_error(gl, rv); 557 goto out; 558 } 559 } 560 rv = do_promote(gl); 561 if (rv == 2) 562 goto out_locked; 563 } 564 out: 565 clear_bit(GLF_LOCK, &gl->gl_flags); 566 out_locked: 567 spin_unlock(&gl->gl_lockref.lock); 568 } 569 570 /** 571 * do_xmote - Calls the DLM to change the state of a lock 572 * @gl: The lock state 573 * @gh: The holder (only for promotes) 574 * @target: The target lock state 575 * 576 */ 577 578 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) 579 __releases(&gl->gl_lockref.lock) 580 __acquires(&gl->gl_lockref.lock) 581 { 582 const struct gfs2_glock_operations *glops = gl->gl_ops; 583 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 584 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0); 585 int ret; 586 587 if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) && 588 gh && !(gh->gh_flags & LM_FLAG_NOEXP)) 589 return; 590 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | 591 LM_FLAG_PRIORITY); 592 GLOCK_BUG_ON(gl, gl->gl_state == target); 593 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target); 594 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) && 595 glops->go_inval) { 596 /* 597 * If another process is already doing the invalidate, let that 598 * finish first. The glock state machine will get back to this 599 * holder again later. 
		 */
		if (test_and_set_bit(GLF_INVALIDATE_IN_PROGRESS,
				     &gl->gl_flags))
			return;
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync) {
		ret = glops->go_sync(gl);
		/* If we had a problem syncing (due to io errors or whatever),
		 * we should not invalidate the metadata or tell dlm to
		 * release the glock to other nodes.
		 */
		if (ret) {
			if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
				fs_err(sdp, "Error %d syncing glock\n", ret);
				gfs2_dump_glock(NULL, gl, true);
			}
			goto skip_inval;
		}
	}
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
		/*
		 * The call to go_sync should have cleared out the ail list.
		 * If there are still items, we have a problem. We ought to
		 * withdraw, but we can't because the withdraw code also uses
		 * glocks. Warn about the error, dump the glock, then fall
		 * through and wait for logd to do the withdraw for us.
		 */
		if ((atomic_read(&gl->gl_ail_count) != 0) &&
		    (!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
			gfs2_glock_assert_warn(gl,
					       !atomic_read(&gl->gl_ail_count));
			gfs2_dump_glock(NULL, gl, true);
		}
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
	}

skip_inval:
	gfs2_glock_hold(gl);
	/*
	 * Check for an error encountered since we called go_sync and go_inval.
	 * If so, we can't withdraw from the glock code because the withdraw
	 * code itself uses glocks (see function signal_our_withdraw) to
	 * change the mount to read-only. Most importantly, we must not call
	 * dlm to unlock the glock until the journal is in a known good state
	 * (after journal replay) otherwise other nodes may use the object
	 * (rgrp or dinode) and then later, journal replay will corrupt the
	 * file system. The best we can do here is wait for the logd daemon
	 * to see sd_log_error and withdraw, and in the meantime, requeue the
	 * work for later.
	 *
	 * However, if we're just unlocking the lock (say, for unmount, when
	 * gfs2_gl_hash_clear calls clear_glock) and recovery is complete
	 * then it's okay to tell dlm to unlock it.
662 */ 663 if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp))) 664 gfs2_withdraw_delayed(sdp); 665 if (glock_blocked_by_withdraw(gl)) { 666 if (target != LM_ST_UNLOCKED || 667 test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags)) { 668 gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); 669 goto out; 670 } 671 } 672 673 if (sdp->sd_lockstruct.ls_ops->lm_lock) { 674 /* lock_dlm */ 675 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); 676 if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && 677 target == LM_ST_UNLOCKED && 678 test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) { 679 finish_xmote(gl, target); 680 gfs2_glock_queue_work(gl, 0); 681 } else if (ret) { 682 fs_err(sdp, "lm_lock ret %d\n", ret); 683 GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp)); 684 } 685 } else { /* lock_nolock */ 686 finish_xmote(gl, target); 687 gfs2_glock_queue_work(gl, 0); 688 } 689 out: 690 spin_lock(&gl->gl_lockref.lock); 691 } 692 693 /** 694 * find_first_holder - find the first "holder" gh 695 * @gl: the glock 696 */ 697 698 static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) 699 { 700 struct gfs2_holder *gh; 701 702 if (!list_empty(&gl->gl_holders)) { 703 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 704 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 705 return gh; 706 } 707 return NULL; 708 } 709 710 /** 711 * run_queue - do all outstanding tasks related to a glock 712 * @gl: The glock in question 713 * @nonblock: True if we must not block in run_queue 714 * 715 */ 716 717 static void run_queue(struct gfs2_glock *gl, const int nonblock) 718 __releases(&gl->gl_lockref.lock) 719 __acquires(&gl->gl_lockref.lock) 720 { 721 struct gfs2_holder *gh = NULL; 722 int ret; 723 724 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) 725 return; 726 727 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); 728 729 if (test_bit(GLF_DEMOTE, &gl->gl_flags) && 730 gl->gl_demote_state != gl->gl_state) { 731 if (find_first_holder(gl)) 732 goto out_unlock; 733 if (nonblock) 734 goto out_sched; 735 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); 736 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); 737 gl->gl_target = gl->gl_demote_state; 738 } else { 739 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) 740 gfs2_demote_wake(gl); 741 ret = do_promote(gl); 742 if (ret == 0) 743 goto out_unlock; 744 if (ret == 2) 745 goto out; 746 gh = find_first_waiter(gl); 747 gl->gl_target = gh->gh_state; 748 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) 749 do_error(gl, 0); /* Fail queued try locks */ 750 } 751 do_xmote(gl, gh, gl->gl_target); 752 out: 753 return; 754 755 out_sched: 756 clear_bit(GLF_LOCK, &gl->gl_flags); 757 smp_mb__after_atomic(); 758 gl->gl_lockref.count++; 759 __gfs2_glock_queue_work(gl, 0); 760 return; 761 762 out_unlock: 763 clear_bit(GLF_LOCK, &gl->gl_flags); 764 smp_mb__after_atomic(); 765 return; 766 } 767 768 void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation) 769 { 770 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; 771 772 if (ri->ri_magic == 0) 773 ri->ri_magic = cpu_to_be32(GFS2_MAGIC); 774 if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC)) 775 ri->ri_generation_deleted = cpu_to_be64(generation); 776 } 777 778 bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation) 779 { 780 struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr; 781 782 if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC)) 783 return false; 784 return generation <= be64_to_cpu(ri->ri_generation_deleted); 785 } 786 787 static void 
gfs2_glock_poke(struct gfs2_glock *gl) 788 { 789 int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP; 790 struct gfs2_holder gh; 791 int error; 792 793 gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh); 794 error = gfs2_glock_nq(&gh); 795 if (!error) 796 gfs2_glock_dq(&gh); 797 gfs2_holder_uninit(&gh); 798 } 799 800 static bool gfs2_try_evict(struct gfs2_glock *gl) 801 { 802 struct gfs2_inode *ip; 803 bool evicted = false; 804 805 /* 806 * If there is contention on the iopen glock and we have an inode, try 807 * to grab and release the inode so that it can be evicted. This will 808 * allow the remote node to go ahead and delete the inode without us 809 * having to do it, which will avoid rgrp glock thrashing. 810 * 811 * The remote node is likely still holding the corresponding inode 812 * glock, so it will run before we get to verify that the delete has 813 * happened below. 814 */ 815 spin_lock(&gl->gl_lockref.lock); 816 ip = gl->gl_object; 817 if (ip && !igrab(&ip->i_inode)) 818 ip = NULL; 819 spin_unlock(&gl->gl_lockref.lock); 820 if (ip) { 821 struct gfs2_glock *inode_gl = NULL; 822 823 gl->gl_no_formal_ino = ip->i_no_formal_ino; 824 set_bit(GIF_DEFERRED_DELETE, &ip->i_flags); 825 d_prune_aliases(&ip->i_inode); 826 iput(&ip->i_inode); 827 828 /* If the inode was evicted, gl->gl_object will now be NULL. */ 829 spin_lock(&gl->gl_lockref.lock); 830 ip = gl->gl_object; 831 if (ip) { 832 inode_gl = ip->i_gl; 833 lockref_get(&inode_gl->gl_lockref); 834 clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags); 835 } 836 spin_unlock(&gl->gl_lockref.lock); 837 if (inode_gl) { 838 gfs2_glock_poke(inode_gl); 839 gfs2_glock_put(inode_gl); 840 } 841 evicted = !ip; 842 } 843 return evicted; 844 } 845 846 static void delete_work_func(struct work_struct *work) 847 { 848 struct delayed_work *dwork = to_delayed_work(work); 849 struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete); 850 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 851 struct inode *inode; 852 u64 no_addr = gl->gl_name.ln_number; 853 854 spin_lock(&gl->gl_lockref.lock); 855 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); 856 spin_unlock(&gl->gl_lockref.lock); 857 858 /* If someone's using this glock to create a new dinode, the block must 859 have been freed by another node, then re-used, in which case our 860 iopen callback is too late after the fact. Ignore it. */ 861 if (test_bit(GLF_INODE_CREATING, &gl->gl_flags)) 862 goto out; 863 864 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { 865 /* 866 * If we can evict the inode, give the remote node trying to 867 * delete the inode some time before verifying that the delete 868 * has happened. Otherwise, if we cause contention on the inode glock 869 * immediately, the remote node will think that we still have 870 * the inode in use, and so it will give up waiting. 871 * 872 * If we can't evict the inode, signal to the remote node that 873 * the inode is still in use. We'll later try to delete the 874 * inode locally in gfs2_evict_inode. 875 * 876 * FIXME: We only need to verify that the remote node has 877 * deleted the inode because nodes before this remote delete 878 * rework won't cooperate. At a later time, when we no longer 879 * care about compatibility with such nodes, we can skip this 880 * step entirely. 
		 */
		if (gfs2_try_evict(gl)) {
			if (gfs2_queue_delete_work(gl, 5 * HZ))
				return;
		}
		goto out;
	}

	inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
				    GFS2_BLKST_UNLINKED);
	if (!IS_ERR_OR_NULL(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
out:
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	unsigned int drop_refs = 1;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_refs++;
	}
	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			gfs2_set_demote(gl);
		}
	}
	run_queue(gl, 0);
	if (delay) {
		/* Keep one glock reference for the work we requeue. */
		drop_refs--;
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		__gfs2_glock_queue_work(gl, delay);
	}

	/*
	 * Drop the remaining glock references manually here. (Mind that
	 * __gfs2_glock_queue_work depends on the lockref spinlock being held
	 * here as well.)
	 */
	gl->gl_lockref.count -= drop_refs;
	if (!gl->gl_lockref.count) {
		__gfs2_glock_put(gl);
		return;
	}
	spin_unlock(&gl->gl_lockref.lock);
}

static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{
	struct wait_glock_queue wait;
	wait_queue_head_t *wq = glock_waitqueue(name);
	struct gfs2_glock *gl;

	wait.name = name;
	init_wait(&wait.wait);
	wait.wait.func = glock_wake_function;

again:
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	rcu_read_lock();
	if (new) {
		gl = rhashtable_lookup_get_insert_fast(&gl_hash_table,
						       &new->gl_node, ht_parms);
		if (IS_ERR(gl))
			goto out;
	} else {
		gl = rhashtable_lookup_fast(&gl_hash_table,
					    name, ht_parms);
	}
	if (gl && !lockref_get_not_dead(&gl->gl_lockref)) {
		rcu_read_unlock();
		schedule();
		goto again;
	}
out:
	rcu_read_unlock();
	finish_wait(wq, &wait.wait);
	return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
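 *
 * A typical caller pairs the reference obtained here with a holder; see
 * gfs2_glock_nq_num() below, which calls gfs2_glock_get(), enqueues a
 * holder with gfs2_glock_nq_init(), and then drops this reference again
 * with gfs2_glock_put().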
989 * 990 * Returns: errno 991 */ 992 993 int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, 994 const struct gfs2_glock_operations *glops, int create, 995 struct gfs2_glock **glp) 996 { 997 struct super_block *s = sdp->sd_vfs; 998 struct lm_lockname name = { .ln_number = number, 999 .ln_type = glops->go_type, 1000 .ln_sbd = sdp }; 1001 struct gfs2_glock *gl, *tmp; 1002 struct address_space *mapping; 1003 struct kmem_cache *cachep; 1004 int ret = 0; 1005 1006 gl = find_insert_glock(&name, NULL); 1007 if (gl) { 1008 *glp = gl; 1009 return 0; 1010 } 1011 if (!create) 1012 return -ENOENT; 1013 1014 if (glops->go_flags & GLOF_ASPACE) 1015 cachep = gfs2_glock_aspace_cachep; 1016 else 1017 cachep = gfs2_glock_cachep; 1018 gl = kmem_cache_alloc(cachep, GFP_NOFS); 1019 if (!gl) 1020 return -ENOMEM; 1021 1022 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 1023 1024 if (glops->go_flags & GLOF_LVB) { 1025 gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); 1026 if (!gl->gl_lksb.sb_lvbptr) { 1027 kmem_cache_free(cachep, gl); 1028 return -ENOMEM; 1029 } 1030 } 1031 1032 atomic_inc(&sdp->sd_glock_disposal); 1033 gl->gl_node.next = NULL; 1034 gl->gl_flags = 0; 1035 gl->gl_name = name; 1036 gl->gl_lockref.count = 1; 1037 gl->gl_state = LM_ST_UNLOCKED; 1038 gl->gl_target = LM_ST_UNLOCKED; 1039 gl->gl_demote_state = LM_ST_EXCLUSIVE; 1040 gl->gl_ops = glops; 1041 gl->gl_dstamp = 0; 1042 preempt_disable(); 1043 /* We use the global stats to estimate the initial per-glock stats */ 1044 gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type]; 1045 preempt_enable(); 1046 gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0; 1047 gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0; 1048 gl->gl_tchange = jiffies; 1049 gl->gl_object = NULL; 1050 gl->gl_hold_time = GL_GLOCK_DFT_HOLD; 1051 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); 1052 INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func); 1053 1054 mapping = gfs2_glock2aspace(gl); 1055 if (mapping) { 1056 mapping->a_ops = &gfs2_meta_aops; 1057 mapping->host = s->s_bdev->bd_inode; 1058 mapping->flags = 0; 1059 mapping_set_gfp_mask(mapping, GFP_NOFS); 1060 mapping->private_data = NULL; 1061 mapping->writeback_index = 0; 1062 } 1063 1064 tmp = find_insert_glock(&name, gl); 1065 if (!tmp) { 1066 *glp = gl; 1067 goto out; 1068 } 1069 if (IS_ERR(tmp)) { 1070 ret = PTR_ERR(tmp); 1071 goto out_free; 1072 } 1073 *glp = tmp; 1074 1075 out_free: 1076 kfree(gl->gl_lksb.sb_lvbptr); 1077 kmem_cache_free(cachep, gl); 1078 atomic_dec(&sdp->sd_glock_disposal); 1079 1080 out: 1081 return ret; 1082 } 1083 1084 /** 1085 * gfs2_holder_init - initialize a struct gfs2_holder in the default way 1086 * @gl: the glock 1087 * @state: the state we're requesting 1088 * @flags: the modifier flags 1089 * @gh: the holder structure 1090 * 1091 */ 1092 1093 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, 1094 struct gfs2_holder *gh) 1095 { 1096 INIT_LIST_HEAD(&gh->gh_list); 1097 gh->gh_gl = gl; 1098 gh->gh_ip = _RET_IP_; 1099 gh->gh_owner_pid = get_pid(task_pid(current)); 1100 gh->gh_state = state; 1101 gh->gh_flags = flags; 1102 gh->gh_error = 0; 1103 gh->gh_iflags = 0; 1104 gfs2_glock_hold(gl); 1105 } 1106 1107 /** 1108 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it 1109 * @state: the state we're requesting 1110 * @flags: the modifier flags 1111 * @gh: the holder structure 1112 * 1113 * Don't mess with the glock. 
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gfs2_holder_mark_uninitialized(gh);
	gh->gh_ip = 0;
}

static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
					unsigned long start_time)
{
	/* Have we waited longer than a second? */
	if (time_after(jiffies, start_time + HZ)) {
		/* Lengthen the minimum hold time. */
		gl->gl_hold_time = min(gl->gl_hold_time + GL_GLOCK_HOLD_INCR,
				       GL_GLOCK_MAX_HOLD);
	}
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long start_time = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	gfs2_glock_update_hold_time(gh->gh_gl, start_time);
	return gh->gh_error;
}

static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int i;

	for (i = 0; i < num_gh; i++)
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags))
			return 1;
	return 0;
}

/**
 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
 * @num_gh: the number of holders in the array
 * @ghs: the glock holder array
 *
 * Returns: 0 on success, meaning all glocks have been granted and are held.
 * -ESTALE if the request timed out, meaning all glocks were released,
 * and the caller should retry the operation.
 */

int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
	int i, ret = 0, timeout = 0;
	unsigned long start_time = jiffies;
	bool keep_waiting;

	might_sleep();
	/*
	 * Total up the (minimum hold time * 2) of all glocks and use that to
	 * determine the max amount of time we should wait.
	 */
	for (i = 0; i < num_gh; i++)
		timeout += ghs[i].gh_gl->gl_hold_time << 1;

wait_for_dlm:
	if (!wait_event_timeout(sdp->sd_async_glock_wait,
				!glocks_pending(num_gh, ghs), timeout))
		ret = -ESTALE; /* request timed out. */

	/*
	 * If dlm granted all our requests, we need to adjust the glock
	 * minimum hold time values according to how long we waited.
	 *
	 * If our request timed out, we need to repeatedly release any held
	 * glocks we acquired thus far to allow dlm to acquire the remaining
	 * glocks without deadlocking. We cannot currently cancel outstanding
	 * glock acquisitions.
	 *
	 * The HIF_WAIT bit tells us which requests still need a response from
	 * dlm.
	 *
	 * If dlm sent us any errors, we return the first error we find.
	 */
	keep_waiting = false;
	for (i = 0; i < num_gh; i++) {
		/* Skip holders we have already dequeued below. */
		if (!gfs2_holder_queued(&ghs[i]))
			continue;
		/* Skip holders with a pending DLM response. */
		if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
			keep_waiting = true;
			continue;
		}

		if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) {
			if (ret == -ESTALE)
				gfs2_glock_dq(&ghs[i]);
			else
				gfs2_glock_update_hold_time(ghs[i].gh_gl,
							    start_time);
		}
		if (!ret)
			ret = ghs[i].gh_error;
	}

	if (keep_waiting)
		goto wait_for_dlm;

	/*
	 * At this point, we've either acquired all locks or released them all.
	 */
	return ret;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	if (delay)
		set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
	else
		gfs2_set_demote(gl);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
1308 * 1309 */ 1310 1311 static inline void add_to_queue(struct gfs2_holder *gh) 1312 __releases(&gl->gl_lockref.lock) 1313 __acquires(&gl->gl_lockref.lock) 1314 { 1315 struct gfs2_glock *gl = gh->gh_gl; 1316 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1317 struct list_head *insert_pt = NULL; 1318 struct gfs2_holder *gh2; 1319 int try_futile = 0; 1320 1321 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL); 1322 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) 1323 GLOCK_BUG_ON(gl, true); 1324 1325 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { 1326 if (test_bit(GLF_LOCK, &gl->gl_flags)) 1327 try_futile = !may_grant(gl, gh); 1328 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) 1329 goto fail; 1330 } 1331 1332 list_for_each_entry(gh2, &gl->gl_holders, gh_list) { 1333 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid && 1334 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK))) 1335 goto trap_recursive; 1336 if (try_futile && 1337 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { 1338 fail: 1339 gh->gh_error = GLR_TRYFAILED; 1340 gfs2_holder_wake(gh); 1341 return; 1342 } 1343 if (test_bit(HIF_HOLDER, &gh2->gh_iflags)) 1344 continue; 1345 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt)) 1346 insert_pt = &gh2->gh_list; 1347 } 1348 set_bit(GLF_QUEUED, &gl->gl_flags); 1349 trace_gfs2_glock_queue(gh, 1); 1350 gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); 1351 gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); 1352 if (likely(insert_pt == NULL)) { 1353 list_add_tail(&gh->gh_list, &gl->gl_holders); 1354 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY)) 1355 goto do_cancel; 1356 return; 1357 } 1358 list_add_tail(&gh->gh_list, insert_pt); 1359 do_cancel: 1360 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list); 1361 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { 1362 spin_unlock(&gl->gl_lockref.lock); 1363 if (sdp->sd_lockstruct.ls_ops->lm_cancel) 1364 sdp->sd_lockstruct.ls_ops->lm_cancel(gl); 1365 spin_lock(&gl->gl_lockref.lock); 1366 } 1367 return; 1368 1369 trap_recursive: 1370 fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip); 1371 fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid)); 1372 fs_err(sdp, "lock type: %d req lock state : %d\n", 1373 gh2->gh_gl->gl_name.ln_type, gh2->gh_state); 1374 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip); 1375 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid)); 1376 fs_err(sdp, "lock type: %d req lock state : %d\n", 1377 gh->gh_gl->gl_name.ln_type, gh->gh_state); 1378 gfs2_dump_glock(NULL, gl, true); 1379 BUG(); 1380 } 1381 1382 /** 1383 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock) 1384 * @gh: the holder structure 1385 * 1386 * if (gh->gh_flags & GL_ASYNC), this never returns an error 1387 * 1388 * Returns: 0, GLR_TRYFAILED, or errno on failure 1389 */ 1390 1391 int gfs2_glock_nq(struct gfs2_holder *gh) 1392 { 1393 struct gfs2_glock *gl = gh->gh_gl; 1394 int error = 0; 1395 1396 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP)) 1397 return -EIO; 1398 1399 if (test_bit(GLF_LRU, &gl->gl_flags)) 1400 gfs2_glock_remove_from_lru(gl); 1401 1402 spin_lock(&gl->gl_lockref.lock); 1403 add_to_queue(gh); 1404 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && 1405 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { 1406 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1407 gl->gl_lockref.count++; 1408 __gfs2_glock_queue_work(gl, 0); 1409 } 1410 run_queue(gl, 1); 1411 spin_unlock(&gl->gl_lockref.lock); 1412 1413 if (!(gh->gh_flags & GL_ASYNC)) 1414 error = gfs2_glock_wait(gh); 1415 1416 return error; 1417 } 1418 1419 /** 
1420 * gfs2_glock_poll - poll to see if an async request has been completed 1421 * @gh: the holder 1422 * 1423 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on 1424 */ 1425 1426 int gfs2_glock_poll(struct gfs2_holder *gh) 1427 { 1428 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; 1429 } 1430 1431 /** 1432 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock) 1433 * @gh: the glock holder 1434 * 1435 */ 1436 1437 void gfs2_glock_dq(struct gfs2_holder *gh) 1438 { 1439 struct gfs2_glock *gl = gh->gh_gl; 1440 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 1441 unsigned delay = 0; 1442 int fast_path = 0; 1443 1444 spin_lock(&gl->gl_lockref.lock); 1445 /* 1446 * If we're in the process of file system withdraw, we cannot just 1447 * dequeue any glocks until our journal is recovered, lest we 1448 * introduce file system corruption. We need two exceptions to this 1449 * rule: We need to allow unlocking of nondisk glocks and the glock 1450 * for our own journal that needs recovery. 1451 */ 1452 if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) && 1453 glock_blocked_by_withdraw(gl) && 1454 gh->gh_gl != sdp->sd_jinode_gl) { 1455 sdp->sd_glock_dqs_held++; 1456 might_sleep(); 1457 wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY, 1458 TASK_UNINTERRUPTIBLE); 1459 } 1460 if (gh->gh_flags & GL_NOCACHE) 1461 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1462 1463 list_del_init(&gh->gh_list); 1464 clear_bit(HIF_HOLDER, &gh->gh_iflags); 1465 if (find_first_holder(gl) == NULL) { 1466 if (list_empty(&gl->gl_holders) && 1467 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1468 !test_bit(GLF_DEMOTE, &gl->gl_flags)) 1469 fast_path = 1; 1470 } 1471 if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) 1472 gfs2_glock_add_to_lru(gl); 1473 1474 trace_gfs2_glock_queue(gh, 0); 1475 if (unlikely(!fast_path)) { 1476 gl->gl_lockref.count++; 1477 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 1478 !test_bit(GLF_DEMOTE, &gl->gl_flags) && 1479 gl->gl_name.ln_type == LM_TYPE_INODE) 1480 delay = gl->gl_hold_time; 1481 __gfs2_glock_queue_work(gl, delay); 1482 } 1483 spin_unlock(&gl->gl_lockref.lock); 1484 } 1485 1486 void gfs2_glock_dq_wait(struct gfs2_holder *gh) 1487 { 1488 struct gfs2_glock *gl = gh->gh_gl; 1489 gfs2_glock_dq(gh); 1490 might_sleep(); 1491 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE); 1492 } 1493 1494 /** 1495 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it 1496 * @gh: the holder structure 1497 * 1498 */ 1499 1500 void gfs2_glock_dq_uninit(struct gfs2_holder *gh) 1501 { 1502 gfs2_glock_dq(gh); 1503 gfs2_holder_uninit(gh); 1504 } 1505 1506 /** 1507 * gfs2_glock_nq_num - acquire a glock based on lock number 1508 * @sdp: the filesystem 1509 * @number: the lock number 1510 * @glops: the glock operations for the type of glock 1511 * @state: the state to acquire the glock in 1512 * @flags: modifier flags for the acquisition 1513 * @gh: the struct gfs2_holder 1514 * 1515 * Returns: errno 1516 */ 1517 1518 int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number, 1519 const struct gfs2_glock_operations *glops, 1520 unsigned int state, u16 flags, struct gfs2_holder *gh) 1521 { 1522 struct gfs2_glock *gl; 1523 int error; 1524 1525 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl); 1526 if (!error) { 1527 error = gfs2_glock_nq_init(gl, state, flags, gh); 1528 gfs2_glock_put(gl); 1529 } 1530 1531 return error; 1532 } 1533 1534 /** 1535 * glock_compare - Compare two struct gfs2_glock structures for sorting 1536 * @arg_a: the 
first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 * errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 *
 * Returns: 0 on success (all glocks acquired),
 * errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc_array(num_gh, sizeof(struct gfs2_holder *),
				    GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_lockref.lock);
	handle_callback(gl, state, delay, true);
	__gfs2_glock_queue_work(gl, delay);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
1669 * an error, (b) the locking operation was an unlock operation or 1670 * (c) if there is a "noexp" flagged request anywhere in the queue 1671 * 1672 * Returns: 1 if freezing should occur, 0 otherwise 1673 */ 1674 1675 static int gfs2_should_freeze(const struct gfs2_glock *gl) 1676 { 1677 const struct gfs2_holder *gh; 1678 1679 if (gl->gl_reply & ~LM_OUT_ST_MASK) 1680 return 0; 1681 if (gl->gl_target == LM_ST_UNLOCKED) 1682 return 0; 1683 1684 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1685 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) 1686 continue; 1687 if (LM_FLAG_NOEXP & gh->gh_flags) 1688 return 0; 1689 } 1690 1691 return 1; 1692 } 1693 1694 /** 1695 * gfs2_glock_complete - Callback used by locking 1696 * @gl: Pointer to the glock 1697 * @ret: The return value from the dlm 1698 * 1699 * The gl_reply field is under the gl_lockref.lock lock so that it is ok 1700 * to use a bitfield shared with other glock state fields. 1701 */ 1702 1703 void gfs2_glock_complete(struct gfs2_glock *gl, int ret) 1704 { 1705 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; 1706 1707 spin_lock(&gl->gl_lockref.lock); 1708 gl->gl_reply = ret; 1709 1710 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { 1711 if (gfs2_should_freeze(gl)) { 1712 set_bit(GLF_FROZEN, &gl->gl_flags); 1713 spin_unlock(&gl->gl_lockref.lock); 1714 return; 1715 } 1716 } 1717 1718 gl->gl_lockref.count++; 1719 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1720 __gfs2_glock_queue_work(gl, 0); 1721 spin_unlock(&gl->gl_lockref.lock); 1722 } 1723 1724 static int glock_cmp(void *priv, struct list_head *a, struct list_head *b) 1725 { 1726 struct gfs2_glock *gla, *glb; 1727 1728 gla = list_entry(a, struct gfs2_glock, gl_lru); 1729 glb = list_entry(b, struct gfs2_glock, gl_lru); 1730 1731 if (gla->gl_name.ln_number > glb->gl_name.ln_number) 1732 return 1; 1733 if (gla->gl_name.ln_number < glb->gl_name.ln_number) 1734 return -1; 1735 1736 return 0; 1737 } 1738 1739 /** 1740 * gfs2_dispose_glock_lru - Demote a list of glocks 1741 * @list: The list to dispose of 1742 * 1743 * Disposing of glocks may involve disk accesses, so that here we sort 1744 * the glocks by number (i.e. disk location of the inodes) so that if 1745 * there are any such accesses, they'll be sent in order (mostly). 1746 * 1747 * Must be called under the lru_lock, but may drop and retake this 1748 * lock. 
While the lru_lock is dropped, entries may vanish from the 1749 * list, but no new entries will appear on the list (since it is 1750 * private) 1751 */ 1752 1753 static void gfs2_dispose_glock_lru(struct list_head *list) 1754 __releases(&lru_lock) 1755 __acquires(&lru_lock) 1756 { 1757 struct gfs2_glock *gl; 1758 1759 list_sort(NULL, list, glock_cmp); 1760 1761 while(!list_empty(list)) { 1762 gl = list_first_entry(list, struct gfs2_glock, gl_lru); 1763 list_del_init(&gl->gl_lru); 1764 if (!spin_trylock(&gl->gl_lockref.lock)) { 1765 add_back_to_lru: 1766 list_add(&gl->gl_lru, &lru_list); 1767 set_bit(GLF_LRU, &gl->gl_flags); 1768 atomic_inc(&lru_count); 1769 continue; 1770 } 1771 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1772 spin_unlock(&gl->gl_lockref.lock); 1773 goto add_back_to_lru; 1774 } 1775 gl->gl_lockref.count++; 1776 if (demote_ok(gl)) 1777 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1778 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); 1779 __gfs2_glock_queue_work(gl, 0); 1780 spin_unlock(&gl->gl_lockref.lock); 1781 cond_resched_lock(&lru_lock); 1782 } 1783 } 1784 1785 /** 1786 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote 1787 * @nr: The number of entries to scan 1788 * 1789 * This function selects the entries on the LRU which are able to 1790 * be demoted, and then kicks off the process by calling 1791 * gfs2_dispose_glock_lru() above. 1792 */ 1793 1794 static long gfs2_scan_glock_lru(int nr) 1795 { 1796 struct gfs2_glock *gl; 1797 LIST_HEAD(skipped); 1798 LIST_HEAD(dispose); 1799 long freed = 0; 1800 1801 spin_lock(&lru_lock); 1802 while ((nr-- >= 0) && !list_empty(&lru_list)) { 1803 gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru); 1804 1805 /* Test for being demotable */ 1806 if (!test_bit(GLF_LOCK, &gl->gl_flags)) { 1807 list_move(&gl->gl_lru, &dispose); 1808 atomic_dec(&lru_count); 1809 clear_bit(GLF_LRU, &gl->gl_flags); 1810 freed++; 1811 continue; 1812 } 1813 1814 list_move(&gl->gl_lru, &skipped); 1815 } 1816 list_splice(&skipped, &lru_list); 1817 if (!list_empty(&dispose)) 1818 gfs2_dispose_glock_lru(&dispose); 1819 spin_unlock(&lru_lock); 1820 1821 return freed; 1822 } 1823 1824 static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink, 1825 struct shrink_control *sc) 1826 { 1827 if (!(sc->gfp_mask & __GFP_FS)) 1828 return SHRINK_STOP; 1829 return gfs2_scan_glock_lru(sc->nr_to_scan); 1830 } 1831 1832 static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink, 1833 struct shrink_control *sc) 1834 { 1835 return vfs_pressure_ratio(atomic_read(&lru_count)); 1836 } 1837 1838 static struct shrinker glock_shrinker = { 1839 .seeks = DEFAULT_SEEKS, 1840 .count_objects = gfs2_glock_shrink_count, 1841 .scan_objects = gfs2_glock_shrink_scan, 1842 }; 1843 1844 /** 1845 * examine_bucket - Call a function for glock in a hash bucket 1846 * @examiner: the function 1847 * @sdp: the filesystem 1848 * @bucket: the bucket 1849 * 1850 * Note that the function can be called multiple times on the same 1851 * object. So the user must ensure that the function can cope with 1852 * that. 
1853 */ 1854 1855 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) 1856 { 1857 struct gfs2_glock *gl; 1858 struct rhashtable_iter iter; 1859 1860 rhashtable_walk_enter(&gl_hash_table, &iter); 1861 1862 do { 1863 rhashtable_walk_start(&iter); 1864 1865 while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) 1866 if (gl->gl_name.ln_sbd == sdp && 1867 lockref_get_not_dead(&gl->gl_lockref)) 1868 examiner(gl); 1869 1870 rhashtable_walk_stop(&iter); 1871 } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); 1872 1873 rhashtable_walk_exit(&iter); 1874 } 1875 1876 bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay) 1877 { 1878 bool queued; 1879 1880 spin_lock(&gl->gl_lockref.lock); 1881 queued = queue_delayed_work(gfs2_delete_workqueue, 1882 &gl->gl_delete, delay); 1883 if (queued) 1884 set_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1885 spin_unlock(&gl->gl_lockref.lock); 1886 return queued; 1887 } 1888 1889 void gfs2_cancel_delete_work(struct gfs2_glock *gl) 1890 { 1891 if (cancel_delayed_work_sync(&gl->gl_delete)) { 1892 clear_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1893 gfs2_glock_put(gl); 1894 } 1895 } 1896 1897 bool gfs2_delete_work_queued(const struct gfs2_glock *gl) 1898 { 1899 return test_bit(GLF_PENDING_DELETE, &gl->gl_flags); 1900 } 1901 1902 static void flush_delete_work(struct gfs2_glock *gl) 1903 { 1904 if (cancel_delayed_work(&gl->gl_delete)) { 1905 queue_delayed_work(gfs2_delete_workqueue, 1906 &gl->gl_delete, 0); 1907 } 1908 gfs2_glock_queue_work(gl, 0); 1909 } 1910 1911 void gfs2_flush_delete_work(struct gfs2_sbd *sdp) 1912 { 1913 glock_hash_walk(flush_delete_work, sdp); 1914 flush_workqueue(gfs2_delete_workqueue); 1915 } 1916 1917 /** 1918 * thaw_glock - thaw out a glock which has an unprocessed reply waiting 1919 * @gl: The glock to thaw 1920 * 1921 */ 1922 1923 static void thaw_glock(struct gfs2_glock *gl) 1924 { 1925 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) { 1926 gfs2_glock_put(gl); 1927 return; 1928 } 1929 set_bit(GLF_REPLY_PENDING, &gl->gl_flags); 1930 gfs2_glock_queue_work(gl, 0); 1931 } 1932 1933 /** 1934 * clear_glock - look at a glock and see if we can free it from glock cache 1935 * @gl: the glock to look at 1936 * 1937 */ 1938 1939 static void clear_glock(struct gfs2_glock *gl) 1940 { 1941 gfs2_glock_remove_from_lru(gl); 1942 1943 spin_lock(&gl->gl_lockref.lock); 1944 if (gl->gl_state != LM_ST_UNLOCKED) 1945 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1946 __gfs2_glock_queue_work(gl, 0); 1947 spin_unlock(&gl->gl_lockref.lock); 1948 } 1949 1950 /** 1951 * gfs2_glock_thaw - Thaw any frozen glocks 1952 * @sdp: The super block 1953 * 1954 */ 1955 1956 void gfs2_glock_thaw(struct gfs2_sbd *sdp) 1957 { 1958 glock_hash_walk(thaw_glock, sdp); 1959 } 1960 1961 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid) 1962 { 1963 spin_lock(&gl->gl_lockref.lock); 1964 gfs2_dump_glock(seq, gl, fsid); 1965 spin_unlock(&gl->gl_lockref.lock); 1966 } 1967 1968 static void dump_glock_func(struct gfs2_glock *gl) 1969 { 1970 dump_glock(NULL, gl, true); 1971 } 1972 1973 /** 1974 * gfs2_gl_hash_clear - Empty out the glock hash table 1975 * @sdp: the filesystem 1976 * @wait: wait until it's all gone 1977 * 1978 * Called when unmounting the filesystem. 
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
        set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
        flush_workqueue(glock_workqueue);
        glock_hash_walk(clear_glock, sdp);
        flush_workqueue(glock_workqueue);
        wait_event_timeout(sdp->sd_glock_wait,
                           atomic_read(&sdp->sd_glock_disposal) == 0,
                           HZ * 600);
        glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
        struct gfs2_glock *gl = ip->i_gl;
        int ret;

        ret = gfs2_truncatei_resume(ip);
        gfs2_glock_assert_withdraw(gl, ret == 0);

        spin_lock(&gl->gl_lockref.lock);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl, 1);
        spin_unlock(&gl->gl_lockref.lock);
}

static const char *state2str(unsigned state)
{
        switch(state) {
        case LM_ST_UNLOCKED:
                return "UN";
        case LM_ST_SHARED:
                return "SH";
        case LM_ST_DEFERRED:
                return "DF";
        case LM_ST_EXCLUSIVE:
                return "EX";
        }
        return "??";
}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{
        char *p = buf;
        if (flags & LM_FLAG_TRY)
                *p++ = 't';
        if (flags & LM_FLAG_TRY_1CB)
                *p++ = 'T';
        if (flags & LM_FLAG_NOEXP)
                *p++ = 'e';
        if (flags & LM_FLAG_ANY)
                *p++ = 'A';
        if (flags & LM_FLAG_PRIORITY)
                *p++ = 'p';
        if (flags & GL_ASYNC)
                *p++ = 'a';
        if (flags & GL_EXACT)
                *p++ = 'E';
        if (flags & GL_NOCACHE)
                *p++ = 'c';
        if (test_bit(HIF_HOLDER, &iflags))
                *p++ = 'H';
        if (test_bit(HIF_WAIT, &iflags))
                *p++ = 'W';
        if (test_bit(HIF_FIRST, &iflags))
                *p++ = 'F';
        *p = 0;
        return buf;
}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 * @fs_id_buf: pointer to file system id (if requested)
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
                        const char *fs_id_buf)
{
        struct task_struct *gh_owner = NULL;
        char flags_buf[32];

        rcu_read_lock();
        if (gh->gh_owner_pid)
                gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
        gfs2_print_dbg(seq, "%s H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
                       fs_id_buf, state2str(gh->gh_state),
                       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
                       gh->gh_error,
                       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
                       gh_owner ?
                       gh_owner->comm : "(ended)",
                       (void *)gh->gh_ip);
        rcu_read_unlock();
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
        const unsigned long *gflags = &gl->gl_flags;
        char *p = buf;

        if (test_bit(GLF_LOCK, gflags))
                *p++ = 'l';
        if (test_bit(GLF_DEMOTE, gflags))
                *p++ = 'D';
        if (test_bit(GLF_PENDING_DEMOTE, gflags))
                *p++ = 'd';
        if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
                *p++ = 'p';
        if (test_bit(GLF_DIRTY, gflags))
                *p++ = 'y';
        if (test_bit(GLF_LFLUSH, gflags))
                *p++ = 'f';
        if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
                *p++ = 'i';
        if (test_bit(GLF_REPLY_PENDING, gflags))
                *p++ = 'r';
        if (test_bit(GLF_INITIAL, gflags))
                *p++ = 'I';
        if (test_bit(GLF_FROZEN, gflags))
                *p++ = 'F';
        if (test_bit(GLF_QUEUED, gflags))
                *p++ = 'q';
        if (test_bit(GLF_LRU, gflags))
                *p++ = 'L';
        if (gl->gl_object)
                *p++ = 'o';
        if (test_bit(GLF_BLOCKING, gflags))
                *p++ = 'b';
        if (test_bit(GLF_INODE_CREATING, gflags))
                *p++ = 'c';
        if (test_bit(GLF_PENDING_DELETE, gflags))
                *p++ = 'P';
        if (test_bit(GLF_FREEING, gflags))
                *p++ = 'x';
        *p = 0;
        return buf;
}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 * @fsid: If true, also dump the file system id
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
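 *
 * For example (illustrative only; the exact flag letters depend on the
 * glock's state), an exclusively held inode glock and its holder might
 * appear as:
 *
 *   G: s:EX n:2/827b f:yIqo t:EX d:UN/0 a:0 v:0 r:3 m:200 p:1
 *    H: s:EX f:H e:0 p:1329 [dd] gfs2_inode_lookup+0x11d/0x2d0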
 *
 */

void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned long long dtime;
        const struct gfs2_holder *gh;
        char gflags_buf[32];
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
        unsigned long nrpages = 0;

        if (gl->gl_ops->go_flags & GLOF_ASPACE) {
                struct address_space *mapping = gfs2_glock2aspace(gl);

                nrpages = mapping->nrpages;
        }
        memset(fs_id_buf, 0, sizeof(fs_id_buf));
        if (fsid && sdp) /* safety precaution */
                sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
        dtime = jiffies - gl->gl_demote_time;
        dtime *= 1000000/HZ; /* demote time in uSec */
        if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
                dtime = 0;
        gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
                       "v:%d r:%d m:%ld p:%lu\n",
                       fs_id_buf, state2str(gl->gl_state),
                       gl->gl_name.ln_type,
                       (unsigned long long)gl->gl_name.ln_number,
                       gflags2str(gflags_buf, gl),
                       state2str(gl->gl_target),
                       state2str(gl->gl_demote_state), dtime,
                       atomic_read(&gl->gl_ail_count),
                       atomic_read(&gl->gl_revokes),
                       (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);

        list_for_each_entry(gh, &gl->gl_holders, gh_list)
                dump_holder(seq, gh, fs_id_buf);

        if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
                glops->go_dump(seq, gl, fs_id_buf);
}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
        struct gfs2_glock *gl = iter_ptr;

        seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
                   gl->gl_name.ln_type,
                   (unsigned long long)gl->gl_name.ln_number,
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
                   (unsigned long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
        return 0;
}

static const char *gfs2_gltype[] = {
        "type",
        "reserved",
        "nondisk",
        "inode",
        "rgrp",
        "meta",
        "iopen",
        "flock",
        "plock",
        "quota",
        "journal",
};

static const char *gfs2_stype[] = {
        [GFS2_LKS_SRTT]     = "srtt",
        [GFS2_LKS_SRTTVAR]  = "srttvar",
        [GFS2_LKS_SRTTB]    = "srttb",
        [GFS2_LKS_SRTTVARB] = "srttvarb",
        [GFS2_LKS_SIRT]     = "sirt",
        [GFS2_LKS_SIRTVAR]  = "sirtvar",
        [GFS2_LKS_DCOUNT]   = "dlm",
        [GFS2_LKS_QCOUNT]   = "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
        struct gfs2_sbd *sdp = seq->private;
        loff_t pos = *(loff_t *)iter_ptr;
        unsigned index = pos >> 3;
        unsigned subindex = pos & 0x07;
        int i;

        if (index == 0 && subindex != 0)
                return 0;

        seq_printf(seq, "%-10s %8s:", gfs2_gltype[index],
                   (index == 0) ? "cpu": gfs2_stype[subindex]);

        for_each_possible_cpu(i) {
                const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i);

                if (index == 0)
                        seq_printf(seq, " %15u", i);
                else
                        seq_printf(seq, " %15llu", (unsigned long long)lkstats->
                                   lkstats[index - 1].stats[subindex]);
        }
        seq_putc(seq, '\n');
        return 0;
}

int __init gfs2_glock_init(void)
{
        int i, ret;

        ret = rhashtable_init(&gl_hash_table, &ht_parms);
        if (ret < 0)
                return ret;

        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
                                          WQ_HIGHPRI | WQ_FREEZABLE, 0);
        if (!glock_workqueue) {
                rhashtable_destroy(&gl_hash_table);
                return -ENOMEM;
        }
        gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
                                                WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                                0);
        if (!gfs2_delete_workqueue) {
                destroy_workqueue(glock_workqueue);
                rhashtable_destroy(&gl_hash_table);
                return -ENOMEM;
        }

        ret = register_shrinker(&glock_shrinker);
        if (ret) {
                destroy_workqueue(gfs2_delete_workqueue);
                destroy_workqueue(glock_workqueue);
                rhashtable_destroy(&gl_hash_table);
                return ret;
        }

        for (i = 0; i < GLOCK_WAIT_TABLE_SIZE; i++)
                init_waitqueue_head(glock_wait_table + i);

        return 0;
}

void gfs2_glock_exit(void)
{
        unregister_shrinker(&glock_shrinker);
        rhashtable_destroy(&gl_hash_table);
        destroy_workqueue(glock_workqueue);
        destroy_workqueue(gfs2_delete_workqueue);
}

static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
{
        struct gfs2_glock *gl = gi->gl;

        if (gl) {
                if (n == 0)
                        return;
                if (!lockref_put_not_zero(&gl->gl_lockref))
                        gfs2_glock_queue_put(gl);
        }
        for (;;) {
                gl = rhashtable_walk_next(&gi->hti);
                if (IS_ERR_OR_NULL(gl)) {
                        if (gl == ERR_PTR(-EAGAIN)) {
                                n = 1;
                                continue;
                        }
                        gl = NULL;
                        break;
                }
                if (gl->gl_name.ln_sbd != gi->sdp)
                        continue;
                if (n <= 1) {
                        if (!lockref_get_not_dead(&gl->gl_lockref))
                                continue;
                        break;
                } else {
                        if (__lockref_is_dead(&gl->gl_lockref))
                                continue;
                        n--;
                }
        }
        gi->gl = gl;
}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        struct gfs2_glock_iter *gi = seq->private;
        loff_t n;

        /*
         * We can either stay where we are, skip to the next hash table
         * entry, or start from the beginning.
         */
        if (*pos < gi->last_pos) {
                rhashtable_walk_exit(&gi->hti);
                rhashtable_walk_enter(&gl_hash_table, &gi->hti);
                n = *pos + 1;
        } else {
                n = *pos - gi->last_pos;
        }

        rhashtable_walk_start(&gi->hti);

        gfs2_glock_iter_next(gi, n);
        gi->last_pos = *pos;
        return gi->gl;
}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
                                 loff_t *pos)
{
        struct gfs2_glock_iter *gi = seq->private;

        (*pos)++;
        gi->last_pos = *pos;
        gfs2_glock_iter_next(gi, 1);
        return gi->gl;
}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
        __releases(RCU)
{
        struct gfs2_glock_iter *gi = seq->private;

        rhashtable_walk_stop(&gi->hti);
}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
        dump_glock(seq, iter_ptr, false);
        return 0;
}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{
        preempt_disable();
        if (*pos >= GFS2_NR_SBSTATS)
                return NULL;
        return pos;
}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
                                   loff_t *pos)
{
        (*pos)++;
        if (*pos >= GFS2_NR_SBSTATS)
                return NULL;
        return pos;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
        preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
        .start = gfs2_glock_seq_start,
        .next  = gfs2_glock_seq_next,
        .stop  = gfs2_glock_seq_stop,
        .show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
        .start = gfs2_sbstats_seq_start,
        .next  = gfs2_sbstats_seq_next,
        .stop  = gfs2_sbstats_seq_stop,
        .show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)

static int __gfs2_glocks_open(struct inode *inode, struct file *file,
                              const struct seq_operations *ops)
{
        int ret = seq_open_private(file, ops, sizeof(struct gfs2_glock_iter));
        if (ret == 0) {
                struct seq_file *seq = file->private_data;
                struct gfs2_glock_iter *gi = seq->private;

                gi->sdp = inode->i_private;
                seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
                if (seq->buf)
                        seq->size = GFS2_SEQ_GOODSIZE;
                /*
                 * Initially, we are "before" the first hash table entry; the
                 * first call to rhashtable_walk_next gets us the first entry.
                 */
                gi->last_pos = -1;
                gi->gl = NULL;
                rhashtable_walk_enter(&gl_hash_table, &gi->hti);
        }
        return ret;
}

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
        return __gfs2_glocks_open(inode, file, &gfs2_glock_seq_ops);
}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct gfs2_glock_iter *gi = seq->private;

        if (gi->gl)
                gfs2_glock_put(gi->gl);
        rhashtable_walk_exit(&gi->hti);
        return seq_release_private(inode, file);
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
        return __gfs2_glocks_open(inode, file, &gfs2_glstats_seq_ops);
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
        int ret = seq_open(file, &gfs2_sbstats_seq_ops);
        if (ret == 0) {
                struct seq_file *seq = file->private_data;
                seq->private = inode->i_private; /* sdp */
        }
        return ret;
}

static const struct file_operations gfs2_glocks_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_glocks_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_glstats_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = gfs2_glocks_release,
};

static const struct file_operations gfs2_sbstats_fops = {
        .owner   = THIS_MODULE,
        .open    = gfs2_sbstats_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
        sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);

        debugfs_create_file("glocks", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
                            &gfs2_glocks_fops);

        debugfs_create_file("glstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
                            &gfs2_glstats_fops);

        debugfs_create_file("sbstats", S_IFREG | S_IRUGO, sdp->debugfs_dir, sdp,
                            &gfs2_sbstats_fops);
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
        debugfs_remove_recursive(sdp->debugfs_dir);
        sdp->debugfs_dir = NULL;
}

void gfs2_register_debugfs(void)
{
        gfs2_root = debugfs_create_dir("gfs2", NULL);
}

void gfs2_unregister_debugfs(void)
{
        debugfs_remove(gfs2_root);
        gfs2_root = NULL;
}
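
/*
 * Note on the debugfs interface created above: with debugfs mounted in its
 * usual place, each mounted filesystem gets (for example)
 *
 *   /sys/kernel/debug/gfs2/<table name>/glocks   - gfs2_dump_glock() output
 *   /sys/kernel/debug/gfs2/<table name>/glstats  - per-glock lock statistics
 *   /sys/kernel/debug/gfs2/<table name>/sbstats  - per-CPU lock statistics
 *
 * where <table name> is sdp->sd_table_name (typically "<cluster>:<fsname>").
 */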