/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index         */
	struct gfs2_sbd *sdp;		/* incore superblock         */
	struct gfs2_glock *gl;		/* current glock struct      */
	char string[512];		/* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}
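/*
 * Illustrative sketch (a reader's aid, not a function in this file):
 * the RCU rules that make the lockless hash table above safe.  A
 * reader may walk a bucket under rcu_read_lock(), but must take a
 * reference with atomic_inc_not_zero() before using a glock, since a
 * glock whose refcount has already hit zero may be queued for freeing
 * via call_rcu() at any moment:
 *
 *	rcu_read_lock();
 *	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
 *		if (lm_name_equal(&gl->gl_name, name) &&
 *		    atomic_inc_not_zero(&gl->gl_ref))
 *			break;	// safe: ref taken before leaving RCU
 *	}
 *	rcu_read_unlock();
 *
 * search_bucket() below follows exactly this pattern.
 */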
/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	__gfs2_glock_remove_from_lru(gl);
	spin_unlock(&lru_lock);
}

/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.
 */

static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	if (demote_ok(gl))
		gfs2_glock_add_to_lru(gl);
}
/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
		__gfs2_glock_remove_from_lru(gl);
		spin_unlock(&lru_lock);
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket to search
 * @sdp: the filesystem the glock belongs to
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM, or 0 to fail queued "try" locks
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}
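/*
 * Reader's aid, distilled from may_grant() above (not an authoritative
 * DLM compatibility table): holders of the same glock may share it as
 * follows.
 *
 *	- SH alongside SH: shared
 *	- DF alongside DF: shared (deferred mode is used for direct I/O)
 *	- EX: never shared; an EX request, or any request queued behind
 *	  one, waits until it reaches the head of the queue
 *	- LM_FLAG_ANY: satisfied by any state other than UN
 *	- GL_EXACT: satisfied only by exactly the requested state
 */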
/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}
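/*
 * Overview of a state change, as implemented below (a sketch to aid
 * reading, not new behaviour): run_queue() decides on a target state
 * and calls do_xmote(), which hands the request to the lock module via
 * lm_lock().  When the DLM answers, gfs2_glock_complete() stores the
 * reply, sets GLF_REPLY_PENDING and queues glock_work_func(), which
 * calls finish_xmote() below to match the reply against gl_target and
 * to promote any waiting holders.
 */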
/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}
/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay)
		gfs2_glock_put(gl);
	else {
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
			gfs2_glock_put(gl);
	}
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_sbd = sdp;
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	gl->gl_dstamp = ktime_set(0, 0);
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}
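/*
 * Illustrative example (hypothetical caller, not from this file): how a
 * glock is looked up or created.  gfs2_glock_get() only builds the
 * in-core structure; it does not issue any locking request.
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_put(gl);	// drop the ref taken by gfs2_glock_get()
 */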
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}
/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately, otherwise mark the demote as pending
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		/* Bound the write to the scratch buffer, and emit it as a
		   plain string so that any '%' in the output is not
		   reinterpreted as a format specifier. */
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_puts(seq, gi->string);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	if (test_bit(GLF_LRU, &gl->gl_flags))
		gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
		__gfs2_glock_schedule_for_reclaim(gl);
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE)
		delay = gl->gl_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
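/*
 * Illustrative example (hypothetical caller): the usual holder life
 * cycle around the functions above.  gfs2_glock_nq_init() (glock.h) is
 * the common shorthand for gfs2_holder_init() + gfs2_glock_nq().
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);	// may sleep in gfs2_glock_wait()
 *	if (error) {
 *		gfs2_holder_uninit(&gh);
 *		return error;
 *	}
 *	... protected region ...
 *	gfs2_glock_dq_uninit(&gh);	// release and drop the holder
 */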
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch space for an array of holder pointers
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq(&ghs[num_gh]);
}
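/*
 * Illustrative example (hypothetical caller): acquiring two glocks at
 * once.  gfs2_glock_nq_m() sorts the holders by lock number (see
 * glock_compare() above) so every caller takes them in the same global
 * order, which is what makes the operation deadlock free.
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);	// 0 => both acquired
 *	if (!error)
 *		gfs2_glock_dq_uninit_m(2, ghs);	// see below
 */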
/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	while (num_gh--)
		gfs2_glock_dq_uninit(&ghs[num_gh]);
}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gfs2_glock_hold(gl);
	holdtime = gl->gl_tchange + gl->gl_hold_time;
	if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
	    gl->gl_name.ln_type == LM_TYPE_INODE) {
		if (time_before(now, holdtime))
			delay = holdtime - now;
		if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
			delay = gl->gl_hold_time;
	}

	spin_lock(&gl->gl_spin);
	handle_callback(gl, state, delay);
	spin_unlock(&gl->gl_spin);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (LM_FLAG_NOEXP & gh->gh_flags)
			return 0;
	}

	return 1;
}
/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_spin lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;

	spin_lock(&gl->gl_spin);
	gl->gl_reply = ret;

	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
		if (gfs2_should_freeze(gl)) {
			set_bit(GLF_FROZEN, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			return;
		}
	}

	spin_unlock(&gl->gl_spin);
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	smp_wmb();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}


static int gfs2_shrink_glock_memory(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct gfs2_glock *gl;
	int may_demote;
	int nr_skipped = 0;
	int nr = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;
	LIST_HEAD(skipped);

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		clear_bit(GLF_LRU, &gl->gl_flags);
		atomic_dec(&lru_count);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			gfs2_glock_hold(gl);
			spin_unlock(&lru_lock);
			spin_lock(&gl->gl_spin);
			may_demote = demote_ok(gl);
			if (may_demote) {
				handle_callback(gl, LM_ST_UNLOCKED, 0);
				nr--;
			}
			clear_bit(GLF_LOCK, &gl->gl_flags);
			smp_mb__after_clear_bit();
			if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
				gfs2_glock_put_nolock(gl);
			spin_unlock(&gl->gl_spin);
			spin_lock(&lru_lock);
			continue;
		}
		nr_skipped++;
		list_add(&gl->gl_lru, &skipped);
		set_bit(GLF_LRU, &gl->gl_flags);
	}
	list_splice(&skipped, &lru_list);
	atomic_add(nr_skipped, &lru_count);
	spin_unlock(&lru_lock);
out:
	return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the hash index of the bucket
 *
 */

static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
			   unsigned int hash)
{
	struct gfs2_glock *gl;
	struct hlist_bl_head *head = &gl_hash_table[hash];
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
			examiner(gl);
	}
	rcu_read_unlock();
	cond_resched();
}

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{
	unsigned x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
		examine_bucket(examiner, sdp, x);
}
/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
 * so this has to result in the ref count being dropped by one.
 */

static void thaw_glock(struct gfs2_glock *gl)
{
	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		return;
	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	gfs2_glock_remove_from_lru(gl);

	spin_lock(&gl->gl_spin);
	if (gl->gl_state != LM_ST_UNLOCKED)
		handle_callback(gl, LM_ST_UNLOCKED, 0);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{
	glock_hash_walk(thaw_glock, sdp);
}

static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = __dump_glock(seq, gl);
	spin_unlock(&gl->gl_spin);
	return ret;
}

static void dump_glock_func(struct gfs2_glock *gl)
{
	dump_glock(NULL, gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
	glock_hash_walk(clear_glock, sdp);
	flush_workqueue(glock_workqueue);
	wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
	glock_hash_walk(dump_glock_func, sdp);
}

void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
	struct gfs2_glock *gl = ip->i_gl;
	int ret;

	ret = gfs2_truncatei_resume(ip);
	gfs2_assert_withdraw(gl->gl_sbd, ret == 0);

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);
}

static const char *state2str(unsigned state)
{
	switch(state) {
	case LM_ST_UNLOCKED:
		return "UN";
	case LM_ST_SHARED:
		return "SH";
	case LM_ST_DEFERRED:
		return "DF";
	case LM_ST_EXCLUSIVE:
		return "EX";
	}
	return "??";
}

static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
{
	char *p = buf;
	if (flags & LM_FLAG_TRY)
		*p++ = 't';
	if (flags & LM_FLAG_TRY_1CB)
		*p++ = 'T';
	if (flags & LM_FLAG_NOEXP)
		*p++ = 'e';
	if (flags & LM_FLAG_ANY)
		*p++ = 'A';
	if (flags & LM_FLAG_PRIORITY)
		*p++ = 'p';
	if (flags & GL_ASYNC)
		*p++ = 'a';
	if (flags & GL_EXACT)
		*p++ = 'E';
	if (flags & GL_NOCACHE)
		*p++ = 'c';
	if (test_bit(HIF_HOLDER, &iflags))
		*p++ = 'H';
	if (test_bit(HIF_WAIT, &iflags))
		*p++ = 'W';
	if (test_bit(HIF_FIRST, &iflags))
		*p++ = 'F';
	*p = 0;
	return buf;
}
/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
	struct task_struct *gh_owner = NULL;
	char flags_buf[32];

	if (gh->gh_owner_pid)
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
	gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
		       state2str(gh->gh_state),
		       hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
		       gh->gh_error,
		       gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
		       gh_owner ? gh_owner->comm : "(ended)",
		       (void *)gh->gh_ip);
	return 0;
}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{
	const unsigned long *gflags = &gl->gl_flags;
	char *p = buf;

	if (test_bit(GLF_LOCK, gflags))
		*p++ = 'l';
	if (test_bit(GLF_DEMOTE, gflags))
		*p++ = 'D';
	if (test_bit(GLF_PENDING_DEMOTE, gflags))
		*p++ = 'd';
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
		*p++ = 'p';
	if (test_bit(GLF_DIRTY, gflags))
		*p++ = 'y';
	if (test_bit(GLF_LFLUSH, gflags))
		*p++ = 'f';
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
		*p++ = 'i';
	if (test_bit(GLF_REPLY_PENDING, gflags))
		*p++ = 'r';
	if (test_bit(GLF_INITIAL, gflags))
		*p++ = 'I';
	if (test_bit(GLF_FROZEN, gflags))
		*p++ = 'F';
	if (test_bit(GLF_QUEUED, gflags))
		*p++ = 'q';
	if (test_bit(GLF_LRU, gflags))
		*p++ = 'L';
	if (gl->gl_object)
		*p++ = 'o';
	if (test_bit(GLF_BLOCKING, gflags))
		*p++ = 'b';
	*p = 0;
	return buf;
}
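/*
 * Example of the resulting output (illustrative values only): a shared,
 * queued inode glock with one granted holder might appear in the
 * "glocks" debugfs file as
 *
 *	G: s:SH n:2/27bb f:q t:SH d:EX/0 a:0 v:0 r:3 m:200
 *	 H: s:SH f:H e:0 p:1404 [cat] gfs2_open+0x8e/0x100 [gfs2]
 *
 * decoded by state2str(), gflags2str() and hflags2str() above; the
 * full field key is given in the comment on __dump_glock() below.
 */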
/**
 * __dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces for
 * example. The fields are n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned long long dtime;
	const struct gfs2_holder *gh;
	char gflags_buf[32];
	int error = 0;

	dtime = jiffies - gl->gl_demote_time;
	dtime *= 1000000/HZ; /* demote time in uSec */
	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
		dtime = 0;
	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
		       state2str(gl->gl_state),
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number,
		       gflags2str(gflags_buf, gl),
		       state2str(gl->gl_target),
		       state2str(gl->gl_demote_state), dtime,
		       atomic_read(&gl->gl_ail_count),
		       atomic_read(&gl->gl_revokes),
		       atomic_read(&gl->gl_ref), gl->gl_hold_time);

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder(seq, gh);
		if (error)
			goto out;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
		error = glops->go_dump(seq, gl);
out:
	return error;
}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{
	struct gfs2_glock *gl = iter_ptr;

	seq_printf(seq, "G: n:%u/%llx rtt:%lld/%lld rttb:%lld/%lld irt:%lld/%lld dcnt: %lld qcnt: %lld\n",
		   gl->gl_name.ln_type,
		   (unsigned long long)gl->gl_name.ln_number,
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SRTTVARB],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_SIRTVAR],
		   (long long)gl->gl_stats.stats[GFS2_LKS_DCOUNT],
		   (long long)gl->gl_stats.stats[GFS2_LKS_QCOUNT]);
	return 0;
}

static const char *gfs2_gltype[] = {
	"type",
	"reserved",
	"nondisk",
	"inode",
	"rgrp",
	"meta",
	"iopen",
	"flock",
	"plock",
	"quota",
	"journal",
};

static const char *gfs2_stype[] = {
	[GFS2_LKS_SRTT]		= "srtt",
	[GFS2_LKS_SRTTVAR]	= "srttvar",
	[GFS2_LKS_SRTTB]	= "srttb",
	[GFS2_LKS_SRTTVARB]	= "srttvarb",
	[GFS2_LKS_SIRT]		= "sirt",
	[GFS2_LKS_SIRTVAR]	= "sirtvar",
	[GFS2_LKS_DCOUNT]	= "dlm",
	[GFS2_LKS_QCOUNT]	= "queue",
};

#define GFS2_NR_SBSTATS (ARRAY_SIZE(gfs2_gltype) * ARRAY_SIZE(gfs2_stype))
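/*
 * The "sbstats" file therefore contains one row per (glock type, stat
 * type) pair and one column per CPU.  Rows are indexed by gi->hash:
 * the upper bits select the glock type from gfs2_gltype[] and the low
 * three bits select the statistic from gfs2_stype[], as decoded in
 * gfs2_sbstats_seq_show() below.  Illustrative layout:
 *
 *	type      cpu:	0	1	...
 *	inode    srtt:	...
 *	inode srttvar:	...
 *	...
 *	journal queue:	...
 */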
"cpu": gfs2_stype[subindex]); 1792 1793 for_each_possible_cpu(i) { 1794 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); 1795 if (index == 0) { 1796 value = i; 1797 } else { 1798 value = lkstats->lkstats[index - 1].stats[subindex]; 1799 } 1800 seq_printf(seq, " %15lld", (long long)value); 1801 } 1802 seq_putc(seq, '\n'); 1803 return 0; 1804 } 1805 1806 int __init gfs2_glock_init(void) 1807 { 1808 unsigned i; 1809 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) { 1810 INIT_HLIST_BL_HEAD(&gl_hash_table[i]); 1811 } 1812 1813 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | 1814 WQ_HIGHPRI | WQ_FREEZABLE, 0); 1815 if (IS_ERR(glock_workqueue)) 1816 return PTR_ERR(glock_workqueue); 1817 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", 1818 WQ_MEM_RECLAIM | WQ_FREEZABLE, 1819 0); 1820 if (IS_ERR(gfs2_delete_workqueue)) { 1821 destroy_workqueue(glock_workqueue); 1822 return PTR_ERR(gfs2_delete_workqueue); 1823 } 1824 1825 register_shrinker(&glock_shrinker); 1826 1827 return 0; 1828 } 1829 1830 void gfs2_glock_exit(void) 1831 { 1832 unregister_shrinker(&glock_shrinker); 1833 destroy_workqueue(glock_workqueue); 1834 destroy_workqueue(gfs2_delete_workqueue); 1835 } 1836 1837 static inline struct gfs2_glock *glock_hash_chain(unsigned hash) 1838 { 1839 return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]), 1840 struct gfs2_glock, gl_list); 1841 } 1842 1843 static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl) 1844 { 1845 return hlist_bl_entry(rcu_dereference(gl->gl_list.next), 1846 struct gfs2_glock, gl_list); 1847 } 1848 1849 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) 1850 { 1851 struct gfs2_glock *gl; 1852 1853 do { 1854 gl = gi->gl; 1855 if (gl) { 1856 gi->gl = glock_hash_next(gl); 1857 } else { 1858 gi->gl = glock_hash_chain(gi->hash); 1859 } 1860 while (gi->gl == NULL) { 1861 gi->hash++; 1862 if (gi->hash >= GFS2_GL_HASH_SIZE) { 1863 rcu_read_unlock(); 1864 return 1; 1865 } 1866 gi->gl = glock_hash_chain(gi->hash); 1867 } 1868 /* Skip entries for other sb and dead entries */ 1869 } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0); 1870 1871 return 0; 1872 } 1873 1874 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) 1875 { 1876 struct gfs2_glock_iter *gi = seq->private; 1877 loff_t n = *pos; 1878 1879 gi->hash = 0; 1880 rcu_read_lock(); 1881 1882 do { 1883 if (gfs2_glock_iter_next(gi)) 1884 return NULL; 1885 } while (n--); 1886 1887 return gi->gl; 1888 } 1889 1890 static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr, 1891 loff_t *pos) 1892 { 1893 struct gfs2_glock_iter *gi = seq->private; 1894 1895 (*pos)++; 1896 1897 if (gfs2_glock_iter_next(gi)) 1898 return NULL; 1899 1900 return gi->gl; 1901 } 1902 1903 static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr) 1904 { 1905 struct gfs2_glock_iter *gi = seq->private; 1906 1907 if (gi->gl) 1908 rcu_read_unlock(); 1909 gi->gl = NULL; 1910 } 1911 1912 static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) 1913 { 1914 return dump_glock(seq, iter_ptr); 1915 } 1916 1917 static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos) 1918 { 1919 struct gfs2_glock_iter *gi = seq->private; 1920 1921 gi->hash = *pos; 1922 if (*pos >= GFS2_NR_SBSTATS) 1923 return NULL; 1924 preempt_disable(); 1925 return SEQ_START_TOKEN; 1926 } 1927 1928 static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr, 1929 loff_t *pos) 1930 { 1931 struct gfs2_glock_iter *gi = 
static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{
	struct gfs2_glock_iter *gi = seq->private;
	(*pos)++;
	gi->hash++;
	/* Preemption is left alone here; ->stop does the matching
	   preempt_enable() for the preempt_disable() in ->start. */
	if (gi->hash >= GFS2_NR_SBSTATS)
		return NULL;
	return SEQ_START_TOKEN;
}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{
	preempt_enable();
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_seq_ops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glock_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_glstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static int gfs2_sbstats_open(struct inode *inode, struct file *file)
{
	int ret = seq_open_private(file, &gfs2_sbstats_seq_ops,
				   sizeof(struct gfs2_glock_iter));
	if (ret == 0) {
		struct seq_file *seq = file->private_data;
		struct gfs2_glock_iter *gi = seq->private;
		gi->sdp = inode->i_private;
	}
	return ret;
}

static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations gfs2_sbstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_sbstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};
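/*
 * With debugfs mounted in the usual place, the files defined above end
 * up under /sys/kernel/debug/gfs2/<table name>/, e.g. (illustrative
 * lock table name):
 *
 *	/sys/kernel/debug/gfs2/mycluster:myfs/glocks
 *	/sys/kernel/debug/gfs2/mycluster:myfs/glstats
 *	/sys/kernel/debug/gfs2/mycluster:myfs/sbstats
 */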
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_glocks_fops);
	if (!sdp->debugfs_dentry_glocks)
		goto fail;

	sdp->debugfs_dentry_glstats = debugfs_create_file("glstats",
							  S_IFREG | S_IRUGO,
							  sdp->debugfs_dir, sdp,
							  &gfs2_glstats_fops);
	if (!sdp->debugfs_dentry_glstats)
		goto fail;

	sdp->debugfs_dentry_sbstats = debugfs_create_file("sbstats",
							  S_IFREG | S_IRUGO,
							  sdp->debugfs_dir, sdp,
							  &gfs2_sbstats_fops);
	if (!sdp->debugfs_dentry_sbstats)
		goto fail;

	return 0;
fail:
	gfs2_delete_debugfs_file(sdp);
	return -ENOMEM;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		if (sdp->debugfs_dentry_glstats) {
			debugfs_remove(sdp->debugfs_dentry_glstats);
			sdp->debugfs_dentry_glstats = NULL;
		}
		if (sdp->debugfs_dentry_sbstats) {
			debugfs_remove(sdp->debugfs_dentry_sbstats);
			sdp->debugfs_dentry_sbstats = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}