/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

struct glock_iter {
	int hash;		/* hash bucket index */
	struct gfs2_sbd *sdp;	/* incore superblock */
	struct gfs2_glock *gl;	/* current glock struct */
	struct seq_file *seq;	/* sequence file for debugfs */
	char string[512];	/* scratch space */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static void run_queue(struct gfs2_glock *gl);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct task_struct *scand_process;
static unsigned int scand_secs = 5;
static struct workqueue_struct *glock_workqueue;

#define GFS2_GL_HASH_SHIFT	15
#define GFS2_GL_HASH_SIZE	(1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK	(GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
static struct dentry *gfs2_root;

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ	512
# else
#  define GL_HASH_LOCK_SZ	256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_test(&gl->gl_ref)) {
		hlist_del(&gl->gl_list);
		write_unlock(gl_lock_addr(gl->gl_hash));
		gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
		gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
		gfs2_assert(sdp, list_empty(&gl->gl_holders));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);
	struct gfs2_glock *gl;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, name);
	read_unlock(gl_lock_addr(hash));

	return gl;
}

static void glock_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
		set_bit(GLF_DEMOTE, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_owner_pid = 0;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int just_schedule(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	set_bit(GLF_LOCK, &gl->gl_flags);
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			gfs2_glock_xmote_th(gh->gh_gl, gh);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	gfs2_holder_wake(gh);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gl->gl_demote_state ||
	    gl->gl_state == LM_ST_UNLOCKED) {
		gfs2_demote_wake(gl);
		return 0;
	}

	set_bit(GLF_LOCK, &gl->gl_flags);
	set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

	if (gl->gl_demote_state == LM_ST_UNLOCKED ||
	    gl->gl_state != LM_ST_EXCLUSIVE) {
		spin_unlock(&gl->gl_spin);
		gfs2_glock_drop_th(gl);
	} else {
		spin_unlock(&gl->gl_spin);
		gfs2_glock_xmote_th(gl, NULL);
	}

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

	return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */
static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);
			blocked = rq_mutex(gh);
		} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
			blocked = rq_demote(gl);
			if (gl->gl_waiters2 && !blocked) {
				set_bit(GLF_DEMOTE, &gl->gl_flags);
				gl->gl_demote_state = LM_ST_UNLOCKED;
			}
			gl->gl_waiters2 = 0;
		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);
			blocked = rq_promote(gh);
		} else
			break;

		if (blocked)
			break;
	}
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		struct gfs2_holder gh;

		gfs2_holder_init(gl, 0, 0, &gh);
		set_bit(HIF_WAIT, &gh.gh_iflags);
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
		spin_unlock(&gl->gl_spin);
		wait_on_holder(&gh);
		gfs2_holder_uninit(&gh);
	} else {
		gl->gl_owner_pid = current->pid;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		spin_unlock(&gl->gl_spin);
	}
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		acquired = 0;
	} else {
		gl->gl_owner_pid = current->pid;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
	}
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	gl->gl_owner_pid = 0;
	gl->gl_ip = 0;
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practise: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    int remote, unsigned long delay)
{
	int bit = delay ?
		GLF_PENDING_DEMOTE : GLF_DEMOTE;

	spin_lock(&gl->gl_spin);
	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
		if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
		    gl->gl_object) {
			gfs2_glock_schedule_for_reclaim(gl);
			spin_unlock(&gl->gl_spin);
			return;
		}
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
			gl->gl_waiters2 = 1;
		else
			gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	spin_unlock(&gl->gl_spin);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, 0);
	}

	/* Deal with each possible exit condition */

	if (!gh) {
		gl->gl_stamp = jiffies;
		if (ret & LM_OUT_CANCELED) {
			op_done = 0;
		} else {
			spin_lock(&gl->gl_spin);
			if (gl->gl_state != gl->gl_demote_state) {
				gl->gl_req_bh = NULL;
				spin_unlock(&gl->gl_spin);
				gfs2_glock_drop_th(gl);
				gfs2_glock_put(gl);
				return;
			}
			gfs2_demote_wake(gl);
			spin_unlock(&gl->gl_spin);
		}
	} else {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
			goto out;
		gh->gh_error = GLR_CANCELED;
		if (ret & LM_OUT_CANCELED)
			goto out;
		if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
			list_add_tail(&gh->gh_list, &gl->gl_holders);
			gh->gh_error = 0;
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			set_bit(HIF_FIRST, &gh->gh_iflags);
			op_done = 0;
			goto out;
		}
		gh->gh_error = GLR_TRYFAILED;
		if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			goto out;
		gh->gh_error = -EINVAL;
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int flags = gh ? gh->gh_flags : 0;
	unsigned state = gh ?
		gh->gh_state : gl->gl_demote_state;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	spin_lock(&gl->gl_spin);
	gfs2_demote_wake(gl);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_on_holder(gh);
	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct pid *pid)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner_pid == pid)
			return gh;
	}

	return NULL;
}

static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (gi) {
		vsprintf(gi->string, fmt, args);
		seq_printf(gi->seq, gi->string);
	}
	else
		vprintk(fmt, args);
	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (!(gh->gh_flags & GL_FLOCK)) {
		existing = find_holder_by_owner(&gl->gl_holders,
						gh->gh_owner_pid);
		if (existing) {
			print_symbol(KERN_WARNING "original: %s\n",
				     existing->gh_ip);
			printk(KERN_INFO "pid : %d\n",
			       pid_nr(existing->gh_owner_pid));
			printk(KERN_INFO "lock type : %d lock state : %d\n",
			       existing->gh_gl->gl_name.ln_type,
			       existing->gh_gl->gl_state);
			print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
			printk(KERN_INFO "pid : %d\n",
			       pid_nr(gh->gh_owner_pid));
			printk(KERN_INFO "lock type : %d lock state : %d\n",
			       gl->gl_name.ln_type, gl->gl_state);
			BUG();
		}

		existing = find_holder_by_owner(&gl->gl_waiters3,
						gh->gh_owner_pid);
		if (existing) {
			print_symbol(KERN_WARNING "original: %s\n",
				     existing->gh_ip);
			print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
			BUG();
		}
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, 0);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		if (glops->go_unlock) {
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
		}
		gl->gl_stamp = jiffies;
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *	    errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *	    errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	holdtime =
		gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (time_before(now, holdtime))
		delay = holdtime - now;

	handle_callback(gl, state, 1, delay);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @sdp: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = cb_data;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		down_read(&gfs2_umount_flush_sem);
		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
		up_read(&gfs2_umount_flush_sem);
		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while(1) {
		if (!sdp || gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	cond_resched();
	return has_entries;
}

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
		return;

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			goto out_schedule;
		gfs2_glmutex_unlock(gl);
	}
	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
		gfs2_glmutex_unlock(gl);
	}
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or
 * when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;
		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
			if (examine_bucket(clear_glock, sdp, x))
				cont = 1;
		}

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		down_write(&gfs2_umount_flush_sem);
		invalidate_inodes(sdp->sd_vfs);
		up_write(&gfs2_umount_flush_sem);
		msleep(10);
	}
}

/*
 * Diagnostic routines to help debug distributed deadlock
 */

static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
			      unsigned long address)
{
	char buffer[KSYM_SYMBOL_LEN];

	sprint_symbol(buffer, address);
	print_dbg(gi, fmt, buffer);
}

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct glock_iter *gi, char *str,
		       struct gfs2_holder *gh)
{
	unsigned int x;
	struct task_struct *gh_owner;

	print_dbg(gi, " %s\n", str);
	if (gh->gh_owner_pid) {
		print_dbg(gi, " owner = %ld ",
			  (long)pid_nr(gh->gh_owner_pid));
		gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
		if (gh_owner)
			print_dbg(gi, "(%s)\n", gh_owner->comm);
		else
			print_dbg(gi, "(ended)\n");
	} else
		print_dbg(gi, " owner = -1\n");
	print_dbg(gi, " gh_state = %u\n", gh->gh_state);
	print_dbg(gi, " gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			print_dbg(gi, " %u", x);
	print_dbg(gi, " \n");
	print_dbg(gi, " error = %d\n", gh->gh_error);
	print_dbg(gi, " gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			print_dbg(gi, " %u", x);
	print_dbg(gi, " \n");
	gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip);

	return 0;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
{
	unsigned int x;

	print_dbg(gi, " Inode:\n");
	print_dbg(gi, " num = %llu/%llu\n",
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr);
	print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode));
	print_dbg(gi, " i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			print_dbg(gi, " %u", x);
	print_dbg(gi, " \n");
	return 0;
}

/**
 * dump_glock - print information about a glock
 * @gl: the glock
 * @count: where we are in the buffer
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;
	struct task_struct *gl_owner;

	spin_lock(&gl->gl_spin);

	print_dbg(gi,
"Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type, 1872 (unsigned long long)gl->gl_name.ln_number); 1873 print_dbg(gi, " gl_flags ="); 1874 for (x = 0; x < 32; x++) { 1875 if (test_bit(x, &gl->gl_flags)) 1876 print_dbg(gi, " %u", x); 1877 } 1878 if (!test_bit(GLF_LOCK, &gl->gl_flags)) 1879 print_dbg(gi, " (unlocked)"); 1880 print_dbg(gi, " \n"); 1881 print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref)); 1882 print_dbg(gi, " gl_state = %u\n", gl->gl_state); 1883 if (gl->gl_owner_pid) { 1884 gl_owner = find_task_by_pid(gl->gl_owner_pid); 1885 if (gl_owner) 1886 print_dbg(gi, " gl_owner = pid %d (%s)\n", 1887 gl->gl_owner_pid, gl_owner->comm); 1888 else 1889 print_dbg(gi, " gl_owner = %d (ended)\n", 1890 gl->gl_owner_pid); 1891 } else 1892 print_dbg(gi, " gl_owner = -1\n"); 1893 print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip); 1894 print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no"); 1895 print_dbg(gi, " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no"); 1896 print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count)); 1897 print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no"); 1898 print_dbg(gi, " reclaim = %s\n", 1899 (list_empty(&gl->gl_reclaim)) ? "no" : "yes"); 1900 if (gl->gl_aspace) 1901 print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace, 1902 gl->gl_aspace->i_mapping->nrpages); 1903 else 1904 print_dbg(gi, " aspace = no\n"); 1905 print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count)); 1906 if (gl->gl_req_gh) { 1907 error = dump_holder(gi, "Request", gl->gl_req_gh); 1908 if (error) 1909 goto out; 1910 } 1911 list_for_each_entry(gh, &gl->gl_holders, gh_list) { 1912 error = dump_holder(gi, "Holder", gh); 1913 if (error) 1914 goto out; 1915 } 1916 list_for_each_entry(gh, &gl->gl_waiters1, gh_list) { 1917 error = dump_holder(gi, "Waiter1", gh); 1918 if (error) 1919 goto out; 1920 } 1921 list_for_each_entry(gh, &gl->gl_waiters3, gh_list) { 1922 error = dump_holder(gi, "Waiter3", gh); 1923 if (error) 1924 goto out; 1925 } 1926 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { 1927 print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n", 1928 gl->gl_demote_state, (unsigned long long) 1929 (jiffies - gl->gl_demote_time)*(1000000/HZ)); 1930 } 1931 if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) { 1932 if (!test_bit(GLF_LOCK, &gl->gl_flags) && 1933 list_empty(&gl->gl_holders)) { 1934 error = dump_inode(gi, gl->gl_object); 1935 if (error) 1936 goto out; 1937 } else { 1938 error = -ENOBUFS; 1939 print_dbg(gi, " Inode: busy\n"); 1940 } 1941 } 1942 1943 error = 0; 1944 1945 out: 1946 spin_unlock(&gl->gl_spin); 1947 return error; 1948 } 1949 1950 /** 1951 * gfs2_dump_lockstate - print out the current lockstate 1952 * @sdp: the filesystem 1953 * @ub: the buffer to copy the information into 1954 * 1955 * If @ub is NULL, dump the lockstate to the console. 
 *
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

		read_lock(gl_lock_addr(x));

		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
			if (gl->gl_sbd != sdp)
				continue;

			error = dump_glock(NULL, gl);
			if (error)
				break;
		}

		read_unlock(gl_lock_addr(x));

		if (error)
			break;
	}

	return error;
}

/**
 * gfs2_scand - Look for cached glocks and inodes to toss from memory
 * @sdp: Pointer to GFS2 superblock
 *
 * One of these daemons runs, finding candidates to add to sd_reclaim_list.
 * See gfs2_glockd()
 */

static int gfs2_scand(void *data)
{
	unsigned x;
	unsigned delay;

	while (!kthread_should_stop()) {
		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
			examine_bucket(scan_glock, NULL, x);
		if (freezing(current))
			refrigerator();
		delay = scand_secs;
		if (delay < 1)
			delay = 1;
		schedule_timeout_interruptible(delay * HZ);
	}

	return 0;
}

int __init gfs2_glock_init(void)
{
	unsigned i;
	for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
	}
#ifdef GL_HASH_LOCK_SZ
	for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
		rwlock_init(&gl_hash_locks[i]);
	}
#endif

	scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
	if (IS_ERR(scand_process))
		return PTR_ERR(scand_process);

	glock_workqueue = create_workqueue("glock_workqueue");
	if (IS_ERR(glock_workqueue)) {
		kthread_stop(scand_process);
		return PTR_ERR(glock_workqueue);
	}

	return 0;
}

void gfs2_glock_exit(void)
{
	destroy_workqueue(glock_workqueue);
	kthread_stop(scand_process);
}

module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");

static int gfs2_glock_iter_next(struct glock_iter *gi)
{
	struct gfs2_glock *gl;

restart:
	read_lock(gl_lock_addr(gi->hash));
	gl = gi->gl;
	if (gl) {
		gi->gl = hlist_entry(gl->gl_list.next,
				     struct gfs2_glock, gl_list);
		if (gi->gl)
			gfs2_glock_hold(gi->gl);
	}
	read_unlock(gl_lock_addr(gi->hash));
	if (gl)
		gfs2_glock_put(gl);
	if (gl && gi->gl == NULL)
		gi->hash++;
	while(gi->gl == NULL) {
		if (gi->hash >= GFS2_GL_HASH_SIZE)
			return 1;
		read_lock(gl_lock_addr(gi->hash));
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
		if (gi->gl)
			gfs2_glock_hold(gi->gl);
		read_unlock(gl_lock_addr(gi->hash));
		gi->hash++;
	}

	if (gi->sdp != gi->gl->gl_sbd)
		goto restart;

	return 0;
}

static void gfs2_glock_iter_free(struct glock_iter *gi)
{
	if (gi->gl)
		gfs2_glock_put(gi->gl);
	kfree(gi);
}

static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
{
	struct glock_iter *gi;

	gi = kmalloc(sizeof (*gi), GFP_KERNEL);
	if (!gi)
		return NULL;

	gi->sdp = sdp;
	gi->hash = 0;
	gi->seq = NULL;
	gi->gl = NULL;
	memset(gi->string, 0, sizeof(gi->string));

	if (gfs2_glock_iter_next(gi)) {
		gfs2_glock_iter_free(gi);
		return NULL;
	}

	return gi;
}

static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
{
	struct glock_iter *gi;
	loff_t n = *pos;

	gi = gfs2_glock_iter_init(file->private);
	if (!gi)
		return NULL;

	while(n--) {
		if (gfs2_glock_iter_next(gi)) {
			gfs2_glock_iter_free(gi);
			return NULL;
		}
	}

	return gi;
}

static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
				 loff_t *pos)
{
	struct glock_iter *gi = iter_ptr;

	(*pos)++;

	if (gfs2_glock_iter_next(gi)) {
		gfs2_glock_iter_free(gi);
		return NULL;
	}

	return gi;
}

static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
{
	struct glock_iter *gi = iter_ptr;
	if (gi)
		gfs2_glock_iter_free(gi);
}

static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
{
	struct glock_iter *gi = iter_ptr;

	gi->seq = file;
	dump_glock(gi, gi->gl);

	return 0;
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int ret;

	ret = seq_open(file, &gfs2_glock_seq_ops);
	if (ret)
		return ret;

	seq = file->private_data;
	seq->private = inode->i_private;

	return 0;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}