1 /* 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This copyrighted material is made available to anyone wishing to use, 6 * modify, copy, or redistribute it subject to the terms and conditions 7 * of the GNU General Public License version 2. 8 */ 9 10 #include <linux/spinlock.h> 11 #include <linux/completion.h> 12 #include <linux/buffer_head.h> 13 #include <linux/gfs2_ondisk.h> 14 #include <linux/bio.h> 15 #include <linux/posix_acl.h> 16 17 #include "gfs2.h" 18 #include "incore.h" 19 #include "bmap.h" 20 #include "glock.h" 21 #include "glops.h" 22 #include "inode.h" 23 #include "log.h" 24 #include "meta_io.h" 25 #include "recovery.h" 26 #include "rgrp.h" 27 #include "util.h" 28 #include "trans.h" 29 #include "dir.h" 30 31 /** 32 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL 33 * @gl: the glock 34 * 35 * None of the buffers should be dirty, locked, or pinned. 36 */ 37 38 static void __gfs2_ail_flush(struct gfs2_glock *gl) 39 { 40 struct gfs2_sbd *sdp = gl->gl_sbd; 41 struct list_head *head = &gl->gl_ail_list; 42 struct gfs2_bufdata *bd; 43 struct buffer_head *bh; 44 45 spin_lock(&sdp->sd_ail_lock); 46 while (!list_empty(head)) { 47 bd = list_entry(head->next, struct gfs2_bufdata, 48 bd_ail_gl_list); 49 bh = bd->bd_bh; 50 gfs2_remove_from_ail(bd); 51 bd->bd_bh = NULL; 52 bh->b_private = NULL; 53 spin_unlock(&sdp->sd_ail_lock); 54 55 bd->bd_blkno = bh->b_blocknr; 56 gfs2_log_lock(sdp); 57 gfs2_assert_withdraw(sdp, !buffer_busy(bh)); 58 gfs2_trans_add_revoke(sdp, bd); 59 gfs2_log_unlock(sdp); 60 61 spin_lock(&sdp->sd_ail_lock); 62 } 63 gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count)); 64 spin_unlock(&sdp->sd_ail_lock); 65 } 66 67 68 static void gfs2_ail_empty_gl(struct gfs2_glock *gl) 69 { 70 struct gfs2_sbd *sdp = gl->gl_sbd; 71 struct gfs2_trans tr; 72 73 memset(&tr, 0, sizeof(tr)); 74 tr.tr_revokes = atomic_read(&gl->gl_ail_count); 75 76 if (!tr.tr_revokes) 77 return; 78 79 /* A shortened, inline version of gfs2_trans_begin() */ 80 tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64)); 81 tr.tr_ip = (unsigned long)__builtin_return_address(0); 82 INIT_LIST_HEAD(&tr.tr_list_buf); 83 gfs2_log_reserve(sdp, tr.tr_reserved); 84 BUG_ON(current->journal_info); 85 current->journal_info = &tr; 86 87 __gfs2_ail_flush(gl); 88 89 gfs2_trans_end(sdp); 90 gfs2_log_flush(sdp, NULL); 91 } 92 93 void gfs2_ail_flush(struct gfs2_glock *gl) 94 { 95 struct gfs2_sbd *sdp = gl->gl_sbd; 96 unsigned int revokes = atomic_read(&gl->gl_ail_count); 97 int ret; 98 99 if (!revokes) 100 return; 101 102 ret = gfs2_trans_begin(sdp, 0, revokes); 103 if (ret) 104 return; 105 __gfs2_ail_flush(gl); 106 gfs2_trans_end(sdp); 107 gfs2_log_flush(sdp, NULL); 108 } 109 110 /** 111 * rgrp_go_sync - sync out the metadata for this glock 112 * @gl: the glock 113 * 114 * Called when demoting or unlocking an EX glock. We must flush 115 * to disk all dirty buffers/pages relating to this glock, and must not 116 * not return to caller to demote/unlock the glock until I/O is complete. 117 */ 118 119 static void rgrp_go_sync(struct gfs2_glock *gl) 120 { 121 struct address_space *metamapping = gfs2_glock2aspace(gl); 122 int error; 123 124 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) 125 return; 126 BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE); 127 128 gfs2_log_flush(gl->gl_sbd, gl); 129 filemap_fdatawrite(metamapping); 130 error = filemap_fdatawait(metamapping); 131 mapping_set_error(metamapping, error); 132 gfs2_ail_empty_gl(gl); 133 } 134 135 /** 136 * rgrp_go_inval - invalidate the metadata for this glock 137 * @gl: the glock 138 * @flags: 139 * 140 * We never used LM_ST_DEFERRED with resource groups, so that we 141 * should always see the metadata flag set here. 142 * 143 */ 144 145 static void rgrp_go_inval(struct gfs2_glock *gl, int flags) 146 { 147 struct address_space *mapping = gfs2_glock2aspace(gl); 148 149 BUG_ON(!(flags & DIO_METADATA)); 150 gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); 151 truncate_inode_pages(mapping, 0); 152 153 if (gl->gl_object) { 154 struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object; 155 rgd->rd_flags &= ~GFS2_RDF_UPTODATE; 156 } 157 } 158 159 /** 160 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock 161 * @gl: the glock protecting the inode 162 * 163 */ 164 165 static void inode_go_sync(struct gfs2_glock *gl) 166 { 167 struct gfs2_inode *ip = gl->gl_object; 168 struct address_space *metamapping = gfs2_glock2aspace(gl); 169 int error; 170 171 if (ip && !S_ISREG(ip->i_inode.i_mode)) 172 ip = NULL; 173 if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) 174 unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0); 175 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) 176 return; 177 178 BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE); 179 180 gfs2_log_flush(gl->gl_sbd, gl); 181 filemap_fdatawrite(metamapping); 182 if (ip) { 183 struct address_space *mapping = ip->i_inode.i_mapping; 184 filemap_fdatawrite(mapping); 185 error = filemap_fdatawait(mapping); 186 mapping_set_error(mapping, error); 187 } 188 error = filemap_fdatawait(metamapping); 189 mapping_set_error(metamapping, error); 190 gfs2_ail_empty_gl(gl); 191 /* 192 * Writeback of the data mapping may cause the dirty flag to be set 193 * so we have to clear it again here. 194 */ 195 smp_mb__before_clear_bit(); 196 clear_bit(GLF_DIRTY, &gl->gl_flags); 197 } 198 199 /** 200 * inode_go_inval - prepare a inode glock to be released 201 * @gl: the glock 202 * @flags: 203 * 204 * Normally we invlidate everything, but if we are moving into 205 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we 206 * can keep hold of the metadata, since it won't have changed. 207 * 208 */ 209 210 static void inode_go_inval(struct gfs2_glock *gl, int flags) 211 { 212 struct gfs2_inode *ip = gl->gl_object; 213 214 gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count)); 215 216 if (flags & DIO_METADATA) { 217 struct address_space *mapping = gfs2_glock2aspace(gl); 218 truncate_inode_pages(mapping, 0); 219 if (ip) { 220 set_bit(GIF_INVALID, &ip->i_flags); 221 forget_all_cached_acls(&ip->i_inode); 222 gfs2_dir_hash_inval(ip); 223 } 224 } 225 226 if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) { 227 gfs2_log_flush(gl->gl_sbd, NULL); 228 gl->gl_sbd->sd_rindex_uptodate = 0; 229 } 230 if (ip && S_ISREG(ip->i_inode.i_mode)) 231 truncate_inode_pages(ip->i_inode.i_mapping, 0); 232 } 233 234 /** 235 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock 236 * @gl: the glock 237 * 238 * Returns: 1 if it's ok 239 */ 240 241 static int inode_go_demote_ok(const struct gfs2_glock *gl) 242 { 243 struct gfs2_sbd *sdp = gl->gl_sbd; 244 struct gfs2_holder *gh; 245 246 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) 247 return 0; 248 249 if (!list_empty(&gl->gl_holders)) { 250 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); 251 if (gh->gh_list.next != &gl->gl_holders) 252 return 0; 253 } 254 255 return 1; 256 } 257 258 /** 259 * gfs2_set_nlink - Set the inode's link count based on on-disk info 260 * @inode: The inode in question 261 * @nlink: The link count 262 * 263 * If the link count has hit zero, it must never be raised, whatever the 264 * on-disk inode might say. When new struct inodes are created the link 265 * count is set to 1, so that we can safely use this test even when reading 266 * in on disk information for the first time. 267 */ 268 269 static void gfs2_set_nlink(struct inode *inode, u32 nlink) 270 { 271 /* 272 * We will need to review setting the nlink count here in the 273 * light of the forthcoming ro bind mount work. This is a reminder 274 * to do that. 275 */ 276 if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) { 277 if (nlink == 0) 278 clear_nlink(inode); 279 else 280 inode->i_nlink = nlink; 281 } 282 } 283 284 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) 285 { 286 const struct gfs2_dinode *str = buf; 287 struct timespec atime; 288 u16 height, depth; 289 290 if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) 291 goto corrupt; 292 ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino); 293 ip->i_inode.i_mode = be32_to_cpu(str->di_mode); 294 ip->i_inode.i_rdev = 0; 295 switch (ip->i_inode.i_mode & S_IFMT) { 296 case S_IFBLK: 297 case S_IFCHR: 298 ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major), 299 be32_to_cpu(str->di_minor)); 300 break; 301 }; 302 303 ip->i_inode.i_uid = be32_to_cpu(str->di_uid); 304 ip->i_inode.i_gid = be32_to_cpu(str->di_gid); 305 gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink)); 306 i_size_write(&ip->i_inode, be64_to_cpu(str->di_size)); 307 gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks)); 308 atime.tv_sec = be64_to_cpu(str->di_atime); 309 atime.tv_nsec = be32_to_cpu(str->di_atime_nsec); 310 if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0) 311 ip->i_inode.i_atime = atime; 312 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime); 313 ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec); 314 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime); 315 ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec); 316 317 ip->i_goal = be64_to_cpu(str->di_goal_meta); 318 ip->i_generation = be64_to_cpu(str->di_generation); 319 320 ip->i_diskflags = be32_to_cpu(str->di_flags); 321 ip->i_eattr = be64_to_cpu(str->di_eattr); 322 /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */ 323 gfs2_set_inode_flags(&ip->i_inode); 324 height = be16_to_cpu(str->di_height); 325 if (unlikely(height > GFS2_MAX_META_HEIGHT)) 326 goto corrupt; 327 ip->i_height = (u8)height; 328 329 depth = be16_to_cpu(str->di_depth); 330 if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) 331 goto corrupt; 332 ip->i_depth = (u8)depth; 333 ip->i_entries = be32_to_cpu(str->di_entries); 334 335 if (S_ISREG(ip->i_inode.i_mode)) 336 gfs2_set_aops(&ip->i_inode); 337 338 return 0; 339 corrupt: 340 gfs2_consist_inode(ip); 341 return -EIO; 342 } 343 344 /** 345 * gfs2_inode_refresh - Refresh the incore copy of the dinode 346 * @ip: The GFS2 inode 347 * 348 * Returns: errno 349 */ 350 351 int gfs2_inode_refresh(struct gfs2_inode *ip) 352 { 353 struct buffer_head *dibh; 354 int error; 355 356 error = gfs2_meta_inode_buffer(ip, &dibh); 357 if (error) 358 return error; 359 360 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) { 361 brelse(dibh); 362 return -EIO; 363 } 364 365 error = gfs2_dinode_in(ip, dibh->b_data); 366 brelse(dibh); 367 clear_bit(GIF_INVALID, &ip->i_flags); 368 369 return error; 370 } 371 372 /** 373 * inode_go_lock - operation done after an inode lock is locked by a process 374 * @gl: the glock 375 * @flags: 376 * 377 * Returns: errno 378 */ 379 380 static int inode_go_lock(struct gfs2_holder *gh) 381 { 382 struct gfs2_glock *gl = gh->gh_gl; 383 struct gfs2_sbd *sdp = gl->gl_sbd; 384 struct gfs2_inode *ip = gl->gl_object; 385 int error = 0; 386 387 if (!ip || (gh->gh_flags & GL_SKIP)) 388 return 0; 389 390 if (test_bit(GIF_INVALID, &ip->i_flags)) { 391 error = gfs2_inode_refresh(ip); 392 if (error) 393 return error; 394 } 395 396 if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) && 397 (gl->gl_state == LM_ST_EXCLUSIVE) && 398 (gh->gh_state == LM_ST_EXCLUSIVE)) { 399 spin_lock(&sdp->sd_trunc_lock); 400 if (list_empty(&ip->i_trunc_list)) 401 list_add(&sdp->sd_trunc_list, &ip->i_trunc_list); 402 spin_unlock(&sdp->sd_trunc_lock); 403 wake_up(&sdp->sd_quota_wait); 404 return 1; 405 } 406 407 return error; 408 } 409 410 /** 411 * inode_go_dump - print information about an inode 412 * @seq: The iterator 413 * @ip: the inode 414 * 415 * Returns: 0 on success, -ENOBUFS when we run out of space 416 */ 417 418 static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl) 419 { 420 const struct gfs2_inode *ip = gl->gl_object; 421 if (ip == NULL) 422 return 0; 423 gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n", 424 (unsigned long long)ip->i_no_formal_ino, 425 (unsigned long long)ip->i_no_addr, 426 IF2DT(ip->i_inode.i_mode), ip->i_flags, 427 (unsigned int)ip->i_diskflags, 428 (unsigned long long)i_size_read(&ip->i_inode)); 429 return 0; 430 } 431 432 /** 433 * rgrp_go_lock - operation done after an rgrp lock is locked by 434 * a first holder on this node. 435 * @gl: the glock 436 * @flags: 437 * 438 * Returns: errno 439 */ 440 441 static int rgrp_go_lock(struct gfs2_holder *gh) 442 { 443 return gfs2_rgrp_bh_get(gh->gh_gl->gl_object); 444 } 445 446 /** 447 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by 448 * a last holder on this node. 449 * @gl: the glock 450 * @flags: 451 * 452 */ 453 454 static void rgrp_go_unlock(struct gfs2_holder *gh) 455 { 456 gfs2_rgrp_bh_put(gh->gh_gl->gl_object); 457 } 458 459 /** 460 * trans_go_sync - promote/demote the transaction glock 461 * @gl: the glock 462 * @state: the requested state 463 * @flags: 464 * 465 */ 466 467 static void trans_go_sync(struct gfs2_glock *gl) 468 { 469 struct gfs2_sbd *sdp = gl->gl_sbd; 470 471 if (gl->gl_state != LM_ST_UNLOCKED && 472 test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { 473 gfs2_meta_syncfs(sdp); 474 gfs2_log_shutdown(sdp); 475 } 476 } 477 478 /** 479 * trans_go_xmote_bh - After promoting/demoting the transaction glock 480 * @gl: the glock 481 * 482 */ 483 484 static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) 485 { 486 struct gfs2_sbd *sdp = gl->gl_sbd; 487 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); 488 struct gfs2_glock *j_gl = ip->i_gl; 489 struct gfs2_log_header_host head; 490 int error; 491 492 if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { 493 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); 494 495 error = gfs2_find_jhead(sdp->sd_jdesc, &head); 496 if (error) 497 gfs2_consist(sdp); 498 if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) 499 gfs2_consist(sdp); 500 501 /* Initialize some head of the log stuff */ 502 if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) { 503 sdp->sd_log_sequence = head.lh_sequence + 1; 504 gfs2_log_pointers_init(sdp, head.lh_blkno); 505 } 506 } 507 return 0; 508 } 509 510 /** 511 * trans_go_demote_ok 512 * @gl: the glock 513 * 514 * Always returns 0 515 */ 516 517 static int trans_go_demote_ok(const struct gfs2_glock *gl) 518 { 519 return 0; 520 } 521 522 /** 523 * iopen_go_callback - schedule the dcache entry for the inode to be deleted 524 * @gl: the glock 525 * 526 * gl_spin lock is held while calling this 527 */ 528 static void iopen_go_callback(struct gfs2_glock *gl) 529 { 530 struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object; 531 struct gfs2_sbd *sdp = gl->gl_sbd; 532 533 if (sdp->sd_vfs->s_flags & MS_RDONLY) 534 return; 535 536 if (gl->gl_demote_state == LM_ST_UNLOCKED && 537 gl->gl_state == LM_ST_SHARED && ip) { 538 gfs2_glock_hold(gl); 539 if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0) 540 gfs2_glock_put_nolock(gl); 541 } 542 } 543 544 const struct gfs2_glock_operations gfs2_meta_glops = { 545 .go_type = LM_TYPE_META, 546 }; 547 548 const struct gfs2_glock_operations gfs2_inode_glops = { 549 .go_xmote_th = inode_go_sync, 550 .go_inval = inode_go_inval, 551 .go_demote_ok = inode_go_demote_ok, 552 .go_lock = inode_go_lock, 553 .go_dump = inode_go_dump, 554 .go_type = LM_TYPE_INODE, 555 .go_flags = GLOF_ASPACE, 556 }; 557 558 const struct gfs2_glock_operations gfs2_rgrp_glops = { 559 .go_xmote_th = rgrp_go_sync, 560 .go_inval = rgrp_go_inval, 561 .go_lock = rgrp_go_lock, 562 .go_unlock = rgrp_go_unlock, 563 .go_dump = gfs2_rgrp_dump, 564 .go_type = LM_TYPE_RGRP, 565 .go_flags = GLOF_ASPACE, 566 }; 567 568 const struct gfs2_glock_operations gfs2_trans_glops = { 569 .go_xmote_th = trans_go_sync, 570 .go_xmote_bh = trans_go_xmote_bh, 571 .go_demote_ok = trans_go_demote_ok, 572 .go_type = LM_TYPE_NONDISK, 573 }; 574 575 const struct gfs2_glock_operations gfs2_iopen_glops = { 576 .go_type = LM_TYPE_IOPEN, 577 .go_callback = iopen_go_callback, 578 }; 579 580 const struct gfs2_glock_operations gfs2_flock_glops = { 581 .go_type = LM_TYPE_FLOCK, 582 }; 583 584 const struct gfs2_glock_operations gfs2_nondisk_glops = { 585 .go_type = LM_TYPE_NONDISK, 586 }; 587 588 const struct gfs2_glock_operations gfs2_quota_glops = { 589 .go_type = LM_TYPE_QUOTA, 590 }; 591 592 const struct gfs2_glock_operations gfs2_journal_glops = { 593 .go_type = LM_TYPE_JOURNAL, 594 }; 595 596 const struct gfs2_glock_operations *gfs2_glops_list[] = { 597 [LM_TYPE_META] = &gfs2_meta_glops, 598 [LM_TYPE_INODE] = &gfs2_inode_glops, 599 [LM_TYPE_RGRP] = &gfs2_rgrp_glops, 600 [LM_TYPE_IOPEN] = &gfs2_iopen_glops, 601 [LM_TYPE_FLOCK] = &gfs2_flock_glops, 602 [LM_TYPE_NONDISK] = &gfs2_nondisk_glops, 603 [LM_TYPE_QUOTA] = &gfs2_quota_glops, 604 [LM_TYPE_JOURNAL] = &gfs2_journal_glops, 605 }; 606 607