/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dir2.h"

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
				i, (long long)bp->b_bn);
		}
	}
}
#endif

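/*
 * Check that the on-disk inode version is valid for this filesystem:
 * v5 (CRC-enabled) superblocks require v3 inodes, while older
 * superblocks accept v1 or v2 inodes.
 */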
bool
xfs_dinode_good_version(
	struct xfs_mount *mp,
	__u8		version)
{
	if (xfs_sb_version_hascrc(&mp->m_sb))
		return version == 3;

	return version == 1 || version == 2;
}

/*
 * If we are doing readahead on an inode buffer, we might be in log recovery
 * reading an inode allocation buffer that hasn't yet been replayed, and hence
 * has not had the inode cores stamped into it. Hence for readahead, the buffer
 * may be potentially invalid.
 *
 * If the readahead buffer is invalid, we need to mark it with an error and
 * clear the DONE status of the buffer so that a followup read will re-read it
 * from disk. We don't report the error otherwise to avoid warnings during log
 * recovery and we don't get unnecessary panics on debug kernels. We use EIO
 * here because all we want to do is say readahead failed; there is no-one to
 * report the error to, so this will distinguish it from a non-ra verifier
 * failure. Changes to this readahead error behaviour also need to be
 * reflected in xfs_dquot_buf_readahead_verify().
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	int		i;
	int		ni;

	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			xfs_dinode_good_version(mp, dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
					    XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}

			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			xfs_verifier_error(bp);
#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
		}
	}
	xfs_inobp_check(mp, bp);
}

static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}

static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}

const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	.name = "xfs_inode_ra",
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};

/*
 * This routine is called to map an inode to the buffer containing the on-disk
 * version of the inode. It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
 * pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and dipp are
 * undefined.
 */
int
xfs_imap_to_bp(
	struct xfs_mount *mp,
	struct xfs_trans *tp,
	struct xfs_imap	*imap,
	struct xfs_dinode **dipp,
	struct xfs_buf	**bpp,
	uint		buf_flags,
	uint		iget_flags)
{
	struct xfs_buf	*bp;
	int		error;

	buf_flags |= XBF_UNMAPPED;
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp,
				   &xfs_inode_buf_ops);
	if (error) {
		if (error == -EAGAIN) {
			ASSERT(buf_flags & XBF_TRYLOCK);
			return error;
		}

		if (error == -EFSCORRUPTED &&
		    (iget_flags & XFS_IGET_UNTRUSTED))
			return -EINVAL;

		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
		return error;
	}

	*bpp = bp;
	*dipp = xfs_buf_offset(bp, imap->im_boffset);
	return 0;
}

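/*
 * Copy the on-disk inode core into the in-core inode. The VFS inode carries
 * the link count, mode, timestamps, generation and (for v3 inodes) the
 * change count; the remaining fields are copied into the xfs_icdinode.
 */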
void
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct xfs_icdinode	*to = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 */
	to->di_version = from->di_version;
	if (to->di_version == 1) {
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		to->di_projid_lo = 0;
		to->di_projid_hi = 0;
		to->di_version = 2;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
		to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	}

	to->di_format = from->di_format;
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_flushiter = be16_to_cpu(from->di_flushiter);

	/*
	 * Time is signed, so need to convert to signed 32 bit before
	 * storing in inode timestamp which may be 64 bit. Otherwise
	 * a time before epoch is converted to a time long after epoch
	 * on 64 bit systems.
	 */
	inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
	inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
	inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
	inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
	inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
	inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);

	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);

	if (to->di_version == 3) {
		inode->i_version = be64_to_cpu(from->di_changecount);
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
	}
}

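/*
 * Copy the in-core inode back into big-endian on-disk format. For v3 inodes
 * this also stamps the inode number, LSN and metadata UUID into the core and
 * zeroes di_flushiter.
 */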
void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode		*inode = VFS_I(ip);

	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
	to->di_onlink = 0;

	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);

	memset(to->di_pad, 0, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
	to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
	to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
	to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(inode->i_version);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

/*
 * Convert a logged inode core (struct xfs_log_dinode) back into the on-disk,
 * big-endian inode format.
 */
void
xfs_log_dinode_to_disk(
	struct xfs_log_dinode	*from,
	struct xfs_dinode	*to)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = 0;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));

	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);

	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);

	if (from->di_version == 3) {
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}

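/*
 * Sanity check an on-disk inode. All inodes get basic magic, size and mode
 * checks; v3 inodes are additionally checked for a valid CRC, inode number,
 * metadata UUID and flag combinations.
 */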
bool
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	uint16_t		mode;
	uint16_t		flags;
	uint64_t		flags2;

	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return false;

	/* don't allow invalid i_size */
	if (be64_to_cpu(dip->di_size) & (1ULL << 63))
		return false;

	mode = be16_to_cpu(dip->di_mode);
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return false;

	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
		return false;

	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return true;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return false;
	if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
			      XFS_DINODE_CRC_OFF))
		return false;
	if (be64_to_cpu(dip->di_ino) != ino)
		return false;
	if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
		return false;

	flags = be16_to_cpu(dip->di_flags);
	flags2 = be64_to_cpu(dip->di_flags2);

	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	    !xfs_sb_version_hasreflink(&mp->m_sb))
		return false;

	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return false;

	/* don't let reflink and dax mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
		return false;

	return true;
}

/*
 * Recalculate the CRC of a v3 on-disk inode and store it in di_crc. Earlier
 * inode versions carry no CRC, so this is a no-op for them.
 */
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	uint32_t		crc;

	if (dip->di_version < 3)
		return;

	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
				     XFS_DINODE_CRC_OFF);
	dip->di_crc = xfs_end_cksum(crc);
}

/*
 * Read the disk inode attributes into the in-core inode structure.
 *
 * For version 5 superblocks, if we are initialising a new inode and we are not
 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
 * inode core with a random generation number. If we are keeping inodes around,
 * we need to read the inode cluster to get the existing generation number off
 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
 * format) then log recovery is dependent on the di_flushiter field being
 * initialised from the current on-disk value and hence we must also read the
 * inode off disk.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	int		error;

	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;

	/* shortcut IO on inode allocation if possible */
	if ((iget_flags & XFS_IGET_CREATE) &&
	    xfs_sb_version_hascrc(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		/* initialise the on-disk inode core */
		memset(&ip->i_d, 0, sizeof(ip->i_d));
		VFS_I(ip)->i_generation = prandom_u32();
		if (xfs_sb_version_hascrc(&mp->m_sb))
			ip->i_d.di_version = 3;
		else
			ip->i_d.di_version = 2;
		return 0;
	}

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;

	/* even unallocated inodes are verified */
	if (!xfs_dinode_verify(mp, ip->i_ino, dip)) {
		xfs_alert(mp, "%s: validation failed for inode %lld",
				__func__, ip->i_ino);

		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip);
		error = -EFSCORRUPTED;
		goto out_brelse;
	}

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat_fork() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_inode_from_disk(ip, dip);
		error = xfs_iformat_fork(ip, dip);
		if (error) {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat_fork() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		/*
		 * Partial initialisation of the in-core inode. Just the bits
		 * that xfs_ialloc won't overwrite or relies on being correct.
		 */
		ip->i_d.di_version = dip->di_version;
		VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);

		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		VFS_I(ip)->i_mode = 0;
	}

	ASSERT(ip->i_d.di_version >= 2);
	ip->i_delayed_blks = 0;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while. This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
	 * inode, because it was acquired with xfs_trans_read_buf() in
	 * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
	 * brelse(). If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction. It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be locking
	 * the new in-core inode before putting it in the cache where other
	 * processes can find it. Thus we don't have to worry about the inode
	 * being changed just because we released the buffer.
	 */
out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}