/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_inode_fork.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"

/* Set us up with an inode's bmap. */
int
xfs_scrub_setup_inode_bmap(
	struct xfs_scrub_context	*sc,
	struct xfs_inode		*ip)
{
	struct xfs_mount		*mp = sc->mp;
	int				error;

	error = xfs_scrub_get_inode(sc, ip);
	if (error)
		goto out;

	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);

	/*
	 * We don't want any ephemeral data fork updates sitting around
	 * while we inspect block mappings, so wait for directio to finish
	 * and flush dirty data if we have delalloc reservations.
	 */
	if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
	    sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
		inode_dio_wait(VFS_I(sc->ip));
		error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
		if (error)
			goto out;
	}

	/* Got the inode, lock it and we're ready to go. */
	error = xfs_scrub_trans_alloc(sc->sm, mp, &sc->tp);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode */
	return error;
}

/*
 * Inode fork block mapping (BMBT) scrubber.
 * More complex than the others because we have to scrub
 * all the extents regardless of whether or not the fork
 * is in btree format.
 */

struct xfs_scrub_bmap_info {
	struct xfs_scrub_context	*sc;
	xfs_fileoff_t			lastoff;
	bool				is_rt;
	bool				is_shared;
	int				whichfork;
};
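
/*
 * Rough sketch of how the scrub context above is consumed (pseudocode
 * summary only, not the actual implementation):
 *
 *	info.lastoff = 0;
 *	for each extent record irec in the fork:
 *		if (irec.br_startoff < info.lastoff)
 *			flag the fork as corrupt (overlapping/out of order)
 *		info.lastoff = irec.br_startoff + irec.br_blockcount;
 *
 * is_rt selects realtime vs. regular filesystem block number checks, and
 * whichfork is passed through to the corruption reporting helpers.
 */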

/* Scrub a single extent record. */
STATIC int
xfs_scrub_bmap_extent(
	struct xfs_inode		*ip,
	struct xfs_btree_cur		*cur,
	struct xfs_scrub_bmap_info	*info,
	struct xfs_bmbt_irec		*irec)
{
	struct xfs_mount		*mp = info->sc->mp;
	struct xfs_buf			*bp = NULL;
	int				error = 0;

	if (cur)
		xfs_btree_get_block(cur, 0, &bp);

	/*
	 * Check for out-of-order extents.  This record could have come
	 * from the incore list, for which there is no ordering check.
	 */
	if (irec->br_startoff < info->lastoff)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* There should never be a "hole" extent in either extent list. */
	if (irec->br_startblock == HOLESTARTBLOCK)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/*
	 * Check for delalloc extents.  We never iterate the ones in the
	 * in-core extent scan, and we should never see these in the bmbt.
	 */
	if (isnullstartblock(irec->br_startblock))
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* Make sure the extent points to a valid place. */
	if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (info->is_rt &&
	    (!xfs_verify_rtbno(mp, irec->br_startblock) ||
	     !xfs_verify_rtbno(mp, irec->br_startblock +
				irec->br_blockcount - 1)))
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);
	if (!info->is_rt &&
	    (!xfs_verify_fsbno(mp, irec->br_startblock) ||
	     !xfs_verify_fsbno(mp, irec->br_startblock +
				irec->br_blockcount - 1)))
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	/* We don't allow unwritten extents on attr forks. */
	if (irec->br_state == XFS_EXT_UNWRITTEN &&
	    info->whichfork == XFS_ATTR_FORK)
		xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
				irec->br_startoff);

	info->lastoff = irec->br_startoff + irec->br_blockcount;
	return error;
}

/* Scrub a bmbt record. */
STATIC int
xfs_scrub_bmapbt_rec(
	struct xfs_scrub_btree		*bs,
	union xfs_btree_rec		*rec)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_scrub_bmap_info	*info = bs->private;
	struct xfs_inode		*ip = bs->cur->bc_private.b.ip;
	struct xfs_buf			*bp = NULL;
	struct xfs_btree_block		*block;
	uint64_t			owner;
	int				i;

	/*
	 * Check the owners of the btree blocks up to the level below
	 * the root since the verifiers don't do that.
	 */
	if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
	    bs->cur->bc_ptrs[0] == 1) {
		for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
			block = xfs_btree_get_block(bs->cur, i, &bp);
			owner = be64_to_cpu(block->bb_u.l.bb_owner);
			if (owner != ip->i_ino)
				xfs_scrub_fblock_set_corrupt(bs->sc,
						info->whichfork, 0);
		}
	}

	/* Set up the in-core record and scrub it. */
	xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
	return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec);
}

/* Scan the btree records. */
STATIC int
xfs_scrub_bmap_btree(
	struct xfs_scrub_context	*sc,
	int				whichfork,
	struct xfs_scrub_bmap_info	*info)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_inode		*ip = sc->ip;
	struct xfs_btree_cur		*cur;
	int				error;

	cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	error = xfs_scrub_btree(sc, cur, xfs_scrub_bmapbt_rec, &oinfo, info);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR :
					  XFS_BTREE_NOERROR);
	return error;
}
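
/*
 * Note on the btree walk above: xfs_scrub_btree() (scrub/btree.c) does
 * the heavy lifting -- it visits the blocks of the bmbt, sanity checks
 * the block headers, keys and sibling pointers, and calls
 * xfs_scrub_bmapbt_rec() once for each leaf record, loosely:
 *
 *	for each bmbt leaf block, in key order:
 *		for each record in the block:
 *			xfs_scrub_bmapbt_rec(bs, rec);
 *
 * so every on-disk mapping ends up in xfs_scrub_bmap_extent().
 */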

/*
 * Scrub an inode fork's block mappings.
 *
 * First we scan every record in every btree block, if applicable.
 * Then we unconditionally scan the incore extent cache.
 */
STATIC int
xfs_scrub_bmap(
	struct xfs_scrub_context	*sc,
	int				whichfork)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_scrub_bmap_info	info = { NULL };
	struct xfs_mount		*mp = sc->mp;
	struct xfs_inode		*ip = sc->ip;
	struct xfs_ifork		*ifp;
	xfs_fileoff_t			endoff;
	struct xfs_iext_cursor		icur;
	bool				found;
	int				error = 0;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
	info.whichfork = whichfork;
	info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
	info.sc = sc;

	switch (whichfork) {
	case XFS_COW_FORK:
		/* Non-existent CoW forks are ignorable. */
		if (!ifp)
			goto out;
		/* No CoW forks on non-reflink inodes/filesystems. */
		if (!xfs_is_reflink_inode(ip)) {
			xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino, NULL);
			goto out;
		}
		break;
	case XFS_ATTR_FORK:
		if (!ifp)
			goto out;
		if (!xfs_sb_version_hasattr(&mp->m_sb) &&
		    !xfs_sb_version_hasattr2(&mp->m_sb))
			xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino, NULL);
		break;
	default:
		ASSERT(whichfork == XFS_DATA_FORK);
		break;
	}

	/* Check the fork values */
	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_UUID:
	case XFS_DINODE_FMT_DEV:
	case XFS_DINODE_FMT_LOCAL:
		/* No mappings to check. */
		goto out;
	case XFS_DINODE_FMT_EXTENTS:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}
		break;
	case XFS_DINODE_FMT_BTREE:
		if (whichfork == XFS_COW_FORK) {
			xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
			goto out;
		}

		error = xfs_scrub_bmap_btree(sc, whichfork, &info);
		if (error)
			goto out;
		break;
	default:
		xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
		goto out;
	}

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Now try to scrub the in-memory extent list. */
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(sc->tp, ip, whichfork);
		if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
			goto out;
	}

	/* Find the offset of the last extent in the mapping. */
	error = xfs_bmap_last_offset(ip, &endoff, whichfork);
	if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
		goto out;

	/* Scrub extent records. */
	info.lastoff = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	for (found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &irec);
	     found != 0;
	     found = xfs_iext_next_extent(ifp, &icur, &irec)) {
		if (xfs_scrub_should_terminate(sc, &error))
			break;
		if (isnullstartblock(irec.br_startblock))
			continue;
		if (irec.br_startoff >= endoff) {
			xfs_scrub_fblock_set_corrupt(sc, whichfork,
					irec.br_startoff);
			goto out;
		}
		error = xfs_scrub_bmap_extent(ip, NULL, &info, &irec);
		if (error)
			goto out;
	}

out:
	return error;
}

/* Scrub an inode's data fork. */
int
xfs_scrub_bmap_data(
	struct xfs_scrub_context	*sc)
{
	return xfs_scrub_bmap(sc, XFS_DATA_FORK);
}

/* Scrub an inode's attr fork. */
int
xfs_scrub_bmap_attr(
	struct xfs_scrub_context	*sc)
{
	return xfs_scrub_bmap(sc, XFS_ATTR_FORK);
}

/* Scrub an inode's CoW fork. */
int
xfs_scrub_bmap_cow(
	struct xfs_scrub_context	*sc)
{
	if (!xfs_is_reflink_inode(sc->ip))
		return -ENOENT;

	return xfs_scrub_bmap(sc, XFS_COW_FORK);
}
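
/*
 * For reference, these scrubbers are reached from userspace via the
 * XFS_IOC_SCRUB_METADATA ioctl (the interface the xfs_scrub tool uses).
 * A minimal sketch, assuming the xfsprogs headers export the scrub
 * structures and that leaving sm_ino at zero targets the inode backing
 * the fd:
 *
 *	struct xfs_scrub_metadata sm = {
 *		.sm_type = XFS_SCRUB_TYPE_BMBTD,  (data fork mappings)
 *	};
 *	int fd = open("/mnt/somefile", O_RDONLY);
 *
 *	if (ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm) == 0 &&
 *	    (sm.sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
 *		fprintf(stderr, "data fork mappings are corrupt\n");
 *
 * XFS_SCRUB_TYPE_BMBTA and XFS_SCRUB_TYPE_BMBTC select the attr and CoW
 * fork scrubbers above in the same way.
 */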