// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_bmap_item.h"
#include "xfs_log.h"
#include "xfs_bmap.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"

kmem_zone_t	*xfs_bui_zone;
kmem_zone_t	*xfs_bud_zone;

static inline struct xfs_bui_log_item *BUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bui_log_item, bui_item);
}

void
xfs_bui_item_free(
	struct xfs_bui_log_item	*buip)
{
	kmem_zone_free(xfs_bui_zone, buip);
}

/*
 * Freeing the BUI requires that we remove it from the AIL if it has already
 * been placed there. However, the BUI may not yet have been placed in the AIL
 * when called by xfs_bui_release() from BUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the BUI.
 */
void
xfs_bui_release(
	struct xfs_bui_log_item	*buip)
{
	ASSERT(atomic_read(&buip->bui_refcount) > 0);
	if (atomic_dec_and_test(&buip->bui_refcount)) {
		xfs_trans_ail_remove(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_bui_item_free(buip);
	}
}

STATIC void
xfs_bui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given bui log item. We use only 1 iovec, and we point that
 * at the bui_log_format structure embedded in the bui item.
 * It is at this point that we assert that all of the extent
 * slots in the bui item have been filled.
 */
STATIC void
xfs_bui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&buip->bui_next_extent) ==
			buip->bui_format.bui_nextents);

	buip->bui_format.bui_type = XFS_LI_BUI;
	buip->bui_format.bui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUI_FORMAT, &buip->bui_format,
			xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents));
}

/*
 * The unpin operation is the last place a BUI is manipulated in the log. It
 * is either inserted in the AIL or aborted in the event of a log I/O error.
 * In either case, the BUI transaction has been successfully committed to make
 * it this far. Therefore, we expect whoever committed the BUI to either
 * construct and commit the BUD or drop the BUD's reference in the event of
 * error. Simply drop the log's BUI reference now that the log is done with
 * it.
 */
STATIC void
xfs_bui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);

	xfs_bui_release(buip);
}
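
/*
 * For illustration (a sketch of the lifecycle implied above, not code taken
 * from this file): xfs_bui_init() hands out two references.  The log drops
 * one in xfs_bui_item_unpin() once the intent is written; BUD processing
 * drops the other in xfs_bud_item_release().  Whichever xfs_bui_release()
 * call comes last pulls the item from the AIL and frees it:
 *
 *	buip = xfs_bui_init(mp);	// bui_refcount == 2
 *	// ...commit; unpin drops the log's reference...
 *	xfs_bui_release(buip);		// last ref: AIL removal + free
 */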

/*
 * The BUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a BUD isn't going to be
 * constructed and thus we free the BUI here directly.
 */
STATIC void
xfs_bui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_bui_release(BUI_ITEM(lip));
}

static const struct xfs_item_ops xfs_bui_item_ops = {
	.iop_size	= xfs_bui_item_size,
	.iop_format	= xfs_bui_item_format,
	.iop_unpin	= xfs_bui_item_unpin,
	.iop_release	= xfs_bui_item_release,
};

/*
 * Allocate and initialize a BUI item with the given number of extents.
 */
struct xfs_bui_log_item *
xfs_bui_init(
	struct xfs_mount		*mp)
{
	struct xfs_bui_log_item		*buip;

	buip = kmem_zone_zalloc(xfs_bui_zone, KM_SLEEP);

	xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
	buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
	buip->bui_format.bui_id = (uintptr_t)(void *)buip;
	atomic_set(&buip->bui_next_extent, 0);
	atomic_set(&buip->bui_refcount, 2);

	return buip;
}

static inline struct xfs_bud_log_item *BUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bud_log_item, bud_item);
}

STATIC void
xfs_bud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_bud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given bud log item. We use only 1 iovec, and we point that
 * at the bud_log_format structure embedded in the bud item.
 * It is at this point that we assert that all of the extent
 * slots in the bud item have been filled.
 */
STATIC void
xfs_bud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	budp->bud_format.bud_type = XFS_LI_BUD;
	budp->bud_format.bud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUD_FORMAT, &budp->bud_format,
			sizeof(struct xfs_bud_log_format));
}

/*
 * The BUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the BUI and free the
 * BUD.
 */
STATIC void
xfs_bud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);

	xfs_bui_release(budp->bud_buip);
	kmem_zone_free(xfs_bud_zone, budp);
}

static const struct xfs_item_ops xfs_bud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_bud_item_size,
	.iop_format	= xfs_bud_item_format,
	.iop_release	= xfs_bud_item_release,
};

static struct xfs_bud_log_item *
xfs_trans_get_bud(
	struct xfs_trans		*tp,
	struct xfs_bui_log_item		*buip)
{
	struct xfs_bud_log_item		*budp;

	budp = kmem_zone_zalloc(xfs_bud_zone, KM_SLEEP);
	xfs_log_item_init(tp->t_mountp, &budp->bud_item, XFS_LI_BUD,
			  &xfs_bud_item_ops);
	budp->bud_buip = buip;
	budp->bud_format.bud_bui_id = buip->bui_format.bui_id;

	xfs_trans_add_item(tp, &budp->bud_item);
	return budp;
}
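
/*
 * For illustration (a sketch, not a call sequence taken from this file):
 * callers do not use xfs_trans_get_bud() directly.  A deferred mapping
 * change is queued as an xfs_bmap_intent, and the deferred-ops machinery
 * drives the xfs_bmap_update_* callbacks defined below, roughly:
 *
 *	xfs_bmap_update_create_intent()	// allocate and log the BUI
 *	xfs_bmap_update_log_item()	// copy each intent into the BUI
 *	// ...transaction roll commits the intent...
 *	xfs_bmap_update_create_done()	// xfs_trans_get_bud()
 *	xfs_bmap_update_finish_item()	// do the work, mark the BUD dirty
 */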

/*
 * Finish a bmap update and log it to the BUD. Note that the
 * transaction is marked dirty regardless of whether the bmap update
 * succeeds or fails to support the BUI/BUD lifecycle rules.
 */
static int
xfs_trans_log_finish_bmap_update(
	struct xfs_trans		*tp,
	struct xfs_bud_log_item		*budp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			*blockcount,
	xfs_exntst_t			state)
{
	int				error;

	error = xfs_bmap_finish_one(tp, ip, type, whichfork, startoff,
			startblock, blockcount, state);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the BUI and frees the BUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &budp->bud_item.li_flags);

	return error;
}

/* Sort bmap intents by inode. */
static int
xfs_bmap_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_bmap_intent		*ba;
	struct xfs_bmap_intent		*bb;

	ba = container_of(a, struct xfs_bmap_intent, bi_list);
	bb = container_of(b, struct xfs_bmap_intent, bi_list);

	/*
	 * Compare explicitly instead of subtracting: inode numbers are
	 * 64 bits wide, so the difference can overflow the int result.
	 */
	if (ba->bi_owner->i_ino < bb->bi_owner->i_ino)
		return -1;
	if (ba->bi_owner->i_ino > bb->bi_owner->i_ino)
		return 1;
	return 0;
}

/* Get a BUI. */
STATIC void *
xfs_bmap_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	struct xfs_bui_log_item		*buip;

	ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);
	ASSERT(tp != NULL);

	buip = xfs_bui_init(tp->t_mountp);
	ASSERT(buip != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &buip->bui_item);
	return buip;
}

/* Set the map extent flags for this mapping. */
static void
xfs_trans_set_bmap_flags(
	struct xfs_map_extent		*bmap,
	enum xfs_bmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	bmap->me_flags = 0;
	switch (type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		bmap->me_flags = type;
		break;
	default:
		ASSERT(0);
	}
	if (state == XFS_EXT_UNWRITTEN)
		bmap->me_flags |= XFS_BMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		bmap->me_flags |= XFS_BMAP_EXTENT_ATTR_FORK;
}

/* Log bmap updates in the intent item. */
STATIC void
xfs_bmap_update_log_item(
	struct xfs_trans		*tp,
	void				*intent,
	struct list_head		*item)
{
	struct xfs_bui_log_item		*buip = intent;
	struct xfs_bmap_intent		*bmap;
	uint				next_extent;
	struct xfs_map_extent		*map;

	bmap = container_of(item, struct xfs_bmap_intent, bi_list);

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &buip->bui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&buip->bui_next_extent) - 1;
	ASSERT(next_extent < buip->bui_format.bui_nextents);
	map = &buip->bui_format.bui_extents[next_extent];
	map->me_owner = bmap->bi_owner->i_ino;
	map->me_startblock = bmap->bi_bmap.br_startblock;
	map->me_startoff = bmap->bi_bmap.br_startoff;
	map->me_len = bmap->bi_bmap.br_blockcount;
	xfs_trans_set_bmap_flags(map, bmap->bi_type, bmap->bi_whichfork,
			bmap->bi_bmap.br_state);
}
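
/*
 * Worked example (illustrative): for an unmap of an unwritten extent on the
 * data fork, xfs_trans_set_bmap_flags() above encodes
 *
 *	map->me_flags == XFS_BMAP_UNMAP | XFS_BMAP_EXTENT_UNWRITTEN
 *
 * The operation type lives under XFS_BMAP_EXTENT_TYPE_MASK and the
 * unwritten/attr-fork state in separate flag bits, which is exactly what
 * xfs_bui_recover() decodes below.
 */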

/* Get a BUD so we can process all the deferred bmap updates. */
STATIC void *
xfs_bmap_update_create_done(
	struct xfs_trans		*tp,
	void				*intent,
	unsigned int			count)
{
	return xfs_trans_get_bud(tp, intent);
}

/* Process a deferred bmap update. */
STATIC int
xfs_bmap_update_finish_item(
	struct xfs_trans		*tp,
	struct list_head		*item,
	void				*done_item,
	void				**state)
{
	struct xfs_bmap_intent		*bmap;
	xfs_filblks_t			count;
	int				error;

	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
	count = bmap->bi_bmap.br_blockcount;
	error = xfs_trans_log_finish_bmap_update(tp, done_item,
			bmap->bi_type,
			bmap->bi_owner, bmap->bi_whichfork,
			bmap->bi_bmap.br_startoff,
			bmap->bi_bmap.br_startblock,
			&count,
			bmap->bi_bmap.br_state);
	if (!error && count > 0) {
		ASSERT(bmap->bi_type == XFS_BMAP_UNMAP);
		bmap->bi_bmap.br_blockcount = count;
		return -EAGAIN;
	}
	kmem_free(bmap);
	return error;
}

/* Abort all pending BUIs. */
STATIC void
xfs_bmap_update_abort_intent(
	void				*intent)
{
	xfs_bui_release(intent);
}

/* Cancel a deferred bmap update. */
STATIC void
xfs_bmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_bmap_intent		*bmap;

	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
	kmem_free(bmap);
}

const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
	.max_items	= XFS_BUI_MAX_FAST_EXTENTS,
	.diff_items	= xfs_bmap_update_diff_items,
	.create_intent	= xfs_bmap_update_create_intent,
	.abort_intent	= xfs_bmap_update_abort_intent,
	.log_item	= xfs_bmap_update_log_item,
	.create_done	= xfs_bmap_update_create_done,
	.finish_item	= xfs_bmap_update_finish_item,
	.cancel_item	= xfs_bmap_update_cancel_item,
};

/*
 * Process a bmap update intent item that was recovered from the log.
 * We need to update some inode's bmbt.
 */
int
xfs_bui_recover(
	struct xfs_trans		*parent_tp,
	struct xfs_bui_log_item		*buip)
{
	int				error = 0;
	unsigned int			bui_type;
	struct xfs_map_extent		*bmap;
	xfs_fsblock_t			startblock_fsb;
	xfs_fsblock_t			inode_fsb;
	xfs_filblks_t			count;
	bool				op_ok;
	struct xfs_bud_log_item		*budp;
	enum xfs_bmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_inode		*ip = NULL;
	struct xfs_bmbt_irec		irec;
	struct xfs_mount		*mp = parent_tp->t_mountp;

	ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));

	/* Only one mapping operation per BUI... */
	if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
		xfs_bui_release(buip);
		return -EIO;
	}

	/*
	 * First check the validity of the extent described by the
	 * BUI. If anything is bad, then toss the BUI.
	 */
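	/*
	 * Specifically: reject a start block or owner inode that is zero or
	 * past the end of the filesystem, a zero length or one that is at
	 * least an AG in size, and any flag bits we do not recognize.
	 */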
	bmap = &buip->bui_format.bui_extents[0];
	startblock_fsb = XFS_BB_TO_FSB(mp,
			   XFS_FSB_TO_DADDR(mp, bmap->me_startblock));
	inode_fsb = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp,
			XFS_INO_TO_FSB(mp, bmap->me_owner)));
	switch (bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		op_ok = true;
		break;
	default:
		op_ok = false;
		break;
	}
	if (!op_ok || startblock_fsb == 0 ||
	    bmap->me_len == 0 ||
	    inode_fsb == 0 ||
	    startblock_fsb >= mp->m_sb.sb_dblocks ||
	    bmap->me_len >= mp->m_sb.sb_agblocks ||
	    inode_fsb >= mp->m_sb.sb_dblocks ||
	    (bmap->me_flags & ~XFS_BMAP_EXTENT_FLAGS)) {
		/*
		 * This will pull the BUI from the AIL and
		 * free the memory associated with it.
		 */
		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
		xfs_bui_release(buip);
		return -EIO;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
	if (error)
		return error;
	/*
	 * Recovery stashes all deferred ops during intent processing and
	 * finishes them on completion. Transfer current dfops state to this
	 * transaction and transfer the result back before we return.
	 */
	xfs_defer_move(tp, parent_tp);
	budp = xfs_trans_get_bud(tp, buip);

	/* Grab the inode. */
	error = xfs_iget(mp, tp, bmap->me_owner, 0, XFS_ILOCK_EXCL, &ip);
	if (error)
		goto err_inode;

	if (VFS_I(ip)->i_nlink == 0)
		xfs_iflags_set(ip, XFS_IRECOVERY);

	/* Process deferred bmap item. */
	state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
	switch (bui_type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		type = bui_type;
		break;
	default:
		error = -EFSCORRUPTED;
		goto err_inode;
	}
	xfs_trans_ijoin(tp, ip, 0);

	count = bmap->me_len;
	error = xfs_trans_log_finish_bmap_update(tp, budp, type, ip, whichfork,
			bmap->me_startoff, bmap->me_startblock, &count, state);
	if (error)
		goto err_inode;

	if (count > 0) {
		ASSERT(type == XFS_BMAP_UNMAP);
		irec.br_startblock = bmap->me_startblock;
		irec.br_blockcount = count;
		irec.br_startoff = bmap->me_startoff;
		irec.br_state = state;
		error = xfs_bmap_unmap_extent(tp, ip, &irec);
		if (error)
			goto err_inode;
	}

	set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
	xfs_defer_move(parent_tp, tp);
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_irele(ip);

	return error;

err_inode:
	xfs_defer_move(parent_tp, tp);
	xfs_trans_cancel(tp);
	if (ip) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_irele(ip);
	}
	return error;
}
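
/*
 * For illustration (a sketch assuming the usual XFS zone-setup pattern in
 * xfs_super.c; the actual call site lives outside this file): the
 * xfs_bui_zone and xfs_bud_zone declared at the top are created once at
 * module init, sized for the single-extent fast path used throughout:
 *
 *	xfs_bui_zone = kmem_zone_init(
 *			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
 *			"xfs_bui_item");
 *	xfs_bud_zone = kmem_zone_init(sizeof(struct xfs_bud_log_item),
 *			"xfs_bud_item");
 */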