1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include <linux/backing-dev.h> 8 9 #include "xfs_shared.h" 10 #include "xfs_format.h" 11 #include "xfs_log_format.h" 12 #include "xfs_trans_resv.h" 13 #include "xfs_sb.h" 14 #include "xfs_mount.h" 15 #include "xfs_trace.h" 16 #include "xfs_log.h" 17 #include "xfs_errortag.h" 18 #include "xfs_error.h" 19 20 static kmem_zone_t *xfs_buf_zone; 21 22 #define xb_to_gfp(flags) \ 23 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN) 24 25 /* 26 * Locking orders 27 * 28 * xfs_buf_ioacct_inc: 29 * xfs_buf_ioacct_dec: 30 * b_sema (caller holds) 31 * b_lock 32 * 33 * xfs_buf_stale: 34 * b_sema (caller holds) 35 * b_lock 36 * lru_lock 37 * 38 * xfs_buf_rele: 39 * b_lock 40 * pag_buf_lock 41 * lru_lock 42 * 43 * xfs_buftarg_wait_rele 44 * lru_lock 45 * b_lock (trylock due to inversion) 46 * 47 * xfs_buftarg_isolate 48 * lru_lock 49 * b_lock (trylock due to inversion) 50 */ 51 52 static inline int 53 xfs_buf_is_vmapped( 54 struct xfs_buf *bp) 55 { 56 /* 57 * Return true if the buffer is vmapped. 58 * 59 * b_addr is null if the buffer is not mapped, but the code is clever 60 * enough to know it doesn't have to map a single page, so the check has 61 * to be both for b_addr and bp->b_page_count > 1. 62 */ 63 return bp->b_addr && bp->b_page_count > 1; 64 } 65 66 static inline int 67 xfs_buf_vmap_len( 68 struct xfs_buf *bp) 69 { 70 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset; 71 } 72 73 /* 74 * Bump the I/O in flight count on the buftarg if we haven't yet done so for 75 * this buffer. The count is incremented once per buffer (per hold cycle) 76 * because the corresponding decrement is deferred to buffer release. Buffers 77 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O 78 * tracking adds unnecessary overhead. This is used for sychronization purposes 79 * with unmount (see xfs_wait_buftarg()), so all we really need is a count of 80 * in-flight buffers. 81 * 82 * Buffers that are never released (e.g., superblock, iclog buffers) must set 83 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count 84 * never reaches zero and unmount hangs indefinitely. 85 */ 86 static inline void 87 xfs_buf_ioacct_inc( 88 struct xfs_buf *bp) 89 { 90 if (bp->b_flags & XBF_NO_IOACCT) 91 return; 92 93 ASSERT(bp->b_flags & XBF_ASYNC); 94 spin_lock(&bp->b_lock); 95 if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) { 96 bp->b_state |= XFS_BSTATE_IN_FLIGHT; 97 percpu_counter_inc(&bp->b_target->bt_io_count); 98 } 99 spin_unlock(&bp->b_lock); 100 } 101 102 /* 103 * Clear the in-flight state on a buffer about to be released to the LRU or 104 * freed and unaccount from the buftarg. 105 */ 106 static inline void 107 __xfs_buf_ioacct_dec( 108 struct xfs_buf *bp) 109 { 110 lockdep_assert_held(&bp->b_lock); 111 112 if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { 113 bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; 114 percpu_counter_dec(&bp->b_target->bt_io_count); 115 } 116 } 117 118 static inline void 119 xfs_buf_ioacct_dec( 120 struct xfs_buf *bp) 121 { 122 spin_lock(&bp->b_lock); 123 __xfs_buf_ioacct_dec(bp); 124 spin_unlock(&bp->b_lock); 125 } 126 127 /* 128 * When we mark a buffer stale, we remove the buffer from the LRU and clear the 129 * b_lru_ref count so that the buffer is freed immediately when the buffer 130 * reference count falls to zero. 
If the buffer is already on the LRU, we need 131 * to remove the reference that LRU holds on the buffer. 132 * 133 * This prevents build-up of stale buffers on the LRU. 134 */ 135 void 136 xfs_buf_stale( 137 struct xfs_buf *bp) 138 { 139 ASSERT(xfs_buf_islocked(bp)); 140 141 bp->b_flags |= XBF_STALE; 142 143 /* 144 * Clear the delwri status so that a delwri queue walker will not 145 * flush this buffer to disk now that it is stale. The delwri queue has 146 * a reference to the buffer, so this is safe to do. 147 */ 148 bp->b_flags &= ~_XBF_DELWRI_Q; 149 150 /* 151 * Once the buffer is marked stale and unlocked, a subsequent lookup 152 * could reset b_flags. There is no guarantee that the buffer is 153 * unaccounted (released to LRU) before that occurs. Drop in-flight 154 * status now to preserve accounting consistency. 155 */ 156 spin_lock(&bp->b_lock); 157 __xfs_buf_ioacct_dec(bp); 158 159 atomic_set(&bp->b_lru_ref, 0); 160 if (!(bp->b_state & XFS_BSTATE_DISPOSE) && 161 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) 162 atomic_dec(&bp->b_hold); 163 164 ASSERT(atomic_read(&bp->b_hold) >= 1); 165 spin_unlock(&bp->b_lock); 166 } 167 168 static int 169 xfs_buf_get_maps( 170 struct xfs_buf *bp, 171 int map_count) 172 { 173 ASSERT(bp->b_maps == NULL); 174 bp->b_map_count = map_count; 175 176 if (map_count == 1) { 177 bp->b_maps = &bp->__b_map; 178 return 0; 179 } 180 181 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), 182 KM_NOFS); 183 if (!bp->b_maps) 184 return -ENOMEM; 185 return 0; 186 } 187 188 /* 189 * Frees b_maps if it was allocated. 190 */ 191 static void 192 xfs_buf_free_maps( 193 struct xfs_buf *bp) 194 { 195 if (bp->b_maps != &bp->__b_map) { 196 kmem_free(bp->b_maps); 197 bp->b_maps = NULL; 198 } 199 } 200 201 static int 202 _xfs_buf_alloc( 203 struct xfs_buftarg *target, 204 struct xfs_buf_map *map, 205 int nmaps, 206 xfs_buf_flags_t flags, 207 struct xfs_buf **bpp) 208 { 209 struct xfs_buf *bp; 210 int error; 211 int i; 212 213 *bpp = NULL; 214 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS); 215 if (unlikely(!bp)) 216 return -ENOMEM; 217 218 /* 219 * We don't want certain flags to appear in b_flags unless they are 220 * specifically set by later operations on the buffer. 221 */ 222 flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD); 223 224 atomic_set(&bp->b_hold, 1); 225 atomic_set(&bp->b_lru_ref, 1); 226 init_completion(&bp->b_iowait); 227 INIT_LIST_HEAD(&bp->b_lru); 228 INIT_LIST_HEAD(&bp->b_list); 229 INIT_LIST_HEAD(&bp->b_li_list); 230 sema_init(&bp->b_sema, 0); /* held, no waiters */ 231 spin_lock_init(&bp->b_lock); 232 bp->b_target = target; 233 bp->b_mount = target->bt_mount; 234 bp->b_flags = flags; 235 236 /* 237 * Set length and io_length to the same value initially. 238 * I/O routines should use io_length, which will be the same in 239 * most cases but may be reset (e.g. XFS recovery).
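	 *
	 * As an illustration (made-up values), a discontiguous buffer described
	 * by two mappings
	 *
	 *	struct xfs_buf_map map[2] = {
	 *		{ .bm_bn = 100, .bm_len = 8 },
	 *		{ .bm_bn = 200, .bm_len = 8 },
	 *	};
	 *
	 * leaves the loop below with bp->b_bn == 100 and bp->b_length == 16
	 * basic blocks, while each bp->b_maps[i] keeps its own disk address
	 * and length for I/O submission.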
240 */ 241 error = xfs_buf_get_maps(bp, nmaps); 242 if (error) { 243 kmem_cache_free(xfs_buf_zone, bp); 244 return error; 245 } 246 247 bp->b_bn = map[0].bm_bn; 248 bp->b_length = 0; 249 for (i = 0; i < nmaps; i++) { 250 bp->b_maps[i].bm_bn = map[i].bm_bn; 251 bp->b_maps[i].bm_len = map[i].bm_len; 252 bp->b_length += map[i].bm_len; 253 } 254 255 atomic_set(&bp->b_pin_count, 0); 256 init_waitqueue_head(&bp->b_waiters); 257 258 XFS_STATS_INC(bp->b_mount, xb_create); 259 trace_xfs_buf_init(bp, _RET_IP_); 260 261 *bpp = bp; 262 return 0; 263 } 264 265 /* 266 * Allocate a page array capable of holding a specified number 267 * of pages, and point the page buf at it. 268 */ 269 STATIC int 270 _xfs_buf_get_pages( 271 xfs_buf_t *bp, 272 int page_count) 273 { 274 /* Make sure that we have a page list */ 275 if (bp->b_pages == NULL) { 276 bp->b_page_count = page_count; 277 if (page_count <= XB_PAGES) { 278 bp->b_pages = bp->b_page_array; 279 } else { 280 bp->b_pages = kmem_alloc(sizeof(struct page *) * 281 page_count, KM_NOFS); 282 if (bp->b_pages == NULL) 283 return -ENOMEM; 284 } 285 memset(bp->b_pages, 0, sizeof(struct page *) * page_count); 286 } 287 return 0; 288 } 289 290 /* 291 * Frees b_pages if it was allocated. 292 */ 293 STATIC void 294 _xfs_buf_free_pages( 295 xfs_buf_t *bp) 296 { 297 if (bp->b_pages != bp->b_page_array) { 298 kmem_free(bp->b_pages); 299 bp->b_pages = NULL; 300 } 301 } 302 303 /* 304 * Releases the specified buffer. 305 * 306 * The modification state of any associated pages is left unchanged. 307 * The buffer must not be on any hash - use xfs_buf_rele instead for 308 * hashed and refcounted buffers 309 */ 310 static void 311 xfs_buf_free( 312 xfs_buf_t *bp) 313 { 314 trace_xfs_buf_free(bp, _RET_IP_); 315 316 ASSERT(list_empty(&bp->b_lru)); 317 318 if (bp->b_flags & _XBF_PAGES) { 319 uint i; 320 321 if (xfs_buf_is_vmapped(bp)) 322 vm_unmap_ram(bp->b_addr - bp->b_offset, 323 bp->b_page_count); 324 325 for (i = 0; i < bp->b_page_count; i++) { 326 struct page *page = bp->b_pages[i]; 327 328 __free_page(page); 329 } 330 } else if (bp->b_flags & _XBF_KMEM) 331 kmem_free(bp->b_addr); 332 _xfs_buf_free_pages(bp); 333 xfs_buf_free_maps(bp); 334 kmem_cache_free(xfs_buf_zone, bp); 335 } 336 337 /* 338 * Allocates all the pages for buffer in question and builds it's page list. 339 */ 340 STATIC int 341 xfs_buf_allocate_memory( 342 xfs_buf_t *bp, 343 uint flags) 344 { 345 size_t size; 346 size_t nbytes, offset; 347 gfp_t gfp_mask = xb_to_gfp(flags); 348 unsigned short page_count, i; 349 xfs_off_t start, end; 350 int error; 351 xfs_km_flags_t kmflag_mask = 0; 352 353 /* 354 * assure zeroed buffer for non-read cases. 355 */ 356 if (!(flags & XBF_READ)) { 357 kmflag_mask |= KM_ZERO; 358 gfp_mask |= __GFP_ZERO; 359 } 360 361 /* 362 * for buffers that are contained within a single page, just allocate 363 * the memory from the heap - there's no need for the complexity of 364 * page arrays to keep allocation down to order 0. 
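	 *
	 * As a concrete sketch (assuming 4k pages and a page-aligned mapping):
	 * a single 512 byte sector buffer satisfies size < PAGE_SIZE, is
	 * carved out of the heap by kmem_alloc_io() below and flagged
	 * _XBF_KMEM, whereas an 8k buffer (b_length == 16 basic blocks) falls
	 * through to the alloc_page() loop, ends up with b_page_count == 2 and
	 * is flagged _XBF_PAGES instead.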
365 */ 366 size = BBTOB(bp->b_length); 367 if (size < PAGE_SIZE) { 368 int align_mask = xfs_buftarg_dma_alignment(bp->b_target); 369 bp->b_addr = kmem_alloc_io(size, align_mask, 370 KM_NOFS | kmflag_mask); 371 if (!bp->b_addr) { 372 /* low memory - use alloc_page loop instead */ 373 goto use_alloc_page; 374 } 375 376 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) != 377 ((unsigned long)bp->b_addr & PAGE_MASK)) { 378 /* b_addr spans two pages - use alloc_page instead */ 379 kmem_free(bp->b_addr); 380 bp->b_addr = NULL; 381 goto use_alloc_page; 382 } 383 bp->b_offset = offset_in_page(bp->b_addr); 384 bp->b_pages = bp->b_page_array; 385 bp->b_pages[0] = kmem_to_page(bp->b_addr); 386 bp->b_page_count = 1; 387 bp->b_flags |= _XBF_KMEM; 388 return 0; 389 } 390 391 use_alloc_page: 392 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT; 393 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1) 394 >> PAGE_SHIFT; 395 page_count = end - start; 396 error = _xfs_buf_get_pages(bp, page_count); 397 if (unlikely(error)) 398 return error; 399 400 offset = bp->b_offset; 401 bp->b_flags |= _XBF_PAGES; 402 403 for (i = 0; i < bp->b_page_count; i++) { 404 struct page *page; 405 uint retries = 0; 406 retry: 407 page = alloc_page(gfp_mask); 408 if (unlikely(page == NULL)) { 409 if (flags & XBF_READ_AHEAD) { 410 bp->b_page_count = i; 411 error = -ENOMEM; 412 goto out_free_pages; 413 } 414 415 /* 416 * This could deadlock. 417 * 418 * But until all the XFS lowlevel code is revamped to 419 * handle buffer allocation failures we can't do much. 420 */ 421 if (!(++retries % 100)) 422 xfs_err(NULL, 423 "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)", 424 current->comm, current->pid, 425 __func__, gfp_mask); 426 427 XFS_STATS_INC(bp->b_mount, xb_page_retries); 428 congestion_wait(BLK_RW_ASYNC, HZ/50); 429 goto retry; 430 } 431 432 XFS_STATS_INC(bp->b_mount, xb_page_found); 433 434 nbytes = min_t(size_t, size, PAGE_SIZE - offset); 435 size -= nbytes; 436 bp->b_pages[i] = page; 437 offset = 0; 438 } 439 return 0; 440 441 out_free_pages: 442 for (i = 0; i < bp->b_page_count; i++) 443 __free_page(bp->b_pages[i]); 444 bp->b_flags &= ~_XBF_PAGES; 445 return error; 446 } 447 448 /* 449 * Map buffer into kernel address-space if necessary. 450 */ 451 STATIC int 452 _xfs_buf_map_pages( 453 xfs_buf_t *bp, 454 uint flags) 455 { 456 ASSERT(bp->b_flags & _XBF_PAGES); 457 if (bp->b_page_count == 1) { 458 /* A single page buffer is always mappable */ 459 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; 460 } else if (flags & XBF_UNMAPPED) { 461 bp->b_addr = NULL; 462 } else { 463 int retried = 0; 464 unsigned nofs_flag; 465 466 /* 467 * vm_map_ram() will allocate auxiliary structures (e.g. 468 * pagetables) with GFP_KERNEL, yet we are likely to be under 469 * GFP_NOFS context here. Hence we need to tell memory reclaim 470 * that we are in such a context via PF_MEMALLOC_NOFS to prevent 471 * memory reclaim re-entering the filesystem here and 472 * potentially deadlocking. 
473 */ 474 nofs_flag = memalloc_nofs_save(); 475 do { 476 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, 477 -1, PAGE_KERNEL); 478 if (bp->b_addr) 479 break; 480 vm_unmap_aliases(); 481 } while (retried++ <= 1); 482 memalloc_nofs_restore(nofs_flag); 483 484 if (!bp->b_addr) 485 return -ENOMEM; 486 bp->b_addr += bp->b_offset; 487 } 488 489 return 0; 490 } 491 492 /* 493 * Finding and Reading Buffers 494 */ 495 static int 496 _xfs_buf_obj_cmp( 497 struct rhashtable_compare_arg *arg, 498 const void *obj) 499 { 500 const struct xfs_buf_map *map = arg->key; 501 const struct xfs_buf *bp = obj; 502 503 /* 504 * The key hashing in the lookup path depends on the key being the 505 * first element of the compare_arg, make sure to assert this. 506 */ 507 BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0); 508 509 if (bp->b_bn != map->bm_bn) 510 return 1; 511 512 if (unlikely(bp->b_length != map->bm_len)) { 513 /* 514 * found a block number match. If the range doesn't 515 * match, the only way this is allowed is if the buffer 516 * in the cache is stale and the transaction that made 517 * it stale has not yet committed. i.e. we are 518 * reallocating a busy extent. Skip this buffer and 519 * continue searching for an exact match. 520 */ 521 ASSERT(bp->b_flags & XBF_STALE); 522 return 1; 523 } 524 return 0; 525 } 526 527 static const struct rhashtable_params xfs_buf_hash_params = { 528 .min_size = 32, /* empty AGs have minimal footprint */ 529 .nelem_hint = 16, 530 .key_len = sizeof(xfs_daddr_t), 531 .key_offset = offsetof(struct xfs_buf, b_bn), 532 .head_offset = offsetof(struct xfs_buf, b_rhash_head), 533 .automatic_shrinking = true, 534 .obj_cmpfn = _xfs_buf_obj_cmp, 535 }; 536 537 int 538 xfs_buf_hash_init( 539 struct xfs_perag *pag) 540 { 541 spin_lock_init(&pag->pag_buf_lock); 542 return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params); 543 } 544 545 void 546 xfs_buf_hash_destroy( 547 struct xfs_perag *pag) 548 { 549 rhashtable_destroy(&pag->pag_buf_hash); 550 } 551 552 /* 553 * Look up a buffer in the buffer cache and return it referenced and locked 554 * in @found_bp. 555 * 556 * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the 557 * cache. 558 * 559 * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return 560 * -EAGAIN if we fail to lock it. 561 * 562 * Return values are: 563 * -EFSCORRUPTED if have been supplied with an invalid address 564 * -EAGAIN on trylock failure 565 * -ENOENT if we fail to find a match and @new_bp was NULL 566 * 0, with @found_bp: 567 * - @new_bp if we inserted it into the cache 568 * - the buffer we found and locked. 569 */ 570 static int 571 xfs_buf_find( 572 struct xfs_buftarg *btp, 573 struct xfs_buf_map *map, 574 int nmaps, 575 xfs_buf_flags_t flags, 576 struct xfs_buf *new_bp, 577 struct xfs_buf **found_bp) 578 { 579 struct xfs_perag *pag; 580 xfs_buf_t *bp; 581 struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn }; 582 xfs_daddr_t eofs; 583 int i; 584 585 *found_bp = NULL; 586 587 for (i = 0; i < nmaps; i++) 588 cmap.bm_len += map[i].bm_len; 589 590 /* Check for IOs smaller than the sector size / not sector aligned */ 591 ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize)); 592 ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask)); 593 594 /* 595 * Corrupted block numbers can get through to here, unfortunately, so we 596 * have to check that the buffer falls within the filesystem bounds. 
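	 *
	 * As a worked example (illustrative numbers, 4k filesystem blocks):
	 * with sb_dblocks == 1000, eofs below evaluates to
	 * XFS_FSB_TO_BB(btp->bt_mount, 1000) == 8000 basic blocks, so a lookup
	 * with cmap.bm_bn == 8192 is rejected with -EFSCORRUPTED instead of
	 * being passed on to the rhashtable lookup.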
597 */ 598 eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks); 599 if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) { 600 xfs_alert(btp->bt_mount, 601 "%s: daddr 0x%llx out of range, EOFS 0x%llx", 602 __func__, cmap.bm_bn, eofs); 603 WARN_ON(1); 604 return -EFSCORRUPTED; 605 } 606 607 pag = xfs_perag_get(btp->bt_mount, 608 xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn)); 609 610 spin_lock(&pag->pag_buf_lock); 611 bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap, 612 xfs_buf_hash_params); 613 if (bp) { 614 atomic_inc(&bp->b_hold); 615 goto found; 616 } 617 618 /* No match found */ 619 if (!new_bp) { 620 XFS_STATS_INC(btp->bt_mount, xb_miss_locked); 621 spin_unlock(&pag->pag_buf_lock); 622 xfs_perag_put(pag); 623 return -ENOENT; 624 } 625 626 /* the buffer keeps the perag reference until it is freed */ 627 new_bp->b_pag = pag; 628 rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head, 629 xfs_buf_hash_params); 630 spin_unlock(&pag->pag_buf_lock); 631 *found_bp = new_bp; 632 return 0; 633 634 found: 635 spin_unlock(&pag->pag_buf_lock); 636 xfs_perag_put(pag); 637 638 if (!xfs_buf_trylock(bp)) { 639 if (flags & XBF_TRYLOCK) { 640 xfs_buf_rele(bp); 641 XFS_STATS_INC(btp->bt_mount, xb_busy_locked); 642 return -EAGAIN; 643 } 644 xfs_buf_lock(bp); 645 XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited); 646 } 647 648 /* 649 * if the buffer is stale, clear all the external state associated with 650 * it. We need to keep flags such as how we allocated the buffer memory 651 * intact here. 652 */ 653 if (bp->b_flags & XBF_STALE) { 654 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); 655 ASSERT(bp->b_iodone == NULL); 656 bp->b_flags &= _XBF_KMEM | _XBF_PAGES; 657 bp->b_ops = NULL; 658 } 659 660 trace_xfs_buf_find(bp, flags, _RET_IP_); 661 XFS_STATS_INC(btp->bt_mount, xb_get_locked); 662 *found_bp = bp; 663 return 0; 664 } 665 666 struct xfs_buf * 667 xfs_buf_incore( 668 struct xfs_buftarg *target, 669 xfs_daddr_t blkno, 670 size_t numblks, 671 xfs_buf_flags_t flags) 672 { 673 struct xfs_buf *bp; 674 int error; 675 DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); 676 677 error = xfs_buf_find(target, &map, 1, flags, NULL, &bp); 678 if (error) 679 return NULL; 680 return bp; 681 } 682 683 /* 684 * Assembles a buffer covering the specified range. The code is optimised for 685 * cache hits, as metadata intensive workloads will see 3 orders of magnitude 686 * more hits than misses. 
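 *
 * A typical caller looks roughly like the sketch below (illustrative only,
 * error handling trimmed); the buffer comes back locked and held:
 *
 *	struct xfs_buf		*bp;
 *	int			error;
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *
 *	error = xfs_buf_get_map(target, &map, 1, 0, &bp);
 *	if (error)
 *		return error;
 *	// ... modify bp->b_addr ...
 *	xfs_buf_relse(bp);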
687 */ 688 int 689 xfs_buf_get_map( 690 struct xfs_buftarg *target, 691 struct xfs_buf_map *map, 692 int nmaps, 693 xfs_buf_flags_t flags, 694 struct xfs_buf **bpp) 695 { 696 struct xfs_buf *bp; 697 struct xfs_buf *new_bp; 698 int error = 0; 699 700 *bpp = NULL; 701 error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp); 702 if (!error) 703 goto found; 704 if (error != -ENOENT) 705 return error; 706 707 error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp); 708 if (error) 709 return error; 710 711 error = xfs_buf_allocate_memory(new_bp, flags); 712 if (error) { 713 xfs_buf_free(new_bp); 714 return error; 715 } 716 717 error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp); 718 if (error) { 719 xfs_buf_free(new_bp); 720 return error; 721 } 722 723 if (bp != new_bp) 724 xfs_buf_free(new_bp); 725 726 found: 727 if (!bp->b_addr) { 728 error = _xfs_buf_map_pages(bp, flags); 729 if (unlikely(error)) { 730 xfs_warn_ratelimited(target->bt_mount, 731 "%s: failed to map %u pages", __func__, 732 bp->b_page_count); 733 xfs_buf_relse(bp); 734 return error; 735 } 736 } 737 738 /* 739 * Clear b_error if this is a lookup from a caller that doesn't expect 740 * valid data to be found in the buffer. 741 */ 742 if (!(flags & XBF_READ)) 743 xfs_buf_ioerror(bp, 0); 744 745 XFS_STATS_INC(target->bt_mount, xb_get); 746 trace_xfs_buf_get(bp, flags, _RET_IP_); 747 *bpp = bp; 748 return 0; 749 } 750 751 STATIC int 752 _xfs_buf_read( 753 xfs_buf_t *bp, 754 xfs_buf_flags_t flags) 755 { 756 ASSERT(!(flags & XBF_WRITE)); 757 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); 758 759 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD); 760 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); 761 762 return xfs_buf_submit(bp); 763 } 764 765 /* 766 * Reverify a buffer found in cache without an attached ->b_ops. 767 * 768 * If the caller passed an ops structure and the buffer doesn't have ops 769 * assigned, set the ops and use it to verify the contents. If verification 770 * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is 771 * already in XBF_DONE state on entry. 772 * 773 * Under normal operations, every in-core buffer is verified on read I/O 774 * completion. There are two scenarios that can lead to in-core buffers without 775 * an assigned ->b_ops. The first is during log recovery of buffers on a V4 776 * filesystem, though these buffers are purged at the end of recovery. The 777 * other is online repair, which intentionally reads with a NULL buffer ops to 778 * run several verifiers across an in-core buffer in order to establish buffer 779 * type. If repair can't establish that, the buffer will be left in memory 780 * with NULL buffer ops. 
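 *
 * A hypothetical caller that read the buffer without a verifier and settles
 * on one later might do (sketch only):
 *
 *	error = xfs_buf_read_map(target, &map, 1, 0, &bp, NULL, fa);
 *	...
 *	error = xfs_buf_reverify(bp, ops);
 *	// error is bp->b_error as set by ops->verify_read(), 0 if it passed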
781 */ 782 int 783 xfs_buf_reverify( 784 struct xfs_buf *bp, 785 const struct xfs_buf_ops *ops) 786 { 787 ASSERT(bp->b_flags & XBF_DONE); 788 ASSERT(bp->b_error == 0); 789 790 if (!ops || bp->b_ops) 791 return 0; 792 793 bp->b_ops = ops; 794 bp->b_ops->verify_read(bp); 795 if (bp->b_error) 796 bp->b_flags &= ~XBF_DONE; 797 return bp->b_error; 798 } 799 800 int 801 xfs_buf_read_map( 802 struct xfs_buftarg *target, 803 struct xfs_buf_map *map, 804 int nmaps, 805 xfs_buf_flags_t flags, 806 struct xfs_buf **bpp, 807 const struct xfs_buf_ops *ops, 808 xfs_failaddr_t fa) 809 { 810 struct xfs_buf *bp; 811 int error; 812 813 flags |= XBF_READ; 814 *bpp = NULL; 815 816 error = xfs_buf_get_map(target, map, nmaps, flags, &bp); 817 if (error) 818 return error; 819 820 trace_xfs_buf_read(bp, flags, _RET_IP_); 821 822 if (!(bp->b_flags & XBF_DONE)) { 823 /* Initiate the buffer read and wait. */ 824 XFS_STATS_INC(target->bt_mount, xb_get_read); 825 bp->b_ops = ops; 826 error = _xfs_buf_read(bp, flags); 827 828 /* Readahead iodone already dropped the buffer, so exit. */ 829 if (flags & XBF_ASYNC) 830 return 0; 831 } else { 832 /* Buffer already read; all we need to do is check it. */ 833 error = xfs_buf_reverify(bp, ops); 834 835 /* Readahead already finished; drop the buffer and exit. */ 836 if (flags & XBF_ASYNC) { 837 xfs_buf_relse(bp); 838 return 0; 839 } 840 841 /* We do not want read in the flags */ 842 bp->b_flags &= ~XBF_READ; 843 ASSERT(bp->b_ops != NULL || ops == NULL); 844 } 845 846 /* 847 * If we've had a read error, then the contents of the buffer are 848 * invalid and should not be used. To ensure that a followup read tries 849 * to pull the buffer from disk again, we clear the XBF_DONE flag and 850 * mark the buffer stale. This ensures that anyone who has a current 851 * reference to the buffer will interpret it's contents correctly and 852 * future cache lookups will also treat it as an empty, uninitialised 853 * buffer. 854 */ 855 if (error) { 856 if (!XFS_FORCED_SHUTDOWN(target->bt_mount)) 857 xfs_buf_ioerror_alert(bp, fa); 858 859 bp->b_flags &= ~XBF_DONE; 860 xfs_buf_stale(bp); 861 xfs_buf_relse(bp); 862 863 /* bad CRC means corrupted metadata */ 864 if (error == -EFSBADCRC) 865 error = -EFSCORRUPTED; 866 return error; 867 } 868 869 *bpp = bp; 870 return 0; 871 } 872 873 /* 874 * If we are not low on memory then do the readahead in a deadlock 875 * safe manner. 876 */ 877 void 878 xfs_buf_readahead_map( 879 struct xfs_buftarg *target, 880 struct xfs_buf_map *map, 881 int nmaps, 882 const struct xfs_buf_ops *ops) 883 { 884 struct xfs_buf *bp; 885 886 if (bdi_read_congested(target->bt_bdev->bd_bdi)) 887 return; 888 889 xfs_buf_read_map(target, map, nmaps, 890 XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops, 891 __this_address); 892 } 893 894 /* 895 * Read an uncached buffer from disk. Allocates and returns a locked 896 * buffer containing the disk contents or nothing. 
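 *
 * Sketch of a typical call (illustrative; the mount/target names are just
 * assumptions, not requirements of the API):
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read_uncached(mp->m_ddev_targp, daddr, numblks, 0,
 *			&bp, ops);
 *	if (error)
 *		return error;
 *	// consume bp->b_addr, then drop the lock and the reference
 *	xfs_buf_relse(bp);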
897 */ 898 int 899 xfs_buf_read_uncached( 900 struct xfs_buftarg *target, 901 xfs_daddr_t daddr, 902 size_t numblks, 903 int flags, 904 struct xfs_buf **bpp, 905 const struct xfs_buf_ops *ops) 906 { 907 struct xfs_buf *bp; 908 int error; 909 910 *bpp = NULL; 911 912 error = xfs_buf_get_uncached(target, numblks, flags, &bp); 913 if (error) 914 return error; 915 916 /* set up the buffer for a read IO */ 917 ASSERT(bp->b_map_count == 1); 918 bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */ 919 bp->b_maps[0].bm_bn = daddr; 920 bp->b_flags |= XBF_READ; 921 bp->b_ops = ops; 922 923 xfs_buf_submit(bp); 924 if (bp->b_error) { 925 error = bp->b_error; 926 xfs_buf_relse(bp); 927 return error; 928 } 929 930 *bpp = bp; 931 return 0; 932 } 933 934 int 935 xfs_buf_get_uncached( 936 struct xfs_buftarg *target, 937 size_t numblks, 938 int flags, 939 struct xfs_buf **bpp) 940 { 941 unsigned long page_count; 942 int error, i; 943 struct xfs_buf *bp; 944 DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks); 945 946 *bpp = NULL; 947 948 /* flags might contain irrelevant bits, pass only what we care about */ 949 error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp); 950 if (error) 951 goto fail; 952 953 page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT; 954 error = _xfs_buf_get_pages(bp, page_count); 955 if (error) 956 goto fail_free_buf; 957 958 for (i = 0; i < page_count; i++) { 959 bp->b_pages[i] = alloc_page(xb_to_gfp(flags)); 960 if (!bp->b_pages[i]) { 961 error = -ENOMEM; 962 goto fail_free_mem; 963 } 964 } 965 bp->b_flags |= _XBF_PAGES; 966 967 error = _xfs_buf_map_pages(bp, 0); 968 if (unlikely(error)) { 969 xfs_warn(target->bt_mount, 970 "%s: failed to map pages", __func__); 971 goto fail_free_mem; 972 } 973 974 trace_xfs_buf_get_uncached(bp, _RET_IP_); 975 *bpp = bp; 976 return 0; 977 978 fail_free_mem: 979 while (--i >= 0) 980 __free_page(bp->b_pages[i]); 981 _xfs_buf_free_pages(bp); 982 fail_free_buf: 983 xfs_buf_free_maps(bp); 984 kmem_cache_free(xfs_buf_zone, bp); 985 fail: 986 return error; 987 } 988 989 /* 990 * Increment reference count on buffer, to hold the buffer concurrently 991 * with another thread which may release (free) the buffer asynchronously. 992 * Must hold the buffer already to call this function. 993 */ 994 void 995 xfs_buf_hold( 996 xfs_buf_t *bp) 997 { 998 trace_xfs_buf_hold(bp, _RET_IP_); 999 atomic_inc(&bp->b_hold); 1000 } 1001 1002 /* 1003 * Release a hold on the specified buffer. If the hold count is 1, the buffer is 1004 * placed on LRU or freed (depending on b_lru_ref). 1005 */ 1006 void 1007 xfs_buf_rele( 1008 xfs_buf_t *bp) 1009 { 1010 struct xfs_perag *pag = bp->b_pag; 1011 bool release; 1012 bool freebuf = false; 1013 1014 trace_xfs_buf_rele(bp, _RET_IP_); 1015 1016 if (!pag) { 1017 ASSERT(list_empty(&bp->b_lru)); 1018 if (atomic_dec_and_test(&bp->b_hold)) { 1019 xfs_buf_ioacct_dec(bp); 1020 xfs_buf_free(bp); 1021 } 1022 return; 1023 } 1024 1025 ASSERT(atomic_read(&bp->b_hold) > 0); 1026 1027 /* 1028 * We grab the b_lock here first to serialise racing xfs_buf_rele() 1029 * calls. The pag_buf_lock being taken on the last reference only 1030 * serialises against racing lookups in xfs_buf_find(). IOWs, the second 1031 * to last reference we drop here is not serialised against the last 1032 * reference until we take bp->b_lock. 
Hence if we don't grab b_lock 1033 * first, the last "release" reference can win the race to the lock and 1034 * free the buffer before the second-to-last reference is processed, 1035 * leading to a use-after-free scenario. 1036 */ 1037 spin_lock(&bp->b_lock); 1038 release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock); 1039 if (!release) { 1040 /* 1041 * Drop the in-flight state if the buffer is already on the LRU 1042 * and it holds the only reference. This is racy because we 1043 * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT 1044 * ensures the decrement occurs only once per-buf. 1045 */ 1046 if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru)) 1047 __xfs_buf_ioacct_dec(bp); 1048 goto out_unlock; 1049 } 1050 1051 /* the last reference has been dropped ... */ 1052 __xfs_buf_ioacct_dec(bp); 1053 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { 1054 /* 1055 * If the buffer is added to the LRU take a new reference to the 1056 * buffer for the LRU and clear the (now stale) dispose list 1057 * state flag 1058 */ 1059 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) { 1060 bp->b_state &= ~XFS_BSTATE_DISPOSE; 1061 atomic_inc(&bp->b_hold); 1062 } 1063 spin_unlock(&pag->pag_buf_lock); 1064 } else { 1065 /* 1066 * most of the time buffers will already be removed from the 1067 * LRU, so optimise that case by checking for the 1068 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer 1069 * was on was the disposal list 1070 */ 1071 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { 1072 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru); 1073 } else { 1074 ASSERT(list_empty(&bp->b_lru)); 1075 } 1076 1077 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); 1078 rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head, 1079 xfs_buf_hash_params); 1080 spin_unlock(&pag->pag_buf_lock); 1081 xfs_perag_put(pag); 1082 freebuf = true; 1083 } 1084 1085 out_unlock: 1086 spin_unlock(&bp->b_lock); 1087 1088 if (freebuf) 1089 xfs_buf_free(bp); 1090 } 1091 1092 1093 /* 1094 * Lock a buffer object, if it is not already locked. 1095 * 1096 * If we come across a stale, pinned, locked buffer, we know that we are 1097 * being asked to lock a buffer that has been reallocated. Because it is 1098 * pinned, we know that the log has not been pushed to disk and hence it 1099 * will still be locked. Rather than continuing to have trylock attempts 1100 * fail until someone else pushes the log, push it ourselves before 1101 * returning. This means that the xfsaild will not get stuck trying 1102 * to push on stale inode buffers. 1103 */ 1104 int 1105 xfs_buf_trylock( 1106 struct xfs_buf *bp) 1107 { 1108 int locked; 1109 1110 locked = down_trylock(&bp->b_sema) == 0; 1111 if (locked) 1112 trace_xfs_buf_trylock(bp, _RET_IP_); 1113 else 1114 trace_xfs_buf_trylock_fail(bp, _RET_IP_); 1115 return locked; 1116 } 1117 1118 /* 1119 * Lock a buffer object. 1120 * 1121 * If we come across a stale, pinned, locked buffer, we know that we 1122 * are being asked to lock a buffer that has been reallocated. Because 1123 * it is pinned, we know that the log has not been pushed to disk and 1124 * hence it will still be locked. Rather than sleeping until someone 1125 * else pushes the log, push it ourselves before trying to get the lock. 
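 *
 * Callers that must not sleep use xfs_buf_trylock() and only fall back to
 * this blocking variant when they are allowed to wait, mirroring the lookup
 * path above:
 *
 *	if (!xfs_buf_trylock(bp)) {
 *		if (flags & XBF_TRYLOCK)
 *			return -EAGAIN;
 *		xfs_buf_lock(bp);
 *	}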
1126 */ 1127 void 1128 xfs_buf_lock( 1129 struct xfs_buf *bp) 1130 { 1131 trace_xfs_buf_lock(bp, _RET_IP_); 1132 1133 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) 1134 xfs_log_force(bp->b_mount, 0); 1135 down(&bp->b_sema); 1136 1137 trace_xfs_buf_lock_done(bp, _RET_IP_); 1138 } 1139 1140 void 1141 xfs_buf_unlock( 1142 struct xfs_buf *bp) 1143 { 1144 ASSERT(xfs_buf_islocked(bp)); 1145 1146 up(&bp->b_sema); 1147 trace_xfs_buf_unlock(bp, _RET_IP_); 1148 } 1149 1150 STATIC void 1151 xfs_buf_wait_unpin( 1152 xfs_buf_t *bp) 1153 { 1154 DECLARE_WAITQUEUE (wait, current); 1155 1156 if (atomic_read(&bp->b_pin_count) == 0) 1157 return; 1158 1159 add_wait_queue(&bp->b_waiters, &wait); 1160 for (;;) { 1161 set_current_state(TASK_UNINTERRUPTIBLE); 1162 if (atomic_read(&bp->b_pin_count) == 0) 1163 break; 1164 io_schedule(); 1165 } 1166 remove_wait_queue(&bp->b_waiters, &wait); 1167 set_current_state(TASK_RUNNING); 1168 } 1169 1170 /* 1171 * Buffer Utility Routines 1172 */ 1173 1174 void 1175 xfs_buf_ioend( 1176 struct xfs_buf *bp) 1177 { 1178 bool read = bp->b_flags & XBF_READ; 1179 1180 trace_xfs_buf_iodone(bp, _RET_IP_); 1181 1182 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); 1183 1184 /* 1185 * Pull in IO completion errors now. We are guaranteed to be running 1186 * single threaded, so we don't need the lock to read b_io_error. 1187 */ 1188 if (!bp->b_error && bp->b_io_error) 1189 xfs_buf_ioerror(bp, bp->b_io_error); 1190 1191 /* Only validate buffers that were read without errors */ 1192 if (read && !bp->b_error && bp->b_ops) { 1193 ASSERT(!bp->b_iodone); 1194 bp->b_ops->verify_read(bp); 1195 } 1196 1197 if (!bp->b_error) 1198 bp->b_flags |= XBF_DONE; 1199 1200 if (bp->b_iodone) 1201 (*(bp->b_iodone))(bp); 1202 else if (bp->b_flags & XBF_ASYNC) 1203 xfs_buf_relse(bp); 1204 else 1205 complete(&bp->b_iowait); 1206 } 1207 1208 static void 1209 xfs_buf_ioend_work( 1210 struct work_struct *work) 1211 { 1212 struct xfs_buf *bp = 1213 container_of(work, xfs_buf_t, b_ioend_work); 1214 1215 xfs_buf_ioend(bp); 1216 } 1217 1218 static void 1219 xfs_buf_ioend_async( 1220 struct xfs_buf *bp) 1221 { 1222 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); 1223 queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work); 1224 } 1225 1226 void 1227 __xfs_buf_ioerror( 1228 xfs_buf_t *bp, 1229 int error, 1230 xfs_failaddr_t failaddr) 1231 { 1232 ASSERT(error <= 0 && error >= -1000); 1233 bp->b_error = error; 1234 trace_xfs_buf_ioerror(bp, error, failaddr); 1235 } 1236 1237 void 1238 xfs_buf_ioerror_alert( 1239 struct xfs_buf *bp, 1240 xfs_failaddr_t func) 1241 { 1242 xfs_alert_ratelimited(bp->b_mount, 1243 "metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d", 1244 func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length, 1245 -bp->b_error); 1246 } 1247 1248 int 1249 xfs_bwrite( 1250 struct xfs_buf *bp) 1251 { 1252 int error; 1253 1254 ASSERT(xfs_buf_islocked(bp)); 1255 1256 bp->b_flags |= XBF_WRITE; 1257 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | 1258 XBF_WRITE_FAIL | XBF_DONE); 1259 1260 error = xfs_buf_submit(bp); 1261 if (error) 1262 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR); 1263 return error; 1264 } 1265 1266 static void 1267 xfs_buf_bio_end_io( 1268 struct bio *bio) 1269 { 1270 struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private; 1271 1272 /* 1273 * don't overwrite existing errors - otherwise we can lose errors on 1274 * buffers that require multiple bios to complete. 
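	 *
	 * For instance, if the first failing bio reports -EIO and a later one
	 * reports -ENOMEM, the cmpxchg() below keeps b_io_error at -EIO and
	 * that first error is what xfs_buf_ioend() eventually propagates to
	 * b_error for the whole buffer.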
1275 */ 1276 if (bio->bi_status) { 1277 int error = blk_status_to_errno(bio->bi_status); 1278 1279 cmpxchg(&bp->b_io_error, 0, error); 1280 } 1281 1282 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) 1283 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); 1284 1285 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) 1286 xfs_buf_ioend_async(bp); 1287 bio_put(bio); 1288 } 1289 1290 static void 1291 xfs_buf_ioapply_map( 1292 struct xfs_buf *bp, 1293 int map, 1294 int *buf_offset, 1295 int *count, 1296 int op) 1297 { 1298 int page_index; 1299 int total_nr_pages = bp->b_page_count; 1300 int nr_pages; 1301 struct bio *bio; 1302 sector_t sector = bp->b_maps[map].bm_bn; 1303 int size; 1304 int offset; 1305 1306 /* skip the pages in the buffer before the start offset */ 1307 page_index = 0; 1308 offset = *buf_offset; 1309 while (offset >= PAGE_SIZE) { 1310 page_index++; 1311 offset -= PAGE_SIZE; 1312 } 1313 1314 /* 1315 * Limit the IO size to the length of the current vector, and update the 1316 * remaining IO count for the next time around. 1317 */ 1318 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); 1319 *count -= size; 1320 *buf_offset += size; 1321 1322 next_chunk: 1323 atomic_inc(&bp->b_io_remaining); 1324 nr_pages = min(total_nr_pages, BIO_MAX_PAGES); 1325 1326 bio = bio_alloc(GFP_NOIO, nr_pages); 1327 bio_set_dev(bio, bp->b_target->bt_bdev); 1328 bio->bi_iter.bi_sector = sector; 1329 bio->bi_end_io = xfs_buf_bio_end_io; 1330 bio->bi_private = bp; 1331 bio->bi_opf = op; 1332 1333 for (; size && nr_pages; nr_pages--, page_index++) { 1334 int rbytes, nbytes = PAGE_SIZE - offset; 1335 1336 if (nbytes > size) 1337 nbytes = size; 1338 1339 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, 1340 offset); 1341 if (rbytes < nbytes) 1342 break; 1343 1344 offset = 0; 1345 sector += BTOBB(nbytes); 1346 size -= nbytes; 1347 total_nr_pages--; 1348 } 1349 1350 if (likely(bio->bi_iter.bi_size)) { 1351 if (xfs_buf_is_vmapped(bp)) { 1352 flush_kernel_vmap_range(bp->b_addr, 1353 xfs_buf_vmap_len(bp)); 1354 } 1355 submit_bio(bio); 1356 if (size) 1357 goto next_chunk; 1358 } else { 1359 /* 1360 * This is guaranteed not to be the last io reference count 1361 * because the caller (xfs_buf_submit) holds a count itself. 1362 */ 1363 atomic_dec(&bp->b_io_remaining); 1364 xfs_buf_ioerror(bp, -EIO); 1365 bio_put(bio); 1366 } 1367 1368 } 1369 1370 STATIC void 1371 _xfs_buf_ioapply( 1372 struct xfs_buf *bp) 1373 { 1374 struct blk_plug plug; 1375 int op; 1376 int offset; 1377 int size; 1378 int i; 1379 1380 /* 1381 * Make sure we capture only current IO errors rather than stale errors 1382 * left over from previous use of the buffer (e.g. failed readahead). 1383 */ 1384 bp->b_error = 0; 1385 1386 if (bp->b_flags & XBF_WRITE) { 1387 op = REQ_OP_WRITE; 1388 1389 /* 1390 * Run the write verifier callback function if it exists. If 1391 * this function fails it will mark the buffer with an error and 1392 * the IO should not be dispatched. 1393 */ 1394 if (bp->b_ops) { 1395 bp->b_ops->verify_write(bp); 1396 if (bp->b_error) { 1397 xfs_force_shutdown(bp->b_mount, 1398 SHUTDOWN_CORRUPT_INCORE); 1399 return; 1400 } 1401 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) { 1402 struct xfs_mount *mp = bp->b_mount; 1403 1404 /* 1405 * non-crc filesystems don't attach verifiers during 1406 * log recovery, so don't warn for such filesystems. 
1407 */ 1408 if (xfs_sb_version_hascrc(&mp->m_sb)) { 1409 xfs_warn(mp, 1410 "%s: no buf ops on daddr 0x%llx len %d", 1411 __func__, bp->b_bn, bp->b_length); 1412 xfs_hex_dump(bp->b_addr, 1413 XFS_CORRUPTION_DUMP_LEN); 1414 dump_stack(); 1415 } 1416 } 1417 } else { 1418 op = REQ_OP_READ; 1419 if (bp->b_flags & XBF_READ_AHEAD) 1420 op |= REQ_RAHEAD; 1421 } 1422 1423 /* we only use the buffer cache for meta-data */ 1424 op |= REQ_META; 1425 1426 /* 1427 * Walk all the vectors issuing IO on them. Set up the initial offset 1428 * into the buffer and the desired IO size before we start - 1429 * _xfs_buf_ioapply_vec() will modify them appropriately for each 1430 * subsequent call. 1431 */ 1432 offset = bp->b_offset; 1433 size = BBTOB(bp->b_length); 1434 blk_start_plug(&plug); 1435 for (i = 0; i < bp->b_map_count; i++) { 1436 xfs_buf_ioapply_map(bp, i, &offset, &size, op); 1437 if (bp->b_error) 1438 break; 1439 if (size <= 0) 1440 break; /* all done */ 1441 } 1442 blk_finish_plug(&plug); 1443 } 1444 1445 /* 1446 * Wait for I/O completion of a sync buffer and return the I/O error code. 1447 */ 1448 static int 1449 xfs_buf_iowait( 1450 struct xfs_buf *bp) 1451 { 1452 ASSERT(!(bp->b_flags & XBF_ASYNC)); 1453 1454 trace_xfs_buf_iowait(bp, _RET_IP_); 1455 wait_for_completion(&bp->b_iowait); 1456 trace_xfs_buf_iowait_done(bp, _RET_IP_); 1457 1458 return bp->b_error; 1459 } 1460 1461 /* 1462 * Buffer I/O submission path, read or write. Asynchronous submission transfers 1463 * the buffer lock ownership and the current reference to the IO. It is not 1464 * safe to reference the buffer after a call to this function unless the caller 1465 * holds an additional reference itself. 1466 */ 1467 int 1468 __xfs_buf_submit( 1469 struct xfs_buf *bp, 1470 bool wait) 1471 { 1472 int error = 0; 1473 1474 trace_xfs_buf_submit(bp, _RET_IP_); 1475 1476 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); 1477 1478 /* on shutdown we stale and complete the buffer immediately */ 1479 if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { 1480 xfs_buf_ioerror(bp, -EIO); 1481 bp->b_flags &= ~XBF_DONE; 1482 xfs_buf_stale(bp); 1483 xfs_buf_ioend(bp); 1484 return -EIO; 1485 } 1486 1487 /* 1488 * Grab a reference so the buffer does not go away underneath us. For 1489 * async buffers, I/O completion drops the callers reference, which 1490 * could occur before submission returns. 1491 */ 1492 xfs_buf_hold(bp); 1493 1494 if (bp->b_flags & XBF_WRITE) 1495 xfs_buf_wait_unpin(bp); 1496 1497 /* clear the internal error state to avoid spurious errors */ 1498 bp->b_io_error = 0; 1499 1500 /* 1501 * Set the count to 1 initially, this will stop an I/O completion 1502 * callout which happens before we have started all the I/O from calling 1503 * xfs_buf_ioend too early. 1504 */ 1505 atomic_set(&bp->b_io_remaining, 1); 1506 if (bp->b_flags & XBF_ASYNC) 1507 xfs_buf_ioacct_inc(bp); 1508 _xfs_buf_ioapply(bp); 1509 1510 /* 1511 * If _xfs_buf_ioapply failed, we can get back here with only the IO 1512 * reference we took above. If we drop it to zero, run completion so 1513 * that we don't return to the caller with completion still pending. 1514 */ 1515 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { 1516 if (bp->b_error || !(bp->b_flags & XBF_ASYNC)) 1517 xfs_buf_ioend(bp); 1518 else 1519 xfs_buf_ioend_async(bp); 1520 } 1521 1522 if (wait) 1523 error = xfs_buf_iowait(bp); 1524 1525 /* 1526 * Release the hold that keeps the buffer referenced for the entire 1527 * I/O. Note that if the buffer is async, it is not safe to reference 1528 * after this release. 
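	 *
	 * Put differently (illustrative): a synchronous caller still owns the
	 * locked buffer when this function returns and releases it itself,
	 * e.g.
	 *
	 *	error = __xfs_buf_submit(bp, true);
	 *	xfs_buf_relse(bp);
	 *
	 * whereas an XBF_ASYNC submission has handed the lock and the caller's
	 * reference over to I/O completion, so bp must not be touched once
	 * this function returns.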
1529 */ 1530 xfs_buf_rele(bp); 1531 return error; 1532 } 1533 1534 void * 1535 xfs_buf_offset( 1536 struct xfs_buf *bp, 1537 size_t offset) 1538 { 1539 struct page *page; 1540 1541 if (bp->b_addr) 1542 return bp->b_addr + offset; 1543 1544 offset += bp->b_offset; 1545 page = bp->b_pages[offset >> PAGE_SHIFT]; 1546 return page_address(page) + (offset & (PAGE_SIZE-1)); 1547 } 1548 1549 void 1550 xfs_buf_zero( 1551 struct xfs_buf *bp, 1552 size_t boff, 1553 size_t bsize) 1554 { 1555 size_t bend; 1556 1557 bend = boff + bsize; 1558 while (boff < bend) { 1559 struct page *page; 1560 int page_index, page_offset, csize; 1561 1562 page_index = (boff + bp->b_offset) >> PAGE_SHIFT; 1563 page_offset = (boff + bp->b_offset) & ~PAGE_MASK; 1564 page = bp->b_pages[page_index]; 1565 csize = min_t(size_t, PAGE_SIZE - page_offset, 1566 BBTOB(bp->b_length) - boff); 1567 1568 ASSERT((csize + page_offset) <= PAGE_SIZE); 1569 1570 memset(page_address(page) + page_offset, 0, csize); 1571 1572 boff += csize; 1573 } 1574 } 1575 1576 /* 1577 * Log a message about and stale a buffer that a caller has decided is corrupt. 1578 * 1579 * This function should be called for the kinds of metadata corruption that 1580 * cannot be detect from a verifier, such as incorrect inter-block relationship 1581 * data. Do /not/ call this function from a verifier function. 1582 * 1583 * The buffer must be XBF_DONE prior to the call. Afterwards, the buffer will 1584 * be marked stale, but b_error will not be set. The caller is responsible for 1585 * releasing the buffer or fixing it. 1586 */ 1587 void 1588 __xfs_buf_mark_corrupt( 1589 struct xfs_buf *bp, 1590 xfs_failaddr_t fa) 1591 { 1592 ASSERT(bp->b_flags & XBF_DONE); 1593 1594 xfs_buf_corruption_error(bp, fa); 1595 xfs_buf_stale(bp); 1596 } 1597 1598 /* 1599 * Handling of buffer targets (buftargs). 1600 */ 1601 1602 /* 1603 * Wait for any bufs with callbacks that have been submitted but have not yet 1604 * returned. These buffers will have an elevated hold count, so wait on those 1605 * while freeing all the buffers only held by the LRU. 1606 */ 1607 static enum lru_status 1608 xfs_buftarg_wait_rele( 1609 struct list_head *item, 1610 struct list_lru_one *lru, 1611 spinlock_t *lru_lock, 1612 void *arg) 1613 1614 { 1615 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); 1616 struct list_head *dispose = arg; 1617 1618 if (atomic_read(&bp->b_hold) > 1) { 1619 /* need to wait, so skip it this pass */ 1620 trace_xfs_buf_wait_buftarg(bp, _RET_IP_); 1621 return LRU_SKIP; 1622 } 1623 if (!spin_trylock(&bp->b_lock)) 1624 return LRU_SKIP; 1625 1626 /* 1627 * clear the LRU reference count so the buffer doesn't get 1628 * ignored in xfs_buf_rele(). 1629 */ 1630 atomic_set(&bp->b_lru_ref, 0); 1631 bp->b_state |= XFS_BSTATE_DISPOSE; 1632 list_lru_isolate_move(lru, item, dispose); 1633 spin_unlock(&bp->b_lock); 1634 return LRU_REMOVED; 1635 } 1636 1637 void 1638 xfs_wait_buftarg( 1639 struct xfs_buftarg *btp) 1640 { 1641 LIST_HEAD(dispose); 1642 int loop = 0; 1643 1644 /* 1645 * First wait on the buftarg I/O count for all in-flight buffers to be 1646 * released. This is critical as new buffers do not make the LRU until 1647 * they are released. 1648 * 1649 * Next, flush the buffer workqueue to ensure all completion processing 1650 * has finished. Just waiting on buffer locks is not sufficient for 1651 * async IO as the reference count held over IO is not released until 1652 * after the buffer lock is dropped. 
Hence we need to ensure here that 1653 * all reference counts have been dropped before we start walking the 1654 * LRU list. 1655 */ 1656 while (percpu_counter_sum(&btp->bt_io_count)) 1657 delay(100); 1658 flush_workqueue(btp->bt_mount->m_buf_workqueue); 1659 1660 /* loop until there is nothing left on the lru list. */ 1661 while (list_lru_count(&btp->bt_lru)) { 1662 list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele, 1663 &dispose, LONG_MAX); 1664 1665 while (!list_empty(&dispose)) { 1666 struct xfs_buf *bp; 1667 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); 1668 list_del_init(&bp->b_lru); 1669 if (bp->b_flags & XBF_WRITE_FAIL) { 1670 xfs_alert(btp->bt_mount, 1671 "Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!", 1672 (long long)bp->b_bn); 1673 xfs_alert(btp->bt_mount, 1674 "Please run xfs_repair to determine the extent of the problem."); 1675 } 1676 xfs_buf_rele(bp); 1677 } 1678 if (loop++ != 0) 1679 delay(100); 1680 } 1681 } 1682 1683 static enum lru_status 1684 xfs_buftarg_isolate( 1685 struct list_head *item, 1686 struct list_lru_one *lru, 1687 spinlock_t *lru_lock, 1688 void *arg) 1689 { 1690 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); 1691 struct list_head *dispose = arg; 1692 1693 /* 1694 * we are inverting the lru lock/bp->b_lock here, so use a trylock. 1695 * If we fail to get the lock, just skip it. 1696 */ 1697 if (!spin_trylock(&bp->b_lock)) 1698 return LRU_SKIP; 1699 /* 1700 * Decrement the b_lru_ref count unless the value is already 1701 * zero. If the value is already zero, we need to reclaim the 1702 * buffer, otherwise it gets another trip through the LRU. 1703 */ 1704 if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) { 1705 spin_unlock(&bp->b_lock); 1706 return LRU_ROTATE; 1707 } 1708 1709 bp->b_state |= XFS_BSTATE_DISPOSE; 1710 list_lru_isolate_move(lru, item, dispose); 1711 spin_unlock(&bp->b_lock); 1712 return LRU_REMOVED; 1713 } 1714 1715 static unsigned long 1716 xfs_buftarg_shrink_scan( 1717 struct shrinker *shrink, 1718 struct shrink_control *sc) 1719 { 1720 struct xfs_buftarg *btp = container_of(shrink, 1721 struct xfs_buftarg, bt_shrinker); 1722 LIST_HEAD(dispose); 1723 unsigned long freed; 1724 1725 freed = list_lru_shrink_walk(&btp->bt_lru, sc, 1726 xfs_buftarg_isolate, &dispose); 1727 1728 while (!list_empty(&dispose)) { 1729 struct xfs_buf *bp; 1730 bp = list_first_entry(&dispose, struct xfs_buf, b_lru); 1731 list_del_init(&bp->b_lru); 1732 xfs_buf_rele(bp); 1733 } 1734 1735 return freed; 1736 } 1737 1738 static unsigned long 1739 xfs_buftarg_shrink_count( 1740 struct shrinker *shrink, 1741 struct shrink_control *sc) 1742 { 1743 struct xfs_buftarg *btp = container_of(shrink, 1744 struct xfs_buftarg, bt_shrinker); 1745 return list_lru_shrink_count(&btp->bt_lru, sc); 1746 } 1747 1748 void 1749 xfs_free_buftarg( 1750 struct xfs_buftarg *btp) 1751 { 1752 unregister_shrinker(&btp->bt_shrinker); 1753 ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0); 1754 percpu_counter_destroy(&btp->bt_io_count); 1755 list_lru_destroy(&btp->bt_lru); 1756 1757 xfs_blkdev_issue_flush(btp); 1758 1759 kmem_free(btp); 1760 } 1761 1762 int 1763 xfs_setsize_buftarg( 1764 xfs_buftarg_t *btp, 1765 unsigned int sectorsize) 1766 { 1767 /* Set up metadata sector size info */ 1768 btp->bt_meta_sectorsize = sectorsize; 1769 btp->bt_meta_sectormask = sectorsize - 1; 1770 1771 if (set_blocksize(btp->bt_bdev, sectorsize)) { 1772 xfs_warn(btp->bt_mount, 1773 "Cannot set_blocksize to %u on device %pg", 1774 sectorsize, btp->bt_bdev); 1775 return 
-EINVAL; 1776 } 1777 1778 /* Set up device logical sector size mask */ 1779 btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev); 1780 btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1; 1781 1782 return 0; 1783 } 1784 1785 /* 1786 * When allocating the initial buffer target we have not yet 1787 * read in the superblock, so don't know what sized sectors 1788 * are being used at this early stage. Play safe. 1789 */ 1790 STATIC int 1791 xfs_setsize_buftarg_early( 1792 xfs_buftarg_t *btp, 1793 struct block_device *bdev) 1794 { 1795 return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev)); 1796 } 1797 1798 xfs_buftarg_t * 1799 xfs_alloc_buftarg( 1800 struct xfs_mount *mp, 1801 struct block_device *bdev, 1802 struct dax_device *dax_dev) 1803 { 1804 xfs_buftarg_t *btp; 1805 1806 btp = kmem_zalloc(sizeof(*btp), KM_NOFS); 1807 1808 btp->bt_mount = mp; 1809 btp->bt_dev = bdev->bd_dev; 1810 btp->bt_bdev = bdev; 1811 btp->bt_daxdev = dax_dev; 1812 1813 if (xfs_setsize_buftarg_early(btp, bdev)) 1814 goto error_free; 1815 1816 if (list_lru_init(&btp->bt_lru)) 1817 goto error_free; 1818 1819 if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL)) 1820 goto error_lru; 1821 1822 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count; 1823 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan; 1824 btp->bt_shrinker.seeks = DEFAULT_SEEKS; 1825 btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE; 1826 if (register_shrinker(&btp->bt_shrinker)) 1827 goto error_pcpu; 1828 return btp; 1829 1830 error_pcpu: 1831 percpu_counter_destroy(&btp->bt_io_count); 1832 error_lru: 1833 list_lru_destroy(&btp->bt_lru); 1834 error_free: 1835 kmem_free(btp); 1836 return NULL; 1837 } 1838 1839 /* 1840 * Cancel a delayed write list. 1841 * 1842 * Remove each buffer from the list, clear the delwri queue flag and drop the 1843 * associated buffer reference. 1844 */ 1845 void 1846 xfs_buf_delwri_cancel( 1847 struct list_head *list) 1848 { 1849 struct xfs_buf *bp; 1850 1851 while (!list_empty(list)) { 1852 bp = list_first_entry(list, struct xfs_buf, b_list); 1853 1854 xfs_buf_lock(bp); 1855 bp->b_flags &= ~_XBF_DELWRI_Q; 1856 list_del_init(&bp->b_list); 1857 xfs_buf_relse(bp); 1858 } 1859 } 1860 1861 /* 1862 * Add a buffer to the delayed write list. 1863 * 1864 * This queues a buffer for writeout if it hasn't already been. Note that 1865 * neither this routine nor the buffer list submission functions perform 1866 * any internal synchronization. It is expected that the lists are thread-local 1867 * to the callers. 1868 * 1869 * Returns true if we queued up the buffer, or false if it already had 1870 * been on the buffer list. 1871 */ 1872 bool 1873 xfs_buf_delwri_queue( 1874 struct xfs_buf *bp, 1875 struct list_head *list) 1876 { 1877 ASSERT(xfs_buf_islocked(bp)); 1878 ASSERT(!(bp->b_flags & XBF_READ)); 1879 1880 /* 1881 * If the buffer is already marked delwri it already is queued up 1882 * by someone else for imediate writeout. Just ignore it in that 1883 * case. 1884 */ 1885 if (bp->b_flags & _XBF_DELWRI_Q) { 1886 trace_xfs_buf_delwri_queued(bp, _RET_IP_); 1887 return false; 1888 } 1889 1890 trace_xfs_buf_delwri_queue(bp, _RET_IP_); 1891 1892 /* 1893 * If a buffer gets written out synchronously or marked stale while it 1894 * is on a delwri list we lazily remove it. To do this, the other party 1895 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone. 1896 * It remains referenced and on the list. 
In a rare corner case it 1897 * might get readded to a delwri list after the synchronous writeout, in 1898 * which case we need just need to re-add the flag here. 1899 */ 1900 bp->b_flags |= _XBF_DELWRI_Q; 1901 if (list_empty(&bp->b_list)) { 1902 atomic_inc(&bp->b_hold); 1903 list_add_tail(&bp->b_list, list); 1904 } 1905 1906 return true; 1907 } 1908 1909 /* 1910 * Compare function is more complex than it needs to be because 1911 * the return value is only 32 bits and we are doing comparisons 1912 * on 64 bit values 1913 */ 1914 static int 1915 xfs_buf_cmp( 1916 void *priv, 1917 struct list_head *a, 1918 struct list_head *b) 1919 { 1920 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list); 1921 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); 1922 xfs_daddr_t diff; 1923 1924 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; 1925 if (diff < 0) 1926 return -1; 1927 if (diff > 0) 1928 return 1; 1929 return 0; 1930 } 1931 1932 /* 1933 * Submit buffers for write. If wait_list is specified, the buffers are 1934 * submitted using sync I/O and placed on the wait list such that the caller can 1935 * iowait each buffer. Otherwise async I/O is used and the buffers are released 1936 * at I/O completion time. In either case, buffers remain locked until I/O 1937 * completes and the buffer is released from the queue. 1938 */ 1939 static int 1940 xfs_buf_delwri_submit_buffers( 1941 struct list_head *buffer_list, 1942 struct list_head *wait_list) 1943 { 1944 struct xfs_buf *bp, *n; 1945 int pinned = 0; 1946 struct blk_plug plug; 1947 1948 list_sort(NULL, buffer_list, xfs_buf_cmp); 1949 1950 blk_start_plug(&plug); 1951 list_for_each_entry_safe(bp, n, buffer_list, b_list) { 1952 if (!wait_list) { 1953 if (xfs_buf_ispinned(bp)) { 1954 pinned++; 1955 continue; 1956 } 1957 if (!xfs_buf_trylock(bp)) 1958 continue; 1959 } else { 1960 xfs_buf_lock(bp); 1961 } 1962 1963 /* 1964 * Someone else might have written the buffer synchronously or 1965 * marked it stale in the meantime. In that case only the 1966 * _XBF_DELWRI_Q flag got cleared, and we have to drop the 1967 * reference and remove it from the list here. 1968 */ 1969 if (!(bp->b_flags & _XBF_DELWRI_Q)) { 1970 list_del_init(&bp->b_list); 1971 xfs_buf_relse(bp); 1972 continue; 1973 } 1974 1975 trace_xfs_buf_delwri_split(bp, _RET_IP_); 1976 1977 /* 1978 * If we have a wait list, each buffer (and associated delwri 1979 * queue reference) transfers to it and is submitted 1980 * synchronously. Otherwise, drop the buffer from the delwri 1981 * queue and submit async. 1982 */ 1983 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL); 1984 bp->b_flags |= XBF_WRITE; 1985 if (wait_list) { 1986 bp->b_flags &= ~XBF_ASYNC; 1987 list_move_tail(&bp->b_list, wait_list); 1988 } else { 1989 bp->b_flags |= XBF_ASYNC; 1990 list_del_init(&bp->b_list); 1991 } 1992 __xfs_buf_submit(bp, false); 1993 } 1994 blk_finish_plug(&plug); 1995 1996 return pinned; 1997 } 1998 1999 /* 2000 * Write out a buffer list asynchronously. 2001 * 2002 * This will take the @buffer_list, write all non-locked and non-pinned buffers 2003 * out and not wait for I/O completion on any of the buffers. This interface 2004 * is only safely useable for callers that can track I/O completion by higher 2005 * level means, e.g. AIL pushing as the @buffer_list is consumed in this 2006 * function. 2007 * 2008 * Note: this function will skip buffers it would block on, and in doing so 2009 * leaves them on @buffer_list so they can be retried on a later pass. 
As such, 2010 * it is up to the caller to ensure that the buffer list is fully submitted or 2011 * cancelled appropriately when they are finished with the list. Failure to 2012 * cancel or resubmit the list until it is empty will result in leaked buffers 2013 * at unmount time. 2014 */ 2015 int 2016 xfs_buf_delwri_submit_nowait( 2017 struct list_head *buffer_list) 2018 { 2019 return xfs_buf_delwri_submit_buffers(buffer_list, NULL); 2020 } 2021 2022 /* 2023 * Write out a buffer list synchronously. 2024 * 2025 * This will take the @buffer_list, write all buffers out and wait for I/O 2026 * completion on all of the buffers. @buffer_list is consumed by the function, 2027 * so callers must have some other way of tracking buffers if they require such 2028 * functionality. 2029 */ 2030 int 2031 xfs_buf_delwri_submit( 2032 struct list_head *buffer_list) 2033 { 2034 LIST_HEAD (wait_list); 2035 int error = 0, error2; 2036 struct xfs_buf *bp; 2037 2038 xfs_buf_delwri_submit_buffers(buffer_list, &wait_list); 2039 2040 /* Wait for IO to complete. */ 2041 while (!list_empty(&wait_list)) { 2042 bp = list_first_entry(&wait_list, struct xfs_buf, b_list); 2043 2044 list_del_init(&bp->b_list); 2045 2046 /* 2047 * Wait on the locked buffer, check for errors and unlock and 2048 * release the delwri queue reference. 2049 */ 2050 error2 = xfs_buf_iowait(bp); 2051 xfs_buf_relse(bp); 2052 if (!error) 2053 error = error2; 2054 } 2055 2056 return error; 2057 } 2058 2059 /* 2060 * Push a single buffer on a delwri queue. 2061 * 2062 * The purpose of this function is to submit a single buffer of a delwri queue 2063 * and return with the buffer still on the original queue. The waiting delwri 2064 * buffer submission infrastructure guarantees transfer of the delwri queue 2065 * buffer reference to a temporary wait list. We reuse this infrastructure to 2066 * transfer the buffer back to the original queue. 2067 * 2068 * Note the buffer transitions from the queued state, to the submitted and wait 2069 * listed state and back to the queued state during this call. The buffer 2070 * locking and queue management logic between _delwri_pushbuf() and 2071 * _delwri_queue() guarantee that the buffer cannot be queued to another list 2072 * before returning. 2073 */ 2074 int 2075 xfs_buf_delwri_pushbuf( 2076 struct xfs_buf *bp, 2077 struct list_head *buffer_list) 2078 { 2079 LIST_HEAD (submit_list); 2080 int error; 2081 2082 ASSERT(bp->b_flags & _XBF_DELWRI_Q); 2083 2084 trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_); 2085 2086 /* 2087 * Isolate the buffer to a new local list so we can submit it for I/O 2088 * independently from the rest of the original list. 2089 */ 2090 xfs_buf_lock(bp); 2091 list_move(&bp->b_list, &submit_list); 2092 xfs_buf_unlock(bp); 2093 2094 /* 2095 * Delwri submission clears the DELWRI_Q buffer flag and returns with 2096 * the buffer on the wait list with the original reference. Rather than 2097 * bounce the buffer from a local wait list back to the original list 2098 * after I/O completion, reuse the original list as the wait list. 2099 */ 2100 xfs_buf_delwri_submit_buffers(&submit_list, buffer_list); 2101 2102 /* 2103 * The buffer is now locked, under I/O and wait listed on the original 2104 * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and 2105 * return with the buffer unlocked and on the original queue. 
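	 *
	 * Concretely: on entry bp sat on @buffer_list with _XBF_DELWRI_Q set;
	 * after the submission call above it is locked, _XBF_DELWRI_Q has been
	 * cleared, and the wait list trick has parked it back on @buffer_list.
	 * The code below waits for the I/O, re-sets _XBF_DELWRI_Q and unlocks,
	 * leaving the queue in its original state.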
2106 */ 2107 error = xfs_buf_iowait(bp); 2108 bp->b_flags |= _XBF_DELWRI_Q; 2109 xfs_buf_unlock(bp); 2110 2111 return error; 2112 } 2113 2114 int __init 2115 xfs_buf_init(void) 2116 { 2117 xfs_buf_zone = kmem_cache_create("xfs_buf", 2118 sizeof(struct xfs_buf), 0, 2119 SLAB_HWCACHE_ALIGN, NULL); 2120 if (!xfs_buf_zone) 2121 goto out; 2122 2123 return 0; 2124 2125 out: 2126 return -ENOMEM; 2127 } 2128 2129 void 2130 xfs_buf_terminate(void) 2131 { 2132 kmem_cache_destroy(xfs_buf_zone); 2133 } 2134 2135 void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) 2136 { 2137 /* 2138 * Set the lru reference count to 0 based on the error injection tag. 2139 * This allows userspace to disrupt buffer caching for debug/testing 2140 * purposes. 2141 */ 2142 if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF)) 2143 lru_ref = 0; 2144 2145 atomic_set(&bp->b_lru_ref, lru_ref); 2146 } 2147 2148 /* 2149 * Verify an on-disk magic value against the magic value specified in the 2150 * verifier structure. The verifier magic is in disk byte order so the caller is 2151 * expected to pass the value directly from disk. 2152 */ 2153 bool 2154 xfs_verify_magic( 2155 struct xfs_buf *bp, 2156 __be32 dmagic) 2157 { 2158 struct xfs_mount *mp = bp->b_mount; 2159 int idx; 2160 2161 idx = xfs_sb_version_hascrc(&mp->m_sb); 2162 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx])) 2163 return false; 2164 return dmagic == bp->b_ops->magic[idx]; 2165 } 2166 /* 2167 * Verify an on-disk magic value against the magic value specified in the 2168 * verifier structure. The verifier magic is in disk byte order so the caller is 2169 * expected to pass the value directly from disk. 2170 */ 2171 bool 2172 xfs_verify_magic16( 2173 struct xfs_buf *bp, 2174 __be16 dmagic) 2175 { 2176 struct xfs_mount *mp = bp->b_mount; 2177 int idx; 2178 2179 idx = xfs_sb_version_hascrc(&mp->m_sb); 2180 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx])) 2181 return false; 2182 return dmagic == bp->b_ops->magic16[idx]; 2183 } 2184
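/*
 * Example (illustrative only, not part of this file): a minimal read verifier
 * might use xfs_verify_magic() along the lines of the sketch below. The
 * structure layout, magic constants and ops/function names are hypothetical;
 * real verifiers live alongside the on-disk format code.
 *
 *	static void
 *	xfs_foo_read_verify(
 *		struct xfs_buf		*bp)
 *	{
 *		struct xfs_foo_hdr	*hdr = bp->b_addr;
 *
 *		if (!xfs_verify_magic(bp, hdr->magic))
 *			xfs_buf_ioerror(bp, -EFSCORRUPTED);
 *	}
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name		= "xfs_foo",
 *		.magic		= { cpu_to_be32(XFS_FOO_MAGIC),
 *				    cpu_to_be32(XFS_FOO_CRC_MAGIC) },
 *		.verify_read	= xfs_foo_read_verify,
 *	};
 */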