/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_errortag.h"
#include "xfs_error.h"

static kmem_zone_t *xfs_buf_zone;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)


static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped, but the code is clever
	 * enough to know it doesn't have to map a single page, so the check
	 * has to be both for b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * Bump the I/O in flight count on the buftarg if we haven't yet done so for
 * this buffer. The count is incremented once per buffer (per hold cycle)
 * because the corresponding decrement is deferred to buffer release. Buffers
 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
 * tracking adds unnecessary overhead. This is used for synchronization
 * purposes with unmount (see xfs_wait_buftarg()), so all we really need is a
 * count of in-flight buffers.
 *
 * Buffers that are never released (e.g., superblock, iclog buffers) must set
 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
 * never reaches zero and unmount hangs indefinitely.
 */
static inline void
xfs_buf_ioacct_inc(
	struct xfs_buf	*bp)
{
	if (bp->b_flags & XBF_NO_IOACCT)
		return;

	ASSERT(bp->b_flags & XBF_ASYNC);
	spin_lock(&bp->b_lock);
	if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
		bp->b_state |= XFS_BSTATE_IN_FLIGHT;
		percpu_counter_inc(&bp->b_target->bt_io_count);
	}
	spin_unlock(&bp->b_lock);
}

/*
 * Clear the in-flight state on a buffer about to be released to the LRU or
 * freed and unaccount from the buftarg.
 */
static inline void
__xfs_buf_ioacct_dec(
	struct xfs_buf	*bp)
{
	lockdep_assert_held(&bp->b_lock);

	if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
		bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
		percpu_counter_dec(&bp->b_target->bt_io_count);
	}
}

static inline void
xfs_buf_ioacct_dec(
	struct xfs_buf	*bp)
{
	spin_lock(&bp->b_lock);
	__xfs_buf_ioacct_dec(bp);
	spin_unlock(&bp->b_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	/*
	 * Once the buffer is marked stale and unlocked, a subsequent lookup
	 * could reset b_flags. There is no guarantee that the buffer is
	 * unaccounted (released to LRU) before that occurs. Drop in-flight
	 * status now to preserve accounting consistency.
	 */
	spin_lock(&bp->b_lock);
	__xfs_buf_ioacct_dec(bp);

	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
}

static int
xfs_buf_get_maps(
	struct xfs_buf	*bp,
	int		map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				KM_NOFS);
	if (!bp->b_maps)
		return -ENOMEM;
	return 0;
}

/*
 * Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

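/*
 * Allocate an xfs_buf structure for the given target covering the range
 * described by @map/@nmaps. The buffer is returned locked, holding a single
 * reference, with no backing memory attached; callers must allocate pages or
 * heap memory (and insert the buffer into the cache) before issuing I/O.
 */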
struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_li_list);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	spin_lock_init(&bp->b_lock);
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(target->bt_mount, xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t	*bp,
	int		page_count)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t	*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = -ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
					current->comm, current->pid,
					__func__, gfp_mask);

			XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	bp->b_flags &= ~_XBF_PAGES;
	return error;
}

/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t	*bp,
	uint		flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;
		unsigned nofs_flag;

		/*
		 * vm_map_ram() will allocate auxiliary structures (e.g.
		 * pagetables) with GFP_KERNEL, yet we are likely to be under
		 * GFP_NOFS context here. Hence we need to tell memory reclaim
		 * that we are in such a context via PF_MEMALLOC_NOFS to prevent
		 * memory reclaim re-entering the filesystem here and
		 * potentially deadlocking.
		 */
		nofs_flag = memalloc_nofs_save();
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);
		memalloc_nofs_restore(nofs_flag);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
	}

	return 0;
}

/*
 * Finding and Reading Buffers
 */
static int
_xfs_buf_obj_cmp(
	struct rhashtable_compare_arg	*arg,
	const void			*obj)
{
	const struct xfs_buf_map	*map = arg->key;
	const struct xfs_buf		*bp = obj;

	/*
	 * The key hashing in the lookup path depends on the key being the
	 * first element of the compare_arg, make sure to assert this.
	 */
	BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);

	if (bp->b_bn != map->bm_bn)
		return 1;

	if (unlikely(bp->b_length != map->bm_len)) {
		/*
		 * found a block number match. If the range doesn't
		 * match, the only way this is allowed is if the buffer
		 * in the cache is stale and the transaction that made
		 * it stale has not yet committed. i.e. we are
		 * reallocating a busy extent. Skip this buffer and
		 * continue searching for an exact match.
		 */
		ASSERT(bp->b_flags & XBF_STALE);
		return 1;
	}
	return 0;
}

static const struct rhashtable_params xfs_buf_hash_params = {
	.min_size		= 32,	/* empty AGs have minimal footprint */
	.nelem_hint		= 16,
	.key_len		= sizeof(xfs_daddr_t),
	.key_offset		= offsetof(struct xfs_buf, b_bn),
	.head_offset		= offsetof(struct xfs_buf, b_rhash_head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= _xfs_buf_obj_cmp,
};

int
xfs_buf_hash_init(
	struct xfs_perag	*pag)
{
	spin_lock_init(&pag->pag_buf_lock);
	return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params);
}

void
xfs_buf_hash_destroy(
	struct xfs_perag	*pag)
{
	rhashtable_destroy(&pag->pag_buf_hash);
}

/*
 * Look up a buffer in the buffer cache and return it referenced and locked
 * in @found_bp.
 *
 * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
 * cache.
 *
 * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
 * -EAGAIN if we fail to lock it.
 *
 * Return values are:
 *	-EFSCORRUPTED if we have been supplied with an invalid address
 *	-EAGAIN on trylock failure
 *	-ENOENT if we fail to find a match and @new_bp was NULL
 *	0, with @found_bp:
 *		- @new_bp if we inserted it into the cache
 *		- the buffer we found and locked.
 */
static int
xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		*new_bp,
	struct xfs_buf		**found_bp)
{
	struct xfs_perag	*pag;
	xfs_buf_t		*bp;
	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
	xfs_daddr_t		eofs;
	int			i;

	*found_bp = NULL;

	for (i = 0; i < nmaps; i++)
		cmap.bm_len += map[i].bm_len;

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
		xfs_alert(btp->bt_mount,
			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
			  __func__, cmap.bm_bn, eofs);
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));

	spin_lock(&pag->pag_buf_lock);
	bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
				    xfs_buf_hash_params);
	if (bp) {
		atomic_inc(&bp->b_hold);
		goto found;
	}

	/* No match found */
	if (!new_bp) {
		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
		return -ENOENT;
	}

	/* the buffer keeps the perag reference until it is freed */
	new_bp->b_pag = pag;
	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
			       xfs_buf_hash_params);
	spin_unlock(&pag->pag_buf_lock);
	*found_bp = new_bp;
	return 0;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
			return -EAGAIN;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
	*found_bp = bp;
	return 0;
}

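/*
 * Look up a buffer in the cache without inserting a new one on a miss.
 * Returns the locked, referenced buffer on a cache hit, or NULL if no match
 * is found (or the trylock fails when XBF_TRYLOCK is set).
 */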
struct xfs_buf *
xfs_buf_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	error = xfs_buf_find(target, &map, 1, flags, NULL, &bp);
	if (error)
		return NULL;
	return bp;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);

	switch (error) {
	case 0:
		/* cache hit */
		goto found;
	case -EAGAIN:
		/* cache hit, trylock failure, caller handles failure */
		ASSERT(flags & XBF_TRYLOCK);
		return NULL;
	case -ENOENT:
		/* cache miss, go for insert */
		break;
	case -EFSCORRUPTED:
	default:
		/*
		 * None of the higher layers understand failure types
		 * yet, so return NULL to signal a fatal lookup error.
		 */
		return NULL;
	}

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			xfs_buf_relse(bp);
			return NULL;
		}
	}

	/*
	 * Clear b_error if this is a lookup from a caller that doesn't expect
	 * valid data to be found in the buffer.
	 */
	if (!(flags & XBF_READ))
		xfs_buf_ioerror(bp, 0);

	XFS_STATS_INC(target->bt_mount, xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;
}

STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	if (flags & XBF_ASYNC) {
		xfs_buf_submit(bp);
		return 0;
	}
	return xfs_buf_submit_wait(bp);
}

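/*
 * Get a buffer for the given range and issue a read for it if the cached copy
 * is not already valid (XBF_DONE). A readahead hit that is already up to date
 * is released and NULL is returned.
 */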
xfs_buf_t *
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!(bp->b_flags & XBF_DONE)) {
			XFS_STATS_INC(target->bt_mount, xb_get_read);
			bp->b_ops = ops;
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			xfs_buf_relse(bp);
			return NULL;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;
}

/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	if (bdi_read_congested(target->bt_bdev->bd_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
int
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	*bpp = NULL;

	bp = xfs_buf_get_uncached(target, numblks, flags);
	if (!bp)
		return -ENOMEM;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;  /* always null for uncached buffers */
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	xfs_buf_submit_wait(bp);
	if (bp->b_error) {
		int	error = bp->b_error;
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;
}

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			numblks)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);

	return 0;
}

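/*
 * Allocate an uncached buffer of @numblks blocks. The buffer is never
 * inserted into the buffer cache; it is backed by freshly allocated pages and
 * returned locked and mapped, or NULL on allocation failure.
 */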
xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags)
{
	unsigned long		page_count;
	int			error, i;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	/* flags might contain irrelevant bits, pass only what we care about */
	bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}

/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 * Release a hold on the specified buffer. If the hold count is 1, the buffer
 * is placed on the LRU or freed (depending on b_lru_ref).
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;
	bool			release;
	bool			freebuf = false;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		if (atomic_dec_and_test(&bp->b_hold)) {
			xfs_buf_ioacct_dec(bp);
			xfs_buf_free(bp);
		}
		return;
	}

	ASSERT(atomic_read(&bp->b_hold) > 0);

	release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
	spin_lock(&bp->b_lock);
	if (!release) {
		/*
		 * Drop the in-flight state if the buffer is already on the LRU
		 * and it holds the only reference. This is racy because we
		 * haven't acquired the pag lock, but the use of
		 * XFS_BSTATE_IN_FLIGHT ensures the decrement occurs only once
		 * per-buf.
		 */
		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
			__xfs_buf_ioacct_dec(bp);
		goto out_unlock;
	}

	/* the last reference has been dropped ... */
	__xfs_buf_ioacct_dec(bp);
	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
		/*
		 * If the buffer is added to the LRU take a new reference to the
		 * buffer for the LRU and clear the (now stale) dispose list
		 * state flag
		 */
		if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
			bp->b_state &= ~XFS_BSTATE_DISPOSE;
			atomic_inc(&bp->b_hold);
		}
		spin_unlock(&pag->pag_buf_lock);
	} else {
		/*
		 * most of the time buffers will already be removed from the
		 * LRU, so optimise that case by checking for the
		 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
		 * was on was the disposal list
		 */
		if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
			list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
		} else {
			ASSERT(list_empty(&bp->b_lru));
		}

		ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
		rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
				       xfs_buf_hash_params);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
		freebuf = true;
	}

out_unlock:
	spin_unlock(&bp->b_lock);

	if (freebuf)
		xfs_buf_free(bp);
}


/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked) {
		XB_SET_OWNER(bp);
		trace_xfs_buf_trylock(bp, _RET_IP_);
	} else {
		trace_xfs_buf_trylock_fail(bp, _RET_IP_);
	}
	return locked;
}

/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

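/*
 * Complete I/O on a buffer: latch any I/O error, run the read verifier on
 * successful reads, mark the buffer done and then either call the iodone
 * handler, release the buffer (async I/O) or wake the synchronous waiter.
 */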
void
xfs_buf_ioend(
	struct xfs_buf		*bp)
{
	bool			read = bp->b_flags & XBF_READ;

	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);

	/*
	 * Pull in IO completion errors now. We are guaranteed to be running
	 * single threaded, so we don't need the lock to read b_io_error.
	 */
	if (!bp->b_error && bp->b_io_error)
		xfs_buf_ioerror(bp, bp->b_io_error);

	/* Only validate buffers that were read without errors */
	if (read && !bp->b_error && bp->b_ops) {
		ASSERT(!bp->b_iodone);
		bp->b_ops->verify_read(bp);
	}

	if (!bp->b_error)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else
		complete(&bp->b_iowait);
}

static void
xfs_buf_ioend_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, xfs_buf_t, b_ioend_work);

	xfs_buf_ioend(bp);
}

static void
xfs_buf_ioend_async(
	struct xfs_buf	*bp)
{
	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
	queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
}

void
__xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error,
	xfs_failaddr_t		failaddr)
{
	ASSERT(error <= 0 && error >= -1000);
	bp->b_error = error;
	trace_xfs_buf_ioerror(bp, error, failaddr);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error in \"%s\" at daddr 0x%llx len %d error %d",
		func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
		-bp->b_error);
}

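/*
 * Write a buffer synchronously and wait for completion. A write error
 * triggers a filesystem shutdown and is returned to the caller.
 */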
int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
			 XBF_WRITE_FAIL | XBF_DONE);

	error = xfs_buf_submit_wait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}

static void
xfs_buf_bio_end_io(
	struct bio		*bio)
{
	struct xfs_buf		*bp = (struct xfs_buf *)bio->bi_private;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (bio->bi_status) {
		int error = blk_status_to_errno(bio->bi_status);

		cmpxchg(&bp->b_io_error, 0, error);
	}

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend_async(bp);
	bio_put(bio);
}

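/*
 * Issue the bios for a single map of the buffer, adding as many pages to each
 * bio as the block layer accepts and submitting further bios until the map
 * (or the remaining I/O length) is covered.
 */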
static void
xfs_buf_ioapply_map(
	struct xfs_buf	*bp,
	int		map,
	int		*buf_offset,
	int		*count,
	int		op,
	int		op_flags)
{
	int		page_index;
	int		total_nr_pages = bp->b_page_count;
	int		nr_pages;
	struct bio	*bio;
	sector_t	sector = bp->b_maps[map].bm_bn;
	int		size;
	int		offset;

	/* skip the pages in the buffer before the start offset */
	page_index = 0;
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
	}

	/*
	 * Limit the IO size to the length of the current vector, and update the
	 * remaining IO count for the next time around.
	 */
	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*count -= size;
	*buf_offset += size;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = min(total_nr_pages, BIO_MAX_PAGES);

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio_set_dev(bio, bp->b_target->bt_bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;
	bio_set_op_attrs(bio, op, op_flags);

	for (; size && nr_pages; nr_pages--, page_index++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
				      offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += BTOBB(nbytes);
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_iter.bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(bio);
		if (size)
			goto next_chunk;
	} else {
		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (xfs_buf_submit) holds a count itself.
		 */
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, -EIO);
		bio_put(bio);
	}

}

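/*
 * Work out the I/O operation and flags from the buffer state, run the write
 * verifier for writes, and then issue I/O on each map in the buffer.
 */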
STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
	int		op;
	int		op_flags = 0;
	int		offset;
	int		size;
	int		i;

	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	bp->b_error = 0;

	/*
	 * Initialize the I/O completion workqueue if we haven't yet or the
	 * submitter has not opted to specify a custom one.
	 */
	if (!bp->b_ioend_wq)
		bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;

	if (bp->b_flags & XBF_WRITE) {
		op = REQ_OP_WRITE;
		if (bp->b_flags & XBF_SYNCIO)
			op_flags = REQ_SYNC;
		if (bp->b_flags & XBF_FUA)
			op_flags |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			op_flags |= REQ_PREFLUSH;

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error and
		 * the IO should not be dispatched.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_target->bt_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
		} else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
			struct xfs_mount *mp = bp->b_target->bt_mount;

			/*
			 * non-crc filesystems don't attach verifiers during
			 * log recovery, so don't warn for such filesystems.
			 */
			if (xfs_sb_version_hascrc(&mp->m_sb)) {
				xfs_warn(mp,
					"%s: no buf ops on daddr 0x%llx len %d",
					__func__, bp->b_bn, bp->b_length);
				xfs_hex_dump(bp->b_addr,
						XFS_CORRUPTION_DUMP_LEN);
				dump_stack();
			}
		}
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		op = REQ_OP_READ;
		op_flags = REQ_RAHEAD;
	} else {
		op = REQ_OP_READ;
	}

	/* we only use the buffer cache for meta-data */
	op_flags |= REQ_META;

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * xfs_buf_ioapply_map() will modify them appropriately for each
	 * subsequent call.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_io_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
		if (bp->b_error)
			break;
		if (size <= 0)
			break;	/* all done */
	}
	blk_finish_plug(&plug);
}

/*
 * Asynchronous IO submission path. This transfers the buffer lock ownership
 * and the current reference to the IO. It is not safe to reference the buffer
 * after a call to this function unless the caller holds an additional
 * reference itself.
 */
void
xfs_buf_submit(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_submit(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
	ASSERT(bp->b_flags & XBF_ASYNC);

	/* on shutdown we stale and complete the buffer immediately */
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
		return;
	}

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * The caller's reference is released during I/O completion.
	 * This occurs some time after the last b_io_remaining reference is
	 * released, so after we drop our IO reference we have to have some
	 * other reference to ensure the buffer doesn't go away from underneath
	 * us. Take a direct reference to ensure we have safe access to the
	 * buffer until we are finished with it.
	 */
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	xfs_buf_ioacct_inc(bp);
	_xfs_buf_ioapply(bp);

	/*
	 * If _xfs_buf_ioapply failed, we can get back here with only the IO
	 * reference we took above. If we drop it to zero, run completion so
	 * that we don't return to the caller with completion still pending.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		if (bp->b_error)
			xfs_buf_ioend(bp);
		else
			xfs_buf_ioend_async(bp);
	}

	xfs_buf_rele(bp);
	/* Note: it is not safe to reference bp now we've dropped our ref */
}

/*
 * Synchronous buffer IO submission path, read or write.
 */
int
xfs_buf_submit_wait(
	struct xfs_buf	*bp)
{
	int		error;

	trace_xfs_buf_submit_wait(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));

	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		xfs_buf_ioerror(bp, -EIO);
		xfs_buf_stale(bp);
		bp->b_flags &= ~XBF_DONE;
		return -EIO;
	}

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * For synchronous IO, the IO does not inherit the submitter's reference
	 * count, nor the buffer lock. Hence we cannot release the reference we
	 * are about to take until we've waited for all IO completion to occur,
	 * including any xfs_buf_ioend_async() work that may be pending.
	 */
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);

	/*
	 * make sure we run completion synchronously if it raced with us and is
	 * already complete.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp);

	/* wait for completion before gathering the error from the buffer */
	trace_xfs_buf_iowait(bp, _RET_IP_);
	wait_for_completion(&bp->b_iowait);
	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	error = bp->b_error;

	/*
	 * all done now, we can release the hold that keeps the buffer
	 * referenced for the entire IO.
	 */
	xfs_buf_rele(bp);
	return error;
}

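/*
 * Return a kernel virtual address for the given byte offset into the buffer,
 * handling both mapped buffers and unmapped page-array buffers.
 */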
void *
xfs_buf_offset(
	struct xfs_buf		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_addr)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return page_address(page) + (offset & (PAGE_SIZE-1));
}

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend;

	bend = boff + bsize;
	while (boff < bend) {
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
				      BBTOB(bp->b_io_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + page_offset, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + page_offset, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + page_offset, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
static enum lru_status
xfs_buftarg_wait_rele(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)

{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	if (atomic_read(&bp->b_hold) > 1) {
		/* need to wait, so skip it this pass */
		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
		return LRU_SKIP;
	}
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;

	/*
	 * clear the LRU reference count so the buffer doesn't get
	 * ignored in xfs_buf_rele().
	 */
	atomic_set(&bp->b_lru_ref, 0);
	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}

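/*
 * Drain all in-flight I/O on a buffer target and free every buffer held only
 * by the LRU. Used to quiesce the buffer cache, e.g. at unmount.
 */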
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	LIST_HEAD(dispose);
	int			loop = 0;

	/*
	 * First wait on the buftarg I/O count for all in-flight buffers to be
	 * released. This is critical as new buffers do not make the LRU until
	 * they are released.
	 *
	 * Next, flush the buffer workqueue to ensure all completion processing
	 * has finished. Just waiting on buffer locks is not sufficient for
	 * async IO as the reference count held over IO is not released until
	 * after the buffer lock is dropped. Hence we need to ensure here that
	 * all reference counts have been dropped before we start walking the
	 * LRU list.
	 */
	while (percpu_counter_sum(&btp->bt_io_count))
		delay(100);
	flush_workqueue(btp->bt_mount->m_buf_workqueue);

	/* loop until there is nothing left on the lru list. */
	while (list_lru_count(&btp->bt_lru)) {
		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
			      &dispose, LONG_MAX);

		while (!list_empty(&dispose)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);
			if (bp->b_flags & XBF_WRITE_FAIL) {
				xfs_alert(btp->bt_mount,
"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
					(long long)bp->b_bn);
				xfs_alert(btp->bt_mount,
"Please run xfs_repair to determine the extent of the problem.");
			}
			xfs_buf_rele(bp);
		}
		if (loop++ != 0)
			delay(100);
	}
}

static enum lru_status
xfs_buftarg_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	/*
	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;
	/*
	 * Decrement the b_lru_ref count unless the value is already
	 * zero. If the value is already zero, we need to reclaim the
	 * buffer, otherwise it gets another trip through the LRU.
	 */
	if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
		spin_unlock(&bp->b_lock);
		return LRU_ROTATE;
	}

	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}

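/*
 * Shrinker callbacks: under memory pressure, isolate buffers whose b_lru_ref
 * count has run out from the buftarg LRU and free them.
 */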
static unsigned long
xfs_buftarg_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	LIST_HEAD(dispose);
	unsigned long		freed;

	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
				     xfs_buftarg_isolate, &dispose);

	while (!list_empty(&dispose)) {
		struct xfs_buf *bp;
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return freed;
}

static unsigned long
xfs_buftarg_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	return list_lru_shrink_count(&btp->bt_lru, sc);
}

void
xfs_free_buftarg(
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);
	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
	percpu_counter_destroy(&btp->bt_io_count);
	list_lru_destroy(&btp->bt_lru);

	xfs_blkdev_issue_flush(btp);

	kmem_free(btp);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		sectorsize)
{
	/* Set up metadata sector size info */
	btp->bt_meta_sectorsize = sectorsize;
	btp->bt_meta_sectormask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %pg",
			sectorsize, btp->bt_bdev);
		return -EINVAL;
	}

	/* Set up device logical sector size mask */
	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;

	return 0;
}

/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
}

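/*
 * Allocate and initialise a buffer target for the given block device: set up
 * the sector sizes, the LRU, the in-flight I/O counter and the shrinker.
 */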
xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	struct dax_device	*dax_dev)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_daxdev = dax_dev;

	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error_free;

	if (list_lru_init(&btp->bt_lru))
		goto error_free;

	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
		goto error_lru;

	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
	if (register_shrinker(&btp->bt_shrinker))
		goto error_pcpu;
	return btp;

error_pcpu:
	percpu_counter_destroy(&btp->bt_io_count);
error_lru:
	list_lru_destroy(&btp->bt_lru);
error_free:
	kmem_free(btp);
	return NULL;
}

/*
 * Cancel a delayed write list.
 *
 * Remove each buffer from the list, clear the delwri queue flag and drop the
 * associated buffer reference.
 */
void
xfs_buf_delwri_cancel(
	struct list_head	*list)
{
	struct xfs_buf		*bp;

	while (!list_empty(list)) {
		bp = list_first_entry(list, struct xfs_buf, b_list);

		xfs_buf_lock(bp);
		bp->b_flags &= ~_XBF_DELWRI_Q;
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}
}

/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been. Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization. It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it already had
 * been on the buffer list.
 */
bool
xfs_buf_delwri_queue(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it already is queued up
	 * by someone else for immediate writeout. Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list. In a rare corner case it
	 * might get readded to a delwri list after the synchronous writeout,
	 * in which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
	}

	return true;
}

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

/*
 * submit buffers for write.
 *
 * When we have a large buffer list, we do not want to hold all the buffers
 * locked while we block on the request queue waiting for IO dispatch. To avoid
 * this problem, we lock and submit buffers in groups of 50, thereby minimising
 * the lock hold times for lists which may contain thousands of objects.
 *
 * To do this, we sort the buffer list before we walk the list to lock and
 * submit buffers, and we plug and unplug around each group of buffers we
 * submit.
 */
static int
xfs_buf_delwri_submit_buffers(
	struct list_head	*buffer_list,
	struct list_head	*wait_list)
{
	struct xfs_buf		*bp, *n;
	LIST_HEAD		(submit_list);
	int			pinned = 0;
	struct blk_plug		plug;

	list_sort(NULL, buffer_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		if (!wait_list) {
			if (xfs_buf_ispinned(bp)) {
				pinned++;
				continue;
			}
			if (!xfs_buf_trylock(bp))
				continue;
		} else {
			xfs_buf_lock(bp);
		}

		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime. In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);
			xfs_buf_relse(bp);
			continue;
		}

		trace_xfs_buf_delwri_split(bp, _RET_IP_);

		/*
		 * We do all IO submission async. This means if we need
		 * to wait for IO completion we need to take an extra
		 * reference so the buffer is still valid on the other
		 * side. We need to move the buffer onto the io_list
		 * at this point so the caller can still access it.
		 */
		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
		bp->b_flags |= XBF_WRITE | XBF_ASYNC;
		if (wait_list) {
			xfs_buf_hold(bp);
			list_move_tail(&bp->b_list, wait_list);
		} else
			list_del_init(&bp->b_list);

		xfs_buf_submit(bp);
	}
	blk_finish_plug(&plug);

	return pinned;
}

/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers. This interface
 * is only safely usable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
 * function.
 */
int
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
}

/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(wait_list);
	int			error = 0, error2;
	struct xfs_buf		*bp;

	xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);

	/* Wait for IO to complete. */
	while (!list_empty(&wait_list)) {
		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

		list_del_init(&bp->b_list);

		/* locking the buffer will wait for async IO completion. */
		xfs_buf_lock(bp);
		error2 = bp->b_error;
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
	}

	return error;
}

/*
 * Push a single buffer on a delwri queue.
 *
 * The purpose of this function is to submit a single buffer of a delwri queue
 * and return with the buffer still on the original queue. The waiting delwri
 * buffer submission infrastructure guarantees transfer of the delwri queue
 * buffer reference to a temporary wait list. We reuse this infrastructure to
 * transfer the buffer back to the original queue.
 *
 * Note the buffer transitions from the queued state, to the submitted and wait
 * listed state and back to the queued state during this call. The buffer
 * locking and queue management logic between _delwri_pushbuf() and
 * _delwri_queue() guarantee that the buffer cannot be queued to another list
 * before returning.
 */
int
xfs_buf_delwri_pushbuf(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	LIST_HEAD		(submit_list);
	int			error;

	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);

	/*
	 * Isolate the buffer to a new local list so we can submit it for I/O
	 * independently from the rest of the original list.
	 */
	xfs_buf_lock(bp);
	list_move(&bp->b_list, &submit_list);
	xfs_buf_unlock(bp);

	/*
	 * Delwri submission clears the DELWRI_Q buffer flag and returns with
	 * the buffer on the wait list with an associated reference. Rather than
	 * bounce the buffer from a local wait list back to the original list
	 * after I/O completion, reuse the original list as the wait list.
	 */
	xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);

	/*
	 * The buffer is now under I/O and wait listed as during typical delwri
	 * submission. Lock the buffer to wait for I/O completion. Rather than
	 * remove the buffer from the wait list and release the reference, we
	 * want to return with the buffer queued to the original list. The
	 * buffer already sits on the original list with a wait list reference,
	 * however. If we let the queue inherit that wait list reference, all we
	 * need to do is reset the DELWRI_Q flag.
	 */
	xfs_buf_lock(bp);
	error = bp->b_error;
	bp->b_flags |= _XBF_DELWRI_Q;
	xfs_buf_unlock(bp);

	return error;
}

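/* Create the xfs_buf zone at module initialisation time. */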
int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	return 0;

 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	kmem_zone_destroy(xfs_buf_zone);
}

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
	/*
	 * Set the lru reference count to 0 based on the error injection tag.
	 * This allows userspace to disrupt buffer caching for debug/testing
	 * purposes.
	 */
	if (XFS_TEST_ERROR(false, bp->b_target->bt_mount,
			   XFS_ERRTAG_BUF_LRU_REF))
		lru_ref = 0;

	atomic_set(&bp->b_lru_ref, lru_ref);
}