/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"

static kmem_zone_t *xfs_buf_zone;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)


static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped, but the code is clever
	 * enough to know it doesn't have to map a single page, so the check has
	 * to be both for b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	spin_lock(&bp->b_lock);
	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
}

static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				KM_NOFS);
	if (!bp->b_maps)
		return -ENOMEM;
	return 0;
}

/*
 * Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	spin_lock_init(&bp->b_lock);
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(target->bt_mount, xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = -ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
					current->comm, current->pid,
					__func__, gfp_mask);

			XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}
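
/*
 * Note on the page count computed above (illustrative only, assuming 4 KiB
 * pages): the buffer is described in 512-byte basic blocks, so the number
 * of pages is derived from the byte range the buffer spans, not just its
 * length.  For example, a 16 BB (8 KiB) buffer starting at basic block 9
 * covers bytes 4608..12799, i.e. pages 1, 2 and 3:
 *
 *	start      = BBTOB(9) >> PAGE_SHIFT                        = 1
 *	end        = (BBTOB(9 + 16) + PAGE_SIZE - 1) >> PAGE_SHIFT = 4
 *	page_count = end - start                                   = 3
 *
 * The same 8 KiB buffer starting on a page boundary would only need two
 * pages.
 */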

/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;
		unsigned noio_flag;

		/*
		 * vm_map_ram() will allocate auxiliary structures (e.g.
		 * pagetables) with GFP_KERNEL, yet we are likely to be under
		 * GFP_NOFS context here. Hence we need to tell memory reclaim
		 * that we are in such a context via PF_MEMALLOC_NOIO to prevent
		 * memory reclaim re-entering the filesystem here and
		 * potentially deadlocking.
		 */
		noio_flag = memalloc_noio_save();
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);
		memalloc_noio_restore(noio_flag);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
	}

	return 0;
}
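
/*
 * Illustrative note (not a caller in this file): a multi-page buffer only
 * gets a virtual mapping when the caller actually needs b_addr.  A caller
 * that only ever accesses the buffer through xfs_buf_offset() or
 * xfs_buf_iomove() can pass XBF_UNMAPPED and skip the vm_map_ram() cost
 * entirely, e.g.:
 *
 *	bp = xfs_buf_get_map(target, map, nmaps, XBF_UNMAPPED | XBF_TRYLOCK);
 *
 * Whether a particular caller can tolerate an unmapped buffer is an
 * assumption about that caller, not something enforced here.
 */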

/*
 * Finding and Reading Buffers
 */

/*
 * Look up, and create if absent, a lockable buffer for
 * a given disk address range of a buffer target.  The buffer is
 * returned locked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;
	xfs_daddr_t		blkno = map[0].bm_bn;
	xfs_daddr_t		eofs;
	int			numblks = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		numblks += map[i].bm_len;

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(BBTOB(numblks) < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (blkno < 0 || blkno >= eofs) {
		/*
		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
		 * but none of the higher level infrastructure supports
		 * returning a specific error on buffer lookup failures.
		 */
		xfs_alert(btp->bt_mount,
			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
			  __func__, blkno, eofs);
		WARN_ON(1);
		return NULL;
	}

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, blkno));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (blkno < bp->b_bn)
			rbp = &(*rbp)->rb_left;
		else if (blkno > bp->b_bn)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block number match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_length != numblks) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
	if (!bp) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			xfs_buf_relse(bp);
			return NULL;
		}
	}

	XFS_STATS_INC(target->bt_mount, xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;
}
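
/*
 * Typical lookup pattern (illustrative sketch only; the real callers live
 * in other files):
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_get_map(target, &map, 1, XBF_TRYLOCK);
 *	if (bp) {
 *		... use bp->b_addr or xfs_buf_offset(bp, off) ...
 *		xfs_buf_relse(bp);
 *	}
 *
 * The buffer comes back locked with an elevated hold count;
 * xfs_buf_relse() drops both.
 */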

STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	if (flags & XBF_ASYNC) {
		xfs_buf_submit(bp);
		return 0;
	}
	return xfs_buf_submit_wait(bp);
}

xfs_buf_t *
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(target->bt_mount, xb_get_read);
			bp->b_ops = ops;
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			xfs_buf_relse(bp);
			return NULL;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;
}

/*
 * If we are not low on memory then do the readahead in a
 * deadlock safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
int
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	*bpp = NULL;

	bp = xfs_buf_get_uncached(target, numblks, flags);
	if (!bp)
		return -ENOMEM;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;	/* always null for uncached buffers */
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	xfs_buf_submit_wait(bp);
	if (bp->b_error) {
		int	error = bp->b_error;
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;
}
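
/*
 * Illustrative caller (a sketch, not code from this file): uncached reads
 * are used for one-off metadata that is not worth keeping in the buffer
 * cache, for example probing a superblock copy:
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read_uncached(mp->m_ddev_targp, daddr, numblks,
 *				      0, &bp, ops);
 *	if (error)
 *		return error;
 *	... examine bp->b_addr ...
 *	xfs_buf_relse(bp);
 *
 * On success the buffer is returned locked and must be released by the
 * caller; on failure *bpp stays NULL and the buffer has already been
 * released.
 */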

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			numblks)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);

	return 0;
}

xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags)
{
	unsigned long		page_count;
	int			error, i;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	bp = _xfs_buf_alloc(target, &map, 1, 0);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}

/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}
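
/*
 * Illustrative note: every xfs_buf_hold() must be balanced by an
 * xfs_buf_rele() (or by xfs_buf_relse() if the caller also owns the buffer
 * lock), e.g.:
 *
 *	xfs_buf_hold(bp);
 *	... hand bp to async completion or another list ...
 *	xfs_buf_rele(bp);
 *
 * The submission paths below use exactly this pattern to keep the buffer
 * alive across IO completion.
 */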

/*
 * Releases a hold on the specified buffer. If the
 * hold count is 1, calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		spin_lock(&bp->b_lock);
		if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
			/*
			 * If the buffer is added to the LRU take a new
			 * reference to the buffer for the LRU and clear the
			 * (now stale) dispose list state flag
			 */
			if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
				bp->b_state &= ~XFS_BSTATE_DISPOSE;
				atomic_inc(&bp->b_hold);
			}
			spin_unlock(&bp->b_lock);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			/*
			 * most of the time buffers will already be removed from
			 * the LRU, so optimise that case by checking for the
			 * XFS_BSTATE_DISPOSE flag indicating the last list the
			 * buffer was on was the disposal list
			 */
			if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
				list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
			} else {
				ASSERT(list_empty(&bp->b_lru));
			}
			spin_unlock(&bp->b_lock);

			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}


/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked.  Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning.  This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);

	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
}

/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

void
xfs_buf_ioend(
	struct xfs_buf	*bp)
{
	bool		read = bp->b_flags & XBF_READ;

	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);

	/*
	 * Pull in IO completion errors now. We are guaranteed to be running
	 * single threaded, so we don't need the lock to read b_io_error.
	 */
	if (!bp->b_error && bp->b_io_error)
		xfs_buf_ioerror(bp, bp->b_io_error);

	/* Only validate buffers that were read without errors */
	if (read && !bp->b_error && bp->b_ops) {
		ASSERT(!bp->b_iodone);
		bp->b_ops->verify_read(bp);
	}

	if (!bp->b_error)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else
		complete(&bp->b_iowait);
}

static void
xfs_buf_ioend_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, xfs_buf_t, b_ioend_work);

	xfs_buf_ioend(bp);
}

void
xfs_buf_ioend_async(
	struct xfs_buf	*bp)
{
	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
	queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error <= 0 && error >= -1000);
	bp->b_error = error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
		(__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
}

int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
			 XBF_WRITE_FAIL | XBF_DONE);

	error = xfs_buf_submit_wait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}
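
/*
 * Illustrative usage (a sketch, not a caller in this file): xfs_bwrite()
 * is the synchronous write path and expects the caller to hold the buffer
 * lock:
 *
 *	xfs_buf_lock(bp);
 *	... modify bp->b_addr ...
 *	error = xfs_bwrite(bp);
 *	xfs_buf_relse(bp);
 *
 * A write error here shuts the filesystem down, so callers normally only
 * need to propagate the return value.
 */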

STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (bio->bi_error) {
		spin_lock(&bp->b_lock);
		if (!bp->b_io_error)
			bp->b_io_error = bio->bi_error;
		spin_unlock(&bp->b_lock);
	}

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend_async(bp);
	bio_put(bio);
}

static void
xfs_buf_ioapply_map(
	struct xfs_buf	*bp,
	int		map,
	int		*buf_offset,
	int		*count,
	int		rw)
{
	int		page_index;
	int		total_nr_pages = bp->b_page_count;
	int		nr_pages;
	struct bio	*bio;
	sector_t	sector = bp->b_maps[map].bm_bn;
	int		size;
	int		offset;

	total_nr_pages = bp->b_page_count;

	/* skip the pages in the buffer before the start offset */
	page_index = 0;
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
	}

	/*
	 * Limit the IO size to the length of the current vector, and update the
	 * remaining IO count for the next time around.
	 */
	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*count -= size;
	*buf_offset += size;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;


	for (; size && nr_pages; nr_pages--, page_index++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
				      offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += BTOBB(nbytes);
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_iter.bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (xfs_buf_submit) holds a count itself.
		 */
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, -EIO);
		bio_put(bio);
	}

}

STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
	int		rw;
	int		offset;
	int		size;
	int		i;

	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	bp->b_error = 0;

	/*
	 * Initialize the I/O completion workqueue if we haven't yet or the
	 * submitter has not opted to specify a custom one.
	 */
	if (!bp->b_ioend_wq)
		bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;

	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
			rw = WRITE_SYNC;
		else
			rw = WRITE;
		if (bp->b_flags & XBF_FUA)
			rw |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			rw |= REQ_FLUSH;

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error and
		 * the IO should not be dispatched.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_target->bt_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
		} else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
			struct xfs_mount *mp = bp->b_target->bt_mount;

			/*
			 * non-crc filesystems don't attach verifiers during
			 * log recovery, so don't warn for such filesystems.
			 */
			if (xfs_sb_version_hascrc(&mp->m_sb)) {
				xfs_warn(mp,
					"%s: no ops on block 0x%llx/0x%x",
					__func__, bp->b_bn, bp->b_length);
				xfs_hex_dump(bp->b_addr, 64);
				dump_stack();
			}
		}
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		rw = READA;
	} else {
		rw = READ;
	}

	/* we only use the buffer cache for meta-data */
	rw |= REQ_META;

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * xfs_buf_ioapply_map() will modify them appropriately for each
	 * subsequent call.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_io_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
		if (bp->b_error)
			break;
		if (size <= 0)
			break;	/* all done */
	}
	blk_finish_plug(&plug);
}

/*
 * Asynchronous IO submission path. This transfers the buffer lock ownership and
 * the current reference to the IO. It is not safe to reference the buffer after
 * a call to this function unless the caller holds an additional reference
 * itself.
 */
void
xfs_buf_submit(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_submit(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
	ASSERT(bp->b_flags & XBF_ASYNC);

	/* on shutdown we stale and complete the buffer immediately */
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
		return;
	}

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * The caller's reference is released during I/O completion.
	 * This occurs some time after the last b_io_remaining reference is
	 * released, so after we drop our IO reference we have to have some
	 * other reference to ensure the buffer doesn't go away from underneath
	 * us. Take a direct reference to ensure we have safe access to the
	 * buffer until we are finished with it.
	 */
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);

	/*
	 * If _xfs_buf_ioapply failed, we can get back here with only the IO
	 * reference we took above. If we drop it to zero, run completion so
	 * that we don't return to the caller with completion still pending.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		if (bp->b_error)
			xfs_buf_ioend(bp);
		else
			xfs_buf_ioend_async(bp);
	}

	xfs_buf_rele(bp);
	/* Note: it is not safe to reference bp now we've dropped our ref */
}

/*
 * Synchronous buffer IO submission path, read or write.
 */
int
xfs_buf_submit_wait(
	struct xfs_buf	*bp)
{
	int		error;

	trace_xfs_buf_submit_wait(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));

	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		xfs_buf_ioerror(bp, -EIO);
		xfs_buf_stale(bp);
		bp->b_flags &= ~XBF_DONE;
		return -EIO;
	}

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * For synchronous IO, the IO does not inherit the submitter's reference
	 * count, nor the buffer lock. Hence we cannot release the reference we
	 * are about to take until we've waited for all IO completion to occur,
	 * including any xfs_buf_ioend_async() work that may be pending.
	 */
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);

	/*
	 * make sure we run completion synchronously if it raced with us and is
	 * already complete.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp);

	/* wait for completion before gathering the error from the buffer */
	trace_xfs_buf_iowait(bp, _RET_IP_);
	wait_for_completion(&bp->b_iowait);
	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	error = bp->b_error;

	/*
	 * all done now, we can release the hold that keeps the buffer
	 * referenced for the entire IO.
	 */
	xfs_buf_rele(bp);
	return error;
}

void *
xfs_buf_offset(
	struct xfs_buf		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_addr)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return page_address(page) + (offset & (PAGE_SIZE-1));
}
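
/*
 * Illustrative example of the unmapped case above (assuming 4 KiB pages and
 * b_offset == 0): a request for offset 5000 resolves to
 *
 *	page = b_pages[5000 >> PAGE_SHIFT]	= b_pages[1]
 *	byte = 5000 & (PAGE_SIZE - 1)		= 904
 *
 * i.e. byte 904 of the second backing page.  Mapped buffers short-circuit
 * this and return b_addr + offset directly.
 */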

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend;

	bend = boff + bsize;
	while (boff < bend) {
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
				      BBTOB(bp->b_io_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + page_offset, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + page_offset, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + page_offset, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
static enum lru_status
xfs_buftarg_wait_rele(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)

{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	if (atomic_read(&bp->b_hold) > 1) {
		/* need to wait, so skip it this pass */
		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
		return LRU_SKIP;
	}
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;

	/*
	 * clear the LRU reference count so the buffer doesn't get
	 * ignored in xfs_buf_rele().
	 */
	atomic_set(&bp->b_lru_ref, 0);
	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}

void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	LIST_HEAD(dispose);
	int loop = 0;

	/* loop until there is nothing left on the lru list. */
	while (list_lru_count(&btp->bt_lru)) {
		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
			      &dispose, LONG_MAX);

		while (!list_empty(&dispose)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);
			if (bp->b_flags & XBF_WRITE_FAIL) {
				xfs_alert(btp->bt_mount,
"Corruption Alert: Buffer at block 0x%llx had permanent write failures!",
					(long long)bp->b_bn);
				xfs_alert(btp->bt_mount,
"Please run xfs_repair to determine the extent of the problem.");
			}
			xfs_buf_rele(bp);
		}
		if (loop++ != 0)
			delay(100);
	}
}

static enum lru_status
xfs_buftarg_isolate(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	/*
	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;
	/*
	 * Decrement the b_lru_ref count unless the value is already
	 * zero. If the value is already zero, we need to reclaim the
	 * buffer, otherwise it gets another trip through the LRU.
	 */
	if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
		spin_unlock(&bp->b_lock);
		return LRU_ROTATE;
	}

	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}

static unsigned long
xfs_buftarg_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	LIST_HEAD(dispose);
	unsigned long		freed;

	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
				     xfs_buftarg_isolate, &dispose);

	while (!list_empty(&dispose)) {
		struct xfs_buf *bp;
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return freed;
}

static unsigned long
xfs_buftarg_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	return list_lru_shrink_count(&btp->bt_lru, sc);
}

void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);
	list_lru_destroy(&btp->bt_lru);

	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kmem_free(btp);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		sectorsize)
{
	/* Set up metadata sector size info */
	btp->bt_meta_sectorsize = sectorsize;
	btp->bt_meta_sectormask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		char name[BDEVNAME_SIZE];

		bdevname(btp->bt_bdev, name);

		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s",
			sectorsize, name);
		return -EINVAL;
	}

	/* Set up device logical sector size mask */
	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;

	return 0;
}

/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);

	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;

	if (list_lru_init(&btp->bt_lru))
		goto error;

	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}

/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been.  Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization.  It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it already had
 * been on the buffer list.
 */
bool
xfs_buf_delwri_queue(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it already is queued up
	 * by someone else for immediate writeout.  Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list.  In a rare corner case it
	 * might get re-added to a delwri list after the synchronous writeout,
	 * in which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
	}

	return true;
}

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

static int
__xfs_buf_delwri_submit(
	struct list_head	*buffer_list,
	struct list_head	*io_list,
	bool			wait)
{
	struct blk_plug		plug;
	struct xfs_buf		*bp, *n;
	int			pinned = 0;

	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		if (!wait) {
			if (xfs_buf_ispinned(bp)) {
				pinned++;
				continue;
			}
			if (!xfs_buf_trylock(bp))
				continue;
		} else {
			xfs_buf_lock(bp);
		}

		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime.  In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);
			xfs_buf_relse(bp);
			continue;
		}

		list_move_tail(&bp->b_list, io_list);
		trace_xfs_buf_delwri_split(bp, _RET_IP_);
	}

	list_sort(NULL, io_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, io_list, b_list) {
		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
		bp->b_flags |= XBF_WRITE | XBF_ASYNC;

		/*
		 * we do all IO submission async. This means if we need to wait
		 * for IO completion we need to take an extra reference so the
		 * buffer is still valid on the other side.
		 */
		if (wait)
			xfs_buf_hold(bp);
		else
			list_del_init(&bp->b_list);

		xfs_buf_submit(bp);
	}
	blk_finish_plug(&plug);

	return pinned;
}

/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers.  This interface
 * is only safely useable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
 * function.
 */
int
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(io_list);
	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
}
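
/*
 * Typical delayed write pattern (illustrative sketch; the real users are
 * the AIL pushing and other higher level code in other files):
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_lock(bp);
 *	... dirty the buffer ...
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	xfs_buf_unlock(bp);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * xfs_buf_delwri_queue() requires the buffer lock and takes its own hold on
 * the buffer, so the caller may drop the lock once the buffer is queued.
 */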

/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(io_list);
	int			error = 0, error2;
	struct xfs_buf		*bp;

	__xfs_buf_delwri_submit(buffer_list, &io_list, true);

	/* Wait for IO to complete. */
	while (!list_empty(&io_list)) {
		bp = list_first_entry(&io_list, struct xfs_buf, b_list);

		list_del_init(&bp->b_list);

		/* locking the buffer will wait for async IO completion. */
		xfs_buf_lock(bp);
		error2 = bp->b_error;
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
	}

	return error;
}

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	return 0;

 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	kmem_zone_destroy(xfs_buf_zone);
}