/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_sb.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;

static struct workqueue_struct *xfslogd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)


static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped, but the code is clever
	 * enough to know it doesn't have to map a single page, so the check
	 * has to be both for b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
STATIC void
xfs_buf_lru_add(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
		btp->bt_lru_nr++;
		bp->b_lru_flags &= ~_XBF_LRU_DISPOSE;
	}
	spin_unlock(&btp->bt_lru_lock);
}
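
/*
 * Example (editor's note, not part of the original source): because the LRU
 * takes its own b_hold reference in xfs_buf_lru_add(), a cached buffer can
 * outlive its last user, roughly:
 *
 *	xfs_buf_relse(bp);	// unlock + drop the caller's reference;
 *				// with b_lru_ref > 0 the buffer stays on
 *				// bt_lru until the shrinker disposes of it
 */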

/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are not
 * b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there
 * to optimise the shrinker removing the buffer from the LRU and calling
 * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
 * bt_lru_lock.
 */
STATIC void
xfs_buf_lru_del(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	atomic_set(&(bp)->b_lru_ref, 0);
	if (!list_empty(&bp->b_lru)) {
		struct xfs_buftarg *btp = bp->b_target;

		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru) &&
		    !(bp->b_lru_flags & _XBF_LRU_DISPOSE)) {
			list_del_init(&bp->b_lru);
			btp->bt_lru_nr--;
			atomic_dec(&bp->b_hold);
		}
		spin_unlock(&btp->bt_lru_lock);
	}
	ASSERT(atomic_read(&bp->b_hold) >= 1);
}

static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				KM_NOFS);
	if (!bp->b_maps)
		return ENOMEM;
	return 0;
}

/*
 * Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	start = BBTOB(bp->b_map.bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_map.bm_bn + bp->b_length) + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}

/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;

		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
	}

	return 0;
}
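
/*
 * Example (editor's note, not part of the original source): the allocation
 * and mapping helpers above leave a buffer in roughly one of three states:
 *
 *	size < PAGE_SIZE          ->  heap memory, _XBF_KMEM, b_page_count == 1
 *	multi-page, mappable      ->  _XBF_PAGES, b_addr from vm_map_ram()
 *	multi-page, XBF_UNMAPPED  ->  _XBF_PAGES, b_addr == NULL (callers use
 *	                              xfs_buf_offset()/xfs_buf_iomove() instead)
 */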

/*
 * Finding and Reading Buffers
 */

/*
 * Look up, and create if absent, a lockable buffer for
 * a given range of an inode. The buffer is returned
 * locked. No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	size_t			numbytes;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;
	xfs_daddr_t		blkno = map[0].bm_bn;
	int			numblks = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		numblks += map[i].bm_len;
	numbytes = BBTOB(numblks);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(numbytes < (1 << btp->bt_sshift)));
	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
				xfs_daddr_to_agno(btp->bt_mount, blkno));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (blkno < bp->b_bn)
			rbp = &(*rbp)->rb_left;
		else if (blkno > bp->b_bn)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block number match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_length != numblks) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
	if (!bp) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			xfs_buf_relse(bp);
			return NULL;
		}
	}

	XFS_STATS_INC(xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;
}

STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_map.bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	xfs_buf_iorequest(bp);
	if (flags & XBF_ASYNC)
		return 0;
	return xfs_buf_iowait(bp);
}

xfs_buf_t *
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			bp->b_ops = ops;
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			xfs_buf_relse(bp);
			return NULL;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;
}

/*
 * If we are not low on memory then do the readahead in a
 * deadlock safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(target, numblks, flags);
	if (!bp)
		return NULL;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = daddr;
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	xfsbdstrat(target->bt_mount, bp);
	xfs_buf_iowait(bp);
	return bp;
}
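
/*
 * Example (editor's sketch, not part of the original source): a typical
 * synchronous metadata read through the mapped interfaces above.  The block
 * number, length and verifier are placeholders supplied by the caller:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf *bp;
 *
 *	bp = xfs_buf_read_map(target, &map, 1, 0, ops);
 *	if (bp) {
 *		if (bp->b_error)
 *			xfs_buf_ioerror_alert(bp, __func__);
 *		xfs_buf_relse(bp);	// drops the lock and the reference
 *	}
 */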

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			numblks)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, 0);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);

	return 0;
}

xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags)
{
	unsigned long		page_count;
	int			error, i;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	bp = _xfs_buf_alloc(target, &map, 1, 0);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages\n", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}

/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}
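
/*
 * Example (editor's sketch, not part of the original source): handing a
 * buffer to another context that may outlive the current reference.  The
 * consumer below is a hypothetical placeholder:
 *
 *	xfs_buf_hold(bp);		// take an extra reference
 *	hand_off_to_other_context(bp);	// placeholder for the consumer
 *	...
 *	xfs_buf_rele(bp);		// consumer drops it when finished
 */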

/*
 * Releases a hold on the specified buffer. If the
 * hold count is 1, calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		if (!(bp->b_flags & XBF_STALE) &&
			   atomic_read(&bp->b_lru_ref)) {
			xfs_buf_lru_add(bp);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			xfs_buf_lru_del(bp);
			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}


/*
 * Lock a buffer object, if it is not already locked.
 *
 * If we come across a stale, pinned, locked buffer, we know that we are
 * being asked to lock a buffer that has been reallocated. Because it is
 * pinned, we know that the log has not been pushed to disk and hence it
 * will still be locked. Rather than continuing to have trylock attempts
 * fail until someone else pushes the log, push it ourselves before
 * returning. This means that the xfsaild will not get stuck trying
 * to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);
	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
}
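
/*
 * Example (editor's sketch, not part of the original source): the
 * opportunistic locking pattern used by lookup paths - on failure the caller
 * either backs off or falls through to the blocking xfs_buf_lock():
 *
 *	if (!xfs_buf_trylock(bp)) {
 *		if (flags & XBF_TRYLOCK)
 *			return NULL;		// caller retries later
 *		xfs_buf_lock(bp);		// blocking fallback
 *	}
 */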

/*
 * Lock a buffer object.
 *
 * If we come across a stale, pinned, locked buffer, we know that we
 * are being asked to lock a buffer that has been reallocated. Because
 * it is pinned, we know that the log has not been pushed to disk and
 * hence it will still be locked. Rather than sleeping until someone
 * else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);
	bool			read = !!(bp->b_flags & XBF_READ);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
	if (read && bp->b_ops)
		bp->b_ops->verify_read(bp);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else {
		ASSERT(read && bp->b_ops);
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioend(
	struct xfs_buf	*bp,
	int		schedule)
{
	bool		read = !!(bp->b_flags & XBF_READ);

	trace_xfs_buf_iodone(bp, _RET_IP_);

	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
		(__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
}

/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	xfs_buf_ioerror(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDONE(bp);
	xfs_buf_stale(bp);

	xfs_buf_ioend(bp, 0);

	return EIO;
}
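
/*
 * Example (editor's sketch, not part of the original source): a read or
 * write verifier attached through b_ops typically reports corruption by
 * tagging the buffer with an error before completion processing runs, e.g.:
 *
 *	if (!ok)
 *		xfs_buf_ioerror(bp, EFSCORRUPTED);
 */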

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = bp->b_flags;
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_DONE(bp);
	xfs_buf_stale(bp);
	bp->b_iodone = NULL;
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		xfs_buf_ioerror(bp, EIO);
		complete(&bp->b_iowait);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}

STATIC int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);

	xfs_bdstrat_cb(bp);

	error = xfs_buf_iowait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes thru this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);
}
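
/*
 * Example (editor's sketch, not part of the original source): a synchronous
 * write of a locked buffer through xfs_bwrite(); the helper above shuts the
 * filesystem down on error, so the caller only needs to propagate it:
 *
 *	xfs_buf_lock(bp);
 *	error = xfs_bwrite(bp);
 *	xfs_buf_relse(bp);
 *	return error;
 */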

STATIC void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);
}

STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (!bp->b_error)
		xfs_buf_ioerror(bp, -error);

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}

static void
xfs_buf_ioapply_map(
	struct xfs_buf	*bp,
	int		map,
	int		*buf_offset,
	int		*count,
	int		rw)
{
	int		page_index;
	int		total_nr_pages = bp->b_page_count;
	int		nr_pages;
	struct bio	*bio;
	sector_t	sector = bp->b_maps[map].bm_bn;
	int		size;
	int		offset;

	total_nr_pages = bp->b_page_count;

	/* skip the pages in the buffer before the start offset */
	page_index = 0;
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
	}

	/*
	 * Limit the IO size to the length of the current vector, and update
	 * the remaining IO count for the next time around.
	 */
	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*count -= size;
	*buf_offset += size;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;


	for (; size && nr_pages; nr_pages--, page_index++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
				      offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += BTOBB(nbytes);
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (xfs_buf_iorequest) holds a count itself.
		 */
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, EIO);
		bio_put(bio);
	}

}

STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
	int		rw;
	int		offset;
	int		size;
	int		i;

	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
			rw = WRITE_SYNC;
		else
			rw = WRITE;
		if (bp->b_flags & XBF_FUA)
			rw |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			rw |= REQ_FLUSH;

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error
		 * and the IO should not be dispatched.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_target->bt_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
		}
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		rw = READA;
	} else {
		rw = READ;
	}

	/* we only use the buffer cache for meta-data */
	rw |= REQ_META;

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * _xfs_buf_ioapply_vec() will modify them appropriately for each
	 * subsequent call.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_io_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
		if (bp->b_error)
			break;
		if (size <= 0)
			break;	/* all done */
	}
	blk_finish_plug(&plug);
}

void
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 1);

	xfs_buf_rele(bp);
}

/*
 * Waits for I/O to complete on the buffer supplied. It returns immediately if
 * no I/O is pending or there is already a pending error on the buffer. It
 * returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iowait(bp, _RET_IP_);

	if (!bp->b_error)
		wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	return bp->b_error;
}

xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_addr)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend;

	bend = boff + bsize;
	while (boff < bend) {
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
				      BBTOB(bp->b_io_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + page_offset, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + page_offset, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + page_offset, data, csize);
		}

		boff += csize;
		data += csize;
	}
}
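
/*
 * Example (editor's sketch, not part of the original source): zeroing part
 * of a (possibly unmapped) buffer via xfs_buf_iomove(); for XBRW_ZERO the
 * data pointer is unused:
 *
 *	xfs_buf_iomove(bp, boff, numbytes, NULL, XBRW_ZERO);
 */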

/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	struct xfs_buf		*bp;

restart:
	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
		if (atomic_read(&bp->b_hold) > 1) {
			spin_unlock(&btp->bt_lru_lock);
			delay(100);
			goto restart;
		}
		/*
		 * clear the LRU reference count so the buffer doesn't get
		 * ignored in xfs_buf_rele().
		 */
		atomic_set(&bp->b_lru_ref, 0);
		spin_unlock(&btp->bt_lru_lock);
		xfs_buf_rele(bp);
		spin_lock(&btp->bt_lru_lock);
	}
	spin_unlock(&btp->bt_lru_lock);
}

int
xfs_buftarg_shrink(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	struct xfs_buf		*bp;
	int nr_to_scan = sc->nr_to_scan;
	LIST_HEAD(dispose);

	if (!nr_to_scan)
		return btp->bt_lru_nr;

	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		if (nr_to_scan-- <= 0)
			break;

		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);

		/*
		 * Decrement the b_lru_ref count unless the value is already
		 * zero. If the value is already zero, we need to reclaim the
		 * buffer, otherwise it gets another trip through the LRU.
		 */
		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			continue;
		}

		/*
		 * remove the buffer from the LRU now to avoid needing another
		 * lock round trip inside xfs_buf_rele().
		 */
		list_move(&bp->b_lru, &dispose);
		btp->bt_lru_nr--;
		bp->b_lru_flags |= _XBF_LRU_DISPOSE;
	}
	spin_unlock(&btp->bt_lru_lock);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return btp->bt_lru_nr;
}

void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);

	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kmem_free(btp);
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		char name[BDEVNAME_SIZE];

		bdevname(btp->bt_bdev, name);

		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s\n",
			sectorsize, name);
		return EINVAL;
	}

	return 0;
}
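
/*
 * Example (editor's note, not part of the original source): for a 512-byte
 * sector device the fields set above work out to
 *
 *	bt_sshift = ffs(512) - 1 = 9
 *	bt_smask  = 512 - 1      = 0x1ff
 *
 * so the alignment asserts in _xfs_buf_find() require an I/O of at least
 * 1 << 9 bytes starting at a byte offset with (BBTOB(blkno) & 0x1ff) == 0.
 */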

/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	int			external,
	const char		*fsname)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_mount = mp;
	btp->bt_dev =  bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);
	if (!btp->bt_bdi)
		goto error;

	INIT_LIST_HEAD(&btp->bt_lru);
	spin_lock_init(&btp->bt_lru_lock);
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}

/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been.  Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization.  It is expected that the lists are
 * thread-local to the callers.
 *
 * Returns true if we queued up the buffer, or false if it already had
 * been on the buffer list.
 */
bool
xfs_buf_delwri_queue(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it already is queued up
	 * by someone else for immediate writeout.  Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list. In a rare corner case it
	 * might get re-added to a delwri list after the synchronous writeout,
	 * in which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
	}

	return true;
}

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_map.bm_bn - bp->b_map.bm_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

static int
__xfs_buf_delwri_submit(
	struct list_head	*buffer_list,
	struct list_head	*io_list,
	bool			wait)
{
	struct blk_plug		plug;
	struct xfs_buf		*bp, *n;
	int			pinned = 0;

	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		if (!wait) {
			if (xfs_buf_ispinned(bp)) {
				pinned++;
				continue;
			}
			if (!xfs_buf_trylock(bp))
				continue;
		} else {
			xfs_buf_lock(bp);
		}

		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime.  In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);
			xfs_buf_relse(bp);
			continue;
		}

		list_move_tail(&bp->b_list, io_list);
		trace_xfs_buf_delwri_split(bp, _RET_IP_);
	}

	list_sort(NULL, io_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, io_list, b_list) {
		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
		bp->b_flags |= XBF_WRITE;

		if (!wait) {
			bp->b_flags |= XBF_ASYNC;
			list_del_init(&bp->b_list);
		}
		xfs_bdstrat_cb(bp);
	}
	blk_finish_plug(&plug);

	return pinned;
}

/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers.  This interface
 * is only safely useable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
 * function.
 */
int
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(io_list);
	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
}
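
/*
 * Example (editor's sketch, not part of the original source): the intended
 * delayed-write pattern - queue locked buffers onto a caller-owned list,
 * drop them, and push the whole list out in one batch with one of the
 * submit helpers:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);	// bp is locked; the list
 *	xfs_buf_relse(bp);			// takes its own reference
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */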

/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(io_list);
	int			error = 0, error2;
	struct xfs_buf		*bp;

	__xfs_buf_delwri_submit(buffer_list, &io_list, true);

	/* Wait for IO to complete. */
	while (!list_empty(&io_list)) {
		bp = list_first_entry(&io_list, struct xfs_buf, b_list);

		list_del_init(&bp->b_list);
		error2 = xfs_buf_iowait(bp);
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
	}

	return error;
}

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	return 0;

 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}