/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"
#include "nfs42.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT	(120 * HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *	protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

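/*
 * Example (editor's sketch, hypothetical "mylayout" driver): a layout
 * driver is a module that registers a pnfs_layoutdriver_type at load
 * time. Registration fails with -EINVAL if the id is 0, if alloc_lseg
 * or free_lseg is missing, or if a driver with that id is already in
 * pnfs_modules_tbl. None of the mylayout_* names below exist in this
 * file; they only illustrate the calling convention.
 *
 *	static struct pnfs_layoutdriver_type mylayout_type = {
 *		.id		= LAYOUT_NFSV4_1_FILES,
 *		.name		= "mylayout",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= mylayout_alloc_lseg,
 *		.free_lseg	= mylayout_free_lseg,
 *	};
 *
 *	static int __init mylayout_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&mylayout_type);
 *	}
 *
 *	static void __exit mylayout_exit(void)
 *	{
 *		pnfs_unregister_layoutdriver(&mylayout_type);
 *	}
 *
 * set_pnfs_layoutdriver() above then finds the driver by id at mount
 * time, asking request_module() to load "%s-%u" (with
 * LAYOUT_NFSV4_1_MODULE_PREFIX and the id) if it is not yet registered.
 */
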
static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);

	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	pnfs_layoutreturn_before_put_layout_hdr(lo);

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ? "RW" : "READ");
"RW" : "READ"); 295 } 296 297 static bool 298 pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode) 299 { 300 unsigned long start, end; 301 int fail_bit = pnfs_iomode_to_fail_bit(iomode); 302 303 if (test_bit(fail_bit, &lo->plh_flags) == 0) 304 return false; 305 end = jiffies; 306 start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT; 307 if (!time_in_range(lo->plh_retry_timestamp, start, end)) { 308 /* It is time to retry the failed layoutgets */ 309 pnfs_layout_clear_fail_bit(lo, fail_bit); 310 return false; 311 } 312 return true; 313 } 314 315 static void 316 init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg) 317 { 318 INIT_LIST_HEAD(&lseg->pls_list); 319 INIT_LIST_HEAD(&lseg->pls_lc_list); 320 atomic_set(&lseg->pls_refcount, 1); 321 smp_mb(); 322 set_bit(NFS_LSEG_VALID, &lseg->pls_flags); 323 lseg->pls_layout = lo; 324 } 325 326 static void pnfs_free_lseg(struct pnfs_layout_segment *lseg) 327 { 328 struct inode *ino = lseg->pls_layout->plh_inode; 329 330 NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg); 331 } 332 333 static void 334 pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo, 335 struct pnfs_layout_segment *lseg) 336 { 337 struct inode *inode = lo->plh_inode; 338 339 WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); 340 list_del_init(&lseg->pls_list); 341 /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */ 342 atomic_dec(&lo->plh_refcount); 343 if (list_empty(&lo->plh_segs)) 344 clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags); 345 rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq); 346 } 347 348 void 349 pnfs_put_lseg(struct pnfs_layout_segment *lseg) 350 { 351 struct pnfs_layout_hdr *lo; 352 struct inode *inode; 353 354 if (!lseg) 355 return; 356 357 dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg, 358 atomic_read(&lseg->pls_refcount), 359 test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); 360 361 lo = lseg->pls_layout; 362 inode = lo->plh_inode; 363 364 if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) { 365 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) { 366 spin_unlock(&inode->i_lock); 367 return; 368 } 369 pnfs_get_layout_hdr(lo); 370 pnfs_layout_remove_lseg(lo, lseg); 371 spin_unlock(&inode->i_lock); 372 pnfs_free_lseg(lseg); 373 pnfs_put_layout_hdr(lo); 374 } 375 } 376 EXPORT_SYMBOL_GPL(pnfs_put_lseg); 377 378 static void pnfs_free_lseg_async_work(struct work_struct *work) 379 { 380 struct pnfs_layout_segment *lseg; 381 struct pnfs_layout_hdr *lo; 382 383 lseg = container_of(work, struct pnfs_layout_segment, pls_work); 384 lo = lseg->pls_layout; 385 386 pnfs_free_lseg(lseg); 387 pnfs_put_layout_hdr(lo); 388 } 389 390 static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg) 391 { 392 INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work); 393 schedule_work(&lseg->pls_work); 394 } 395 396 void 397 pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg) 398 { 399 if (!lseg) 400 return; 401 402 assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock); 403 404 dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg, 405 atomic_read(&lseg->pls_refcount), 406 test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); 407 if (atomic_dec_and_test(&lseg->pls_refcount)) { 408 struct pnfs_layout_hdr *lo = lseg->pls_layout; 409 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) 410 return; 411 pnfs_get_layout_hdr(lo); 412 pnfs_layout_remove_lseg(lo, lseg); 413 pnfs_free_lseg_async(lseg); 414 } 415 } 416 EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked); 417 418 static u64 419 end_offset(u64 start, u64 len) 420 { 421 u64 end; 422 423 end = start + len; 
/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * are l1 and l2 intersecting?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
		    const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}

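/*
 * Example (editor's note): ranges are half-open, [offset, offset + length),
 * with a saturated end meaning "to EOF". For l1 = {IOMODE_RW, 0, 8192} and
 * l2 = {IOMODE_RW, 4096, 4096}, l2 is contained in l1 and the two intersect.
 * A recall range of {IOMODE_ANY, 0, NFS4_MAX_UINT64} matches every cached
 * segment, which is how the bulk-destroy paths below sweep a whole layout.
 */
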
/* Returns the number of matching invalid lsegs remaining in the list
 * after the call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    const struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			if (!mark_lseg_invalid(lseg, tmp_list))
				remaining++;
		}
	dprintk("%s:Return %i\n", __func__, remaining);
	return remaining;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		if (is_bulk_recall)
			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
			ret = -EAGAIN;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		/* Free all lsegs that are attached to commit buckets */
		nfs_commit_inode(inode, 0);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}

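/*
 * Example (editor's note): the subtraction is done modulo 2^32, so a
 * seqid that has just wrapped still compares as newer:
 * pnfs_seqid_is_newer(1, 0xffffffff) is true since
 * (s32)(1 - 0xffffffff) == 2 > 0, while pnfs_seqid_is_newer(1, 2) is
 * false. This keeps stateid ordering sane across 32-bit wraparound.
 */
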
/* update lo->plh_stateid with new if new is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* Return true if LAYOUTGETs are currently blocked for this layout */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      const struct pnfs_layout_range *range,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo)) {
		status = -EAGAIN;
	} else if (!nfs4_valid_open_stateid(open_state)) {
		status = -EBADF;
	} else if (list_empty(&lo->plh_segs) ||
		   test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}

/*
 * Get layout from server.
 *    For now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   const struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;
	loff_t i_size;

	dprintk("--> %s\n", __func__);

	/*
	 * Synchronously retrieve layout information from server and
	 * store in lseg. If we race with a concurrent seqid morphing
	 * op, then re-send the LAYOUTGET.
	 */
	do {
		lgp = kzalloc(sizeof(*lgp), gfp_flags);
		if (lgp == NULL)
			return NULL;

		i_size = i_size_read(ino);

		lgp->args.minlength = PAGE_CACHE_SIZE;
		if (lgp->args.minlength > range->length)
			lgp->args.minlength = range->length;
		if (range->iomode == IOMODE_READ) {
			if (range->offset >= i_size)
				lgp->args.minlength = 0;
			else if (i_size - range->offset < lgp->args.minlength)
				lgp->args.minlength = i_size - range->offset;
		}
		lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
		pnfs_copy_range(&lgp->args.range, range);
		lgp->args.type = server->pnfs_curr_ld->id;
		lgp->args.inode = ino;
		lgp->args.ctx = get_nfs_open_context(ctx);
		lgp->gfp_flags = gfp_flags;
		lgp->cred = lo->plh_lc_cred;

		lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	} while (lseg == ERR_PTR(-EAGAIN));

	if (IS_ERR(lseg) && !nfs_error_is_fatal(PTR_ERR(lseg)))
		lseg = NULL;
	else
		pnfs_layout_clear_fail_bit(lo,
				pnfs_iomode_to_fail_bit(range->iomode));

	return lseg;
}

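/*
 * Example (editor's note): the minlength clamping above never asks the
 * server to guarantee bytes that cannot exist. For a READ at offset 8192
 * of a 10000-byte file, args.minlength drops from one page to 1808 bytes
 * (10000 - 8192); for a READ starting at or beyond i_size it drops to 0.
 * The requested range itself is still sent unchanged in args.range.
 */
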
static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}

void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
	smp_mb__after_atomic();
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}

static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
{
	if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		return false;
	lo->plh_return_iomode = 0;
	pnfs_get_layout_hdr(lo);
	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	return true;
}

static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
		       enum pnfs_iomode iomode, bool sync)
{
	struct inode *ino = lo->plh_inode;
	struct nfs4_layoutreturn *lrp;
	int status = 0;

	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	nfs4_stateid_copy(&lrp->args.stateid, stateid);
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.range.iomode = iomode;
	lrp->args.range.offset = 0;
	lrp->args.range.length = NFS4_MAX_UINT64;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}

/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layout_segment *s;

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return false;

	/* Defer layoutreturn until all lsegs are done */
	list_for_each_entry(s, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
			return false;
	}

	return true;
}

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return;
	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;
		bool send;

		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
		iomode = lo->plh_return_iomode;
		send = pnfs_prepare_layoutreturn(lo);
		spin_unlock(&inode->i_lock);
		if (send) {
			/* Send an async layoutreturn so we don't deadlock */
			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
		}
	} else
		spin_unlock(&inode->i_lock);
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	nfs4_stateid stateid;
	int status = 0, empty;
	bool send;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	nfs4_stateid_copy(&stateid, &nfsi->layout->plh_stateid);
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		struct pnfs_layout_range range = {
			.iomode		= IOMODE_ANY,
			.offset		= 0,
			.length		= NFS4_MAX_UINT64,
		};
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
	}

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out_put_layout_hdr;
	}

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	send = pnfs_prepare_layoutreturn(lo);
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	if (send)
		status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}

bool pnfs_roc(struct inode *ino)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	nfs4_stateid stateid;
	LIST_HEAD(tmp_list);
	bool found = false, layoutreturn = false, roc = false;

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_noroc;

	/* no roc if we hold a delegation */
	if (nfs4_check_delegation(ino, FMODE_READ))
		goto out_noroc;

	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		/* Don't return layout if there is open file state */
		if (state != NULL && state->state != 0)
			goto out_noroc;
	}

	nfs4_stateid_copy(&stateid, &lo->plh_stateid);
	/* always send layoutreturn if being marked so */
	if (test_and_clear_bit(NFS_LAYOUT_RETURN_REQUESTED,
				&lo->plh_flags))
		layoutreturn = pnfs_prepare_layoutreturn(lo);

	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		/* If we are sending layoutreturn, invalidate all valid lsegs */
		if (layoutreturn || test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	/* ROC in two conditions:
	 * 1. there are ROC lsegs
	 * 2. we don't send layoutreturn
	 */
	if (found && !layoutreturn) {
		/* lo ref dropped in pnfs_roc_release() */
		pnfs_get_layout_hdr(lo);
		roc = true;
	}

out_noroc:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	pnfs_layoutcommit_inode(ino, true);
	if (layoutreturn)
		pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
	return roc;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	pnfs_clear_layoutreturn_waitbit(lo);
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	pnfs_mark_layout_returned_if_empty(lo);
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
	trace_nfs4_layoutreturn_on_close(ino, 0);
}

void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	u32 current_seqid;

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);
}

bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	bool sleep = false;

	/* we might not have grabbed a reference on lo,
	 * so we need to check under the i_lock */
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		sleep = true;
	spin_unlock(&ino->i_lock);

	if (sleep)
		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);

	return sleep;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

static bool
pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	return pnfs_lseg_range_cmp(l1, l2) > 0;
}

static bool
pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_segment *old)
{
	return false;
}

void
pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   bool (*is_after)(const struct pnfs_layout_range *,
			   const struct pnfs_layout_range *),
		   bool (*do_merge)(struct pnfs_layout_segment *,
			   struct pnfs_layout_segment *),
		   struct list_head *free_me)
{
	struct pnfs_layout_segment *lp, *tmp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
			continue;
		if (do_merge(lseg, lp)) {
			mark_lseg_invalid(lp, free_me);
			continue;
		}
		if (is_after(&lseg->pls_range, &lp->pls_range))
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   struct list_head *free_me)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;

	if (ld->add_lseg != NULL)
		ld->add_lseg(lo, lseg, free_me);
	else
		pnfs_generic_layout_insert_lseg(lo, lseg,
				pnfs_lseg_range_is_after,
				pnfs_lseg_no_merge,
				free_me);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server. If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server. If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}

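/*
 * Example (editor's note): with an OPEN-supplied threshold of
 * { bm = THRESHOLD_RD | THRESHOLD_RD_IO, rd_sz = 65536, rd_io_sz = 32768 },
 * a READ against a 16 KB file whose cumulative read_io is still below
 * 32768 returns true (use the MDS: both thresholds are set and both are
 * under their limits), whereas the same READ against a 1 MB file returns
 * false and the I/O goes over pNFS.
 */
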
static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
	/*
	 * send layoutcommit as it can hold up layoutreturn due to lseg
	 * reference
	 */
	pnfs_layoutcommit_inode(lo->plh_inode, false);
	return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
				   nfs_wait_bit_killable,
				   TASK_UNINTERRUPTIBLE);
}

static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
	unsigned long *bitlock = &lo->plh_flags;

	clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
		trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
				 PNFS_UPDATE_LAYOUT_NO_PNFS);
		goto out;
	}

	if (iomode == IOMODE_READ && i_size_read(ino) == 0) {
		trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
				 PNFS_UPDATE_LAYOUT_RD_ZEROLEN);
		goto out;
	}

	if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
				 PNFS_UPDATE_LAYOUT_MDSTHRESH);
		goto out;
	}

lookup_again:
	first = false;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
				 PNFS_UPDATE_LAYOUT_NOMEM);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
				 PNFS_UPDATE_LAYOUT_BULK_RECALL);
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
				 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
		goto out_unlock;
	}

	first = list_empty(&lo->plh_segs);
	if (first) {
		/* The first layoutget for the file. Need to serialize per
		 * RFC 5661 Errata 3208.
		 */
		if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
				     &lo->plh_flags)) {
			spin_unlock(&ino->i_lock);
			wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
				    TASK_UNINTERRUPTIBLE);
			pnfs_put_layout_hdr(lo);
			goto lookup_again;
		}
	} else {
		/* Check to see if the layout for the given range
		 * already exists
		 */
		lseg = pnfs_find_lseg(lo, &arg);
		if (lseg) {
			trace_pnfs_update_layout(ino, pos, count, iomode, lo,
					PNFS_UPDATE_LAYOUT_FOUND_CACHED);
			goto out_unlock;
		}
	}

	/*
	 * Because we free lsegs before sending LAYOUTRETURN, we need to wait
	 * for LAYOUTRETURN even if first is true.
	 */
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		dprintk("%s wait for layoutreturn\n", __func__);
		if (pnfs_prepare_to_retry_layoutget(lo)) {
			if (first)
				pnfs_clear_first_layoutget(lo);
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			goto lookup_again;
		}
		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
				PNFS_UPDATE_LAYOUT_RETURN);
		goto out_put_layout_hdr;
	}

	if (pnfs_layoutgets_blocked(lo)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo,
				PNFS_UPDATE_LAYOUT_BLOCKED);
		goto out_unlock;
	}
	atomic_inc(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);

	if (list_empty(&lo->plh_layouts)) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		if (list_empty(&lo->plh_layouts))
			list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	atomic_dec(&lo->plh_outstanding);
	trace_pnfs_update_layout(ino, pos, count, iomode, lo,
				 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
out_put_layout_hdr:
	if (first)
		pnfs_clear_first_layoutget(lo);
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			IS_ERR_OR_NULL(lseg) ? "not found" : "found",
			iomode == IOMODE_RW ? "read/write" : "read-only",
"read/write" : "read-only", 1629 (unsigned long long)pos, 1630 (unsigned long long)count); 1631 return lseg; 1632 out_unlock: 1633 spin_unlock(&ino->i_lock); 1634 goto out_put_layout_hdr; 1635 } 1636 EXPORT_SYMBOL_GPL(pnfs_update_layout); 1637 1638 static bool 1639 pnfs_sanity_check_layout_range(struct pnfs_layout_range *range) 1640 { 1641 switch (range->iomode) { 1642 case IOMODE_READ: 1643 case IOMODE_RW: 1644 break; 1645 default: 1646 return false; 1647 } 1648 if (range->offset == NFS4_MAX_UINT64) 1649 return false; 1650 if (range->length == 0) 1651 return false; 1652 if (range->length != NFS4_MAX_UINT64 && 1653 range->length > NFS4_MAX_UINT64 - range->offset) 1654 return false; 1655 return true; 1656 } 1657 1658 struct pnfs_layout_segment * 1659 pnfs_layout_process(struct nfs4_layoutget *lgp) 1660 { 1661 struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout; 1662 struct nfs4_layoutget_res *res = &lgp->res; 1663 struct pnfs_layout_segment *lseg; 1664 struct inode *ino = lo->plh_inode; 1665 LIST_HEAD(free_me); 1666 int status = -EINVAL; 1667 1668 if (!pnfs_sanity_check_layout_range(&res->range)) 1669 goto out; 1670 1671 /* Inject layout blob into I/O device driver */ 1672 lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags); 1673 if (!lseg || IS_ERR(lseg)) { 1674 if (!lseg) 1675 status = -ENOMEM; 1676 else 1677 status = PTR_ERR(lseg); 1678 dprintk("%s: Could not allocate layout: error %d\n", 1679 __func__, status); 1680 goto out; 1681 } 1682 1683 init_lseg(lo, lseg); 1684 lseg->pls_range = res->range; 1685 1686 spin_lock(&ino->i_lock); 1687 if (pnfs_layoutgets_blocked(lo)) { 1688 dprintk("%s forget reply due to state\n", __func__); 1689 goto out_forget_reply; 1690 } 1691 1692 if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) { 1693 /* existing state ID, make sure the sequence number matches. */ 1694 if (pnfs_layout_stateid_blocked(lo, &res->stateid)) { 1695 dprintk("%s forget reply due to sequence\n", __func__); 1696 status = -EAGAIN; 1697 goto out_forget_reply; 1698 } 1699 pnfs_set_layout_stateid(lo, &res->stateid, false); 1700 } else { 1701 /* 1702 * We got an entirely new state ID. Mark all segments for the 1703 * inode invalid, and don't bother validating the stateid 1704 * sequence number. 
static bool
pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
{
	switch (range->iomode) {
	case IOMODE_READ:
	case IOMODE_RW:
		break;
	default:
		return false;
	}
	if (range->offset == NFS4_MAX_UINT64)
		return false;
	if (range->length == 0)
		return false;
	if (range->length != NFS4_MAX_UINT64 &&
	    range->length > NFS4_MAX_UINT64 - range->offset)
		return false;
	return true;
}

struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);
	int status = -EINVAL;

	if (!pnfs_sanity_check_layout_range(&res->range))
		goto out;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	init_lseg(lo, lseg);
	lseg->pls_range = res->range;

	spin_lock(&ino->i_lock);
	if (pnfs_layoutgets_blocked(lo)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}

	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
		/* existing state ID, make sure the sequence number matches. */
		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
			dprintk("%s forget reply due to sequence\n", __func__);
			status = -EAGAIN;
			goto out_forget_reply;
		}
		pnfs_set_layout_stateid(lo, &res->stateid, false);
	} else {
		/*
		 * We got an entirely new state ID. Mark all segments for the
		 * inode invalid, and don't bother validating the stateid
		 * sequence number.
		 */
		pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL);

		nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
		lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
	}

	clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);

	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg, &free_me);

	if (res->return_on_close)
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;
out:
	return ERR_PTR(status);

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}

static void
pnfs_set_plh_return_iomode(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode)
{
	if (lo->plh_return_iomode == iomode)
		return;
	if (lo->plh_return_iomode != 0)
		iomode = IOMODE_ANY;
	lo->plh_return_iomode = iomode;
}

int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
				struct list_head *tmp_list,
				const struct pnfs_layout_range *return_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;

	assert_spin_locked(&lo->plh_inode->i_lock);

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (should_free_lseg(&lseg->pls_range, return_range)) {
			dprintk("%s: marking lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode,
				lseg->pls_range.offset,
				lseg->pls_range.length);
			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
			pnfs_set_plh_return_iomode(lo, return_range->iomode);
			if (!mark_lseg_invalid(lseg, tmp_list))
				remaining++;
			set_bit(NFS_LAYOUT_RETURN_REQUESTED,
					&lo->plh_flags);
		}
	return remaining;
}

void pnfs_error_mark_layout_for_return(struct inode *inode,
				       struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
	struct pnfs_layout_range range = {
		.iomode = lseg->pls_range.iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(free_me);
	bool return_now = false;

	spin_lock(&inode->i_lock);
	pnfs_set_plh_return_iomode(lo, range.iomode);
	/*
	 * mark all matching lsegs so that we are sure to have no live
	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
	 * for how it works.
	 */
	if (!pnfs_mark_matching_lsegs_return(lo, &free_me, &range)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode = lo->plh_return_iomode;

		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
		return_now = pnfs_prepare_layoutreturn(lo);
		spin_unlock(&inode->i_lock);
		if (return_now)
			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
	} else {
		spin_unlock(&inode->i_lock);
		nfs_commit_inode(inode, 0);
	}
	pnfs_free_lseg_list(&free_me);
}
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);

void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	if (pgio->pg_lseg == NULL) {
		if (pgio->pg_dreq == NULL)
			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
		else
			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   rd_size,
						   IOMODE_READ,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	if (pgio->pg_lseg == NULL) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   wb_size,
						   IOMODE_RW,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
{
	if (desc->pg_lseg) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are less bytes than 'size', return that instead.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 */
	if (pgio->pg_lseg) {
		seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
				     pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);
		WARN_ON_ONCE(req_start >= seg_end);
		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end) {
			/* reference the new lseg */
			if (pgio->pg_ops->pg_cleanup)
				pgio->pg_ops->pg_cleanup(pgio);
			if (pgio->pg_ops->pg_init)
				pgio->pg_ops->pg_init(pgio, req);
			return 0;
		}

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);

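/*
 * Example (editor's note): for a segment covering [0, 65536) and a request
 * starting at offset 61440, seg_left is 4096; if nfs_generic_pg_test
 * allowed 8192 bytes, the return value is clamped to 4096 so coalesced
 * I/O never crosses the layout segment boundary.
 */
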

static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_write_through_mds(desc, hdr);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);

	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_write(desc, hdr, desc->pg_ioflags);

	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

/*
 * Called by non-RPC-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
	hdr->release(hdr);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		      const struct rpc_call_ops *call_ops,
		      struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
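
/*
 * pnfs_do_read() below acts on the three pnfs_try_status values: with
 * PNFS_ATTEMPTED the layout driver owns completion of the I/O; with
 * PNFS_TRY_AGAIN the requests are resent through pNFS via
 * pnfs_read_resend_pnfs(); with PNFS_NOT_ATTEMPTED (or if the resend
 * fails) the requests fall back to the MDS via pnfs_read_through_mds().
 */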

/* Resend all requests through pnfs. */
int pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);

static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;
	int err = 0;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	if (trypnfs == PNFS_TRY_AGAIN)
		err = pnfs_read_resend_pnfs(hdr);
	if (trypnfs == PNFS_NOT_ATTEMPTED || err)
		pnfs_read_through_mds(desc, hdr);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}
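
/*
 * pnfs_clear_layoutcommitting() above is the release side of the
 * NFS_INO_LAYOUTCOMMITTING bit lock taken in pnfs_layoutcommit_inode():
 * clear_bit_unlock() orders the clear after the commit's stores, and the
 * smp_mb__after_atomic() before wake_up_bit() is the standard pairing that
 * keeps the wakeup from racing with a waiter going to sleep on the bit.
 */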

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		      loff_t end_pos)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		nfsi->layout->plh_lwb = end_pos;
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu\n",
			__func__, inode->i_ino);
	} else if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(lseg);
	}
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}
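
/*
 * Layout drivers that must send opaque layoutupdate data with LAYOUTCOMMIT
 * hook both ends of the operation via pnfs_layoutdriver_type.  A sketch,
 * with hypothetical "mylayout" callbacks (not a real driver):
 *
 *	.prepare_layoutcommit	= mylayout_prepare_layoutcommit,
 *	.cleanup_layoutcommit	= mylayout_cleanup_layoutcommit,
 *
 * prepare_layoutcommit() encodes the driver's payload into the
 * nfs4_layoutcommit_args before the RPC is sent; cleanup_layoutcommit()
 * releases that state when pnfs_cleanup_layoutcommit() above runs.
 */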

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
						 NFS_INO_LAYOUTCOMMITTING,
						 nfs_wait_bit_killable,
						 TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			put_rpccred(data->cred);
			spin_lock(&inode->i_lock);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			goto out_unlock;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);

int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}
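
/*
 * NFSv4.2 LAYOUTSTATS reporting.  pnfs_report_layoutstat() below allows at
 * most one outstanding LAYOUTSTATS RPC per inode: NFS_INO_LAYOUTSTATS gates
 * entry, the local error paths drop the bit, and on the success path it is
 * expected to be dropped by the nfs42 RPC release callback once the call
 * completes (an assumption about the completion path, which lives in
 * nfs42proc.c).
 */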

#if IS_ENABLED(CONFIG_NFS_V4_2)
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out;
	}
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid);
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif

/*
 * Interval, in seconds, between LAYOUTSTATS reports; 0 leaves the layout
 * driver's default interval in place (the flexfiles driver consumes this).
 */
unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);