1 /* 2 * Module for pnfs flexfile layout driver. 3 * 4 * Copyright (c) 2014, Primary Data, Inc. All rights reserved. 5 * 6 * Tao Peng <bergwolf@primarydata.com> 7 */ 8 9 #include <linux/nfs_fs.h> 10 #include <linux/nfs_page.h> 11 #include <linux/module.h> 12 #include <linux/sched/mm.h> 13 14 #include <linux/sunrpc/metrics.h> 15 16 #include "flexfilelayout.h" 17 #include "../nfs4session.h" 18 #include "../nfs4idmap.h" 19 #include "../internal.h" 20 #include "../delegation.h" 21 #include "../nfs4trace.h" 22 #include "../iostat.h" 23 #include "../nfs.h" 24 #include "../nfs42.h" 25 26 #define NFSDBG_FACILITY NFSDBG_PNFS_LD 27 28 #define FF_LAYOUT_POLL_RETRY_MAX (15*HZ) 29 #define FF_LAYOUTRETURN_MAXERR 20 30 31 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task, 32 struct nfs_pgio_header *hdr); 33 static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo, 34 struct nfs42_layoutstat_devinfo *devinfo, 35 int dev_limit); 36 static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr, 37 const struct nfs42_layoutstat_devinfo *devinfo, 38 struct nfs4_ff_layout_mirror *mirror); 39 40 static struct pnfs_layout_hdr * 41 ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags) 42 { 43 struct nfs4_flexfile_layout *ffl; 44 45 ffl = kzalloc(sizeof(*ffl), gfp_flags); 46 if (ffl) { 47 INIT_LIST_HEAD(&ffl->error_list); 48 INIT_LIST_HEAD(&ffl->mirrors); 49 ffl->last_report_time = ktime_get(); 50 return &ffl->generic_hdr; 51 } else 52 return NULL; 53 } 54 55 static void 56 ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo) 57 { 58 struct nfs4_ff_layout_ds_err *err, *n; 59 60 list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list, 61 list) { 62 list_del(&err->list); 63 kfree(err); 64 } 65 kfree(FF_LAYOUT_FROM_HDR(lo)); 66 } 67 68 static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) 69 { 70 __be32 *p; 71 72 p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE); 73 if (unlikely(p == NULL)) 74 return -ENOBUFS; 75 stateid->type = NFS4_PNFS_DS_STATEID_TYPE; 76 memcpy(stateid->data, p, NFS4_STATEID_SIZE); 77 dprintk("%s: stateid id= [%x%x%x%x]\n", __func__, 78 p[0], p[1], p[2], p[3]); 79 return 0; 80 } 81 82 static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid) 83 { 84 __be32 *p; 85 86 p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE); 87 if (unlikely(!p)) 88 return -ENOBUFS; 89 memcpy(devid, p, NFS4_DEVICEID4_SIZE); 90 nfs4_print_deviceid(devid); 91 return 0; 92 } 93 94 static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh) 95 { 96 __be32 *p; 97 98 p = xdr_inline_decode(xdr, 4); 99 if (unlikely(!p)) 100 return -ENOBUFS; 101 fh->size = be32_to_cpup(p++); 102 if (fh->size > sizeof(struct nfs_fh)) { 103 printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n", 104 fh->size); 105 return -EOVERFLOW; 106 } 107 /* fh.data */ 108 p = xdr_inline_decode(xdr, fh->size); 109 if (unlikely(!p)) 110 return -ENOBUFS; 111 memcpy(&fh->data, p, fh->size); 112 dprintk("%s: fh len %d\n", __func__, fh->size); 113 114 return 0; 115 } 116 117 /* 118 * Currently only stringified uids and gids are accepted. 119 * I.e., kerberos is not supported to the DSes, so no pricipals. 120 * 121 * That means that one common function will suffice, but when 122 * principals are added, this should be split to accomodate 123 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid(). 124 */ 125 static int 126 decode_name(struct xdr_stream *xdr, u32 *id) 127 { 128 __be32 *p; 129 int len; 130 131 /* opaque_length(4)*/ 132 p = xdr_inline_decode(xdr, 4); 133 if (unlikely(!p)) 134 return -ENOBUFS; 135 len = be32_to_cpup(p++); 136 if (len < 0) 137 return -EINVAL; 138 139 dprintk("%s: len %u\n", __func__, len); 140 141 /* opaque body */ 142 p = xdr_inline_decode(xdr, len); 143 if (unlikely(!p)) 144 return -ENOBUFS; 145 146 if (!nfs_map_string_to_numeric((char *)p, len, id)) 147 return -EINVAL; 148 149 return 0; 150 } 151 152 static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1, 153 const struct nfs4_ff_layout_mirror *m2) 154 { 155 int i, j; 156 157 if (m1->fh_versions_cnt != m2->fh_versions_cnt) 158 return false; 159 for (i = 0; i < m1->fh_versions_cnt; i++) { 160 bool found_fh = false; 161 for (j = 0; j < m2->fh_versions_cnt; j++) { 162 if (nfs_compare_fh(&m1->fh_versions[i], 163 &m2->fh_versions[j]) == 0) { 164 found_fh = true; 165 break; 166 } 167 } 168 if (!found_fh) 169 return false; 170 } 171 return true; 172 } 173 174 static struct nfs4_ff_layout_mirror * 175 ff_layout_add_mirror(struct pnfs_layout_hdr *lo, 176 struct nfs4_ff_layout_mirror *mirror) 177 { 178 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo); 179 struct nfs4_ff_layout_mirror *pos; 180 struct inode *inode = lo->plh_inode; 181 182 spin_lock(&inode->i_lock); 183 list_for_each_entry(pos, &ff_layout->mirrors, mirrors) { 184 if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0) 185 continue; 186 if (!ff_mirror_match_fh(mirror, pos)) 187 continue; 188 if (refcount_inc_not_zero(&pos->ref)) { 189 spin_unlock(&inode->i_lock); 190 return pos; 191 } 192 } 193 list_add(&mirror->mirrors, &ff_layout->mirrors); 194 mirror->layout = lo; 195 spin_unlock(&inode->i_lock); 196 return mirror; 197 } 198 199 static void 200 ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror) 201 { 202 struct inode *inode; 203 if (mirror->layout == NULL) 204 return; 205 inode = mirror->layout->plh_inode; 206 spin_lock(&inode->i_lock); 207 list_del(&mirror->mirrors); 208 spin_unlock(&inode->i_lock); 209 mirror->layout = NULL; 210 } 211 212 static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags) 213 { 214 struct nfs4_ff_layout_mirror *mirror; 215 216 mirror = kzalloc(sizeof(*mirror), gfp_flags); 217 if (mirror != NULL) { 218 spin_lock_init(&mirror->lock); 219 refcount_set(&mirror->ref, 1); 220 INIT_LIST_HEAD(&mirror->mirrors); 221 } 222 return mirror; 223 } 224 225 static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror) 226 { 227 const struct cred *cred; 228 229 ff_layout_remove_mirror(mirror); 230 kfree(mirror->fh_versions); 231 cred = rcu_access_pointer(mirror->ro_cred); 232 put_cred(cred); 233 cred = rcu_access_pointer(mirror->rw_cred); 234 put_cred(cred); 235 nfs4_ff_layout_put_deviceid(mirror->mirror_ds); 236 kfree(mirror); 237 } 238 239 static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror) 240 { 241 if (mirror != NULL && refcount_dec_and_test(&mirror->ref)) 242 ff_layout_free_mirror(mirror); 243 } 244 245 static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls) 246 { 247 int i; 248 249 if (fls->mirror_array) { 250 for (i = 0; i < fls->mirror_array_cnt; i++) { 251 /* normally mirror_ds is freed in 252 * .free_deviceid_node but we still do it here 253 * for .alloc_lseg error path */ 254 ff_layout_put_mirror(fls->mirror_array[i]); 255 } 256 kfree(fls->mirror_array); 257 fls->mirror_array = NULL; 258 } 259 } 260 261 static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr) 262 { 263 int ret = 0; 264 265 dprintk("--> %s\n", __func__); 266 267 /* FIXME: remove this check when layout segment support is added */ 268 if (lgr->range.offset != 0 || 269 lgr->range.length != NFS4_MAX_UINT64) { 270 dprintk("%s Only whole file layouts supported. Use MDS i/o\n", 271 __func__); 272 ret = -EINVAL; 273 } 274 275 dprintk("--> %s returns %d\n", __func__, ret); 276 return ret; 277 } 278 279 static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls) 280 { 281 if (fls) { 282 ff_layout_free_mirror_array(fls); 283 kfree(fls); 284 } 285 } 286 287 static bool 288 ff_lseg_range_is_after(const struct pnfs_layout_range *l1, 289 const struct pnfs_layout_range *l2) 290 { 291 u64 end1, end2; 292 293 if (l1->iomode != l2->iomode) 294 return l1->iomode != IOMODE_READ; 295 end1 = pnfs_calc_offset_end(l1->offset, l1->length); 296 end2 = pnfs_calc_offset_end(l2->offset, l2->length); 297 if (end1 < l2->offset) 298 return false; 299 if (end2 < l1->offset) 300 return true; 301 return l2->offset <= l1->offset; 302 } 303 304 static bool 305 ff_lseg_merge(struct pnfs_layout_segment *new, 306 struct pnfs_layout_segment *old) 307 { 308 u64 new_end, old_end; 309 310 if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags)) 311 return false; 312 if (new->pls_range.iomode != old->pls_range.iomode) 313 return false; 314 old_end = pnfs_calc_offset_end(old->pls_range.offset, 315 old->pls_range.length); 316 if (old_end < new->pls_range.offset) 317 return false; 318 new_end = pnfs_calc_offset_end(new->pls_range.offset, 319 new->pls_range.length); 320 if (new_end < old->pls_range.offset) 321 return false; 322 323 /* Mergeable: copy info from 'old' to 'new' */ 324 if (new_end < old_end) 325 new_end = old_end; 326 if (new->pls_range.offset < old->pls_range.offset) 327 new->pls_range.offset = old->pls_range.offset; 328 new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset, 329 new_end); 330 if (test_bit(NFS_LSEG_ROC, &old->pls_flags)) 331 set_bit(NFS_LSEG_ROC, &new->pls_flags); 332 return true; 333 } 334 335 static void 336 ff_layout_add_lseg(struct pnfs_layout_hdr *lo, 337 struct pnfs_layout_segment *lseg, 338 struct list_head *free_me) 339 { 340 pnfs_generic_layout_insert_lseg(lo, lseg, 341 ff_lseg_range_is_after, 342 ff_lseg_merge, 343 free_me); 344 } 345 346 static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls) 347 { 348 int i, j; 349 350 for (i = 0; i < fls->mirror_array_cnt - 1; i++) { 351 for (j = i + 1; j < fls->mirror_array_cnt; j++) 352 if (fls->mirror_array[i]->efficiency < 353 fls->mirror_array[j]->efficiency) 354 swap(fls->mirror_array[i], 355 fls->mirror_array[j]); 356 } 357 } 358 359 static struct pnfs_layout_segment * 360 ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh, 361 struct nfs4_layoutget_res *lgr, 362 gfp_t gfp_flags) 363 { 364 struct pnfs_layout_segment *ret; 365 struct nfs4_ff_layout_segment *fls = NULL; 366 struct xdr_stream stream; 367 struct xdr_buf buf; 368 struct page *scratch; 369 u64 stripe_unit; 370 u32 mirror_array_cnt; 371 __be32 *p; 372 int i, rc; 373 374 dprintk("--> %s\n", __func__); 375 scratch = alloc_page(gfp_flags); 376 if (!scratch) 377 return ERR_PTR(-ENOMEM); 378 379 xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, 380 lgr->layoutp->len); 381 xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); 382 383 /* stripe unit and mirror_array_cnt */ 384 rc = -EIO; 385 p = xdr_inline_decode(&stream, 8 + 4); 386 if (!p) 387 goto out_err_free; 388 389 p = xdr_decode_hyper(p, &stripe_unit); 390 mirror_array_cnt = be32_to_cpup(p++); 391 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__, 392 stripe_unit, mirror_array_cnt); 393 394 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT || 395 mirror_array_cnt == 0) 396 goto out_err_free; 397 398 rc = -ENOMEM; 399 fls = kzalloc(sizeof(*fls), gfp_flags); 400 if (!fls) 401 goto out_err_free; 402 403 fls->mirror_array_cnt = mirror_array_cnt; 404 fls->stripe_unit = stripe_unit; 405 fls->mirror_array = kcalloc(fls->mirror_array_cnt, 406 sizeof(fls->mirror_array[0]), gfp_flags); 407 if (fls->mirror_array == NULL) 408 goto out_err_free; 409 410 for (i = 0; i < fls->mirror_array_cnt; i++) { 411 struct nfs4_ff_layout_mirror *mirror; 412 struct cred *kcred; 413 const struct cred __rcu *cred; 414 kuid_t uid; 415 kgid_t gid; 416 u32 ds_count, fh_count, id; 417 int j; 418 419 rc = -EIO; 420 p = xdr_inline_decode(&stream, 4); 421 if (!p) 422 goto out_err_free; 423 ds_count = be32_to_cpup(p); 424 425 /* FIXME: allow for striping? */ 426 if (ds_count != 1) 427 goto out_err_free; 428 429 fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags); 430 if (fls->mirror_array[i] == NULL) { 431 rc = -ENOMEM; 432 goto out_err_free; 433 } 434 435 fls->mirror_array[i]->ds_count = ds_count; 436 437 /* deviceid */ 438 rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid); 439 if (rc) 440 goto out_err_free; 441 442 /* efficiency */ 443 rc = -EIO; 444 p = xdr_inline_decode(&stream, 4); 445 if (!p) 446 goto out_err_free; 447 fls->mirror_array[i]->efficiency = be32_to_cpup(p); 448 449 /* stateid */ 450 rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid); 451 if (rc) 452 goto out_err_free; 453 454 /* fh */ 455 rc = -EIO; 456 p = xdr_inline_decode(&stream, 4); 457 if (!p) 458 goto out_err_free; 459 fh_count = be32_to_cpup(p); 460 461 fls->mirror_array[i]->fh_versions = 462 kcalloc(fh_count, sizeof(struct nfs_fh), 463 gfp_flags); 464 if (fls->mirror_array[i]->fh_versions == NULL) { 465 rc = -ENOMEM; 466 goto out_err_free; 467 } 468 469 for (j = 0; j < fh_count; j++) { 470 rc = decode_nfs_fh(&stream, 471 &fls->mirror_array[i]->fh_versions[j]); 472 if (rc) 473 goto out_err_free; 474 } 475 476 fls->mirror_array[i]->fh_versions_cnt = fh_count; 477 478 /* user */ 479 rc = decode_name(&stream, &id); 480 if (rc) 481 goto out_err_free; 482 483 uid = make_kuid(&init_user_ns, id); 484 485 /* group */ 486 rc = decode_name(&stream, &id); 487 if (rc) 488 goto out_err_free; 489 490 gid = make_kgid(&init_user_ns, id); 491 492 if (gfp_flags & __GFP_FS) 493 kcred = prepare_kernel_cred(NULL); 494 else { 495 unsigned int nofs_flags = memalloc_nofs_save(); 496 kcred = prepare_kernel_cred(NULL); 497 memalloc_nofs_restore(nofs_flags); 498 } 499 rc = -ENOMEM; 500 if (!kcred) 501 goto out_err_free; 502 kcred->fsuid = uid; 503 kcred->fsgid = gid; 504 cred = RCU_INITIALIZER(kcred); 505 506 if (lgr->range.iomode == IOMODE_READ) 507 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred); 508 else 509 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred); 510 511 mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]); 512 if (mirror != fls->mirror_array[i]) { 513 /* swap cred ptrs so free_mirror will clean up old */ 514 if (lgr->range.iomode == IOMODE_READ) { 515 cred = xchg(&mirror->ro_cred, cred); 516 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred); 517 } else { 518 cred = xchg(&mirror->rw_cred, cred); 519 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred); 520 } 521 ff_layout_free_mirror(fls->mirror_array[i]); 522 fls->mirror_array[i] = mirror; 523 } 524 525 dprintk("%s: iomode %s uid %u gid %u\n", __func__, 526 lgr->range.iomode == IOMODE_READ ? "READ" : "RW", 527 from_kuid(&init_user_ns, uid), 528 from_kgid(&init_user_ns, gid)); 529 } 530 531 p = xdr_inline_decode(&stream, 4); 532 if (!p) 533 goto out_sort_mirrors; 534 fls->flags = be32_to_cpup(p); 535 536 p = xdr_inline_decode(&stream, 4); 537 if (!p) 538 goto out_sort_mirrors; 539 for (i=0; i < fls->mirror_array_cnt; i++) 540 fls->mirror_array[i]->report_interval = be32_to_cpup(p); 541 542 out_sort_mirrors: 543 ff_layout_sort_mirrors(fls); 544 rc = ff_layout_check_layout(lgr); 545 if (rc) 546 goto out_err_free; 547 ret = &fls->generic_hdr; 548 dprintk("<-- %s (success)\n", __func__); 549 out_free_page: 550 __free_page(scratch); 551 return ret; 552 out_err_free: 553 _ff_layout_free_lseg(fls); 554 ret = ERR_PTR(rc); 555 dprintk("<-- %s (%d)\n", __func__, rc); 556 goto out_free_page; 557 } 558 559 static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout) 560 { 561 struct pnfs_layout_segment *lseg; 562 563 list_for_each_entry(lseg, &layout->plh_segs, pls_list) 564 if (lseg->pls_range.iomode == IOMODE_RW) 565 return true; 566 567 return false; 568 } 569 570 static void 571 ff_layout_free_lseg(struct pnfs_layout_segment *lseg) 572 { 573 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); 574 575 dprintk("--> %s\n", __func__); 576 577 if (lseg->pls_range.iomode == IOMODE_RW) { 578 struct nfs4_flexfile_layout *ffl; 579 struct inode *inode; 580 581 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout); 582 inode = ffl->generic_hdr.plh_inode; 583 spin_lock(&inode->i_lock); 584 if (!ff_layout_has_rw_segments(lseg->pls_layout)) { 585 ffl->commit_info.nbuckets = 0; 586 kfree(ffl->commit_info.buckets); 587 ffl->commit_info.buckets = NULL; 588 } 589 spin_unlock(&inode->i_lock); 590 } 591 _ff_layout_free_lseg(fls); 592 } 593 594 /* Return 1 until we have multiple lsegs support */ 595 static int 596 ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls) 597 { 598 return 1; 599 } 600 601 static void 602 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now) 603 { 604 /* first IO request? */ 605 if (atomic_inc_return(&timer->n_ops) == 1) { 606 timer->start_time = now; 607 } 608 } 609 610 static ktime_t 611 nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now) 612 { 613 ktime_t start; 614 615 if (atomic_dec_return(&timer->n_ops) < 0) 616 WARN_ON_ONCE(1); 617 618 start = timer->start_time; 619 timer->start_time = now; 620 return ktime_sub(now, start); 621 } 622 623 static bool 624 nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror, 625 struct nfs4_ff_layoutstat *layoutstat, 626 ktime_t now) 627 { 628 s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL; 629 struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout); 630 631 nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now); 632 if (!mirror->start_time) 633 mirror->start_time = now; 634 if (mirror->report_interval != 0) 635 report_interval = (s64)mirror->report_interval * 1000LL; 636 else if (layoutstats_timer != 0) 637 report_interval = (s64)layoutstats_timer * 1000LL; 638 if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >= 639 report_interval) { 640 ffl->last_report_time = now; 641 return true; 642 } 643 644 return false; 645 } 646 647 static void 648 nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat, 649 __u64 requested) 650 { 651 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat; 652 653 iostat->ops_requested++; 654 iostat->bytes_requested += requested; 655 } 656 657 static void 658 nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat, 659 __u64 requested, 660 __u64 completed, 661 ktime_t time_completed, 662 ktime_t time_started) 663 { 664 struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat; 665 ktime_t completion_time = ktime_sub(time_completed, time_started); 666 ktime_t timer; 667 668 iostat->ops_completed++; 669 iostat->bytes_completed += completed; 670 iostat->bytes_not_delivered += requested - completed; 671 672 timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed); 673 iostat->total_busy_time = 674 ktime_add(iostat->total_busy_time, timer); 675 iostat->aggregate_completion_time = 676 ktime_add(iostat->aggregate_completion_time, 677 completion_time); 678 } 679 680 static void 681 nfs4_ff_layout_stat_io_start_read(struct inode *inode, 682 struct nfs4_ff_layout_mirror *mirror, 683 __u64 requested, ktime_t now) 684 { 685 bool report; 686 687 spin_lock(&mirror->lock); 688 report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now); 689 nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested); 690 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags); 691 spin_unlock(&mirror->lock); 692 693 if (report) 694 pnfs_report_layoutstat(inode, GFP_KERNEL); 695 } 696 697 static void 698 nfs4_ff_layout_stat_io_end_read(struct rpc_task *task, 699 struct nfs4_ff_layout_mirror *mirror, 700 __u64 requested, 701 __u64 completed) 702 { 703 spin_lock(&mirror->lock); 704 nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat, 705 requested, completed, 706 ktime_get(), task->tk_start); 707 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags); 708 spin_unlock(&mirror->lock); 709 } 710 711 static void 712 nfs4_ff_layout_stat_io_start_write(struct inode *inode, 713 struct nfs4_ff_layout_mirror *mirror, 714 __u64 requested, ktime_t now) 715 { 716 bool report; 717 718 spin_lock(&mirror->lock); 719 report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat, now); 720 nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested); 721 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags); 722 spin_unlock(&mirror->lock); 723 724 if (report) 725 pnfs_report_layoutstat(inode, GFP_NOIO); 726 } 727 728 static void 729 nfs4_ff_layout_stat_io_end_write(struct rpc_task *task, 730 struct nfs4_ff_layout_mirror *mirror, 731 __u64 requested, 732 __u64 completed, 733 enum nfs3_stable_how committed) 734 { 735 if (committed == NFS_UNSTABLE) 736 requested = completed = 0; 737 738 spin_lock(&mirror->lock); 739 nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat, 740 requested, completed, ktime_get(), task->tk_start); 741 set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags); 742 spin_unlock(&mirror->lock); 743 } 744 745 static int 746 ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg, 747 struct nfs_commit_info *cinfo, 748 gfp_t gfp_flags) 749 { 750 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); 751 struct pnfs_commit_bucket *buckets; 752 int size; 753 754 if (cinfo->ds->nbuckets != 0) { 755 /* This assumes there is only one RW lseg per file. 756 * To support multiple lseg per file, we need to 757 * change struct pnfs_commit_bucket to allow dynamic 758 * increasing nbuckets. 759 */ 760 return 0; 761 } 762 763 size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg); 764 765 buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket), 766 gfp_flags); 767 if (!buckets) 768 return -ENOMEM; 769 else { 770 int i; 771 772 spin_lock(&cinfo->inode->i_lock); 773 if (cinfo->ds->nbuckets != 0) 774 kfree(buckets); 775 else { 776 cinfo->ds->buckets = buckets; 777 cinfo->ds->nbuckets = size; 778 for (i = 0; i < size; i++) { 779 INIT_LIST_HEAD(&buckets[i].written); 780 INIT_LIST_HEAD(&buckets[i].committing); 781 /* mark direct verifier as unset */ 782 buckets[i].direct_verf.committed = 783 NFS_INVALID_STABLE_HOW; 784 } 785 } 786 spin_unlock(&cinfo->inode->i_lock); 787 return 0; 788 } 789 } 790 791 static void 792 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx) 793 { 794 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); 795 796 if (devid) 797 nfs4_mark_deviceid_unavailable(devid); 798 } 799 800 static void 801 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, int idx) 802 { 803 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); 804 805 if (devid) 806 nfs4_mark_deviceid_available(devid); 807 } 808 809 static struct nfs4_pnfs_ds * 810 ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg, 811 int start_idx, int *best_idx, 812 bool check_device) 813 { 814 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); 815 struct nfs4_ff_layout_mirror *mirror; 816 struct nfs4_pnfs_ds *ds; 817 bool fail_return = false; 818 int idx; 819 820 /* mirrors are initially sorted by efficiency */ 821 for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) { 822 if (idx+1 == fls->mirror_array_cnt) 823 fail_return = !check_device; 824 825 mirror = FF_LAYOUT_COMP(lseg, idx); 826 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, fail_return); 827 if (!ds) 828 continue; 829 830 if (check_device && 831 nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node)) 832 continue; 833 834 *best_idx = idx; 835 return ds; 836 } 837 838 return NULL; 839 } 840 841 static struct nfs4_pnfs_ds * 842 ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg, 843 int start_idx, int *best_idx) 844 { 845 return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false); 846 } 847 848 static struct nfs4_pnfs_ds * 849 ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg, 850 int start_idx, int *best_idx) 851 { 852 return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true); 853 } 854 855 static struct nfs4_pnfs_ds * 856 ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg, 857 int start_idx, int *best_idx) 858 { 859 struct nfs4_pnfs_ds *ds; 860 861 ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx); 862 if (ds) 863 return ds; 864 return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx); 865 } 866 867 static void 868 ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio, 869 struct nfs_page *req, 870 bool strict_iomode) 871 { 872 pnfs_put_lseg(pgio->pg_lseg); 873 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 874 req->wb_context, 875 0, 876 NFS4_MAX_UINT64, 877 IOMODE_READ, 878 strict_iomode, 879 GFP_KERNEL); 880 if (IS_ERR(pgio->pg_lseg)) { 881 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 882 pgio->pg_lseg = NULL; 883 } 884 } 885 886 static void 887 ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio, 888 struct nfs_page *req) 889 { 890 struct nfs_pgio_mirror *pgm; 891 struct nfs4_ff_layout_mirror *mirror; 892 struct nfs4_pnfs_ds *ds; 893 int ds_idx; 894 895 retry: 896 pnfs_generic_pg_check_layout(pgio); 897 /* Use full layout for now */ 898 if (!pgio->pg_lseg) { 899 ff_layout_pg_get_read(pgio, req, false); 900 if (!pgio->pg_lseg) 901 goto out_nolseg; 902 } 903 if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) { 904 ff_layout_pg_get_read(pgio, req, true); 905 if (!pgio->pg_lseg) 906 goto out_nolseg; 907 } 908 909 ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx); 910 if (!ds) { 911 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg)) 912 goto out_mds; 913 pnfs_put_lseg(pgio->pg_lseg); 914 pgio->pg_lseg = NULL; 915 /* Sleep for 1 second before retrying */ 916 ssleep(1); 917 goto retry; 918 } 919 920 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx); 921 922 pgio->pg_mirror_idx = ds_idx; 923 924 /* read always uses only one mirror - idx 0 for pgio layer */ 925 pgm = &pgio->pg_mirrors[0]; 926 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; 927 928 return; 929 out_nolseg: 930 if (pgio->pg_error < 0) 931 return; 932 out_mds: 933 pnfs_put_lseg(pgio->pg_lseg); 934 pgio->pg_lseg = NULL; 935 nfs_pageio_reset_read_mds(pgio); 936 } 937 938 static void 939 ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, 940 struct nfs_page *req) 941 { 942 struct nfs4_ff_layout_mirror *mirror; 943 struct nfs_pgio_mirror *pgm; 944 struct nfs_commit_info cinfo; 945 struct nfs4_pnfs_ds *ds; 946 int i; 947 int status; 948 949 retry: 950 pnfs_generic_pg_check_layout(pgio); 951 if (!pgio->pg_lseg) { 952 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 953 req->wb_context, 954 0, 955 NFS4_MAX_UINT64, 956 IOMODE_RW, 957 false, 958 GFP_NOFS); 959 if (IS_ERR(pgio->pg_lseg)) { 960 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 961 pgio->pg_lseg = NULL; 962 return; 963 } 964 } 965 /* If no lseg, fall back to write through mds */ 966 if (pgio->pg_lseg == NULL) 967 goto out_mds; 968 969 nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq); 970 status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS); 971 if (status < 0) 972 goto out_mds; 973 974 /* Use a direct mapping of ds_idx to pgio mirror_idx */ 975 if (WARN_ON_ONCE(pgio->pg_mirror_count != 976 FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))) 977 goto out_mds; 978 979 for (i = 0; i < pgio->pg_mirror_count; i++) { 980 mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i); 981 ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true); 982 if (!ds) { 983 if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg)) 984 goto out_mds; 985 pnfs_put_lseg(pgio->pg_lseg); 986 pgio->pg_lseg = NULL; 987 /* Sleep for 1 second before retrying */ 988 ssleep(1); 989 goto retry; 990 } 991 pgm = &pgio->pg_mirrors[i]; 992 pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize; 993 } 994 995 return; 996 997 out_mds: 998 pnfs_put_lseg(pgio->pg_lseg); 999 pgio->pg_lseg = NULL; 1000 nfs_pageio_reset_write_mds(pgio); 1001 } 1002 1003 static unsigned int 1004 ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio, 1005 struct nfs_page *req) 1006 { 1007 if (!pgio->pg_lseg) { 1008 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 1009 req->wb_context, 1010 0, 1011 NFS4_MAX_UINT64, 1012 IOMODE_RW, 1013 false, 1014 GFP_NOFS); 1015 if (IS_ERR(pgio->pg_lseg)) { 1016 pgio->pg_error = PTR_ERR(pgio->pg_lseg); 1017 pgio->pg_lseg = NULL; 1018 goto out; 1019 } 1020 } 1021 if (pgio->pg_lseg) 1022 return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg); 1023 1024 /* no lseg means that pnfs is not in use, so no mirroring here */ 1025 nfs_pageio_reset_write_mds(pgio); 1026 out: 1027 return 1; 1028 } 1029 1030 static const struct nfs_pageio_ops ff_layout_pg_read_ops = { 1031 .pg_init = ff_layout_pg_init_read, 1032 .pg_test = pnfs_generic_pg_test, 1033 .pg_doio = pnfs_generic_pg_readpages, 1034 .pg_cleanup = pnfs_generic_pg_cleanup, 1035 }; 1036 1037 static const struct nfs_pageio_ops ff_layout_pg_write_ops = { 1038 .pg_init = ff_layout_pg_init_write, 1039 .pg_test = pnfs_generic_pg_test, 1040 .pg_doio = pnfs_generic_pg_writepages, 1041 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write, 1042 .pg_cleanup = pnfs_generic_pg_cleanup, 1043 }; 1044 1045 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs) 1046 { 1047 struct rpc_task *task = &hdr->task; 1048 1049 pnfs_layoutcommit_inode(hdr->inode, false); 1050 1051 if (retry_pnfs) { 1052 dprintk("%s Reset task %5u for i/o through pNFS " 1053 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, 1054 hdr->task.tk_pid, 1055 hdr->inode->i_sb->s_id, 1056 (unsigned long long)NFS_FILEID(hdr->inode), 1057 hdr->args.count, 1058 (unsigned long long)hdr->args.offset); 1059 1060 hdr->completion_ops->reschedule_io(hdr); 1061 return; 1062 } 1063 1064 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { 1065 dprintk("%s Reset task %5u for i/o through MDS " 1066 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, 1067 hdr->task.tk_pid, 1068 hdr->inode->i_sb->s_id, 1069 (unsigned long long)NFS_FILEID(hdr->inode), 1070 hdr->args.count, 1071 (unsigned long long)hdr->args.offset); 1072 1073 task->tk_status = pnfs_write_done_resend_to_mds(hdr); 1074 } 1075 } 1076 1077 static void ff_layout_reset_read(struct nfs_pgio_header *hdr) 1078 { 1079 struct rpc_task *task = &hdr->task; 1080 1081 pnfs_layoutcommit_inode(hdr->inode, false); 1082 1083 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { 1084 dprintk("%s Reset task %5u for i/o through MDS " 1085 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, 1086 hdr->task.tk_pid, 1087 hdr->inode->i_sb->s_id, 1088 (unsigned long long)NFS_FILEID(hdr->inode), 1089 hdr->args.count, 1090 (unsigned long long)hdr->args.offset); 1091 1092 task->tk_status = pnfs_read_done_resend_to_mds(hdr); 1093 } 1094 } 1095 1096 static int ff_layout_async_handle_error_v4(struct rpc_task *task, 1097 struct nfs4_state *state, 1098 struct nfs_client *clp, 1099 struct pnfs_layout_segment *lseg, 1100 int idx) 1101 { 1102 struct pnfs_layout_hdr *lo = lseg->pls_layout; 1103 struct inode *inode = lo->plh_inode; 1104 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); 1105 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table; 1106 1107 switch (task->tk_status) { 1108 case -NFS4ERR_BADSESSION: 1109 case -NFS4ERR_BADSLOT: 1110 case -NFS4ERR_BAD_HIGH_SLOT: 1111 case -NFS4ERR_DEADSESSION: 1112 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1113 case -NFS4ERR_SEQ_FALSE_RETRY: 1114 case -NFS4ERR_SEQ_MISORDERED: 1115 dprintk("%s ERROR %d, Reset session. Exchangeid " 1116 "flags 0x%x\n", __func__, task->tk_status, 1117 clp->cl_exchange_flags); 1118 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); 1119 break; 1120 case -NFS4ERR_DELAY: 1121 case -NFS4ERR_GRACE: 1122 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX); 1123 break; 1124 case -NFS4ERR_RETRY_UNCACHED_REP: 1125 break; 1126 case -EAGAIN: 1127 return -NFS4ERR_RESET_TO_PNFS; 1128 /* Invalidate Layout errors */ 1129 case -NFS4ERR_PNFS_NO_LAYOUT: 1130 case -ESTALE: /* mapped NFS4ERR_STALE */ 1131 case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */ 1132 case -EISDIR: /* mapped NFS4ERR_ISDIR */ 1133 case -NFS4ERR_FHEXPIRED: 1134 case -NFS4ERR_WRONG_TYPE: 1135 dprintk("%s Invalid layout error %d\n", __func__, 1136 task->tk_status); 1137 /* 1138 * Destroy layout so new i/o will get a new layout. 1139 * Layout will not be destroyed until all current lseg 1140 * references are put. Mark layout as invalid to resend failed 1141 * i/o and all i/o waiting on the slot table to the MDS until 1142 * layout is destroyed and a new valid layout is obtained. 1143 */ 1144 pnfs_destroy_layout(NFS_I(inode)); 1145 rpc_wake_up(&tbl->slot_tbl_waitq); 1146 goto reset; 1147 /* RPC connection errors */ 1148 case -ECONNREFUSED: 1149 case -EHOSTDOWN: 1150 case -EHOSTUNREACH: 1151 case -ENETUNREACH: 1152 case -EIO: 1153 case -ETIMEDOUT: 1154 case -EPIPE: 1155 dprintk("%s DS connection error %d\n", __func__, 1156 task->tk_status); 1157 nfs4_delete_deviceid(devid->ld, devid->nfs_client, 1158 &devid->deviceid); 1159 rpc_wake_up(&tbl->slot_tbl_waitq); 1160 /* fall through */ 1161 default: 1162 if (ff_layout_avoid_mds_available_ds(lseg)) 1163 return -NFS4ERR_RESET_TO_PNFS; 1164 reset: 1165 dprintk("%s Retry through MDS. Error %d\n", __func__, 1166 task->tk_status); 1167 return -NFS4ERR_RESET_TO_MDS; 1168 } 1169 task->tk_status = 0; 1170 return -EAGAIN; 1171 } 1172 1173 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */ 1174 static int ff_layout_async_handle_error_v3(struct rpc_task *task, 1175 struct pnfs_layout_segment *lseg, 1176 int idx) 1177 { 1178 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); 1179 1180 switch (task->tk_status) { 1181 /* File access problems. Don't mark the device as unavailable */ 1182 case -EACCES: 1183 case -ESTALE: 1184 case -EISDIR: 1185 case -EBADHANDLE: 1186 case -ELOOP: 1187 case -ENOSPC: 1188 case -EAGAIN: 1189 break; 1190 case -EJUKEBOX: 1191 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); 1192 goto out_retry; 1193 default: 1194 dprintk("%s DS connection error %d\n", __func__, 1195 task->tk_status); 1196 nfs4_delete_deviceid(devid->ld, devid->nfs_client, 1197 &devid->deviceid); 1198 } 1199 /* FIXME: Need to prevent infinite looping here. */ 1200 return -NFS4ERR_RESET_TO_PNFS; 1201 out_retry: 1202 task->tk_status = 0; 1203 rpc_restart_call_prepare(task); 1204 rpc_delay(task, NFS_JUKEBOX_RETRY_TIME); 1205 return -EAGAIN; 1206 } 1207 1208 static int ff_layout_async_handle_error(struct rpc_task *task, 1209 struct nfs4_state *state, 1210 struct nfs_client *clp, 1211 struct pnfs_layout_segment *lseg, 1212 int idx) 1213 { 1214 int vers = clp->cl_nfs_mod->rpc_vers->number; 1215 1216 if (task->tk_status >= 0) { 1217 ff_layout_mark_ds_reachable(lseg, idx); 1218 return 0; 1219 } 1220 1221 /* Handle the case of an invalid layout segment */ 1222 if (!pnfs_is_valid_lseg(lseg)) 1223 return -NFS4ERR_RESET_TO_PNFS; 1224 1225 switch (vers) { 1226 case 3: 1227 return ff_layout_async_handle_error_v3(task, lseg, idx); 1228 case 4: 1229 return ff_layout_async_handle_error_v4(task, state, clp, 1230 lseg, idx); 1231 default: 1232 /* should never happen */ 1233 WARN_ON_ONCE(1); 1234 return 0; 1235 } 1236 } 1237 1238 static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, 1239 int idx, u64 offset, u64 length, 1240 u32 status, int opnum, int error) 1241 { 1242 struct nfs4_ff_layout_mirror *mirror; 1243 int err; 1244 1245 if (status == 0) { 1246 switch (error) { 1247 case -ETIMEDOUT: 1248 case -EPFNOSUPPORT: 1249 case -EPROTONOSUPPORT: 1250 case -EOPNOTSUPP: 1251 case -ECONNREFUSED: 1252 case -ECONNRESET: 1253 case -EHOSTDOWN: 1254 case -EHOSTUNREACH: 1255 case -ENETUNREACH: 1256 case -EADDRINUSE: 1257 case -ENOBUFS: 1258 case -EPIPE: 1259 case -EPERM: 1260 status = NFS4ERR_NXIO; 1261 break; 1262 case -EACCES: 1263 status = NFS4ERR_ACCESS; 1264 break; 1265 default: 1266 return; 1267 } 1268 } 1269 1270 switch (status) { 1271 case NFS4ERR_DELAY: 1272 case NFS4ERR_GRACE: 1273 return; 1274 default: 1275 break; 1276 } 1277 1278 mirror = FF_LAYOUT_COMP(lseg, idx); 1279 err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), 1280 mirror, offset, length, status, opnum, 1281 GFP_NOIO); 1282 if (status == NFS4ERR_NXIO) 1283 ff_layout_mark_ds_unreachable(lseg, idx); 1284 pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg); 1285 dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status); 1286 } 1287 1288 /* NFS_PROTO call done callback routines */ 1289 static int ff_layout_read_done_cb(struct rpc_task *task, 1290 struct nfs_pgio_header *hdr) 1291 { 1292 int new_idx = hdr->pgio_mirror_idx; 1293 int err; 1294 1295 trace_nfs4_pnfs_read(hdr, task->tk_status); 1296 if (task->tk_status < 0) 1297 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx, 1298 hdr->args.offset, hdr->args.count, 1299 hdr->res.op_status, OP_READ, 1300 task->tk_status); 1301 err = ff_layout_async_handle_error(task, hdr->args.context->state, 1302 hdr->ds_clp, hdr->lseg, 1303 hdr->pgio_mirror_idx); 1304 1305 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); 1306 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); 1307 switch (err) { 1308 case -NFS4ERR_RESET_TO_PNFS: 1309 if (ff_layout_choose_best_ds_for_read(hdr->lseg, 1310 hdr->pgio_mirror_idx + 1, 1311 &new_idx)) 1312 goto out_layouterror; 1313 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); 1314 return task->tk_status; 1315 case -NFS4ERR_RESET_TO_MDS: 1316 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); 1317 return task->tk_status; 1318 case -EAGAIN: 1319 goto out_eagain; 1320 } 1321 1322 return 0; 1323 out_layouterror: 1324 ff_layout_read_record_layoutstats_done(task, hdr); 1325 ff_layout_send_layouterror(hdr->lseg); 1326 hdr->pgio_mirror_idx = new_idx; 1327 out_eagain: 1328 rpc_restart_call_prepare(task); 1329 return -EAGAIN; 1330 } 1331 1332 static bool 1333 ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg) 1334 { 1335 return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT); 1336 } 1337 1338 /* 1339 * We reference the rpc_cred of the first WRITE that triggers the need for 1340 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound. 1341 * rfc5661 is not clear about which credential should be used. 1342 * 1343 * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so 1344 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751 1345 * we always send layoutcommit after DS writes. 1346 */ 1347 static void 1348 ff_layout_set_layoutcommit(struct inode *inode, 1349 struct pnfs_layout_segment *lseg, 1350 loff_t end_offset) 1351 { 1352 if (!ff_layout_need_layoutcommit(lseg)) 1353 return; 1354 1355 pnfs_set_layoutcommit(inode, lseg, end_offset); 1356 dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino, 1357 (unsigned long long) NFS_I(inode)->layout->plh_lwb); 1358 } 1359 1360 static void ff_layout_read_record_layoutstats_start(struct rpc_task *task, 1361 struct nfs_pgio_header *hdr) 1362 { 1363 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags)) 1364 return; 1365 nfs4_ff_layout_stat_io_start_read(hdr->inode, 1366 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), 1367 hdr->args.count, 1368 task->tk_start); 1369 } 1370 1371 static void ff_layout_read_record_layoutstats_done(struct rpc_task *task, 1372 struct nfs_pgio_header *hdr) 1373 { 1374 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags)) 1375 return; 1376 nfs4_ff_layout_stat_io_end_read(task, 1377 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), 1378 hdr->args.count, 1379 hdr->res.count); 1380 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags); 1381 } 1382 1383 static int ff_layout_read_prepare_common(struct rpc_task *task, 1384 struct nfs_pgio_header *hdr) 1385 { 1386 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { 1387 rpc_exit(task, -EIO); 1388 return -EIO; 1389 } 1390 1391 ff_layout_read_record_layoutstats_start(task, hdr); 1392 return 0; 1393 } 1394 1395 /* 1396 * Call ops for the async read/write cases 1397 * In the case of dense layouts, the offset needs to be reset to its 1398 * original value. 1399 */ 1400 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data) 1401 { 1402 struct nfs_pgio_header *hdr = data; 1403 1404 if (ff_layout_read_prepare_common(task, hdr)) 1405 return; 1406 1407 rpc_call_start(task); 1408 } 1409 1410 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data) 1411 { 1412 struct nfs_pgio_header *hdr = data; 1413 1414 if (nfs4_setup_sequence(hdr->ds_clp, 1415 &hdr->args.seq_args, 1416 &hdr->res.seq_res, 1417 task)) 1418 return; 1419 1420 ff_layout_read_prepare_common(task, hdr); 1421 } 1422 1423 static void 1424 ff_layout_io_prepare_transmit(struct rpc_task *task, 1425 void *data) 1426 { 1427 struct nfs_pgio_header *hdr = data; 1428 1429 if (!pnfs_is_valid_lseg(hdr->lseg)) 1430 rpc_exit(task, -EAGAIN); 1431 } 1432 1433 static void ff_layout_read_call_done(struct rpc_task *task, void *data) 1434 { 1435 struct nfs_pgio_header *hdr = data; 1436 1437 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status); 1438 1439 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) && 1440 task->tk_status == 0) { 1441 nfs4_sequence_done(task, &hdr->res.seq_res); 1442 return; 1443 } 1444 1445 /* Note this may cause RPC to be resent */ 1446 hdr->mds_ops->rpc_call_done(task, hdr); 1447 } 1448 1449 static void ff_layout_read_count_stats(struct rpc_task *task, void *data) 1450 { 1451 struct nfs_pgio_header *hdr = data; 1452 1453 ff_layout_read_record_layoutstats_done(task, hdr); 1454 rpc_count_iostats_metrics(task, 1455 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]); 1456 } 1457 1458 static void ff_layout_read_release(void *data) 1459 { 1460 struct nfs_pgio_header *hdr = data; 1461 1462 ff_layout_read_record_layoutstats_done(&hdr->task, hdr); 1463 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) { 1464 ff_layout_send_layouterror(hdr->lseg); 1465 pnfs_read_resend_pnfs(hdr); 1466 } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags)) 1467 ff_layout_reset_read(hdr); 1468 pnfs_generic_rw_release(data); 1469 } 1470 1471 1472 static int ff_layout_write_done_cb(struct rpc_task *task, 1473 struct nfs_pgio_header *hdr) 1474 { 1475 loff_t end_offs = 0; 1476 int err; 1477 1478 trace_nfs4_pnfs_write(hdr, task->tk_status); 1479 if (task->tk_status < 0) 1480 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx, 1481 hdr->args.offset, hdr->args.count, 1482 hdr->res.op_status, OP_WRITE, 1483 task->tk_status); 1484 err = ff_layout_async_handle_error(task, hdr->args.context->state, 1485 hdr->ds_clp, hdr->lseg, 1486 hdr->pgio_mirror_idx); 1487 1488 clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); 1489 clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); 1490 switch (err) { 1491 case -NFS4ERR_RESET_TO_PNFS: 1492 set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); 1493 return task->tk_status; 1494 case -NFS4ERR_RESET_TO_MDS: 1495 set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); 1496 return task->tk_status; 1497 case -EAGAIN: 1498 return -EAGAIN; 1499 } 1500 1501 if (hdr->res.verf->committed == NFS_FILE_SYNC || 1502 hdr->res.verf->committed == NFS_DATA_SYNC) 1503 end_offs = hdr->mds_offset + (loff_t)hdr->res.count; 1504 1505 /* Note: if the write is unstable, don't set end_offs until commit */ 1506 ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs); 1507 1508 /* zero out fattr since we don't care DS attr at all */ 1509 hdr->fattr.valid = 0; 1510 if (task->tk_status >= 0) 1511 nfs_writeback_update_inode(hdr); 1512 1513 return 0; 1514 } 1515 1516 static int ff_layout_commit_done_cb(struct rpc_task *task, 1517 struct nfs_commit_data *data) 1518 { 1519 int err; 1520 1521 trace_nfs4_pnfs_commit_ds(data, task->tk_status); 1522 if (task->tk_status < 0) 1523 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index, 1524 data->args.offset, data->args.count, 1525 data->res.op_status, OP_COMMIT, 1526 task->tk_status); 1527 err = ff_layout_async_handle_error(task, NULL, data->ds_clp, 1528 data->lseg, data->ds_commit_index); 1529 1530 switch (err) { 1531 case -NFS4ERR_RESET_TO_PNFS: 1532 pnfs_generic_prepare_to_resend_writes(data); 1533 return -EAGAIN; 1534 case -NFS4ERR_RESET_TO_MDS: 1535 pnfs_generic_prepare_to_resend_writes(data); 1536 return -EAGAIN; 1537 case -EAGAIN: 1538 rpc_restart_call_prepare(task); 1539 return -EAGAIN; 1540 } 1541 1542 ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb); 1543 1544 return 0; 1545 } 1546 1547 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task, 1548 struct nfs_pgio_header *hdr) 1549 { 1550 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags)) 1551 return; 1552 nfs4_ff_layout_stat_io_start_write(hdr->inode, 1553 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), 1554 hdr->args.count, 1555 task->tk_start); 1556 } 1557 1558 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task, 1559 struct nfs_pgio_header *hdr) 1560 { 1561 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags)) 1562 return; 1563 nfs4_ff_layout_stat_io_end_write(task, 1564 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), 1565 hdr->args.count, hdr->res.count, 1566 hdr->res.verf->committed); 1567 set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags); 1568 } 1569 1570 static int ff_layout_write_prepare_common(struct rpc_task *task, 1571 struct nfs_pgio_header *hdr) 1572 { 1573 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { 1574 rpc_exit(task, -EIO); 1575 return -EIO; 1576 } 1577 1578 ff_layout_write_record_layoutstats_start(task, hdr); 1579 return 0; 1580 } 1581 1582 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data) 1583 { 1584 struct nfs_pgio_header *hdr = data; 1585 1586 if (ff_layout_write_prepare_common(task, hdr)) 1587 return; 1588 1589 rpc_call_start(task); 1590 } 1591 1592 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data) 1593 { 1594 struct nfs_pgio_header *hdr = data; 1595 1596 if (nfs4_setup_sequence(hdr->ds_clp, 1597 &hdr->args.seq_args, 1598 &hdr->res.seq_res, 1599 task)) 1600 return; 1601 1602 ff_layout_write_prepare_common(task, hdr); 1603 } 1604 1605 static void ff_layout_write_call_done(struct rpc_task *task, void *data) 1606 { 1607 struct nfs_pgio_header *hdr = data; 1608 1609 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) && 1610 task->tk_status == 0) { 1611 nfs4_sequence_done(task, &hdr->res.seq_res); 1612 return; 1613 } 1614 1615 /* Note this may cause RPC to be resent */ 1616 hdr->mds_ops->rpc_call_done(task, hdr); 1617 } 1618 1619 static void ff_layout_write_count_stats(struct rpc_task *task, void *data) 1620 { 1621 struct nfs_pgio_header *hdr = data; 1622 1623 ff_layout_write_record_layoutstats_done(task, hdr); 1624 rpc_count_iostats_metrics(task, 1625 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]); 1626 } 1627 1628 static void ff_layout_write_release(void *data) 1629 { 1630 struct nfs_pgio_header *hdr = data; 1631 1632 ff_layout_write_record_layoutstats_done(&hdr->task, hdr); 1633 if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) { 1634 ff_layout_send_layouterror(hdr->lseg); 1635 ff_layout_reset_write(hdr, true); 1636 } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags)) 1637 ff_layout_reset_write(hdr, false); 1638 pnfs_generic_rw_release(data); 1639 } 1640 1641 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task, 1642 struct nfs_commit_data *cdata) 1643 { 1644 if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags)) 1645 return; 1646 nfs4_ff_layout_stat_io_start_write(cdata->inode, 1647 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index), 1648 0, task->tk_start); 1649 } 1650 1651 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task, 1652 struct nfs_commit_data *cdata) 1653 { 1654 struct nfs_page *req; 1655 __u64 count = 0; 1656 1657 if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags)) 1658 return; 1659 1660 if (task->tk_status == 0) { 1661 list_for_each_entry(req, &cdata->pages, wb_list) 1662 count += req->wb_bytes; 1663 } 1664 nfs4_ff_layout_stat_io_end_write(task, 1665 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index), 1666 count, count, NFS_FILE_SYNC); 1667 set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags); 1668 } 1669 1670 static void ff_layout_commit_prepare_common(struct rpc_task *task, 1671 struct nfs_commit_data *cdata) 1672 { 1673 ff_layout_commit_record_layoutstats_start(task, cdata); 1674 } 1675 1676 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data) 1677 { 1678 ff_layout_commit_prepare_common(task, data); 1679 rpc_call_start(task); 1680 } 1681 1682 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data) 1683 { 1684 struct nfs_commit_data *wdata = data; 1685 1686 if (nfs4_setup_sequence(wdata->ds_clp, 1687 &wdata->args.seq_args, 1688 &wdata->res.seq_res, 1689 task)) 1690 return; 1691 ff_layout_commit_prepare_common(task, data); 1692 } 1693 1694 static void ff_layout_commit_done(struct rpc_task *task, void *data) 1695 { 1696 pnfs_generic_write_commit_done(task, data); 1697 } 1698 1699 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data) 1700 { 1701 struct nfs_commit_data *cdata = data; 1702 1703 ff_layout_commit_record_layoutstats_done(task, cdata); 1704 rpc_count_iostats_metrics(task, 1705 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]); 1706 } 1707 1708 static void ff_layout_commit_release(void *data) 1709 { 1710 struct nfs_commit_data *cdata = data; 1711 1712 ff_layout_commit_record_layoutstats_done(&cdata->task, cdata); 1713 pnfs_generic_commit_release(data); 1714 } 1715 1716 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = { 1717 .rpc_call_prepare = ff_layout_read_prepare_v3, 1718 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit, 1719 .rpc_call_done = ff_layout_read_call_done, 1720 .rpc_count_stats = ff_layout_read_count_stats, 1721 .rpc_release = ff_layout_read_release, 1722 }; 1723 1724 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = { 1725 .rpc_call_prepare = ff_layout_read_prepare_v4, 1726 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit, 1727 .rpc_call_done = ff_layout_read_call_done, 1728 .rpc_count_stats = ff_layout_read_count_stats, 1729 .rpc_release = ff_layout_read_release, 1730 }; 1731 1732 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = { 1733 .rpc_call_prepare = ff_layout_write_prepare_v3, 1734 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit, 1735 .rpc_call_done = ff_layout_write_call_done, 1736 .rpc_count_stats = ff_layout_write_count_stats, 1737 .rpc_release = ff_layout_write_release, 1738 }; 1739 1740 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = { 1741 .rpc_call_prepare = ff_layout_write_prepare_v4, 1742 .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit, 1743 .rpc_call_done = ff_layout_write_call_done, 1744 .rpc_count_stats = ff_layout_write_count_stats, 1745 .rpc_release = ff_layout_write_release, 1746 }; 1747 1748 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = { 1749 .rpc_call_prepare = ff_layout_commit_prepare_v3, 1750 .rpc_call_done = ff_layout_commit_done, 1751 .rpc_count_stats = ff_layout_commit_count_stats, 1752 .rpc_release = ff_layout_commit_release, 1753 }; 1754 1755 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = { 1756 .rpc_call_prepare = ff_layout_commit_prepare_v4, 1757 .rpc_call_done = ff_layout_commit_done, 1758 .rpc_count_stats = ff_layout_commit_count_stats, 1759 .rpc_release = ff_layout_commit_release, 1760 }; 1761 1762 static enum pnfs_try_status 1763 ff_layout_read_pagelist(struct nfs_pgio_header *hdr) 1764 { 1765 struct pnfs_layout_segment *lseg = hdr->lseg; 1766 struct nfs4_pnfs_ds *ds; 1767 struct rpc_clnt *ds_clnt; 1768 struct nfs4_ff_layout_mirror *mirror; 1769 const struct cred *ds_cred; 1770 loff_t offset = hdr->args.offset; 1771 u32 idx = hdr->pgio_mirror_idx; 1772 int vers; 1773 struct nfs_fh *fh; 1774 1775 dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n", 1776 __func__, hdr->inode->i_ino, 1777 hdr->args.pgbase, (size_t)hdr->args.count, offset); 1778 1779 mirror = FF_LAYOUT_COMP(lseg, idx); 1780 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false); 1781 if (!ds) 1782 goto out_failed; 1783 1784 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp, 1785 hdr->inode); 1786 if (IS_ERR(ds_clnt)) 1787 goto out_failed; 1788 1789 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred); 1790 if (!ds_cred) 1791 goto out_failed; 1792 1793 vers = nfs4_ff_layout_ds_version(mirror); 1794 1795 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__, 1796 ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers); 1797 1798 hdr->pgio_done_cb = ff_layout_read_done_cb; 1799 refcount_inc(&ds->ds_clp->cl_count); 1800 hdr->ds_clp = ds->ds_clp; 1801 fh = nfs4_ff_layout_select_ds_fh(mirror); 1802 if (fh) 1803 hdr->args.fh = fh; 1804 1805 nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid); 1806 1807 /* 1808 * Note that if we ever decide to split across DSes, 1809 * then we may need to handle dense-like offsets. 1810 */ 1811 hdr->args.offset = offset; 1812 hdr->mds_offset = offset; 1813 1814 /* Perform an asynchronous read to ds */ 1815 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops, 1816 vers == 3 ? &ff_layout_read_call_ops_v3 : 1817 &ff_layout_read_call_ops_v4, 1818 0, RPC_TASK_SOFTCONN); 1819 put_cred(ds_cred); 1820 return PNFS_ATTEMPTED; 1821 1822 out_failed: 1823 if (ff_layout_avoid_mds_available_ds(lseg)) 1824 return PNFS_TRY_AGAIN; 1825 return PNFS_NOT_ATTEMPTED; 1826 } 1827 1828 /* Perform async writes. */ 1829 static enum pnfs_try_status 1830 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync) 1831 { 1832 struct pnfs_layout_segment *lseg = hdr->lseg; 1833 struct nfs4_pnfs_ds *ds; 1834 struct rpc_clnt *ds_clnt; 1835 struct nfs4_ff_layout_mirror *mirror; 1836 const struct cred *ds_cred; 1837 loff_t offset = hdr->args.offset; 1838 int vers; 1839 struct nfs_fh *fh; 1840 int idx = hdr->pgio_mirror_idx; 1841 1842 mirror = FF_LAYOUT_COMP(lseg, idx); 1843 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true); 1844 if (!ds) 1845 goto out_failed; 1846 1847 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp, 1848 hdr->inode); 1849 if (IS_ERR(ds_clnt)) 1850 goto out_failed; 1851 1852 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred); 1853 if (!ds_cred) 1854 goto out_failed; 1855 1856 vers = nfs4_ff_layout_ds_version(mirror); 1857 1858 dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n", 1859 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count, 1860 offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), 1861 vers); 1862 1863 hdr->pgio_done_cb = ff_layout_write_done_cb; 1864 refcount_inc(&ds->ds_clp->cl_count); 1865 hdr->ds_clp = ds->ds_clp; 1866 hdr->ds_commit_idx = idx; 1867 fh = nfs4_ff_layout_select_ds_fh(mirror); 1868 if (fh) 1869 hdr->args.fh = fh; 1870 1871 nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid); 1872 1873 /* 1874 * Note that if we ever decide to split across DSes, 1875 * then we may need to handle dense-like offsets. 1876 */ 1877 hdr->args.offset = offset; 1878 1879 /* Perform an asynchronous write */ 1880 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops, 1881 vers == 3 ? &ff_layout_write_call_ops_v3 : 1882 &ff_layout_write_call_ops_v4, 1883 sync, RPC_TASK_SOFTCONN); 1884 put_cred(ds_cred); 1885 return PNFS_ATTEMPTED; 1886 1887 out_failed: 1888 if (ff_layout_avoid_mds_available_ds(lseg)) 1889 return PNFS_TRY_AGAIN; 1890 return PNFS_NOT_ATTEMPTED; 1891 } 1892 1893 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) 1894 { 1895 return i; 1896 } 1897 1898 static struct nfs_fh * 1899 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i) 1900 { 1901 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg); 1902 1903 /* FIXME: Assume that there is only one NFS version available 1904 * for the DS. 1905 */ 1906 return &flseg->mirror_array[i]->fh_versions[0]; 1907 } 1908 1909 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how) 1910 { 1911 struct pnfs_layout_segment *lseg = data->lseg; 1912 struct nfs4_pnfs_ds *ds; 1913 struct rpc_clnt *ds_clnt; 1914 struct nfs4_ff_layout_mirror *mirror; 1915 const struct cred *ds_cred; 1916 u32 idx; 1917 int vers, ret; 1918 struct nfs_fh *fh; 1919 1920 if (!lseg || !(pnfs_is_valid_lseg(lseg) || 1921 test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))) 1922 goto out_err; 1923 1924 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index); 1925 mirror = FF_LAYOUT_COMP(lseg, idx); 1926 ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true); 1927 if (!ds) 1928 goto out_err; 1929 1930 ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp, 1931 data->inode); 1932 if (IS_ERR(ds_clnt)) 1933 goto out_err; 1934 1935 ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred); 1936 if (!ds_cred) 1937 goto out_err; 1938 1939 vers = nfs4_ff_layout_ds_version(mirror); 1940 1941 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__, 1942 data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count), 1943 vers); 1944 data->commit_done_cb = ff_layout_commit_done_cb; 1945 data->cred = ds_cred; 1946 refcount_inc(&ds->ds_clp->cl_count); 1947 data->ds_clp = ds->ds_clp; 1948 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index); 1949 if (fh) 1950 data->args.fh = fh; 1951 1952 ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops, 1953 vers == 3 ? &ff_layout_commit_call_ops_v3 : 1954 &ff_layout_commit_call_ops_v4, 1955 how, RPC_TASK_SOFTCONN); 1956 put_cred(ds_cred); 1957 return ret; 1958 out_err: 1959 pnfs_generic_prepare_to_resend_writes(data); 1960 pnfs_generic_commit_release(data); 1961 return -EAGAIN; 1962 } 1963 1964 static int 1965 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages, 1966 int how, struct nfs_commit_info *cinfo) 1967 { 1968 return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo, 1969 ff_layout_initiate_commit); 1970 } 1971 1972 static struct pnfs_ds_commit_info * 1973 ff_layout_get_ds_info(struct inode *inode) 1974 { 1975 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout; 1976 1977 if (layout == NULL) 1978 return NULL; 1979 1980 return &FF_LAYOUT_FROM_HDR(layout)->commit_info; 1981 } 1982 1983 static void 1984 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d) 1985 { 1986 nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds, 1987 id_node)); 1988 } 1989 1990 static int ff_layout_encode_ioerr(struct xdr_stream *xdr, 1991 const struct nfs4_layoutreturn_args *args, 1992 const struct nfs4_flexfile_layoutreturn_args *ff_args) 1993 { 1994 __be32 *start; 1995 1996 start = xdr_reserve_space(xdr, 4); 1997 if (unlikely(!start)) 1998 return -E2BIG; 1999 2000 *start = cpu_to_be32(ff_args->num_errors); 2001 /* This assume we always return _ALL_ layouts */ 2002 return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors); 2003 } 2004 2005 static void 2006 encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len) 2007 { 2008 WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0); 2009 } 2010 2011 static void 2012 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr, 2013 const nfs4_stateid *stateid, 2014 const struct nfs42_layoutstat_devinfo *devinfo) 2015 { 2016 __be32 *p; 2017 2018 p = xdr_reserve_space(xdr, 8 + 8); 2019 p = xdr_encode_hyper(p, devinfo->offset); 2020 p = xdr_encode_hyper(p, devinfo->length); 2021 encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE); 2022 p = xdr_reserve_space(xdr, 4*8); 2023 p = xdr_encode_hyper(p, devinfo->read_count); 2024 p = xdr_encode_hyper(p, devinfo->read_bytes); 2025 p = xdr_encode_hyper(p, devinfo->write_count); 2026 p = xdr_encode_hyper(p, devinfo->write_bytes); 2027 encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE); 2028 } 2029 2030 static void 2031 ff_layout_encode_ff_iostat(struct xdr_stream *xdr, 2032 const nfs4_stateid *stateid, 2033 const struct nfs42_layoutstat_devinfo *devinfo) 2034 { 2035 ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo); 2036 ff_layout_encode_ff_layoutupdate(xdr, devinfo, 2037 devinfo->ld_private.data); 2038 } 2039 2040 /* report nothing for now */ 2041 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr, 2042 const struct nfs4_layoutreturn_args *args, 2043 struct nfs4_flexfile_layoutreturn_args *ff_args) 2044 { 2045 __be32 *p; 2046 int i; 2047 2048 p = xdr_reserve_space(xdr, 4); 2049 *p = cpu_to_be32(ff_args->num_dev); 2050 for (i = 0; i < ff_args->num_dev; i++) 2051 ff_layout_encode_ff_iostat(xdr, 2052 &args->layout->plh_stateid, 2053 &ff_args->devinfo[i]); 2054 } 2055 2056 static void 2057 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo, 2058 unsigned int num_entries) 2059 { 2060 unsigned int i; 2061 2062 for (i = 0; i < num_entries; i++) { 2063 if (!devinfo[i].ld_private.ops) 2064 continue; 2065 if (!devinfo[i].ld_private.ops->free) 2066 continue; 2067 devinfo[i].ld_private.ops->free(&devinfo[i].ld_private); 2068 } 2069 } 2070 2071 static struct nfs4_deviceid_node * 2072 ff_layout_alloc_deviceid_node(struct nfs_server *server, 2073 struct pnfs_device *pdev, gfp_t gfp_flags) 2074 { 2075 struct nfs4_ff_layout_ds *dsaddr; 2076 2077 dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags); 2078 if (!dsaddr) 2079 return NULL; 2080 return &dsaddr->id_node; 2081 } 2082 2083 static void 2084 ff_layout_encode_layoutreturn(struct xdr_stream *xdr, 2085 const void *voidargs, 2086 const struct nfs4_xdr_opaque_data *ff_opaque) 2087 { 2088 const struct nfs4_layoutreturn_args *args = voidargs; 2089 struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data; 2090 struct xdr_buf tmp_buf = { 2091 .head = { 2092 [0] = { 2093 .iov_base = page_address(ff_args->pages[0]), 2094 }, 2095 }, 2096 .buflen = PAGE_SIZE, 2097 }; 2098 struct xdr_stream tmp_xdr; 2099 __be32 *start; 2100 2101 dprintk("%s: Begin\n", __func__); 2102 2103 xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL); 2104 2105 ff_layout_encode_ioerr(&tmp_xdr, args, ff_args); 2106 ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args); 2107 2108 start = xdr_reserve_space(xdr, 4); 2109 *start = cpu_to_be32(tmp_buf.len); 2110 xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len); 2111 2112 dprintk("%s: Return\n", __func__); 2113 } 2114 2115 static void 2116 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args) 2117 { 2118 struct nfs4_flexfile_layoutreturn_args *ff_args; 2119 2120 if (!args->data) 2121 return; 2122 ff_args = args->data; 2123 args->data = NULL; 2124 2125 ff_layout_free_ds_ioerr(&ff_args->errors); 2126 ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev); 2127 2128 put_page(ff_args->pages[0]); 2129 kfree(ff_args); 2130 } 2131 2132 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = { 2133 .encode = ff_layout_encode_layoutreturn, 2134 .free = ff_layout_free_layoutreturn, 2135 }; 2136 2137 static int 2138 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args) 2139 { 2140 struct nfs4_flexfile_layoutreturn_args *ff_args; 2141 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout); 2142 2143 ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL); 2144 if (!ff_args) 2145 goto out_nomem; 2146 ff_args->pages[0] = alloc_page(GFP_KERNEL); 2147 if (!ff_args->pages[0]) 2148 goto out_nomem_free; 2149 2150 INIT_LIST_HEAD(&ff_args->errors); 2151 ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout, 2152 &args->range, &ff_args->errors, 2153 FF_LAYOUTRETURN_MAXERR); 2154 2155 spin_lock(&args->inode->i_lock); 2156 ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr, 2157 &ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo)); 2158 spin_unlock(&args->inode->i_lock); 2159 2160 args->ld_private->ops = &layoutreturn_ops; 2161 args->ld_private->data = ff_args; 2162 return 0; 2163 out_nomem_free: 2164 kfree(ff_args); 2165 out_nomem: 2166 return -ENOMEM; 2167 } 2168 2169 #ifdef CONFIG_NFS_V4_2 2170 void 2171 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg) 2172 { 2173 struct pnfs_layout_hdr *lo = lseg->pls_layout; 2174 struct nfs42_layout_error *errors; 2175 LIST_HEAD(head); 2176 2177 if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR)) 2178 return; 2179 ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1); 2180 if (list_empty(&head)) 2181 return; 2182 2183 errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, 2184 sizeof(*errors), GFP_NOFS); 2185 if (errors != NULL) { 2186 const struct nfs4_ff_layout_ds_err *pos; 2187 size_t n = 0; 2188 2189 list_for_each_entry(pos, &head, list) { 2190 errors[n].offset = pos->offset; 2191 errors[n].length = pos->length; 2192 nfs4_stateid_copy(&errors[n].stateid, &pos->stateid); 2193 errors[n].errors[0].dev_id = pos->deviceid; 2194 errors[n].errors[0].status = pos->status; 2195 errors[n].errors[0].opnum = pos->opnum; 2196 n++; 2197 if (!list_is_last(&pos->list, &head) && 2198 n < NFS42_LAYOUTERROR_MAX) 2199 continue; 2200 if (nfs42_proc_layouterror(lseg, errors, n) < 0) 2201 break; 2202 n = 0; 2203 } 2204 kfree(errors); 2205 } 2206 ff_layout_free_ds_ioerr(&head); 2207 } 2208 #else 2209 void 2210 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg) 2211 { 2212 } 2213 #endif 2214 2215 static int 2216 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen) 2217 { 2218 const struct sockaddr_in *sin = (struct sockaddr_in *)sap; 2219 2220 return snprintf(buf, buflen, "%pI4", &sin->sin_addr); 2221 } 2222 2223 static size_t 2224 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf, 2225 const int buflen) 2226 { 2227 const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; 2228 const struct in6_addr *addr = &sin6->sin6_addr; 2229 2230 /* 2231 * RFC 4291, Section 2.2.2 2232 * 2233 * Shorthanded ANY address 2234 */ 2235 if (ipv6_addr_any(addr)) 2236 return snprintf(buf, buflen, "::"); 2237 2238 /* 2239 * RFC 4291, Section 2.2.2 2240 * 2241 * Shorthanded loopback address 2242 */ 2243 if (ipv6_addr_loopback(addr)) 2244 return snprintf(buf, buflen, "::1"); 2245 2246 /* 2247 * RFC 4291, Section 2.2.3 2248 * 2249 * Special presentation address format for mapped v4 2250 * addresses. 2251 */ 2252 if (ipv6_addr_v4mapped(addr)) 2253 return snprintf(buf, buflen, "::ffff:%pI4", 2254 &addr->s6_addr32[3]); 2255 2256 /* 2257 * RFC 4291, Section 2.2.1 2258 */ 2259 return snprintf(buf, buflen, "%pI6c", addr); 2260 } 2261 2262 /* Derived from rpc_sockaddr2uaddr */ 2263 static void 2264 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da) 2265 { 2266 struct sockaddr *sap = (struct sockaddr *)&da->da_addr; 2267 char portbuf[RPCBIND_MAXUADDRPLEN]; 2268 char addrbuf[RPCBIND_MAXUADDRLEN]; 2269 char *netid; 2270 unsigned short port; 2271 int len, netid_len; 2272 __be32 *p; 2273 2274 switch (sap->sa_family) { 2275 case AF_INET: 2276 if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0) 2277 return; 2278 port = ntohs(((struct sockaddr_in *)sap)->sin_port); 2279 netid = "tcp"; 2280 netid_len = 3; 2281 break; 2282 case AF_INET6: 2283 if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0) 2284 return; 2285 port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port); 2286 netid = "tcp6"; 2287 netid_len = 4; 2288 break; 2289 default: 2290 /* we only support tcp and tcp6 */ 2291 WARN_ON_ONCE(1); 2292 return; 2293 } 2294 2295 snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff); 2296 len = strlcat(addrbuf, portbuf, sizeof(addrbuf)); 2297 2298 p = xdr_reserve_space(xdr, 4 + netid_len); 2299 xdr_encode_opaque(p, netid, netid_len); 2300 2301 p = xdr_reserve_space(xdr, 4 + len); 2302 xdr_encode_opaque(p, addrbuf, len); 2303 } 2304 2305 static void 2306 ff_layout_encode_nfstime(struct xdr_stream *xdr, 2307 ktime_t t) 2308 { 2309 struct timespec64 ts; 2310 __be32 *p; 2311 2312 p = xdr_reserve_space(xdr, 12); 2313 ts = ktime_to_timespec64(t); 2314 p = xdr_encode_hyper(p, ts.tv_sec); 2315 *p++ = cpu_to_be32(ts.tv_nsec); 2316 } 2317 2318 static void 2319 ff_layout_encode_io_latency(struct xdr_stream *xdr, 2320 struct nfs4_ff_io_stat *stat) 2321 { 2322 __be32 *p; 2323 2324 p = xdr_reserve_space(xdr, 5 * 8); 2325 p = xdr_encode_hyper(p, stat->ops_requested); 2326 p = xdr_encode_hyper(p, stat->bytes_requested); 2327 p = xdr_encode_hyper(p, stat->ops_completed); 2328 p = xdr_encode_hyper(p, stat->bytes_completed); 2329 p = xdr_encode_hyper(p, stat->bytes_not_delivered); 2330 ff_layout_encode_nfstime(xdr, stat->total_busy_time); 2331 ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time); 2332 } 2333 2334 static void 2335 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr, 2336 const struct nfs42_layoutstat_devinfo *devinfo, 2337 struct nfs4_ff_layout_mirror *mirror) 2338 { 2339 struct nfs4_pnfs_ds_addr *da; 2340 struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds; 2341 struct nfs_fh *fh = &mirror->fh_versions[0]; 2342 __be32 *p; 2343 2344 da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node); 2345 dprintk("%s: DS %s: encoding address %s\n", 2346 __func__, ds->ds_remotestr, da->da_remotestr); 2347 /* netaddr4 */ 2348 ff_layout_encode_netaddr(xdr, da); 2349 /* nfs_fh4 */ 2350 p = xdr_reserve_space(xdr, 4 + fh->size); 2351 xdr_encode_opaque(p, fh->data, fh->size); 2352 /* ff_io_latency4 read */ 2353 spin_lock(&mirror->lock); 2354 ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat); 2355 /* ff_io_latency4 write */ 2356 ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat); 2357 spin_unlock(&mirror->lock); 2358 /* nfstime4 */ 2359 ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time)); 2360 /* bool */ 2361 p = xdr_reserve_space(xdr, 4); 2362 *p = cpu_to_be32(false); 2363 } 2364 2365 static void 2366 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args, 2367 const struct nfs4_xdr_opaque_data *opaque) 2368 { 2369 struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque, 2370 struct nfs42_layoutstat_devinfo, ld_private); 2371 __be32 *start; 2372 2373 /* layoutupdate length */ 2374 start = xdr_reserve_space(xdr, 4); 2375 ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data); 2376 2377 *start = cpu_to_be32((xdr->p - start - 1) * 4); 2378 } 2379 2380 static void 2381 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque) 2382 { 2383 struct nfs4_ff_layout_mirror *mirror = opaque->data; 2384 2385 ff_layout_put_mirror(mirror); 2386 } 2387 2388 static const struct nfs4_xdr_opaque_ops layoutstat_ops = { 2389 .encode = ff_layout_encode_layoutstats, 2390 .free = ff_layout_free_layoutstats, 2391 }; 2392 2393 static int 2394 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo, 2395 struct nfs42_layoutstat_devinfo *devinfo, 2396 int dev_limit) 2397 { 2398 struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo); 2399 struct nfs4_ff_layout_mirror *mirror; 2400 struct nfs4_deviceid_node *dev; 2401 int i = 0; 2402 2403 list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) { 2404 if (i >= dev_limit) 2405 break; 2406 if (IS_ERR_OR_NULL(mirror->mirror_ds)) 2407 continue; 2408 if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags)) 2409 continue; 2410 /* mirror refcount put in cleanup_layoutstats */ 2411 if (!refcount_inc_not_zero(&mirror->ref)) 2412 continue; 2413 dev = &mirror->mirror_ds->id_node; 2414 memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE); 2415 devinfo->offset = 0; 2416 devinfo->length = NFS4_MAX_UINT64; 2417 spin_lock(&mirror->lock); 2418 devinfo->read_count = mirror->read_stat.io_stat.ops_completed; 2419 devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed; 2420 devinfo->write_count = mirror->write_stat.io_stat.ops_completed; 2421 devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed; 2422 spin_unlock(&mirror->lock); 2423 devinfo->layout_type = LAYOUT_FLEX_FILES; 2424 devinfo->ld_private.ops = &layoutstat_ops; 2425 devinfo->ld_private.data = mirror; 2426 2427 devinfo++; 2428 i++; 2429 } 2430 return i; 2431 } 2432 2433 static int 2434 ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args) 2435 { 2436 struct nfs4_flexfile_layout *ff_layout; 2437 const int dev_count = PNFS_LAYOUTSTATS_MAXDEV; 2438 2439 /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */ 2440 args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO); 2441 if (!args->devinfo) 2442 return -ENOMEM; 2443 2444 spin_lock(&args->inode->i_lock); 2445 ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout); 2446 args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr, 2447 &args->devinfo[0], dev_count); 2448 spin_unlock(&args->inode->i_lock); 2449 if (!args->num_dev) { 2450 kfree(args->devinfo); 2451 args->devinfo = NULL; 2452 return -ENOENT; 2453 } 2454 2455 return 0; 2456 } 2457 2458 static int 2459 ff_layout_set_layoutdriver(struct nfs_server *server, 2460 const struct nfs_fh *dummy) 2461 { 2462 #if IS_ENABLED(CONFIG_NFS_V4_2) 2463 server->caps |= NFS_CAP_LAYOUTSTATS; 2464 #endif 2465 return 0; 2466 } 2467 2468 static struct pnfs_layoutdriver_type flexfilelayout_type = { 2469 .id = LAYOUT_FLEX_FILES, 2470 .name = "LAYOUT_FLEX_FILES", 2471 .owner = THIS_MODULE, 2472 .flags = PNFS_LAYOUTGET_ON_OPEN, 2473 .max_layoutget_response = 4096, /* 1 page or so... */ 2474 .set_layoutdriver = ff_layout_set_layoutdriver, 2475 .alloc_layout_hdr = ff_layout_alloc_layout_hdr, 2476 .free_layout_hdr = ff_layout_free_layout_hdr, 2477 .alloc_lseg = ff_layout_alloc_lseg, 2478 .free_lseg = ff_layout_free_lseg, 2479 .add_lseg = ff_layout_add_lseg, 2480 .pg_read_ops = &ff_layout_pg_read_ops, 2481 .pg_write_ops = &ff_layout_pg_write_ops, 2482 .get_ds_info = ff_layout_get_ds_info, 2483 .free_deviceid_node = ff_layout_free_deviceid_node, 2484 .mark_request_commit = pnfs_layout_mark_request_commit, 2485 .clear_request_commit = pnfs_generic_clear_request_commit, 2486 .scan_commit_lists = pnfs_generic_scan_commit_lists, 2487 .recover_commit_reqs = pnfs_generic_recover_commit_reqs, 2488 .commit_pagelist = ff_layout_commit_pagelist, 2489 .read_pagelist = ff_layout_read_pagelist, 2490 .write_pagelist = ff_layout_write_pagelist, 2491 .alloc_deviceid_node = ff_layout_alloc_deviceid_node, 2492 .prepare_layoutreturn = ff_layout_prepare_layoutreturn, 2493 .sync = pnfs_nfs_generic_sync, 2494 .prepare_layoutstats = ff_layout_prepare_layoutstats, 2495 }; 2496 2497 static int __init nfs4flexfilelayout_init(void) 2498 { 2499 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n", 2500 __func__); 2501 return pnfs_register_layoutdriver(&flexfilelayout_type); 2502 } 2503 2504 static void __exit nfs4flexfilelayout_exit(void) 2505 { 2506 printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n", 2507 __func__); 2508 pnfs_unregister_layoutdriver(&flexfilelayout_type); 2509 } 2510 2511 MODULE_ALIAS("nfs-layouttype4-4"); 2512 2513 MODULE_LICENSE("GPL"); 2514 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver"); 2515 2516 module_init(nfs4flexfilelayout_init); 2517 module_exit(nfs4flexfilelayout_exit); 2518