/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX	(15*HZ)
#define FF_LAYOUTRETURN_MAXERR		20

static struct group_info *ff_zero_group;

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);
static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
		struct nfs42_layoutstat_devinfo *devinfo,
		int dev_limit);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
		const struct nfs42_layoutstat_devinfo *devinfo,
		struct nfs4_ff_layout_mirror *mirror);

static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		ffl->last_report_time = ktime_get();
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
				 list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree(FF_LAYOUT_FROM_HDR(lo));
}

static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > sizeof(struct nfs_fh)) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., Kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4)*/
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					   &m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}
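/*
 * Mirrors are shared between layout segments on the same inode: before
 * linking a freshly decoded mirror into the layout, look for an existing
 * entry with the same deviceid and file handle list and take a reference
 * on it instead.  The caller is expected to free its own copy when the
 * existing mirror is returned.
 */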
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (atomic_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		atomic_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct rpc_cred *cred;

	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	cred = rcu_access_pointer(mirror->ro_cred);
	if (cred)
		put_rpccred(cred);
	cred = rcu_access_pointer(mirror->rw_cred);
	if (cred)
		put_rpccred(cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	int i;

	if (fls->mirror_array) {
		for (i = 0; i < fls->mirror_array_cnt; i++) {
			/* normally mirror_ds is freed in
			 * .free_deviceid_node but we still do it here
			 * for .alloc_lseg error path */
			ff_layout_put_mirror(fls->mirror_array[i]);
		}
		kfree(fls->mirror_array);
		fls->mirror_array = NULL;
	}
}

static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr)
{
	int ret = 0;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		ret = -EINVAL;
	}

	dprintk("--> %s returns %d\n", __func__, ret);
	return ret;
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}
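/*
 * Decode the ff_layout body returned by LAYOUTGET into an
 * nfs4_ff_layout_segment: the stripe unit and mirror count come first,
 * then one entry per mirror (data server count, deviceid, efficiency,
 * stateid, a list of file handles, and the stringified user and group the
 * DS I/O should be performed as), followed by the optional flags and the
 * layoutstats report interval.  Mirrors are merged with existing ones on
 * the inode and the array is finally sorted by efficiency.
 */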
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
goto out_err_free; 392 393 p = xdr_decode_hyper(p, &stripe_unit); 394 mirror_array_cnt = be32_to_cpup(p++); 395 dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__, 396 stripe_unit, mirror_array_cnt); 397 398 if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT || 399 mirror_array_cnt == 0) 400 goto out_err_free; 401 402 rc = -ENOMEM; 403 fls = kzalloc(sizeof(*fls), gfp_flags); 404 if (!fls) 405 goto out_err_free; 406 407 fls->mirror_array_cnt = mirror_array_cnt; 408 fls->stripe_unit = stripe_unit; 409 fls->mirror_array = kcalloc(fls->mirror_array_cnt, 410 sizeof(fls->mirror_array[0]), gfp_flags); 411 if (fls->mirror_array == NULL) 412 goto out_err_free; 413 414 for (i = 0; i < fls->mirror_array_cnt; i++) { 415 struct nfs4_ff_layout_mirror *mirror; 416 struct auth_cred acred = { .group_info = ff_zero_group }; 417 struct rpc_cred __rcu *cred; 418 u32 ds_count, fh_count, id; 419 int j; 420 421 rc = -EIO; 422 p = xdr_inline_decode(&stream, 4); 423 if (!p) 424 goto out_err_free; 425 ds_count = be32_to_cpup(p); 426 427 /* FIXME: allow for striping? */ 428 if (ds_count != 1) 429 goto out_err_free; 430 431 fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags); 432 if (fls->mirror_array[i] == NULL) { 433 rc = -ENOMEM; 434 goto out_err_free; 435 } 436 437 fls->mirror_array[i]->ds_count = ds_count; 438 439 /* deviceid */ 440 rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid); 441 if (rc) 442 goto out_err_free; 443 444 /* efficiency */ 445 rc = -EIO; 446 p = xdr_inline_decode(&stream, 4); 447 if (!p) 448 goto out_err_free; 449 fls->mirror_array[i]->efficiency = be32_to_cpup(p); 450 451 /* stateid */ 452 rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid); 453 if (rc) 454 goto out_err_free; 455 456 /* fh */ 457 p = xdr_inline_decode(&stream, 4); 458 if (!p) 459 goto out_err_free; 460 fh_count = be32_to_cpup(p); 461 462 fls->mirror_array[i]->fh_versions = 463 kzalloc(fh_count * sizeof(struct nfs_fh), 464 gfp_flags); 465 if (fls->mirror_array[i]->fh_versions == NULL) { 466 rc = -ENOMEM; 467 goto out_err_free; 468 } 469 470 for (j = 0; j < fh_count; j++) { 471 rc = decode_nfs_fh(&stream, 472 &fls->mirror_array[i]->fh_versions[j]); 473 if (rc) 474 goto out_err_free; 475 } 476 477 fls->mirror_array[i]->fh_versions_cnt = fh_count; 478 479 /* user */ 480 rc = decode_name(&stream, &id); 481 if (rc) 482 goto out_err_free; 483 484 acred.uid = make_kuid(&init_user_ns, id); 485 486 /* group */ 487 rc = decode_name(&stream, &id); 488 if (rc) 489 goto out_err_free; 490 491 acred.gid = make_kgid(&init_user_ns, id); 492 493 /* find the cred for it */ 494 rcu_assign_pointer(cred, rpc_lookup_generic_cred(&acred, 0, gfp_flags)); 495 if (IS_ERR(cred)) { 496 rc = PTR_ERR(cred); 497 goto out_err_free; 498 } 499 500 if (lgr->range.iomode == IOMODE_READ) 501 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred); 502 else 503 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred); 504 505 mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]); 506 if (mirror != fls->mirror_array[i]) { 507 /* swap cred ptrs so free_mirror will clean up old */ 508 if (lgr->range.iomode == IOMODE_READ) { 509 cred = xchg(&mirror->ro_cred, cred); 510 rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred); 511 } else { 512 cred = xchg(&mirror->rw_cred, cred); 513 rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred); 514 } 515 ff_layout_free_mirror(fls->mirror_array[i]); 516 fls->mirror_array[i] = mirror; 517 } 518 519 dprintk("%s: iomode %s uid %u gid %u\n", __func__, 520 lgr->range.iomode == 
IOMODE_READ ? "READ" : "RW", 521 from_kuid(&init_user_ns, acred.uid), 522 from_kgid(&init_user_ns, acred.gid)); 523 } 524 525 p = xdr_inline_decode(&stream, 4); 526 if (!p) 527 goto out_sort_mirrors; 528 fls->flags = be32_to_cpup(p); 529 530 p = xdr_inline_decode(&stream, 4); 531 if (!p) 532 goto out_sort_mirrors; 533 for (i=0; i < fls->mirror_array_cnt; i++) 534 fls->mirror_array[i]->report_interval = be32_to_cpup(p); 535 536 out_sort_mirrors: 537 ff_layout_sort_mirrors(fls); 538 rc = ff_layout_check_layout(lgr); 539 if (rc) 540 goto out_err_free; 541 ret = &fls->generic_hdr; 542 dprintk("<-- %s (success)\n", __func__); 543 out_free_page: 544 __free_page(scratch); 545 return ret; 546 out_err_free: 547 _ff_layout_free_lseg(fls); 548 ret = ERR_PTR(rc); 549 dprintk("<-- %s (%d)\n", __func__, rc); 550 goto out_free_page; 551 } 552 553 static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout) 554 { 555 struct pnfs_layout_segment *lseg; 556 557 list_for_each_entry(lseg, &layout->plh_segs, pls_list) 558 if (lseg->pls_range.iomode == IOMODE_RW) 559 return true; 560 561 return false; 562 } 563 564 static void 565 ff_layout_free_lseg(struct pnfs_layout_segment *lseg) 566 { 567 struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); 568 569 dprintk("--> %s\n", __func__); 570 571 if (lseg->pls_range.iomode == IOMODE_RW) { 572 struct nfs4_flexfile_layout *ffl; 573 struct inode *inode; 574 575 ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout); 576 inode = ffl->generic_hdr.plh_inode; 577 spin_lock(&inode->i_lock); 578 if (!ff_layout_has_rw_segments(lseg->pls_layout)) { 579 ffl->commit_info.nbuckets = 0; 580 kfree(ffl->commit_info.buckets); 581 ffl->commit_info.buckets = NULL; 582 } 583 spin_unlock(&inode->i_lock); 584 } 585 _ff_layout_free_lseg(fls); 586 } 587 588 /* Return 1 until we have multiple lsegs support */ 589 static int 590 ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls) 591 { 592 return 1; 593 } 594 595 static void 596 nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now) 597 { 598 /* first IO request? 
 */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (!mirror->start_time)
		mirror->start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
			report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
		ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
		ktime_add(iostat->aggregate_completion_time,
			  completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}
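/*
 * Set up one commit bucket per mirror for this layout.  With only
 * whole-file layout segments there is a single RW lseg per file, so the
 * bucket array is allocated once; if another thread raced us and already
 * populated cinfo->ds->buckets, the freshly allocated array is discarded.
 */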
static int
ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			    struct nfs_commit_info *cinfo,
			    gfp_t gfp_flags)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size;

	if (cinfo->ds->nbuckets != 0) {
		/* This assumes there is only one RW lseg per file.
		 * To support multiple lsegs per file, we need to
		 * change struct pnfs_commit_bucket to allow dynamic
		 * increasing nbuckets.
		 */
		return 0;
	}

	size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg);

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;
	else {
		int i;

		spin_lock(&cinfo->inode->i_lock);
		if (cinfo->ds->nbuckets != 0)
			kfree(buckets);
		else {
			cinfo->ds->buckets = buckets;
			cinfo->ds->nbuckets = size;
			for (i = 0; i < size; i++) {
				INIT_LIST_HEAD(&buckets[i].written);
				INIT_LIST_HEAD(&buckets[i].committing);
				/* mark direct verifier as unset */
				buckets[i].direct_verf.committed =
					NFS_INVALID_STABLE_HOW;
			}
		}
		spin_unlock(&cinfo->inode->i_lock);
		return 0;
	}
}

static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  int start_idx,
				  int *best_idx)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_pnfs_ds *ds;
	bool fail_return = false;
	int idx;

	/* mirrors are sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		if (idx+1 == fls->mirror_array_cnt)
			fail_return = true;
		ds = nfs4_ff_layout_prepare_ds(lseg, idx, fail_return);
		if (ds) {
			*best_idx = idx;
			return ds;
		}
	}

	return NULL;
}

static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
		      struct nfs_page *req,
		      bool strict_iomode)
{
retry_strict:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   0,
					   NFS4_MAX_UINT64,
					   IOMODE_READ,
					   strict_iomode,
					   GFP_KERNEL);
	if (IS_ERR(pgio->pg_lseg)) {
		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}

	/* If we were not strict about the iomode and were handed an
	 * IOMODE_RW segment, but the server wants READs avoided on it,
	 * retry and insist on IOMODE_READ.
	 */
	if (pgio->pg_lseg && !strict_iomode &&
	    ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
		strict_iomode = true;
		goto retry_strict;
	}
}
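/*
 * pgio init for reads: grab a whole-file IOMODE_READ layout (or reuse an RW
 * one unless the server asks us to avoid reads on it), pick the most
 * efficient mirror with a usable data server, and size the pageio according
 * to that DS's rsize.  If no DS can be reached we either retry after a short
 * sleep or fall back to I/O through the MDS.
 */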
static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
		       struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

retry:
	/* Use full layout for now */
	if (!pgio->pg_lseg)
		ff_layout_pg_get_read(pgio, req, false);
	else if (ff_layout_avoid_read_on_rw(pgio->pg_lseg))
		ff_layout_pg_get_read(pgio, req, true);

	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
	if (!ds) {
		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
			goto out_mds;
		pnfs_put_lseg(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}

	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	return;
out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_read_mds(pgio);
}

static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs_commit_info cinfo;
	struct nfs4_pnfs_ds *ds;
	int i;
	int status;

retry:
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
		goto out_mds;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
		if (!ds) {
			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
				goto out_mds;
			pnfs_put_lseg(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			/* Sleep for 1 second before retrying */
			ssleep(1);
			goto retry;
		}
		pgm = &pgio->pg_mirrors[i];
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	return;

out_mds:
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	nfs_pageio_reset_write_mds(pgio);
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return
FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg); 969 970 /* no lseg means that pnfs is not in use, so no mirroring here */ 971 nfs_pageio_reset_write_mds(pgio); 972 out: 973 return 1; 974 } 975 976 static const struct nfs_pageio_ops ff_layout_pg_read_ops = { 977 .pg_init = ff_layout_pg_init_read, 978 .pg_test = pnfs_generic_pg_test, 979 .pg_doio = pnfs_generic_pg_readpages, 980 .pg_cleanup = pnfs_generic_pg_cleanup, 981 }; 982 983 static const struct nfs_pageio_ops ff_layout_pg_write_ops = { 984 .pg_init = ff_layout_pg_init_write, 985 .pg_test = pnfs_generic_pg_test, 986 .pg_doio = pnfs_generic_pg_writepages, 987 .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write, 988 .pg_cleanup = pnfs_generic_pg_cleanup, 989 }; 990 991 static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs) 992 { 993 struct rpc_task *task = &hdr->task; 994 995 pnfs_layoutcommit_inode(hdr->inode, false); 996 997 if (retry_pnfs) { 998 dprintk("%s Reset task %5u for i/o through pNFS " 999 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, 1000 hdr->task.tk_pid, 1001 hdr->inode->i_sb->s_id, 1002 (unsigned long long)NFS_FILEID(hdr->inode), 1003 hdr->args.count, 1004 (unsigned long long)hdr->args.offset); 1005 1006 hdr->completion_ops->reschedule_io(hdr); 1007 return; 1008 } 1009 1010 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { 1011 dprintk("%s Reset task %5u for i/o through MDS " 1012 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, 1013 hdr->task.tk_pid, 1014 hdr->inode->i_sb->s_id, 1015 (unsigned long long)NFS_FILEID(hdr->inode), 1016 hdr->args.count, 1017 (unsigned long long)hdr->args.offset); 1018 1019 task->tk_status = pnfs_write_done_resend_to_mds(hdr); 1020 } 1021 } 1022 1023 static void ff_layout_reset_read(struct nfs_pgio_header *hdr) 1024 { 1025 struct rpc_task *task = &hdr->task; 1026 1027 pnfs_layoutcommit_inode(hdr->inode, false); 1028 1029 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { 1030 dprintk("%s Reset task %5u for i/o through MDS " 1031 "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, 1032 hdr->task.tk_pid, 1033 hdr->inode->i_sb->s_id, 1034 (unsigned long long)NFS_FILEID(hdr->inode), 1035 hdr->args.count, 1036 (unsigned long long)hdr->args.offset); 1037 1038 task->tk_status = pnfs_read_done_resend_to_mds(hdr); 1039 } 1040 } 1041 1042 static int ff_layout_async_handle_error_v4(struct rpc_task *task, 1043 struct nfs4_state *state, 1044 struct nfs_client *clp, 1045 struct pnfs_layout_segment *lseg, 1046 int idx) 1047 { 1048 struct pnfs_layout_hdr *lo = lseg->pls_layout; 1049 struct inode *inode = lo->plh_inode; 1050 struct nfs_server *mds_server = NFS_SERVER(inode); 1051 1052 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); 1053 struct nfs_client *mds_client = mds_server->nfs_client; 1054 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table; 1055 1056 if (task->tk_status >= 0) 1057 return 0; 1058 1059 switch (task->tk_status) { 1060 /* MDS state errors */ 1061 case -NFS4ERR_DELEG_REVOKED: 1062 case -NFS4ERR_ADMIN_REVOKED: 1063 case -NFS4ERR_BAD_STATEID: 1064 if (state == NULL) 1065 break; 1066 nfs_remove_bad_delegation(state->inode, NULL); 1067 case -NFS4ERR_OPENMODE: 1068 if (state == NULL) 1069 break; 1070 if (nfs4_schedule_stateid_recovery(mds_server, state) < 0) 1071 goto out_bad_stateid; 1072 goto wait_on_recovery; 1073 case -NFS4ERR_EXPIRED: 1074 if (state != NULL) { 1075 if (nfs4_schedule_stateid_recovery(mds_server, state) < 0) 1076 goto out_bad_stateid; 1077 } 1078 nfs4_schedule_lease_recovery(mds_client); 1079 goto 
wait_on_recovery; 1080 /* DS session errors */ 1081 case -NFS4ERR_BADSESSION: 1082 case -NFS4ERR_BADSLOT: 1083 case -NFS4ERR_BAD_HIGH_SLOT: 1084 case -NFS4ERR_DEADSESSION: 1085 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1086 case -NFS4ERR_SEQ_FALSE_RETRY: 1087 case -NFS4ERR_SEQ_MISORDERED: 1088 dprintk("%s ERROR %d, Reset session. Exchangeid " 1089 "flags 0x%x\n", __func__, task->tk_status, 1090 clp->cl_exchange_flags); 1091 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); 1092 break; 1093 case -NFS4ERR_DELAY: 1094 case -NFS4ERR_GRACE: 1095 rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX); 1096 break; 1097 case -NFS4ERR_RETRY_UNCACHED_REP: 1098 break; 1099 /* Invalidate Layout errors */ 1100 case -NFS4ERR_PNFS_NO_LAYOUT: 1101 case -ESTALE: /* mapped NFS4ERR_STALE */ 1102 case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */ 1103 case -EISDIR: /* mapped NFS4ERR_ISDIR */ 1104 case -NFS4ERR_FHEXPIRED: 1105 case -NFS4ERR_WRONG_TYPE: 1106 dprintk("%s Invalid layout error %d\n", __func__, 1107 task->tk_status); 1108 /* 1109 * Destroy layout so new i/o will get a new layout. 1110 * Layout will not be destroyed until all current lseg 1111 * references are put. Mark layout as invalid to resend failed 1112 * i/o and all i/o waiting on the slot table to the MDS until 1113 * layout is destroyed and a new valid layout is obtained. 1114 */ 1115 pnfs_destroy_layout(NFS_I(inode)); 1116 rpc_wake_up(&tbl->slot_tbl_waitq); 1117 goto reset; 1118 /* RPC connection errors */ 1119 case -ECONNREFUSED: 1120 case -EHOSTDOWN: 1121 case -EHOSTUNREACH: 1122 case -ENETUNREACH: 1123 case -EIO: 1124 case -ETIMEDOUT: 1125 case -EPIPE: 1126 dprintk("%s DS connection error %d\n", __func__, 1127 task->tk_status); 1128 nfs4_delete_deviceid(devid->ld, devid->nfs_client, 1129 &devid->deviceid); 1130 rpc_wake_up(&tbl->slot_tbl_waitq); 1131 /* fall through */ 1132 default: 1133 if (ff_layout_avoid_mds_available_ds(lseg)) 1134 return -NFS4ERR_RESET_TO_PNFS; 1135 reset: 1136 dprintk("%s Retry through MDS. Error %d\n", __func__, 1137 task->tk_status); 1138 return -NFS4ERR_RESET_TO_MDS; 1139 } 1140 out: 1141 task->tk_status = 0; 1142 return -EAGAIN; 1143 out_bad_stateid: 1144 task->tk_status = -EIO; 1145 return 0; 1146 wait_on_recovery: 1147 rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL); 1148 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0) 1149 rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task); 1150 goto out; 1151 } 1152 1153 /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */ 1154 static int ff_layout_async_handle_error_v3(struct rpc_task *task, 1155 struct pnfs_layout_segment *lseg, 1156 int idx) 1157 { 1158 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); 1159 1160 if (task->tk_status >= 0) 1161 return 0; 1162 1163 switch (task->tk_status) { 1164 /* File access problems. Don't mark the device as unavailable */ 1165 case -EACCES: 1166 case -ESTALE: 1167 case -EISDIR: 1168 case -EBADHANDLE: 1169 case -ELOOP: 1170 case -ENOSPC: 1171 break; 1172 case -EJUKEBOX: 1173 nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); 1174 goto out_retry; 1175 default: 1176 dprintk("%s DS connection error %d\n", __func__, 1177 task->tk_status); 1178 nfs4_delete_deviceid(devid->ld, devid->nfs_client, 1179 &devid->deviceid); 1180 } 1181 /* FIXME: Need to prevent infinite looping here. 
 */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}
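/*
 * Record a DS I/O error against the mirror so that it can be reported back
 * to the MDS in a subsequent LAYOUTRETURN.  Local transport errnos are
 * first mapped onto NFS4ERR_NXIO or NFS4ERR_ACCESS; DELAY and GRACE are
 * transient and are not recorded.  The layout is then marked for return.
 */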
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	switch (status) {
	case NFS4ERR_DELAY:
	case NFS4ERR_GRACE:
		return;
	default:
		break;
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);
	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */
static int ff_layout_read_done_cb(struct rpc_task *task,
				  struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	if (task->tk_status < 0)
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    hdr->res.op_status, OP_READ,
					    task->tk_status);
	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
					hdr->pgio_mirror_idx + 1,
					&hdr->pgio_mirror_idx))
			goto out_eagain;
		ff_layout_read_record_layoutstats_done(task, hdr);
		pnfs_read_resend_pnfs(hdr);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		ff_layout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	}

	return 0;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * RFC 5661 is not clear about which credential should be used.
 *
 * Flexfiles clients should treat a DS-replied FILE_SYNC as DATA_SYNC, so
 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct inode *inode,
		struct pnfs_layout_segment *lseg,
		loff_t end_offset)
{
	if (!ff_layout_need_layoutcommit(lseg))
		return;

	pnfs_set_layoutcommit(inode, lseg, end_offset);
	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}

static bool
ff_layout_device_unavailable(struct pnfs_layout_segment *lseg, int idx)
{
	/* No mirroring for now */
	struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);

	return ff_layout_test_devid_unavailable(node);
}

static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			hdr->res.count);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}
	if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
		rpc_exit(task, -EHOSTDOWN);
		return -EAGAIN;
	}

	ff_layout_read_record_layoutstats_start(task, hdr);
	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
1376 */ 1377 static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data) 1378 { 1379 struct nfs_pgio_header *hdr = data; 1380 1381 if (ff_layout_read_prepare_common(task, hdr)) 1382 return; 1383 1384 rpc_call_start(task); 1385 } 1386 1387 static int ff_layout_setup_sequence(struct nfs_client *ds_clp, 1388 struct nfs4_sequence_args *args, 1389 struct nfs4_sequence_res *res, 1390 struct rpc_task *task) 1391 { 1392 if (ds_clp->cl_session) 1393 return nfs41_setup_sequence(ds_clp->cl_session, 1394 args, 1395 res, 1396 task); 1397 return nfs40_setup_sequence(ds_clp->cl_slot_tbl, 1398 args, 1399 res, 1400 task); 1401 } 1402 1403 static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data) 1404 { 1405 struct nfs_pgio_header *hdr = data; 1406 1407 if (ff_layout_setup_sequence(hdr->ds_clp, 1408 &hdr->args.seq_args, 1409 &hdr->res.seq_res, 1410 task)) 1411 return; 1412 1413 if (ff_layout_read_prepare_common(task, hdr)) 1414 return; 1415 1416 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 1417 hdr->args.lock_context, FMODE_READ) == -EIO) 1418 rpc_exit(task, -EIO); /* lost lock, terminate I/O */ 1419 } 1420 1421 static void ff_layout_read_call_done(struct rpc_task *task, void *data) 1422 { 1423 struct nfs_pgio_header *hdr = data; 1424 1425 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status); 1426 1427 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) && 1428 task->tk_status == 0) { 1429 nfs4_sequence_done(task, &hdr->res.seq_res); 1430 return; 1431 } 1432 1433 /* Note this may cause RPC to be resent */ 1434 hdr->mds_ops->rpc_call_done(task, hdr); 1435 } 1436 1437 static void ff_layout_read_count_stats(struct rpc_task *task, void *data) 1438 { 1439 struct nfs_pgio_header *hdr = data; 1440 1441 ff_layout_read_record_layoutstats_done(task, hdr); 1442 rpc_count_iostats_metrics(task, 1443 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]); 1444 } 1445 1446 static void ff_layout_read_release(void *data) 1447 { 1448 struct nfs_pgio_header *hdr = data; 1449 1450 ff_layout_read_record_layoutstats_done(&hdr->task, hdr); 1451 pnfs_generic_rw_release(data); 1452 } 1453 1454 1455 static int ff_layout_write_done_cb(struct rpc_task *task, 1456 struct nfs_pgio_header *hdr) 1457 { 1458 loff_t end_offs = 0; 1459 int err; 1460 1461 trace_nfs4_pnfs_write(hdr, task->tk_status); 1462 if (task->tk_status < 0) 1463 ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx, 1464 hdr->args.offset, hdr->args.count, 1465 hdr->res.op_status, OP_WRITE, 1466 task->tk_status); 1467 err = ff_layout_async_handle_error(task, hdr->args.context->state, 1468 hdr->ds_clp, hdr->lseg, 1469 hdr->pgio_mirror_idx); 1470 1471 switch (err) { 1472 case -NFS4ERR_RESET_TO_PNFS: 1473 ff_layout_reset_write(hdr, true); 1474 return task->tk_status; 1475 case -NFS4ERR_RESET_TO_MDS: 1476 ff_layout_reset_write(hdr, false); 1477 return task->tk_status; 1478 case -EAGAIN: 1479 return -EAGAIN; 1480 } 1481 1482 if (hdr->res.verf->committed == NFS_FILE_SYNC || 1483 hdr->res.verf->committed == NFS_DATA_SYNC) 1484 end_offs = hdr->mds_offset + (loff_t)hdr->res.count; 1485 1486 /* Note: if the write is unstable, don't set end_offs until commit */ 1487 ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs); 1488 1489 /* zero out fattr since we don't care DS attr at all */ 1490 hdr->fattr.valid = 0; 1491 if (task->tk_status >= 0) 1492 nfs_writeback_update_inode(hdr); 1493 1494 return 0; 1495 } 1496 1497 static int ff_layout_commit_done_cb(struct rpc_task *task, 1498 struct nfs_commit_data *data) 1499 { 1500 
int err; 1501 1502 trace_nfs4_pnfs_commit_ds(data, task->tk_status); 1503 if (task->tk_status < 0) 1504 ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index, 1505 data->args.offset, data->args.count, 1506 data->res.op_status, OP_COMMIT, 1507 task->tk_status); 1508 err = ff_layout_async_handle_error(task, NULL, data->ds_clp, 1509 data->lseg, data->ds_commit_index); 1510 1511 switch (err) { 1512 case -NFS4ERR_RESET_TO_PNFS: 1513 pnfs_generic_prepare_to_resend_writes(data); 1514 return -EAGAIN; 1515 case -NFS4ERR_RESET_TO_MDS: 1516 pnfs_generic_prepare_to_resend_writes(data); 1517 return -EAGAIN; 1518 case -EAGAIN: 1519 rpc_restart_call_prepare(task); 1520 return -EAGAIN; 1521 } 1522 1523 ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb); 1524 1525 return 0; 1526 } 1527 1528 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task, 1529 struct nfs_pgio_header *hdr) 1530 { 1531 if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags)) 1532 return; 1533 nfs4_ff_layout_stat_io_start_write(hdr->inode, 1534 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), 1535 hdr->args.count, 1536 task->tk_start); 1537 } 1538 1539 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task, 1540 struct nfs_pgio_header *hdr) 1541 { 1542 if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags)) 1543 return; 1544 nfs4_ff_layout_stat_io_end_write(task, 1545 FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), 1546 hdr->args.count, hdr->res.count, 1547 hdr->res.verf->committed); 1548 } 1549 1550 static int ff_layout_write_prepare_common(struct rpc_task *task, 1551 struct nfs_pgio_header *hdr) 1552 { 1553 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { 1554 rpc_exit(task, -EIO); 1555 return -EIO; 1556 } 1557 1558 if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) { 1559 rpc_exit(task, -EHOSTDOWN); 1560 return -EAGAIN; 1561 } 1562 1563 ff_layout_write_record_layoutstats_start(task, hdr); 1564 return 0; 1565 } 1566 1567 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data) 1568 { 1569 struct nfs_pgio_header *hdr = data; 1570 1571 if (ff_layout_write_prepare_common(task, hdr)) 1572 return; 1573 1574 rpc_call_start(task); 1575 } 1576 1577 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data) 1578 { 1579 struct nfs_pgio_header *hdr = data; 1580 1581 if (ff_layout_setup_sequence(hdr->ds_clp, 1582 &hdr->args.seq_args, 1583 &hdr->res.seq_res, 1584 task)) 1585 return; 1586 1587 if (ff_layout_write_prepare_common(task, hdr)) 1588 return; 1589 1590 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 1591 hdr->args.lock_context, FMODE_WRITE) == -EIO) 1592 rpc_exit(task, -EIO); /* lost lock, terminate I/O */ 1593 } 1594 1595 static void ff_layout_write_call_done(struct rpc_task *task, void *data) 1596 { 1597 struct nfs_pgio_header *hdr = data; 1598 1599 if (test_bit(NFS_IOHDR_REDO, &hdr->flags) && 1600 task->tk_status == 0) { 1601 nfs4_sequence_done(task, &hdr->res.seq_res); 1602 return; 1603 } 1604 1605 /* Note this may cause RPC to be resent */ 1606 hdr->mds_ops->rpc_call_done(task, hdr); 1607 } 1608 1609 static void ff_layout_write_count_stats(struct rpc_task *task, void *data) 1610 { 1611 struct nfs_pgio_header *hdr = data; 1612 1613 ff_layout_write_record_layoutstats_done(task, hdr); 1614 rpc_count_iostats_metrics(task, 1615 &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]); 1616 } 1617 1618 static void ff_layout_write_release(void *data) 1619 { 1620 struct nfs_pgio_header *hdr = data; 1621 
1622 ff_layout_write_record_layoutstats_done(&hdr->task, hdr); 1623 pnfs_generic_rw_release(data); 1624 } 1625 1626 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task, 1627 struct nfs_commit_data *cdata) 1628 { 1629 if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags)) 1630 return; 1631 nfs4_ff_layout_stat_io_start_write(cdata->inode, 1632 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index), 1633 0, task->tk_start); 1634 } 1635 1636 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task, 1637 struct nfs_commit_data *cdata) 1638 { 1639 struct nfs_page *req; 1640 __u64 count = 0; 1641 1642 if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags)) 1643 return; 1644 1645 if (task->tk_status == 0) { 1646 list_for_each_entry(req, &cdata->pages, wb_list) 1647 count += req->wb_bytes; 1648 } 1649 nfs4_ff_layout_stat_io_end_write(task, 1650 FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index), 1651 count, count, NFS_FILE_SYNC); 1652 } 1653 1654 static void ff_layout_commit_prepare_common(struct rpc_task *task, 1655 struct nfs_commit_data *cdata) 1656 { 1657 ff_layout_commit_record_layoutstats_start(task, cdata); 1658 } 1659 1660 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data) 1661 { 1662 ff_layout_commit_prepare_common(task, data); 1663 rpc_call_start(task); 1664 } 1665 1666 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data) 1667 { 1668 struct nfs_commit_data *wdata = data; 1669 1670 if (ff_layout_setup_sequence(wdata->ds_clp, 1671 &wdata->args.seq_args, 1672 &wdata->res.seq_res, 1673 task)) 1674 return; 1675 ff_layout_commit_prepare_common(task, data); 1676 } 1677 1678 static void ff_layout_commit_done(struct rpc_task *task, void *data) 1679 { 1680 pnfs_generic_write_commit_done(task, data); 1681 } 1682 1683 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data) 1684 { 1685 struct nfs_commit_data *cdata = data; 1686 1687 ff_layout_commit_record_layoutstats_done(task, cdata); 1688 rpc_count_iostats_metrics(task, 1689 &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]); 1690 } 1691 1692 static void ff_layout_commit_release(void *data) 1693 { 1694 struct nfs_commit_data *cdata = data; 1695 1696 ff_layout_commit_record_layoutstats_done(&cdata->task, cdata); 1697 pnfs_generic_commit_release(data); 1698 } 1699 1700 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = { 1701 .rpc_call_prepare = ff_layout_read_prepare_v3, 1702 .rpc_call_done = ff_layout_read_call_done, 1703 .rpc_count_stats = ff_layout_read_count_stats, 1704 .rpc_release = ff_layout_read_release, 1705 }; 1706 1707 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = { 1708 .rpc_call_prepare = ff_layout_read_prepare_v4, 1709 .rpc_call_done = ff_layout_read_call_done, 1710 .rpc_count_stats = ff_layout_read_count_stats, 1711 .rpc_release = ff_layout_read_release, 1712 }; 1713 1714 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = { 1715 .rpc_call_prepare = ff_layout_write_prepare_v3, 1716 .rpc_call_done = ff_layout_write_call_done, 1717 .rpc_count_stats = ff_layout_write_count_stats, 1718 .rpc_release = ff_layout_write_release, 1719 }; 1720 1721 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = { 1722 .rpc_call_prepare = ff_layout_write_prepare_v4, 1723 .rpc_call_done = ff_layout_write_call_done, 1724 .rpc_count_stats = ff_layout_write_count_stats, 1725 .rpc_release = ff_layout_write_release, 1726 }; 1727 1728 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = { 1729 
.rpc_call_prepare = ff_layout_commit_prepare_v3, 1730 .rpc_call_done = ff_layout_commit_done, 1731 .rpc_count_stats = ff_layout_commit_count_stats, 1732 .rpc_release = ff_layout_commit_release, 1733 }; 1734 1735 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = { 1736 .rpc_call_prepare = ff_layout_commit_prepare_v4, 1737 .rpc_call_done = ff_layout_commit_done, 1738 .rpc_count_stats = ff_layout_commit_count_stats, 1739 .rpc_release = ff_layout_commit_release, 1740 }; 1741 1742 static enum pnfs_try_status 1743 ff_layout_read_pagelist(struct nfs_pgio_header *hdr) 1744 { 1745 struct pnfs_layout_segment *lseg = hdr->lseg; 1746 struct nfs4_pnfs_ds *ds; 1747 struct rpc_clnt *ds_clnt; 1748 struct rpc_cred *ds_cred; 1749 loff_t offset = hdr->args.offset; 1750 u32 idx = hdr->pgio_mirror_idx; 1751 int vers; 1752 struct nfs_fh *fh; 1753 1754 dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n", 1755 __func__, hdr->inode->i_ino, 1756 hdr->args.pgbase, (size_t)hdr->args.count, offset); 1757 1758 ds = nfs4_ff_layout_prepare_ds(lseg, idx, false); 1759 if (!ds) 1760 goto out_failed; 1761 1762 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp, 1763 hdr->inode); 1764 if (IS_ERR(ds_clnt)) 1765 goto out_failed; 1766 1767 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred); 1768 if (!ds_cred) 1769 goto out_failed; 1770 1771 vers = nfs4_ff_layout_ds_version(lseg, idx); 1772 1773 dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__, 1774 ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers); 1775 1776 hdr->pgio_done_cb = ff_layout_read_done_cb; 1777 atomic_inc(&ds->ds_clp->cl_count); 1778 hdr->ds_clp = ds->ds_clp; 1779 fh = nfs4_ff_layout_select_ds_fh(lseg, idx); 1780 if (fh) 1781 hdr->args.fh = fh; 1782 /* 1783 * Note that if we ever decide to split across DSes, 1784 * then we may need to handle dense-like offsets. 1785 */ 1786 hdr->args.offset = offset; 1787 hdr->mds_offset = offset; 1788 1789 /* Perform an asynchronous read to ds */ 1790 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops, 1791 vers == 3 ? &ff_layout_read_call_ops_v3 : 1792 &ff_layout_read_call_ops_v4, 1793 0, RPC_TASK_SOFTCONN); 1794 put_rpccred(ds_cred); 1795 return PNFS_ATTEMPTED; 1796 1797 out_failed: 1798 if (ff_layout_avoid_mds_available_ds(lseg)) 1799 return PNFS_TRY_AGAIN; 1800 return PNFS_NOT_ATTEMPTED; 1801 } 1802 1803 /* Perform async writes. 
*/ 1804 static enum pnfs_try_status 1805 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync) 1806 { 1807 struct pnfs_layout_segment *lseg = hdr->lseg; 1808 struct nfs4_pnfs_ds *ds; 1809 struct rpc_clnt *ds_clnt; 1810 struct rpc_cred *ds_cred; 1811 loff_t offset = hdr->args.offset; 1812 int vers; 1813 struct nfs_fh *fh; 1814 int idx = hdr->pgio_mirror_idx; 1815 1816 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true); 1817 if (!ds) 1818 return PNFS_NOT_ATTEMPTED; 1819 1820 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp, 1821 hdr->inode); 1822 if (IS_ERR(ds_clnt)) 1823 return PNFS_NOT_ATTEMPTED; 1824 1825 ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred); 1826 if (!ds_cred) 1827 return PNFS_NOT_ATTEMPTED; 1828 1829 vers = nfs4_ff_layout_ds_version(lseg, idx); 1830 1831 dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n", 1832 __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count, 1833 offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), 1834 vers); 1835 1836 hdr->pgio_done_cb = ff_layout_write_done_cb; 1837 atomic_inc(&ds->ds_clp->cl_count); 1838 hdr->ds_clp = ds->ds_clp; 1839 hdr->ds_commit_idx = idx; 1840 fh = nfs4_ff_layout_select_ds_fh(lseg, idx); 1841 if (fh) 1842 hdr->args.fh = fh; 1843 1844 /* 1845 * Note that if we ever decide to split across DSes, 1846 * then we may need to handle dense-like offsets. 1847 */ 1848 hdr->args.offset = offset; 1849 1850 /* Perform an asynchronous write */ 1851 nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops, 1852 vers == 3 ? &ff_layout_write_call_ops_v3 : 1853 &ff_layout_write_call_ops_v4, 1854 sync, RPC_TASK_SOFTCONN); 1855 put_rpccred(ds_cred); 1856 return PNFS_ATTEMPTED; 1857 } 1858 1859 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) 1860 { 1861 return i; 1862 } 1863 1864 static struct nfs_fh * 1865 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i) 1866 { 1867 struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg); 1868 1869 /* FIXME: Assume that there is only one NFS version available 1870 * for the DS. 1871 */ 1872 return &flseg->mirror_array[i]->fh_versions[0]; 1873 } 1874 1875 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how) 1876 { 1877 struct pnfs_layout_segment *lseg = data->lseg; 1878 struct nfs4_pnfs_ds *ds; 1879 struct rpc_clnt *ds_clnt; 1880 struct rpc_cred *ds_cred; 1881 u32 idx; 1882 int vers, ret; 1883 struct nfs_fh *fh; 1884 1885 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index); 1886 ds = nfs4_ff_layout_prepare_ds(lseg, idx, true); 1887 if (!ds) 1888 goto out_err; 1889 1890 ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp, 1891 data->inode); 1892 if (IS_ERR(ds_clnt)) 1893 goto out_err; 1894 1895 ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred); 1896 if (!ds_cred) 1897 goto out_err; 1898 1899 vers = nfs4_ff_layout_ds_version(lseg, idx); 1900 1901 dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__, 1902 data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count), 1903 vers); 1904 data->commit_done_cb = ff_layout_commit_done_cb; 1905 data->cred = ds_cred; 1906 atomic_inc(&ds->ds_clp->cl_count); 1907 data->ds_clp = ds->ds_clp; 1908 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index); 1909 if (fh) 1910 data->args.fh = fh; 1911 1912 ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops, 1913 vers == 3 ? 
				  vers == 3 ? &ff_layout_commit_call_ops_v3 :
					      &ff_layout_commit_call_ops_v4,
				  how, RPC_TASK_SOFTCONN);
	put_rpccred(ds_cred);
	return ret;
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			  int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}

static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args,
				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *start;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	*start = cpu_to_be32(ff_args->num_errors);
	/* This assumes we always return _ALL_ layouts */
	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
}

static void
encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, len);
	xdr_encode_opaque_fixed(p, buf, len);
}

static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
				const nfs4_stateid *stateid,
				const struct nfs42_layoutstat_devinfo *devinfo)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 8 + 8);
	p = xdr_encode_hyper(p, devinfo->offset);
	p = xdr_encode_hyper(p, devinfo->length);
	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
	p = xdr_reserve_space(xdr, 4*8);
	p = xdr_encode_hyper(p, devinfo->read_count);
	p = xdr_encode_hyper(p, devinfo->read_bytes);
	p = xdr_encode_hyper(p, devinfo->write_count);
	p = xdr_encode_hyper(p, devinfo->write_bytes);
	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
}

static void
ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
			   const nfs4_stateid *stateid,
			   const struct nfs42_layoutstat_devinfo *devinfo)
{
	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
					 devinfo->ld_private.data);
}

/* Encode the layoutstats gathered for each mirror of this layout */
static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
					   const struct nfs4_layoutreturn_args *args,
					   struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *p;
	int i;

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(ff_args->num_dev);
	for (i = 0; i < ff_args->num_dev; i++)
		ff_layout_encode_ff_iostat(xdr,
				&args->layout->plh_stateid,
				&ff_args->devinfo[i]);
}

static void
ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
			     unsigned int num_entries)
{
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		if (!devinfo[i].ld_private.ops)
			continue;
		if (!devinfo[i].ld_private.ops->free)
			continue;
		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

static void
ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
		const void *voidargs,
		const struct nfs4_xdr_opaque_data *ff_opaque)
{
	const struct nfs4_layoutreturn_args *args = voidargs;
	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
	struct xdr_buf tmp_buf = {
		.head = {
			[0] = {
				.iov_base = page_address(ff_args->pages[0]),
			},
		},
		.buflen = PAGE_SIZE,
	};
	struct xdr_stream tmp_xdr;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);

	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL);

	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);

	start = xdr_reserve_space(xdr, 4);
	*start = cpu_to_be32(tmp_buf.len);
	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);

	dprintk("%s: Return\n", __func__);
}

static void
ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;

	if (!args->data)
		return;
	ff_args = args->data;
	args->data = NULL;

	ff_layout_free_ds_ioerr(&ff_args->errors);
	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);

	put_page(ff_args->pages[0]);
	kfree(ff_args);
}

const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
	.encode = ff_layout_encode_layoutreturn,
	.free = ff_layout_free_layoutreturn,
};
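/*
 * Package up the DS error list and the per-mirror layoutstats for this
 * layout so that they can be encoded into the body of the LAYOUTRETURN
 * request.  The resulting nfs4_flexfile_layoutreturn_args is hung off
 * args->ld_private and released via layoutreturn_ops once the request
 * is done.
 */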
static void
ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
			      const void *voidargs,
			      const struct nfs4_xdr_opaque_data *ff_opaque)
{
	const struct nfs4_layoutreturn_args *args = voidargs;
	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
	struct xdr_buf tmp_buf = {
		.head = {
			[0] = {
				.iov_base = page_address(ff_args->pages[0]),
			},
		},
		.buflen = PAGE_SIZE,
	};
	struct xdr_stream tmp_xdr;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);

	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL);

	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);

	start = xdr_reserve_space(xdr, 4);
	*start = cpu_to_be32(tmp_buf.len);
	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);

	dprintk("%s: Return\n", __func__);
}

static void
ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;

	if (!args->data)
		return;
	ff_args = args->data;
	args->data = NULL;

	ff_layout_free_ds_ioerr(&ff_args->errors);
	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);

	put_page(ff_args->pages[0]);
	kfree(ff_args);
}

const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
	.encode = ff_layout_encode_layoutreturn,
	.free = ff_layout_free_layoutreturn,
};

static int
ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);

	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
	if (!ff_args)
		goto out_nomem;
	ff_args->pages[0] = alloc_page(GFP_KERNEL);
	if (!ff_args->pages[0])
		goto out_nomem_free;

	INIT_LIST_HEAD(&ff_args->errors);
	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
			&args->range, &ff_args->errors,
			FF_LAYOUTRETURN_MAXERR);

	spin_lock(&args->inode->i_lock);
	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
	spin_unlock(&args->inode->i_lock);

	args->ld_private->ops = &layoutreturn_ops;
	args->ld_private->data = ff_args;
	return 0;
out_nomem_free:
	kfree(ff_args);
out_nomem:
	return -ENOMEM;
}

static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

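/*
 * Format an IPv6 data-server address in presentation form for the
 * netaddr4 universal address, following the RFC 4291 shorthand rules
 * but omitting any scope ID.
 */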
static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
				&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}

/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}

static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}

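/*
 * Encode one ff_layoutupdate4: the data server's netaddr4 and
 * filehandle, the read and write ff_io_latency4 blocks (sampled under
 * the mirror lock), the time elapsed since mirror->start_time, and a
 * trailing boolean that is always false.
 */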
static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
				 const struct nfs42_layoutstat_devinfo *devinfo,
				 struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);
}

static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
			     const struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
			struct nfs42_layoutstat_devinfo, ld_private);
	__be32 *start;

	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}

static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs4_ff_layout_mirror *mirror = opaque->data;

	ff_layout_put_mirror(mirror);
}

static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
	.encode = ff_layout_encode_layoutstats,
	.free = ff_layout_free_layoutstats,
};

static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	int i = 0;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (i >= dev_limit)
			break;
		if (IS_ERR_OR_NULL(mirror->mirror_ds))
			continue;
		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
			continue;
		/* mirror refcount put in cleanup_layoutstats */
		if (!atomic_inc_not_zero(&mirror->ref))
			continue;
		dev = &mirror->mirror_ds->id_node;
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = 0;
		devinfo->length = NFS4_MAX_UINT64;
		spin_lock(&mirror->lock);
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		spin_unlock(&mirror->lock);
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->ld_private.ops = &layoutstat_ops;
		devinfo->ld_private.data = mirror;

		devinfo++;
		i++;
	}
	return i;
}

static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct nfs4_flexfile_layout *ff_layout;
	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;

	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
			&args->devinfo[0], dev_count);
	spin_unlock(&args->inode->i_lock);
	if (!args->num_dev) {
		kfree(args->devinfo);
		args->devinfo = NULL;
		return -ENOENT;
	}

	return 0;
}

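/*
 * pNFS layout driver operations for the flexfiles layout type
 * (LAYOUT_FLEX_FILES).  Commit tracking is delegated to the generic
 * pNFS/NFS commit helpers; read, write, LAYOUTRETURN and LAYOUTSTATS
 * use the flexfiles-specific callbacks defined above.
 */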
static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.add_lseg		= ff_layout_add_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node	= ff_layout_alloc_deviceid_node,
	.prepare_layoutreturn	= ff_layout_prepare_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	if (!ff_zero_group) {
		ff_zero_group = groups_alloc(0);
		if (!ff_zero_group)
			return -ENOMEM;
	}
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
	if (ff_zero_group) {
		put_group_info(ff_zero_group);
		ff_zero_group = NULL;
	}
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);