// SPDX-License-Identifier: GPL-2.0-only
/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX	(15*HZ)
#define FF_LAYOUTRETURN_MAXERR		20

static unsigned short io_maxretrans;

static const struct pnfs_commit_ops ff_layout_commit_ops;
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);
static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
		struct nfs42_layoutstat_devinfo *devinfo,
		int dev_limit);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
		const struct nfs42_layoutstat_devinfo *devinfo,
		struct nfs4_ff_layout_mirror *mirror);

static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		pnfs_init_ds_commit_info(&ffl->commit_info);
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		ffl->last_report_time = ktime_get();
		ffl->commit_info.ops = &ff_layout_commit_ops;
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree_rcu(ffl, generic_hdr.plh_rcu);
}

static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}
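/*
 * Note: decode_nfs_fh() below copies into the fixed-size buffer embedded
 * in struct nfs_fh, so the advertised length must be bounds-checked
 * before the memcpy(); an oversized filehandle from the server is
 * rejected with -EOVERFLOW rather than truncated.
 */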
static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > sizeof(struct nfs_fh)) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4) */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					   &m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (refcount_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		refcount_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	const struct cred *cred;

	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	cred = rcu_access_pointer(mirror->ro_cred);
	put_cred(cred);
	cred = rcu_access_pointer(mirror->rw_cred);
	put_cred(cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	u32 i;

	for (i = 0; i < fls->mirror_array_cnt; i++)
		ff_layout_put_mirror(fls->mirror_array[i]);
}
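/*
 * Mirrors are shared between layout segments of the same layout:
 * ff_layout_add_mirror() reuses an existing entry (taking a reference)
 * when the deviceid and filehandle list match. Freeing a segment
 * therefore only drops references; the mirror itself is not released
 * until the final ff_layout_put_mirror().
 */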
static void
_ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

static bool
ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
		struct pnfs_layout_segment *l2)
{
	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
	u32 i;

	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
		return false;
	for (i = 0; i < fl1->mirror_array_cnt; i++) {
		if (fl1->mirror_array[i] != fl2->mirror_array[i])
			return false;
	}
	return true;
}

static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;
	if (!ff_lseg_match_mirrors(new, old))
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}
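/*
 * The layout body decoded below is, roughly, the ff_layout4 structure
 * from the flex files layout spec (RFC 8435):
 *
 *	stripe_unit (8), mirror count (4), then per mirror:
 *	ds count (4), deviceid, efficiency (4), stateid,
 *	fh version count (4) plus that many filehandles, user<>, group<>,
 *	followed by the layout flags and the stats collection hint.
 *
 * Mirrors are sorted by descending efficiency once decoding is done,
 * so readers naturally prefer the most efficient mirror at index 0.
 */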
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		struct nfs4_layoutget_res *lgr,
		gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
			gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct cred *kcred;
		const struct cred __rcu *cred;
		kuid_t uid;
		kgid_t gid;
		u32 ds_count, fh_count, id;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
		if (rc)
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kcalloc(fh_count, sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		uid = make_kuid(&init_user_ns, id);

		/* group */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		gid = make_kgid(&init_user_ns, id);

		if (gfp_flags & __GFP_FS)
			kcred = prepare_kernel_cred(NULL);
		else {
			unsigned int nofs_flags = memalloc_nofs_save();
			kcred = prepare_kernel_cred(NULL);
			memalloc_nofs_restore(nofs_flags);
		}
		rc = -ENOMEM;
		if (!kcred)
			goto out_err_free;
		kcred->fsuid = uid;
		kcred->fsgid = gid;
		cred = RCU_INITIALIZER(kcred);

		if (lgr->range.iomode == IOMODE_READ)
			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
		else
			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
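		/*
		 * If an equivalent mirror (same deviceid and filehandles)
		 * already hangs off this layout, reuse it: transplant the
		 * freshly built cred into the existing mirror and free the
		 * temporary one. The xchg() hands the old cred back so that
		 * ff_layout_free_mirror() releases it.
		 */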
		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			/* swap cred ptrs so free_mirror will clean up old */
			if (lgr->range.iomode == IOMODE_READ) {
				cred = xchg(&mirror->ro_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
			} else {
				cred = xchg(&mirror->rw_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
			}
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
			from_kuid(&init_user_ns, uid),
			from_kgid(&init_user_ns, gid));
	}

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	fls->flags = be32_to_cpup(p);

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
		fls->mirror_array[i]->report_interval = be32_to_cpup(p);

out_sort_mirrors:
	ff_layout_sort_mirrors(fls);
	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (!mirror->start_time)
		mirror->start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
	    report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}
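/*
 * Layoutstat accounting: the "requested" counters are bumped when an I/O
 * is handed to the DS, and the "completed" counters when the RPC
 * finishes. The busy timer above tracks the span during which at least
 * one operation was outstanding, which feeds the total_busy_time
 * reported via LAYOUTSTATS.
 */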
static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
		ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
		ktime_add(iostat->aggregate_completion_time,
			  completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (devid)
		nfs4_mark_deviceid_unavailable(devid);
}

static void
ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (devid)
		nfs4_mark_deviceid_available(devid);
}
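/*
 * Read DS selection walks the mirror array (already sorted by
 * efficiency). The caller first asks for a mirror whose device is
 * known-good (check_device == true) and only then falls back to any
 * mirror that can be set up, so a single unreachable DS does not
 * immediately push reads back to the MDS.
 */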
static struct nfs4_pnfs_ds *
ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
			     int start_idx, int *best_idx,
			     bool check_device)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	bool fail_return = false;
	int idx;

	/* mirrors are initially sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		if (idx+1 == fls->mirror_array_cnt)
			fail_return = !check_device;

		mirror = FF_LAYOUT_COMP(lseg, idx);
		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, fail_return);
		if (!ds)
			continue;

		if (check_device &&
		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
			continue;

		*best_idx = idx;
		return ds;
	}

	return NULL;
}

static struct nfs4_pnfs_ds *
ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
				 int start_idx, int *best_idx)
{
	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
}

static struct nfs4_pnfs_ds *
ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
				   int start_idx, int *best_idx)
{
	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
}

static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  int start_idx, int *best_idx)
{
	struct nfs4_pnfs_ds *ds;

	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
	if (ds)
		return ds;
	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
}

static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
		      struct nfs_page *req,
		      bool strict_iomode)
{
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   nfs_req_openctx(req),
					   req_offset(req),
					   req->wb_bytes,
					   IOMODE_READ,
					   strict_iomode,
					   GFP_KERNEL);
	if (IS_ERR(pgio->pg_lseg)) {
		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}
}

static void
ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio,
			  struct nfs_page *req)
{
	pnfs_generic_pg_check_layout(pgio);
	pnfs_generic_pg_check_range(pgio, req);
}

static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
		       struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	int ds_idx;

retry:
	ff_layout_pg_check_layout(pgio, req);
	/* Use full layout for now */
	if (!pgio->pg_lseg) {
		ff_layout_pg_get_read(pgio, req, false);
		if (!pgio->pg_lseg)
			goto out_nolseg;
	}
	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
		ff_layout_pg_get_read(pgio, req, true);
		if (!pgio->pg_lseg)
			goto out_nolseg;
	}

	ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
	if (!ds) {
		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
			goto out_mds;
		pnfs_put_lseg(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}

	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);

	pgio->pg_mirror_idx = ds_idx;

	/* read always uses only one mirror - idx 0 for pgio layer */
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	if (NFS_SERVER(pgio->pg_inode)->flags &
			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
		pgio->pg_maxretrans = io_maxretrans;
	return;
out_nolseg:
	if (pgio->pg_error < 0)
		return;
out_mds:
	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_READ,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	pgio->pg_maxretrans = 0;
	nfs_pageio_reset_read_mds(pgio);
}
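/*
 * Writes must be mirrored to every DS in the segment, so pg_init below
 * needs a usable connection for each mirror. If any mirror cannot be
 * set up and falling back to the MDS is forbidden by the layout flags,
 * the layout is dropped and the setup retried after a short sleep.
 */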
static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs4_pnfs_ds *ds;
	int i;

retry:
	ff_layout_pg_check_layout(pgio, req);
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   nfs_req_openctx(req),
						   req_offset(req),
						   req->wb_bytes,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (WARN_ON_ONCE(pgio->pg_mirror_count !=
	    FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
		goto out_mds;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
		if (!ds) {
			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
				goto out_mds;
			pnfs_put_lseg(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			/* Sleep for 1 second before retrying */
			ssleep(1);
			goto retry;
		}
		pgm = &pgio->pg_mirrors[i];
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	if (NFS_SERVER(pgio->pg_inode)->flags &
			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
		pgio->pg_maxretrans = io_maxretrans;
	return;

out_mds:
	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_RW,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
	pgio->pg_maxretrans = 0;
	nfs_pageio_reset_write_mds(pgio);
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   nfs_req_openctx(req),
						   0,
						   NFS4_MAX_UINT64,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_RW,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
out:
	return 1;
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		hdr->completion_ops->reschedule_io(hdr);
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		trace_pnfs_mds_fallback_write_done(hdr->inode,
				hdr->args.offset, hdr->args.count,
				IOMODE_RW, NFS_I(hdr->inode)->layout,
				hdr->lseg);
		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		trace_pnfs_mds_fallback_read_done(hdr->inode,
				hdr->args.offset, hdr->args.count,
				IOMODE_READ, NFS_I(hdr->inode)->layout,
				hdr->lseg);
		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}
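/*
 * DS error handling. The helpers below inspect the RPC status and tell
 * the caller how to recover:
 *
 *   -NFS4ERR_RESET_TO_PNFS: retry the I/O through another mirror
 *   -NFS4ERR_RESET_TO_MDS:  resend the I/O through the metadata server
 *   -EAGAIN:                restart the RPC against the same DS
 */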
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	switch (task->tk_status) {
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
	default:
		if (ff_layout_avoid_mds_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
	task->tk_status = 0;
	return -EAGAIN;
}
/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   struct pnfs_layout_segment *lseg,
					   int idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
	}
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					int idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	if (task->tk_status >= 0) {
		ff_layout_mark_ds_reachable(lseg, idx);
		return 0;
	}

	/* Handle the case of an invalid layout segment */
	if (!pnfs_is_valid_lseg(lseg))
		return -NFS4ERR_RESET_TO_PNFS;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}
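/*
 * Transport-level errno values have no direct NFS4ERR equivalent, so
 * they are folded into NFS4ERR_NXIO (or NFS4ERR_ACCESS for -EACCES)
 * before being recorded below; the recorded errors are later reported
 * back to the MDS via LAYOUTRETURN/LAYOUTERROR.
 */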
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					int idx, u64 offset, u64 length,
					u32 *op_status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	u32 status = *op_status;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			*op_status = status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			*op_status = status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);

	switch (status) {
	case NFS4ERR_DELAY:
	case NFS4ERR_GRACE:
		break;
	case NFS4ERR_NXIO:
		ff_layout_mark_ds_unreachable(lseg, idx);
		/* Fallthrough */
	default:
		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
						  lseg);
	}

	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */
static int ff_layout_read_done_cb(struct rpc_task *task,
				  struct nfs_pgio_header *hdr)
{
	int new_idx = hdr->pgio_mirror_idx;
	int err;

	if (task->tk_status < 0) {
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    &hdr->res.op_status, OP_READ,
					    task->tk_status);
		trace_ff_layout_read_error(hdr);
	}

	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	trace_nfs4_pnfs_read(hdr, err);
	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		if (ff_layout_choose_best_ds_for_read(hdr->lseg,
					hdr->pgio_mirror_idx + 1,
					&new_idx))
			goto out_layouterror;
		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	}

	return 0;
out_layouterror:
	ff_layout_read_record_layoutstats_done(task, hdr);
	ff_layout_send_layouterror(hdr->lseg);
	hdr->pgio_mirror_idx = new_idx;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * RFC 5661 is not clear about which credential should be used.
 *
 * The flexfiles client should treat a FILE_SYNC reply from the DS as
 * DATA_SYNC, so to follow
 * http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send a layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct inode *inode,
		struct pnfs_layout_segment *lseg,
		loff_t end_offset)
{
	if (!ff_layout_need_layoutcommit(lseg))
		return;

	pnfs_set_layoutcommit(inode, lseg, end_offset);
	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}

static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			hdr->res.count);
	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	ff_layout_read_record_layoutstats_start(task, hdr);
	return 0;
}
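/*
 * The NFS_IOHDR_STAT bit guarantees that layoutstats for a request are
 * started and finished exactly once: the _start helpers set the bit
 * (and bail if it was already set), while the _done helpers only record
 * if they can clear it. This matters because the "done" hook can run
 * from both the rpc_count_stats callback and the release path.
 */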
/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	ff_layout_read_prepare_common(task, hdr);
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static void ff_layout_read_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
		ff_layout_send_layouterror(hdr->lseg);
		pnfs_read_resend_pnfs(hdr);
	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
		ff_layout_reset_read(hdr);
	pnfs_generic_rw_release(data);
}


static int ff_layout_write_done_cb(struct rpc_task *task,
				   struct nfs_pgio_header *hdr)
{
	loff_t end_offs = 0;
	int err;

	if (task->tk_status < 0) {
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    &hdr->res.op_status, OP_WRITE,
					    task->tk_status);
		trace_ff_layout_write_error(hdr);
	}

	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	trace_nfs4_pnfs_write(hdr, err);
	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
		return task->tk_status;
	case -EAGAIN:
		return -EAGAIN;
	}

	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
	    hdr->res.verf->committed == NFS_DATA_SYNC)
		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;

	/* Note: if the write is unstable, don't set end_offs until commit */
	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);

	/* zero out fattr since we don't care about DS attrs at all */
	hdr->fattr.valid = 0;
	if (task->tk_status >= 0)
		nfs_writeback_update_inode(hdr);

	return 0;
}
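/*
 * On a commit failure, both reset cases below are handled identically:
 * the writes behind this commit are prepared for resend and -EAGAIN is
 * returned to the generic commit completion code.
 */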
static int ff_layout_commit_done_cb(struct rpc_task *task,
				    struct nfs_commit_data *data)
{
	int err;

	if (task->tk_status < 0) {
		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
					    data->args.offset, data->args.count,
					    &data->res.op_status, OP_COMMIT,
					    task->tk_status);
		trace_ff_layout_commit_error(data);
	}

	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
					   data->lseg, data->ds_commit_index);

	trace_nfs4_pnfs_commit_ds(data, err);
	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -NFS4ERR_RESET_TO_MDS:
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);

	return 0;
}

static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count, hdr->res.count,
			hdr->res.verf->committed);
	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}

static int ff_layout_write_prepare_common(struct rpc_task *task,
					  struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	ff_layout_write_record_layoutstats_start(task, hdr);
	return 0;
}

static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_write_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	ff_layout_write_prepare_common(task, hdr);
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_write_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
		ff_layout_send_layouterror(hdr->lseg);
		ff_layout_reset_write(hdr, true);
	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
		ff_layout_reset_write(hdr, false);
	pnfs_generic_rw_release(data);
}
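/*
 * For COMMIT, layoutstats are recorded as a zero-byte "write" at start;
 * on completion the bytes covered by the committed requests are counted
 * as both requested and completed with NFS_FILE_SYNC, since a
 * successful commit makes previously unstable writes durable.
 */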
static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0, task->tk_start);
}

static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	struct nfs_page *req;
	__u64 count = 0;

	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);
	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
}

static void ff_layout_commit_prepare_common(struct rpc_task *task,
					    struct nfs_commit_data *cdata)
{
	ff_layout_commit_record_layoutstats_start(task, cdata);
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	ff_layout_commit_prepare_common(task, data);
	rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (nfs4_setup_sequence(wdata->ds_clp,
				&wdata->args.seq_args,
				&wdata->res.seq_res,
				task))
		return;
	ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(task, cdata);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}

static void ff_layout_commit_release(void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
	pnfs_generic_commit_release(data);
}

static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};
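/*
 * The v3 and v4 rpc_call_ops tables differ only in their prepare step:
 * v4 must claim an NFSv4.1 session slot via nfs4_setup_sequence()
 * before the common prepare work, while v3 can start the call directly.
 */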
static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct nfs4_ff_layout_mirror *mirror;
	const struct cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	mirror = FF_LAYOUT_COMP(lseg, idx);
	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(mirror);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);

	hdr->pgio_done_cb = ff_layout_read_done_cb;
	refcount_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(mirror);
	if (fh)
		hdr->args.fh = fh;

	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN);
	put_cred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
			hdr->args.offset, hdr->args.count,
			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
	return PNFS_NOT_ATTEMPTED;
}
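/*
 * DS I/O is initiated with RPC_TASK_SOFTCONN so that a connection
 * failure surfaces quickly instead of retrying forever; the done and
 * release callbacks then decide whether to fail over to another mirror
 * or to fall back to the MDS.
 */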
/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct nfs4_ff_layout_mirror *mirror;
	const struct cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	int idx = hdr->pgio_mirror_idx;

	mirror = FF_LAYOUT_COMP(lseg, idx);
	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(mirror);

	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	refcount_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(mirror);
	if (fh)
		hdr->args.fh = fh;

	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN);
	put_cred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
			hdr->args.offset, hdr->args.count,
			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
	return PNFS_NOT_ATTEMPTED;
}

static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	return i;
}
static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assume that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->fh_versions[0];
}

static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct nfs4_ff_layout_mirror *mirror;
	const struct cred *ds_cred;
	u32 idx;
	int vers, ret;
	struct nfs_fh *fh;

	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
		goto out_err;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	mirror = FF_LAYOUT_COMP(lseg, idx);
	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
	if (!ds_cred)
		goto out_err;

	vers = nfs4_ff_layout_ds_version(mirror);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	refcount_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				  vers == 3 ? &ff_layout_commit_call_ops_v3 :
					      &ff_layout_commit_call_ops_v4,
				  how, RPC_TASK_SOFTCONN);
	put_cred(ds_cred);
	return ret;
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			  int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
		struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct pnfs_commit_array *array, *new;

	new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, GFP_NOIO);
	if (new) {
		spin_lock(&inode->i_lock);
		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
		spin_unlock(&inode->i_lock);
		if (array != new)
			pnfs_free_commit_array(new);
	}
}

static void
ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
		struct inode *inode)
{
	spin_lock(&inode->i_lock);
	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
	spin_unlock(&inode->i_lock);
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}
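/*
 * LAYOUTRETURN for flexfiles carries an opaque body holding two arrays:
 * the accumulated per-DS I/O errors and the per-mirror iostats. The
 * encoders below fill that body; it is staged in a scratch page and
 * spliced into the outgoing XDR stream as a single opaque blob.
 */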
static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args,
				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *start;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	*start = cpu_to_be32(ff_args->num_errors);
	/* This assumes we always return _ALL_ layouts */
	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
}

static void
encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
{
	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
}

static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
				const nfs4_stateid *stateid,
				const struct nfs42_layoutstat_devinfo *devinfo)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 8 + 8);
	p = xdr_encode_hyper(p, devinfo->offset);
	p = xdr_encode_hyper(p, devinfo->length);
	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
	p = xdr_reserve_space(xdr, 4*8);
	p = xdr_encode_hyper(p, devinfo->read_count);
	p = xdr_encode_hyper(p, devinfo->read_bytes);
	p = xdr_encode_hyper(p, devinfo->write_count);
	p = xdr_encode_hyper(p, devinfo->write_bytes);
	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
}

static void
ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
			   const nfs4_stateid *stateid,
			   const struct nfs42_layoutstat_devinfo *devinfo)
{
	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
					 devinfo->ld_private.data);
}

/* Encode the iostats gathered for each device */
static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
					   const struct nfs4_layoutreturn_args *args,
					   struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *p;
	int i;

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(ff_args->num_dev);
	for (i = 0; i < ff_args->num_dev; i++)
		ff_layout_encode_ff_iostat(xdr,
					   &args->layout->plh_stateid,
					   &ff_args->devinfo[i]);
}

static void
ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
			     unsigned int num_entries)
{
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		if (!devinfo[i].ld_private.ops)
			continue;
		if (!devinfo[i].ld_private.ops->free)
			continue;
		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}
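
/*
 * The LAYOUTRETURN payload is encoded in two passes (a summary of the
 * logic below, not additional behaviour): the ioerr and iostats arrays
 * are first encoded into a private scratch page through a temporary
 * xdr_stream; the resulting length is then written into the real stream
 * as a 4-byte prefix and the scratch page is attached with
 * xdr_write_pages().  This avoids having to predict the payload size
 * before encoding it.
 */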
static void
ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
			      const void *voidargs,
			      const struct nfs4_xdr_opaque_data *ff_opaque)
{
	const struct nfs4_layoutreturn_args *args = voidargs;
	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
	struct xdr_buf tmp_buf = {
		.head = {
			[0] = {
				.iov_base = page_address(ff_args->pages[0]),
			},
		},
		.buflen = PAGE_SIZE,
	};
	struct xdr_stream tmp_xdr;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);

	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);

	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);

	start = xdr_reserve_space(xdr, 4);
	*start = cpu_to_be32(tmp_buf.len);
	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);

	dprintk("%s: Return\n", __func__);
}

static void
ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;

	if (!args->data)
		return;
	ff_args = args->data;
	args->data = NULL;

	ff_layout_free_ds_ioerr(&ff_args->errors);
	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);

	put_page(ff_args->pages[0]);
	kfree(ff_args);
}

static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
	.encode = ff_layout_encode_layoutreturn,
	.free = ff_layout_free_layoutreturn,
};

static int
ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);

	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
	if (!ff_args)
		goto out_nomem;
	ff_args->pages[0] = alloc_page(GFP_KERNEL);
	if (!ff_args->pages[0])
		goto out_nomem_free;

	INIT_LIST_HEAD(&ff_args->errors);
	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
			&args->range, &ff_args->errors,
			FF_LAYOUTRETURN_MAXERR);

	spin_lock(&args->inode->i_lock);
	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
	spin_unlock(&args->inode->i_lock);

	args->ld_private->ops = &layoutreturn_ops;
	args->ld_private->data = ff_args;
	return 0;
out_nomem_free:
	kfree(ff_args);
out_nomem:
	return -ENOMEM;
}

#ifdef CONFIG_NFS_V4_2
void
ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct nfs42_layout_error *errors;
	LIST_HEAD(head);

	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
		return;
	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
	if (list_empty(&head))
		return;

	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX,
			       sizeof(*errors), GFP_NOFS);
	if (errors != NULL) {
		const struct nfs4_ff_layout_ds_err *pos;
		size_t n = 0;

		list_for_each_entry(pos, &head, list) {
			errors[n].offset = pos->offset;
			errors[n].length = pos->length;
			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
			errors[n].errors[0].dev_id = pos->deviceid;
			errors[n].errors[0].status = pos->status;
			errors[n].errors[0].opnum = pos->opnum;
			n++;
			if (!list_is_last(&pos->list, &head) &&
			    n < NFS42_LAYOUTERROR_MAX)
				continue;
			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
				break;
			n = 0;
		}
		kfree(errors);
	}
	ff_layout_free_ds_ioerr(&head);
}
#else
void
ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
{
}
#endif

static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
				&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}
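
/*
 * Illustrative example (addresses made up): a universal address appends
 * the port, split into its high and low bytes, to the presentation
 * address.  A DS at 192.0.2.53 on port 2049 (0x0801) is encoded as the
 * netid/uaddr pair
 *
 *	"tcp" / "192.0.2.53.8.1"
 *
 * and a v4-mapped IPv6 DS on the same port as
 *
 *	"tcp6" / "::ffff:192.0.2.53.8.1"
 */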
/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}

static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}
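
/*
 * A sketch of the per-mirror layoutstats body emitted below, in order
 * (matching the inline comments in the function):
 *
 *	netaddr4	- netid + uaddr of the mirror's first DS address
 *	nfs_fh4		- the mirror's first fh version
 *	ff_io_latency4	- read stats: 5 hyper counters + 2 nfstime4
 *	ff_io_latency4	- write stats: 5 hyper counters + 2 nfstime4
 *	nfstime4	- time elapsed since mirror->start_time
 *	bool		- "local" flag, always false here
 */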
static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
				 const struct nfs42_layoutstat_devinfo *devinfo,
				 struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);
}

static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
			     const struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
			struct nfs42_layoutstat_devinfo, ld_private);
	__be32 *start;

	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}

static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs4_ff_layout_mirror *mirror = opaque->data;

	ff_layout_put_mirror(mirror);
}

static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
	.encode = ff_layout_encode_layoutstats,
	.free = ff_layout_free_layoutstats,
};
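
/*
 * ff_layout_mirror_prepare_stats() snapshots up to dev_limit devinfo
 * entries, one per mirror whose NFS4_FF_MIRROR_STAT_AVAIL bit is set
 * (the bit is cleared as the entry is taken).  A reference is taken on
 * each mirror; it is dropped by ff_layout_free_layoutstats() once the
 * entry has been encoded or freed unsent.
 */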
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	int i = 0;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (i >= dev_limit)
			break;
		if (IS_ERR_OR_NULL(mirror->mirror_ds))
			continue;
		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
			continue;
		/* mirror refcount put in cleanup_layoutstats */
		if (!refcount_inc_not_zero(&mirror->ref))
			continue;
		dev = &mirror->mirror_ds->id_node;
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = 0;
		devinfo->length = NFS4_MAX_UINT64;
		spin_lock(&mirror->lock);
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		spin_unlock(&mirror->lock);
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->ld_private.ops = &layoutstat_ops;
		devinfo->ld_private.data = mirror;

		devinfo++;
		i++;
	}
	return i;
}

static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct nfs4_flexfile_layout *ff_layout;
	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;

	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
			&args->devinfo[0], dev_count);
	spin_unlock(&args->inode->i_lock);
	if (!args->num_dev) {
		kfree(args->devinfo);
		args->devinfo = NULL;
		return -ENOENT;
	}

	return 0;
}

static int
ff_layout_set_layoutdriver(struct nfs_server *server,
			   const struct nfs_fh *dummy)
{
#if IS_ENABLED(CONFIG_NFS_V4_2)
	server->caps |= NFS_CAP_LAYOUTSTATS;
#endif
	return 0;
}

static const struct pnfs_commit_ops ff_layout_commit_ops = {
	.setup_ds_info		= ff_layout_setup_ds_info,
	.release_ds_info	= ff_layout_release_ds_info,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
};

static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.flags			= PNFS_LAYOUTGET_ON_OPEN,
	.max_layoutget_response	= 4096, /* 1 page or so... */
	.set_layoutdriver	= ff_layout_set_layoutdriver,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.add_lseg		= ff_layout_add_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node	= ff_layout_alloc_deviceid_node,
	.prepare_layoutreturn	= ff_layout_prepare_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);

module_param(io_maxretrans, ushort, 0644);
MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
			"retries an I/O request before returning an error.");
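
/*
 * Illustrative usage (the module name is assumed from the flexfilelayout
 * Makefile, not from this file): the retry cap can be set at load time
 * or later through sysfs, e.g.
 *
 *	# modprobe nfs_layout_flexfiles io_maxretrans=5
 *	# echo 5 > /sys/module/nfs_layout_flexfiles/parameters/io_maxretrans
 */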