// SPDX-License-Identifier: GPL-2.0
/*
 * Device operations for the pnfs nfs4 file layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/sunrpc/addr.h>

#include "../internal.h"
#include "../nfs4session.h"
#include "flexfilelayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
static unsigned int dataserver_retrans;

static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);

void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
	if (!IS_ERR_OR_NULL(mirror_ds))
		nfs4_put_deviceid_node(&mirror_ds->id_node);
}

void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
{
	nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
	nfs4_pnfs_ds_put(mirror_ds->ds);
	kfree(mirror_ds->ds_versions);
	kfree_rcu(mirror_ds, id_node.rcu);
}

/* Decode opaque device data and construct new_ds using it */
struct nfs4_ff_layout_ds *
nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
			    gfp_t gfp_flags)
{
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	struct list_head dsaddrs;
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_ff_layout_ds *new_ds = NULL;
	struct nfs4_ff_ds_version *ds_versions = NULL;
	u32 mp_count;
	u32 version_count;
	__be32 *p;
	int i, ret = -ENOMEM;

	/* set up xdr stream */
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		goto out_err;

	new_ds = kzalloc(sizeof(struct nfs4_ff_layout_ds), gfp_flags);
	if (!new_ds)
		goto out_scratch;

	nfs4_init_deviceid_node(&new_ds->id_node,
				server,
				&pdev->dev_id);
	INIT_LIST_HEAD(&dsaddrs);

	xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* multipath count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_drain_dsaddrs;
	mp_count = be32_to_cpup(p);
	dprintk("%s: multipath ds count %d\n", __func__, mp_count);

	for (i = 0; i < mp_count; i++) {
		/* multipath ds */
		da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net,
					    &stream, gfp_flags);
		if (da)
			list_add_tail(&da->da_node, &dsaddrs);
	}
	if (list_empty(&dsaddrs)) {
		dprintk("%s: no suitable DS addresses found\n",
			__func__);
		ret = -ENOMEDIUM;
		goto out_err_drain_dsaddrs;
	}

	/* version count */
	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out_err_drain_dsaddrs;
	version_count = be32_to_cpup(p);
	dprintk("%s: version count %d\n", __func__, version_count);

	ds_versions = kcalloc(version_count,
			      sizeof(struct nfs4_ff_ds_version),
			      gfp_flags);
	if (!ds_versions)
		goto out_err_drain_dsaddrs;

	for (i = 0; i < version_count; i++) {
		/* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) +
		 * tightly_coupled(4) */
		p = xdr_inline_decode(&stream, 20);
		if (unlikely(!p))
			goto out_err_drain_dsaddrs;
		ds_versions[i].version = be32_to_cpup(p++);
		ds_versions[i].minor_version = be32_to_cpup(p++);
		ds_versions[i].rsize = nfs_block_size(be32_to_cpup(p++), NULL);
		ds_versions[i].wsize = nfs_block_size(be32_to_cpup(p++), NULL);
		ds_versions[i].tightly_coupled = be32_to_cpup(p);
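
		/* Clamp the decoded I/O sizes to the client's supported maximum */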
		if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE)
			ds_versions[i].rsize = NFS_MAX_FILE_IO_SIZE;
		if (ds_versions[i].wsize > NFS_MAX_FILE_IO_SIZE)
			ds_versions[i].wsize = NFS_MAX_FILE_IO_SIZE;

		/*
		 * Check for a valid major/minor combination.
		 * Currently we support data servers that talk v3, v4.0,
		 * v4.1 and v4.2.
		 */
		if (!((ds_versions[i].version == 3 && ds_versions[i].minor_version == 0) ||
		      (ds_versions[i].version == 4 && ds_versions[i].minor_version < 3))) {
			dprintk("%s: [%d] unsupported ds version %d-%d\n", __func__,
				i, ds_versions[i].version,
				ds_versions[i].minor_version);
			ret = -EPROTONOSUPPORT;
			goto out_err_drain_dsaddrs;
		}

		dprintk("%s: [%d] vers %u minor_ver %u rsize %u wsize %u coupled %d\n",
			__func__, i, ds_versions[i].version,
			ds_versions[i].minor_version,
			ds_versions[i].rsize,
			ds_versions[i].wsize,
			ds_versions[i].tightly_coupled);
	}

	new_ds->ds_versions = ds_versions;
	new_ds->ds_versions_cnt = version_count;

	new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
	if (!new_ds->ds)
		goto out_err_drain_dsaddrs;

	/* If DS was already in cache, free ds addrs */
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs,
				      struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	__free_page(scratch);
	return new_ds;

out_err_drain_dsaddrs:
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	kfree(ds_versions);
out_scratch:
	__free_page(scratch);
out_err:
	kfree(new_ds);

	dprintk("%s ERROR: returning %d\n", __func__, ret);
	return NULL;
}

static void ff_layout_mark_devid_invalid(struct pnfs_layout_segment *lseg,
					 struct nfs4_deviceid_node *devid)
{
	nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid);
	if (!ff_layout_has_available_ds(lseg))
		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
						  lseg);
}

static bool ff_layout_mirror_valid(struct pnfs_layout_segment *lseg,
				   struct nfs4_ff_layout_mirror *mirror,
				   bool create)
{
	if (mirror == NULL || IS_ERR(mirror->mirror_ds))
		goto outerr;
	if (mirror->mirror_ds == NULL) {
		if (create) {
			struct nfs4_deviceid_node *node;
			struct pnfs_layout_hdr *lh = lseg->pls_layout;
			struct nfs4_ff_layout_ds *mirror_ds = ERR_PTR(-ENODEV);

			node = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode),
					&mirror->devid, lh->plh_lc_cred,
					GFP_KERNEL);
			if (node)
				mirror_ds = FF_LAYOUT_MIRROR_DS(node);

			/* check for race with another call to this function */
			if (cmpxchg(&mirror->mirror_ds, NULL, mirror_ds) &&
			    mirror_ds != ERR_PTR(-ENODEV))
				nfs4_put_deviceid_node(node);
		} else
			goto outerr;
	}

	if (IS_ERR(mirror->mirror_ds))
		goto outerr;

	if (mirror->mirror_ds->ds == NULL) {
		struct nfs4_deviceid_node *devid;
		devid = &mirror->mirror_ds->id_node;
		ff_layout_mark_devid_invalid(lseg, devid);
		return false;
	}
	return true;
outerr:
	pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg);
	return false;
}
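
/*
 * Merge a new byte range into an existing tracked DS error so that the
 * entry covers the union of both ranges.
 */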
static void extend_ds_error(struct nfs4_ff_layout_ds_err *err,
			    u64 offset, u64 length)
{
	u64 end;

	end = max_t(u64, pnfs_end_offset(err->offset, err->length),
		    pnfs_end_offset(offset, length));
	err->offset = min_t(u64, err->offset, offset);
	err->length = end - err->offset;
}

static int
ff_ds_error_match(const struct nfs4_ff_layout_ds_err *e1,
		  const struct nfs4_ff_layout_ds_err *e2)
{
	int ret;

	if (e1->opnum != e2->opnum)
		return e1->opnum < e2->opnum ? -1 : 1;
	if (e1->status != e2->status)
		return e1->status < e2->status ? -1 : 1;
	ret = memcmp(e1->stateid.data, e2->stateid.data,
		     sizeof(e1->stateid.data));
	if (ret != 0)
		return ret;
	ret = memcmp(&e1->deviceid, &e2->deviceid, sizeof(e1->deviceid));
	if (ret != 0)
		return ret;
	if (pnfs_end_offset(e1->offset, e1->length) < e2->offset)
		return -1;
	if (e1->offset > pnfs_end_offset(e2->offset, e2->length))
		return 1;
	/* If ranges overlap or are contiguous, they are the same */
	return 0;
}

static void
ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo,
			      struct nfs4_ff_layout_ds_err *dserr)
{
	struct nfs4_ff_layout_ds_err *err, *tmp;
	struct list_head *head = &flo->error_list;
	int match;

	/* Do insertion sort w/ merges */
	list_for_each_entry_safe(err, tmp, &flo->error_list, list) {
		match = ff_ds_error_match(err, dserr);
		if (match < 0)
			continue;
		if (match > 0) {
			/* Add entry "dserr" _before_ entry "err" */
			head = &err->list;
			break;
		}
		/* Entries match, so merge "err" into "dserr" */
		extend_ds_error(dserr, err->offset, err->length);
		list_replace(&err->list, &dserr->list);
		kfree(err);
		return;
	}

	list_add_tail(&dserr->list, head);
}

int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
			     struct nfs4_ff_layout_mirror *mirror, u64 offset,
			     u64 length, int status, enum nfs_opnum4 opnum,
			     gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds_err *dserr;

	if (status == 0)
		return 0;

	if (mirror->mirror_ds == NULL)
		return -EINVAL;

	dserr = kmalloc(sizeof(*dserr), gfp_flags);
	if (!dserr)
		return -ENOMEM;

	INIT_LIST_HEAD(&dserr->list);
	dserr->offset = offset;
	dserr->length = length;
	dserr->status = status;
	dserr->opnum = opnum;
	nfs4_stateid_copy(&dserr->stateid, &mirror->stateid);
	memcpy(&dserr->deviceid, &mirror->mirror_ds->id_node.deviceid,
	       NFS4_DEVICEID4_SIZE);

	spin_lock(&flo->generic_hdr.plh_inode->i_lock);
	ff_layout_add_ds_error_locked(flo, dserr);
	spin_unlock(&flo->generic_hdr.plh_inode->i_lock);

	return 0;
}
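
/*
 * Take a reference to the credential set on the mirror for this I/O mode,
 * if one exists. Returns NULL if no per-mirror credential is set.
 */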
static struct rpc_cred *
ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
{
	struct rpc_cred *cred, __rcu **pcred;

	if (iomode == IOMODE_READ)
		pcred = &mirror->ro_cred;
	else
		pcred = &mirror->rw_cred;

	rcu_read_lock();
	do {
		cred = rcu_dereference(*pcred);
		if (!cred)
			break;

		cred = get_rpccred_rcu(cred);
	} while (!cred);
	rcu_read_unlock();
	return cred;
}

struct nfs_fh *
nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
	struct nfs_fh *fh = NULL;

	if (!ff_layout_mirror_valid(lseg, mirror, false)) {
		pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
			__func__, mirror_idx);
		goto out;
	}

	/* FIXME: For now assume there is only 1 version available for the DS */
	fh = &mirror->fh_versions[0];
out:
	return fh;
}

/**
 * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
 * @lseg: the layout segment we're operating on
 * @ds_idx: index of the DS to use
 * @fail_return: return layout on connect failure?
 *
 * Try to prepare a DS connection to accept an RPC call. This involves
 * selecting a mirror to use and connecting the client to it if it's not
 * already connected.
 *
 * Since we only need a single functioning mirror to satisfy a read, we don't
 * want to return the layout as long as one is still available. For writes,
 * though, any down mirror should result in a LAYOUTRETURN. @fail_return is
 * how we distinguish between the two cases.
 *
 * Returns a pointer to a connected DS object on success or NULL on failure.
 */
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
			  bool fail_return)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
	struct nfs4_pnfs_ds *ds = NULL;
	struct nfs4_deviceid_node *devid;
	struct inode *ino = lseg->pls_layout->plh_inode;
	struct nfs_server *s = NFS_SERVER(ino);
	unsigned int max_payload;
	int status;

	if (!ff_layout_mirror_valid(lseg, mirror, true)) {
		pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
			__func__, ds_idx);
		goto out;
	}

	devid = &mirror->mirror_ds->id_node;
	if (ff_layout_test_devid_unavailable(devid))
		goto out_fail;

	ds = mirror->mirror_ds->ds;
	/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
	smp_rmb();
	if (ds->ds_clp)
		goto out;

	/* FIXME: For now we assume the server sent only one version of NFS
	 * to use for the DS.
	 */
	status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
				      dataserver_retrans,
				      mirror->mirror_ds->ds_versions[0].version,
				      mirror->mirror_ds->ds_versions[0].minor_version);

	/* connect success, check rsize/wsize limit */
	if (!status) {
		max_payload =
			nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient),
				       NULL);
		if (mirror->mirror_ds->ds_versions[0].rsize > max_payload)
			mirror->mirror_ds->ds_versions[0].rsize = max_payload;
		if (mirror->mirror_ds->ds_versions[0].wsize > max_payload)
			mirror->mirror_ds->ds_versions[0].wsize = max_payload;
		goto out;
	}
out_fail:
	ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				 mirror, lseg->pls_range.offset,
				 lseg->pls_range.length, NFS4ERR_NXIO,
				 OP_ILLEGAL, GFP_NOIO);
	if (fail_return || !ff_layout_has_available_ds(lseg))
		pnfs_error_mark_layout_for_return(ino, lseg);
	ds = NULL;
out:
	return ds;
}
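
/*
 * Return the credential to use for I/O to this data server: prefer the
 * per-mirror credential for the segment's I/O mode and fall back to the
 * MDS credential if none is set.
 */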
struct rpc_cred *
ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx,
		      struct rpc_cred *mdscred)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
	struct rpc_cred *cred;

	if (mirror) {
		cred = ff_layout_get_mirror_cred(mirror, lseg->pls_range.iomode);
		if (!cred)
			cred = get_rpccred(mdscred);
	} else {
		cred = get_rpccred(mdscred);
	}
	return cred;
}

/**
 * Find or create a DS rpc client with the MDS server rpc client auth flavor
 * in the nfs_client cl_ds_clients list.
 */
struct rpc_clnt *
nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, u32 ds_idx,
				 struct nfs_client *ds_clp, struct inode *inode)
{
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);

	switch (mirror->mirror_ds->ds_versions[0].version) {
	case 3:
		/* For NFSv3 DS, flavor is set when creating DS connections */
		return ds_clp->cl_rpcclient;
	case 4:
		return nfs4_find_or_create_ds_client(ds_clp, inode);
	default:
		BUG();
	}
}

void ff_layout_free_ds_ioerr(struct list_head *head)
{
	struct nfs4_ff_layout_ds_err *err;

	while (!list_empty(head)) {
		err = list_first_entry(head,
				       struct nfs4_ff_layout_ds_err,
				       list);
		list_del(&err->list);
		kfree(err);
	}
}

/* called with inode i_lock held */
int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)
{
	struct nfs4_ff_layout_ds_err *err;
	__be32 *p;

	list_for_each_entry(err, head, list) {
		/* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
		 * + array length + deviceid(NFS4_DEVICEID4_SIZE)
		 * + status(4) + opnum(4)
		 */
		p = xdr_reserve_space(xdr,
				28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
		if (unlikely(!p))
			return -ENOBUFS;
		p = xdr_encode_hyper(p, err->offset);
		p = xdr_encode_hyper(p, err->length);
		p = xdr_encode_opaque_fixed(p, &err->stateid,
					    NFS4_STATEID_SIZE);
		/* Encode 1 error */
		*p++ = cpu_to_be32(1);
		p = xdr_encode_opaque_fixed(p, &err->deviceid,
					    NFS4_DEVICEID4_SIZE);
		*p++ = cpu_to_be32(err->status);
		*p++ = cpu_to_be32(err->opnum);
		dprintk("%s: offset %llu length %llu status %d op %d\n",
			__func__, err->offset, err->length, err->status,
			err->opnum);
	}

	return 0;
}

static
unsigned int do_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
				      const struct pnfs_layout_range *range,
				      struct list_head *head,
				      unsigned int maxnum)
{
	struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo);
	struct inode *inode = lo->plh_inode;
	struct nfs4_ff_layout_ds_err *err, *n;
	unsigned int ret = 0;

	spin_lock(&inode->i_lock);
	list_for_each_entry_safe(err, n, &flo->error_list, list) {
		if (!pnfs_is_range_intersecting(err->offset,
				pnfs_end_offset(err->offset, err->length),
				range->offset,
				pnfs_end_offset(range->offset, range->length)))
			continue;
		if (!maxnum)
			break;
		list_move(&err->list, head);
		maxnum--;
		ret++;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
				      const struct pnfs_layout_range *range,
				      struct list_head *head,
				      unsigned int maxnum)
{
	unsigned int ret;

	ret = do_layout_fetch_ds_ioerr(lo, range, head, maxnum);
	/* If we're over the max, discard all remaining entries */
	if (ret == maxnum) {
		LIST_HEAD(discard);
		do_layout_fetch_ds_ioerr(lo, range, &discard, -1);
		ff_layout_free_ds_ioerr(&discard);
	}
	return ret;
}
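
/* For a READ layout, a single usable (or not yet tried) mirror is enough */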
static bool ff_read_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *devid;
	u32 idx;

	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		if (mirror) {
			if (!mirror->mirror_ds)
				return true;
			if (IS_ERR(mirror->mirror_ds))
				continue;
			devid = &mirror->mirror_ds->id_node;
			if (!ff_layout_test_devid_unavailable(devid))
				return true;
		}
	}

	return false;
}

static bool ff_rw_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *devid;
	u32 idx;

	for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		if (!mirror || IS_ERR(mirror->mirror_ds))
			return false;
		if (!mirror->mirror_ds)
			continue;
		devid = &mirror->mirror_ds->id_node;
		if (ff_layout_test_devid_unavailable(devid))
			return false;
	}

	return FF_LAYOUT_MIRROR_COUNT(lseg) != 0;
}

static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
{
	if (lseg->pls_range.iomode == IOMODE_READ)
		return ff_read_layout_has_available_ds(lseg);
	/* Note: RW layout needs all mirrors available */
	return ff_rw_layout_has_available_ds(lseg);
}

bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg)
{
	return ff_layout_no_fallback_to_mds(lseg) ||
	       ff_layout_has_available_ds(lseg);
}

bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg)
{
	return lseg->pls_range.iomode == IOMODE_RW &&
	       ff_layout_no_read_on_rw(lseg);
}

module_param(dataserver_retrans, uint, 0644);
MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
			"retries a request before it attempts further "
			"recovery action.");
module_param(dataserver_timeo, uint, 0644);
MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
			"NFSv4.1 client waits for a response from a "
			"data server before it retries an NFS request.");