// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
#include <linux/fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs42.h"
#include "iostat.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);

static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
	struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
	unsigned short port = 2049;

	rcu_read_lock();
	naddr->netid_len = scnprintf(naddr->netid,
				     sizeof(naddr->netid), "%s",
				     rpc_peeraddr2str(clp->cl_rpcclient,
						      RPC_DISPLAY_NETID));
	naddr->addr_len = scnprintf(naddr->addr,
				    sizeof(naddr->addr),
				    "%s.%u.%u",
				    rpc_peeraddr2str(clp->cl_rpcclient,
						     RPC_DISPLAY_ADDR),
				    port >> 8, port & 255);
	rcu_read_unlock();
}

static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				 struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_falloc_args args = {
		.falloc_fh = NFS_FH(inode),
		.falloc_offset = offset,
		.falloc_length = len,
		.falloc_bitmask = nfs4_fattr_bitmap,
	};
	struct nfs42_falloc_res res = {
		.falloc_server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
				     lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	res.falloc_fattr = nfs_alloc_fattr();
	if (!res.falloc_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode(inode, res.falloc_fattr);

	kfree(res.falloc_fattr);
	return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				loff_t offset, loff_t len)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	int err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}

int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

	inode_unlock(inode);
	return err;
}

int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);
	err = nfs_sync_inode(inode);
	if (err)
		goto out_unlock;

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0)
		truncate_pagecache_range(inode, offset, (offset + len) - 1);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
out_unlock:
	inode_unlock(inode);
	return err;
}

static int handle_async_copy(struct nfs42_copy_res *res,
			     struct nfs_server *dst_server,
			     struct nfs_server *src_server,
			     struct file *src,
			     struct file *dst,
			     nfs4_stateid *src_stateid,
			     bool *restart)
{
	struct nfs4_copy_state *copy, *tmp_copy;
	int status = NFS4_OK;
	bool found_pending = false;
	struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
	struct nfs_open_context *src_ctx = nfs_file_open_context(src);

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return -ENOMEM;

	spin_lock(&dst_server->nfs_client->cl_lock);
	list_for_each_entry(tmp_copy,
			    &dst_server->nfs_client->pending_cb_stateids,
			    copies) {
		if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
			   NFS4_STATEID_SIZE))
			continue;
		found_pending = true;
		list_del(&tmp_copy->copies);
		break;
	}
	if (found_pending) {
		spin_unlock(&dst_server->nfs_client->cl_lock);
		kfree(copy);
		copy = tmp_copy;
		goto out;
	}

	memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
	init_completion(&copy->completion);
	copy->parent_dst_state = dst_ctx->state;
	copy->parent_src_state = src_ctx->state;

	list_add_tail(&copy->copies, &dst_server->ss_copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);

	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_add_tail(&copy->src_copies, &src_server->ss_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}

	status = wait_for_completion_interruptible(&copy->completion);
	spin_lock(&dst_server->nfs_client->cl_lock);
	list_del_init(&copy->copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);
	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_del_init(&copy->src_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}
	if (status == -ERESTARTSYS) {
		goto out_cancel;
	} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
		status = -EAGAIN;
		*restart = true;
		goto out_cancel;
	}
out:
	res->write_res.count = copy->count;
	memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
	status = -copy->error;

out_free:
	kfree(copy);
	return status;
out_cancel:
	nfs42_do_offload_cancel_async(dst, &copy->stateid);
	if (!nfs42_files_from_same_server(src, dst))
		nfs42_do_offload_cancel_async(src, src_stateid);
	goto out_free;
}

static int process_copy_commit(struct file *dst, loff_t pos_dst,
			       struct nfs42_copy_res *res)
{
	struct nfs_commitres cres;
	int status = -ENOMEM;

	cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
	if (!cres.verf)
		goto out;

	status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
	if (status)
		goto out_free;
	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &cres.verf->verifier)) {
		dprintk("commit verf differs from copy verf\n");
		status = -EAGAIN;
	}
out_free:
	kfree(cres.verf);
out:
	return status;
}

static ssize_t _nfs42_proc_copy(struct file *src,
				struct nfs_lock_context *src_lock,
				struct file *dst,
				struct nfs_lock_context *dst_lock,
				struct nfs42_copy_args *args,
				struct nfs42_copy_res *res,
				struct nl4_server *nss,
				nfs4_stateid *cnr_stateid,
				bool *restart)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	struct inode *dst_inode = file_inode(dst);
	struct inode *src_inode = file_inode(src);
	struct nfs_server *dst_server = NFS_SERVER(dst_inode);
	struct nfs_server *src_server = NFS_SERVER(src_inode);
	loff_t pos_src = args->src_pos;
	loff_t pos_dst = args->dst_pos;
	size_t count = args->count;
	ssize_t status;

	if (nss) {
		args->cp_src = nss;
		nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
	} else {
		status = nfs4_set_rw_stateid(&args->src_stateid,
				src_lock->open_context, src_lock, FMODE_READ);
		if (status) {
			if (status == -EAGAIN)
				status = -NFS4ERR_BAD_STATEID;
			return status;
		}
	}
	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
			pos_src, pos_src + (loff_t)count - 1);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_sync_inode(dst_inode);
	if (status)
		return status;

	res->commit_res.verf = NULL;
	if (args->sync) {
		res->commit_res.verf =
			kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
		if (!res->commit_res.verf)
			return -ENOMEM;
	}
	set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
		&src_lock->open_context->state->flags);
	set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
		&dst_lock->open_context->state->flags);

	status = nfs4_call_sync(dst_server->client, dst_server, &msg,
				&args->seq_args, &res->seq_res, 0);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_COPY;
	if (status)
		goto out;

	if (args->sync &&
	    nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &res->commit_res.verf->verifier)) {
		status = -EAGAIN;
		goto out;
	}

	if (!res->synchronous) {
		status = handle_async_copy(res, dst_server, src_server, src,
					   dst, &args->src_stateid, restart);
		if (status)
			goto out;
	}

	if ((!res->synchronous || !args->sync) &&
	    res->write_res.verifier.committed != NFS_FILE_SYNC) {
		status = process_copy_commit(dst, pos_dst, res);
		if (status)
			goto out;
	}

	truncate_pagecache_range(dst_inode, pos_dst,
				 pos_dst + res->write_res.count);

	status = res->write_res.count;
out:
	if (args->sync)
		kfree(res->commit_res.verf);
	return status;
}

ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
			struct file *dst, loff_t pos_dst, size_t count,
			struct nl4_server *nss,
			nfs4_stateid *cnr_stateid, bool sync)
{
	struct nfs_server *server = NFS_SERVER(file_inode(dst));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs42_copy_args args = {
		.src_fh = NFS_FH(file_inode(src)),
		.src_pos = pos_src,
		.dst_fh = NFS_FH(file_inode(dst)),
		.dst_pos = pos_dst,
		.count = count,
		.sync = sync,
	};
	struct nfs42_copy_res res;
	struct nfs4_exception src_exception = {
		.inode = file_inode(src),
		.stateid = &args.src_stateid,
	};
	struct nfs4_exception dst_exception = {
		.inode = file_inode(dst),
		.stateid = &args.dst_stateid,
	};
	ssize_t err, err2;
	bool restart = false;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.state = dst_lock->open_context->state;

	do {
		inode_lock(file_inode(dst));
		err = _nfs42_proc_copy(src, src_lock,
				       dst, dst_lock,
				       &args, &res,
				       nss, cnr_stateid, &restart);
		inode_unlock(file_inode(dst));

		if (err >= 0)
			break;
		if (err == -ENOTSUPP &&
		    nfs42_files_from_same_server(src, dst)) {
			err = -EOPNOTSUPP;
			break;
		} else if (err == -EAGAIN) {
			if (!restart) {
				dst_exception.retry = 1;
				continue;
			}
			break;
		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
			args.sync = true;
			dst_exception.retry = 1;
			continue;
		} else if ((err == -ESTALE ||
			    err == -NFS4ERR_OFFLOAD_DENIED ||
			    err == -ENOTSUPP) &&
			   !nfs42_files_from_same_server(src, dst)) {
			nfs42_do_offload_cancel_async(src, &args.src_stateid);
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

struct nfs42_offloadcancel_data {
	struct nfs_server *seq_server;
	struct nfs42_offload_status_args args;
	struct nfs42_offload_status_res res;
};

static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    &data->args.osa_seq_args,
			    &data->res.osr_seq_res, task);
}

static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs41_sequence_done(task, &data->res.osr_seq_res);
	if (task->tk_status &&
	    nfs4_async_handle_error(task, data->seq_server, NULL,
				    NULL) == -EAGAIN)
		rpc_restart_call_prepare(task);
}

static void nfs42_free_offloadcancel_data(void *data)
{
	kfree(data);
}

static const struct rpc_call_ops nfs42_offload_cancel_ops = {
	.rpc_call_prepare = nfs42_offload_cancel_prepare,
	.rpc_call_done = nfs42_offload_cancel_done,
	.rpc_release = nfs42_free_offloadcancel_data,
};

static int nfs42_do_offload_cancel_async(struct file *dst,
					 nfs4_stateid *stateid)
{
	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
	struct nfs42_offloadcancel_data *data = NULL;
	struct nfs_open_context *ctx = nfs_file_open_context(dst);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = dst_server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_offload_cancel_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
		return -EOPNOTSUPP;

	data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;

	data->seq_server = dst_server;
	data->args.osa_src_fh = NFS_FH(file_inode(dst));
	memcpy(&data->args.osa_stateid, stateid,
	       sizeof(data->args.osa_stateid));
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
			   1, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
	rpc_put_task(task);
	return status;
}

static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
				   struct nfs42_copy_notify_args *args,
				   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	int status;
	struct nfs_open_context *ctx;
	struct nfs_lock_context *l_ctx;

	ctx = get_nfs_open_context(nfs_file_open_context(src));
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx))
		return PTR_ERR(l_ctx);

	status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
				     FMODE_READ);
	nfs_put_lock_context(l_ctx);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs4_call_sync(src_server->client, src_server, &msg,
				&args->cna_seq_args, &res->cnr_seq_res, 0);
	if (status == -ENOTSUPP)
		src_server->caps &= ~NFS_CAP_COPY_NOTIFY;

	put_nfs_open_context(nfs_file_open_context(src));
	return status;
}

int nfs42_proc_copy_notify(struct file *src, struct file *dst,
			   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct nfs42_copy_notify_args *args;
	struct nfs4_exception exception = {
		.inode = file_inode(src),
	};
	int status;

	if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
		return -EOPNOTSUPP;

	args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
	if (args == NULL)
		return -ENOMEM;

	args->cna_src_fh = NFS_FH(file_inode(src));
	args->cna_dst.nl4_type = NL4_NETADDR;
	nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
	exception.stateid = &args->cna_src_stateid;

	do {
		status = _nfs42_proc_copy_notify(src, dst, args, res);
		if (status == -ENOTSUPP) {
			status = -EOPNOTSUPP;
			goto out;
		}
		status = nfs4_handle_exception(src_server, status, &exception);
	} while (exception.retry);

out:
	kfree(args);
	return status;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
				 struct nfs_lock_context *lock, loff_t offset, int whence)
{
	struct inode *inode = file_inode(filep);
	struct nfs42_seek_args args = {
		.sa_fh = NFS_FH(inode),
		.sa_offset = offset,
		.sa_what = (whence == SEEK_HOLE) ?
				NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
	};
	struct nfs42_seek_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
		return -ENOTSUPP;

	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
				     lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
						  offset, LLONG_MAX);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_SEEK;
	if (status)
		return status;

	return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	loff_t err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_llseek(filep, lock, offset, whence);
		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}


static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
	}

	trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
			devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}

	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
	smp_mb__after_atomic();
	nfs_iput_and_deactive(data->inode);
	kfree(data->args.devinfo);
	kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
	.rpc_call_prepare = nfs42_layoutstat_prepare,
	.rpc_call_done = nfs42_layoutstat_done,
	.rpc_release = nfs42_layoutstat_release,
};

int nfs42_proc_layoutstats_generic(struct nfs_server *server,
				   struct nfs42_layoutstat_data *data)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_layoutstat_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;

	data->inode = nfs_igrab_and_active(data->args.inode);
	if (!data->inode) {
		nfs42_layoutstat_release(data);
		return -EAGAIN;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

static struct nfs42_layouterror_data *
nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
{
	struct nfs42_layouterror_data *data;
	struct inode *inode = lseg->pls_layout->plh_inode;

	data = kzalloc(sizeof(*data), gfp_flags);
	if (data) {
		data->args.inode = data->inode = nfs_igrab_and_active(inode);
		if (data->inode) {
			data->lseg = pnfs_get_lseg(lseg);
			if (data->lseg)
				return data;
			nfs_iput_and_deactive(data->inode);
		}
		kfree(data);
	}
	return NULL;
}

static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
	pnfs_put_lseg(data->lseg);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}

static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
	unsigned i;

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	for (i = 0; i < data->args.num_errors; i++)
		nfs4_stateid_copy(&data->args.errors[i].stateid,
				  &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.errors[0].stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.errors[0].stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.errors[0].stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
	}

	trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
			       task->tk_status);
}

static void
nfs42_layouterror_release(void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;

	nfs42_free_layouterror_data(data);
}

static const struct rpc_call_ops nfs42_layouterror_ops = {
	.rpc_call_prepare = nfs42_layouterror_prepare,
	.rpc_call_done = nfs42_layouterror_done,
	.rpc_release = nfs42_layouterror_release,
};

int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
			   const struct nfs42_layout_error *errors, size_t n)
{
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct nfs42_layouterror_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
	};
	struct rpc_task_setup task_setup = {
		.rpc_message = &msg,
		.callback_ops = &nfs42_layouterror_ops,
		.flags = RPC_TASK_ASYNC,
	};
	unsigned int i;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
		return -EOPNOTSUPP;
	if (n > NFS42_LAYOUTERROR_MAX)
		return -EINVAL;
	data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
	if (!data)
		return -ENOMEM;
	for (i = 0; i < n; i++) {
		data->args.errors[i] = errors[i];
		data->args.num_errors++;
		data->res.num_errors++;
	}
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup.callback_data = data;
	task_setup.rpc_client = NFS_SERVER(inode)->client;
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);

static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
			     struct file *dst_f, struct nfs_lock_context *src_lock,
			     struct nfs_lock_context *dst_lock, loff_t src_offset,
			     loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = server->cache_consistency_bitmask,
	};
	struct nfs42_clone_res res = {
		.server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
				     src_lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}
	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);

	kfree(res.dst_fattr);
	return status;
}

int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)

static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_removexattrargs args = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_removexattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret;
	unsigned long timestamp = jiffies;

	ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
			     &res.seq_res, 1);
	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
				const void *buf, size_t buflen, int flags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4XATTR_MAXPAGES];
	struct nfs42_setxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_pages = pages,
		.xattr_len = buflen,
		.xattr_name = name,
		.xattr_flags = flags,
	};
	struct nfs42_setxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int ret, np;
	unsigned long timestamp = jiffies;

	if (buflen > server->sxasize)
		return -ERANGE;

	if (buflen > 0) {
		np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
		if (np < 0)
			return np;
	} else
		np = 0;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 1);

	for (; np > 0; np--)
		put_page(pages[np - 1]);

	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
				    void *buf, size_t buflen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4XATTR_MAXPAGES] = {};
	struct nfs42_getxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_pages = pages,
		.xattr_len = buflen,
		.xattr_name = name,
	};
	struct nfs42_getxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int ret, np;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);
	if (ret < 0)
		return ret;

	/*
	 * Normally, the caching is done one layer up, but for successful
	 * RPCs, always cache the result here, even if the caller was
	 * just querying the length, or if the reply was too big for
	 * the caller. This avoids a second RPC in the case of the
	 * common query-alloc-retrieve cycle for xattrs.
	 *
	 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
	 */

	nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);

	if (buflen) {
		if (res.xattr_len > buflen)
			return -ERANGE;
		_copy_from_pages(buf, pages, 0, res.xattr_len);
	}

	np = DIV_ROUND_UP(res.xattr_len, PAGE_SIZE);
	while (--np >= 0)
		__free_page(pages[np]);

	return res.xattr_len;
}

static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
				      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page **pages;
	struct nfs42_listxattrsargs arg = {
		.fh = NFS_FH(inode),
		.cookie = *cookiep,
	};
	struct nfs42_listxattrsres res = {
		.eof = false,
		.xattr_buf = buf,
		.xattr_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	u32 xdrlen;
	int ret, np;

	res.scratch = alloc_page(GFP_KERNEL);
	if (!res.scratch)
		return -ENOMEM;

	xdrlen = nfs42_listxattr_xdrsize(buflen);
	if (xdrlen > server->lxasize)
		xdrlen = server->lxasize;
	np = xdrlen / PAGE_SIZE + 1;

	pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL) {
		__free_page(res.scratch);
		return -ENOMEM;
	}

	arg.xattr_pages = pages;
	arg.count = xdrlen;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);

	if (ret >= 0) {
		ret = res.copied;
		*cookiep = res.cookie;
		*eofp = res.eof;
	}

	while (--np >= 0) {
		if (pages[np])
			__free_page(pages[np]);
	}

	__free_page(res.scratch);
	kfree(pages);

	return ret;
}

ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
			    void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	ssize_t err;

	do {
		err = _nfs42_proc_getxattr(inode, name, buf, buflen);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}

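/*
 * Like nfs42_proc_getxattr() above, the wrappers below call their
 * _nfs42_proc_*() helpers and rely on nfs4_handle_exception() to decide,
 * via exception.retry, whether a failure should be retried or returned.
 */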
int nfs42_proc_setxattr(struct inode *inode, const char *name,
			const void *buf, size_t buflen, int flags)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}

ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
			      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs4_exception exception = { };
	ssize_t err;

	do {
		err = _nfs42_proc_listxattrs(inode, buf, buflen,
					     cookiep, eofp);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}

int nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_removexattr(inode, name);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}