// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
#include <linux/fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs42.h"
#include "iostat.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);

static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
	struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
	unsigned short port = 2049;

	rcu_read_lock();
	naddr->netid_len = scnprintf(naddr->netid,
				     sizeof(naddr->netid), "%s",
				     rpc_peeraddr2str(clp->cl_rpcclient,
						      RPC_DISPLAY_NETID));
	naddr->addr_len = scnprintf(naddr->addr,
				    sizeof(naddr->addr),
				    "%s.%u.%u",
				    rpc_peeraddr2str(clp->cl_rpcclient,
						     RPC_DISPLAY_ADDR),
				    port >> 8, port & 255);
	rcu_read_unlock();
}

static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_falloc_args args = {
		.falloc_fh = NFS_FH(inode),
		.falloc_offset = offset,
		.falloc_length = len,
		.falloc_bitmask = nfs4_fattr_bitmap,
	};
	struct nfs42_falloc_res res = {
		.falloc_server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
			lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	res.falloc_fattr = nfs_alloc_fattr();
	if (!res.falloc_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode(inode, res.falloc_fattr);

	kfree(res.falloc_fattr);
	return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				loff_t offset, loff_t len)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	int err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}

int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

	inode_unlock(inode);
	return err;
}

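/*
 * Punch a hole in the file with the DEALLOCATE operation. Dirty data is
 * flushed first so the server sees the file's current contents, and the
 * affected page cache range is truncated on success so later reads see
 * the hole. NFS_CAP_DEALLOCATE is cleared if the server turns out not to
 * support the operation.
 */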
int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);
	err = nfs_sync_inode(inode);
	if (err)
		goto out_unlock;

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0)
		truncate_pagecache_range(inode, offset, (offset + len) - 1);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
out_unlock:
	inode_unlock(inode);
	return err;
}

static int handle_async_copy(struct nfs42_copy_res *res,
			     struct nfs_server *dst_server,
			     struct nfs_server *src_server,
			     struct file *src,
			     struct file *dst,
			     nfs4_stateid *src_stateid,
			     bool *restart)
{
	struct nfs4_copy_state *copy, *tmp_copy;
	int status = NFS4_OK;
	bool found_pending = false;
	struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
	struct nfs_open_context *src_ctx = nfs_file_open_context(src);

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return -ENOMEM;

	spin_lock(&dst_server->nfs_client->cl_lock);
	list_for_each_entry(tmp_copy,
			    &dst_server->nfs_client->pending_cb_stateids,
			    copies) {
		if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
			   NFS4_STATEID_SIZE))
			continue;
		found_pending = true;
		list_del(&tmp_copy->copies);
		break;
	}
	if (found_pending) {
		spin_unlock(&dst_server->nfs_client->cl_lock);
		kfree(copy);
		copy = tmp_copy;
		goto out;
	}

	memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
	init_completion(&copy->completion);
	copy->parent_dst_state = dst_ctx->state;
	copy->parent_src_state = src_ctx->state;

	list_add_tail(&copy->copies, &dst_server->ss_copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);

	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_add_tail(&copy->src_copies, &src_server->ss_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}

	status = wait_for_completion_interruptible(&copy->completion);
	spin_lock(&dst_server->nfs_client->cl_lock);
	list_del_init(&copy->copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);
	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_del_init(&copy->src_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}
	if (status == -ERESTARTSYS) {
		goto out_cancel;
	} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
		status = -EAGAIN;
		*restart = true;
		goto out_cancel;
	}
out:
	res->write_res.count = copy->count;
	memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
	status = -copy->error;

out_free:
	kfree(copy);
	return status;
out_cancel:
	nfs42_do_offload_cancel_async(dst, &copy->stateid);
	if (!nfs42_files_from_same_server(src, dst))
		nfs42_do_offload_cancel_async(src, src_stateid);
	goto out_free;
}

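/*
 * After a copy that was not committed to stable storage, COMMIT the copied
 * range and compare the returned write verifier with the one from the COPY
 * reply. A mismatch suggests the server restarted, so -EAGAIN tells the
 * caller to redo the copy.
 */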
static int process_copy_commit(struct file *dst, loff_t pos_dst,
			       struct nfs42_copy_res *res)
{
	struct nfs_commitres cres;
	int status = -ENOMEM;

	cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
	if (!cres.verf)
		goto out;

	status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
	if (status)
		goto out_free;
	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &cres.verf->verifier)) {
		dprintk("commit verf differs from copy verf\n");
		status = -EAGAIN;
	}
out_free:
	kfree(cres.verf);
out:
	return status;
}

static ssize_t _nfs42_proc_copy(struct file *src,
				struct nfs_lock_context *src_lock,
				struct file *dst,
				struct nfs_lock_context *dst_lock,
				struct nfs42_copy_args *args,
				struct nfs42_copy_res *res,
				struct nl4_server *nss,
				nfs4_stateid *cnr_stateid,
				bool *restart)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	struct inode *dst_inode = file_inode(dst);
	struct inode *src_inode = file_inode(src);
	struct nfs_server *dst_server = NFS_SERVER(dst_inode);
	struct nfs_server *src_server = NFS_SERVER(src_inode);
	loff_t pos_src = args->src_pos;
	loff_t pos_dst = args->dst_pos;
	size_t count = args->count;
	ssize_t status;

	if (nss) {
		args->cp_src = nss;
		nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
	} else {
		status = nfs4_set_rw_stateid(&args->src_stateid,
				src_lock->open_context, src_lock, FMODE_READ);
		if (status) {
			if (status == -EAGAIN)
				status = -NFS4ERR_BAD_STATEID;
			return status;
		}
	}
	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
			pos_src, pos_src + (loff_t)count - 1);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_sync_inode(dst_inode);
	if (status)
		return status;

	res->commit_res.verf = NULL;
	if (args->sync) {
		res->commit_res.verf =
			kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
		if (!res->commit_res.verf)
			return -ENOMEM;
	}
	set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
		&src_lock->open_context->state->flags);
	set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
		&dst_lock->open_context->state->flags);

	status = nfs4_call_sync(dst_server->client, dst_server, &msg,
				&args->seq_args, &res->seq_res, 0);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_COPY;
	if (status)
		goto out;

	if (args->sync &&
	    nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &res->commit_res.verf->verifier)) {
		status = -EAGAIN;
		goto out;
	}

	if (!res->synchronous) {
		status = handle_async_copy(res, dst_server, src_server, src,
					   dst, &args->src_stateid, restart);
		if (status)
			goto out;
	}

	if ((!res->synchronous || !args->sync) &&
	    res->write_res.verifier.committed != NFS_FILE_SYNC) {
		status = process_copy_commit(dst, pos_dst, res);
		if (status)
			goto out;
	}

	truncate_pagecache_range(dst_inode, pos_dst,
				 pos_dst + res->write_res.count);
	spin_lock(&dst_inode->i_lock);
	NFS_I(dst_inode)->cache_validity |= (NFS_INO_REVAL_PAGECACHE |
			NFS_INO_REVAL_FORCED | NFS_INO_INVALID_SIZE |
			NFS_INO_INVALID_ATTR | NFS_INO_INVALID_DATA);
	spin_unlock(&dst_inode->i_lock);
	spin_lock(&src_inode->i_lock);
	NFS_I(src_inode)->cache_validity |= (NFS_INO_REVAL_PAGECACHE |
			NFS_INO_REVAL_FORCED | NFS_INO_INVALID_ATIME);
	spin_unlock(&src_inode->i_lock);
	status = res->write_res.count;
out:
	if (args->sync)
		kfree(res->commit_res.verf);
	return status;
}

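/*
 * Copy @count bytes from @src to @dst with the NFSv4.2 COPY operation,
 * retrying under the usual NFSv4 exception rules for both files. If the
 * server refuses an asynchronous copy with NFS4ERR_OFFLOAD_NO_REQS, the
 * copy is retried synchronously; -EOPNOTSUPP is returned when the servers
 * cannot perform the copy at all, so the caller can fall back to copying
 * through the client.
 */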
ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
			struct file *dst, loff_t pos_dst, size_t count,
			struct nl4_server *nss,
			nfs4_stateid *cnr_stateid, bool sync)
{
	struct nfs_server *server = NFS_SERVER(file_inode(dst));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs42_copy_args args = {
		.src_fh = NFS_FH(file_inode(src)),
		.src_pos = pos_src,
		.dst_fh = NFS_FH(file_inode(dst)),
		.dst_pos = pos_dst,
		.count = count,
		.sync = sync,
	};
	struct nfs42_copy_res res;
	struct nfs4_exception src_exception = {
		.inode = file_inode(src),
		.stateid = &args.src_stateid,
	};
	struct nfs4_exception dst_exception = {
		.inode = file_inode(dst),
		.stateid = &args.dst_stateid,
	};
	ssize_t err, err2;
	bool restart = false;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.state = dst_lock->open_context->state;

	do {
		inode_lock(file_inode(dst));
		err = _nfs42_proc_copy(src, src_lock,
				dst, dst_lock,
				&args, &res,
				nss, cnr_stateid, &restart);
		inode_unlock(file_inode(dst));

		if (err >= 0)
			break;
		if (err == -ENOTSUPP &&
		    nfs42_files_from_same_server(src, dst)) {
			err = -EOPNOTSUPP;
			break;
		} else if (err == -EAGAIN) {
			if (!restart) {
				dst_exception.retry = 1;
				continue;
			}
			break;
		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
			args.sync = true;
			dst_exception.retry = 1;
			continue;
		} else if ((err == -ESTALE ||
			    err == -NFS4ERR_OFFLOAD_DENIED ||
			    err == -ENOTSUPP) &&
			   !nfs42_files_from_same_server(src, dst)) {
			nfs42_do_offload_cancel_async(src, &args.src_stateid);
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

struct nfs42_offloadcancel_data {
	struct nfs_server *seq_server;
	struct nfs42_offload_status_args args;
	struct nfs42_offload_status_res res;
};

static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    &data->args.osa_seq_args,
			    &data->res.osr_seq_res, task);
}

static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs41_sequence_done(task, &data->res.osr_seq_res);
	if (task->tk_status &&
	    nfs4_async_handle_error(task, data->seq_server, NULL,
				    NULL) == -EAGAIN)
		rpc_restart_call_prepare(task);
}

static void nfs42_free_offloadcancel_data(void *data)
{
	kfree(data);
}

static const struct rpc_call_ops nfs42_offload_cancel_ops = {
	.rpc_call_prepare = nfs42_offload_cancel_prepare,
	.rpc_call_done = nfs42_offload_cancel_done,
	.rpc_release = nfs42_free_offloadcancel_data,
};

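/*
 * Send an asynchronous OFFLOAD_CANCEL for @stateid to the destination
 * server, used when a pending copy is interrupted or must be restarted.
 * NFS_CAP_OFFLOAD_CANCEL is cleared if the server does not support it.
 */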
static int nfs42_do_offload_cancel_async(struct file *dst,
					 nfs4_stateid *stateid)
{
	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
	struct nfs42_offloadcancel_data *data = NULL;
	struct nfs_open_context *ctx = nfs_file_open_context(dst);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = dst_server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_offload_cancel_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
		return -EOPNOTSUPP;

	data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;

	data->seq_server = dst_server;
	data->args.osa_src_fh = NFS_FH(file_inode(dst));
	memcpy(&data->args.osa_stateid, stateid,
	       sizeof(data->args.osa_stateid));
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
			   1, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
	rpc_put_task(task);
	return status;
}

static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
				   struct nfs42_copy_notify_args *args,
				   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	int status;
	struct nfs_open_context *ctx;
	struct nfs_lock_context *l_ctx;

	ctx = get_nfs_open_context(nfs_file_open_context(src));
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		/* Drop the reference taken by get_nfs_open_context() above */
		status = PTR_ERR(l_ctx);
		put_nfs_open_context(ctx);
		return status;
	}

	status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
				     FMODE_READ);
	nfs_put_lock_context(l_ctx);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs4_call_sync(src_server->client, src_server, &msg,
				&args->cna_seq_args, &res->cnr_seq_res, 0);
	if (status == -ENOTSUPP)
		src_server->caps &= ~NFS_CAP_COPY_NOTIFY;

	put_nfs_open_context(nfs_file_open_context(src));
	return status;
}

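/*
 * COPY_NOTIFY: ask the source server to allow a server-to-server copy to
 * the destination's network address. The reply in @res (including the
 * copy-notify stateid) is then used to drive an inter-server
 * nfs42_proc_copy().
 */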
int nfs42_proc_copy_notify(struct file *src, struct file *dst,
			   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct nfs42_copy_notify_args *args;
	struct nfs4_exception exception = {
		.inode = file_inode(src),
	};
	int status;

	if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
		return -EOPNOTSUPP;

	args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
	if (args == NULL)
		return -ENOMEM;

	args->cna_src_fh = NFS_FH(file_inode(src));
	args->cna_dst.nl4_type = NL4_NETADDR;
	nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
	exception.stateid = &args->cna_src_stateid;

	do {
		status = _nfs42_proc_copy_notify(src, dst, args, res);
		if (status == -ENOTSUPP) {
			status = -EOPNOTSUPP;
			goto out;
		}
		status = nfs4_handle_exception(src_server, status, &exception);
	} while (exception.retry);

out:
	kfree(args);
	return status;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, int whence)
{
	struct inode *inode = file_inode(filep);
	struct nfs42_seek_args args = {
		.sa_fh = NFS_FH(inode),
		.sa_offset = offset,
		.sa_what = (whence == SEEK_HOLE) ?
				NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
	};
	struct nfs42_seek_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
		return -ENOTSUPP;

	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
			lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
			offset, LLONG_MAX);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_SEEK;
	if (status)
		return status;

	return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	loff_t err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_llseek(filep, lock, offset, whence);
		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}

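/*
 * LAYOUTSTATS is sent as an asynchronous RPC; the prepare callback
 * snapshots the current layout stateid (or exits the call early if the
 * layout is no longer valid) before starting the session sequence.
 */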
static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
	}

	trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
			devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}

	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
	smp_mb__after_atomic();
	nfs_iput_and_deactive(data->inode);
	kfree(data->args.devinfo);
	kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
	.rpc_call_prepare = nfs42_layoutstat_prepare,
	.rpc_call_done = nfs42_layoutstat_done,
	.rpc_release = nfs42_layoutstat_release,
};

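/*
 * Start an asynchronous LAYOUTSTATS call for @data. An active reference
 * on the inode is taken here and dropped by the release callback, which
 * also clears NFS_INO_LAYOUTSTATS and frees the per-device info.
 */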
int nfs42_proc_layoutstats_generic(struct nfs_server *server,
				   struct nfs42_layoutstat_data *data)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_layoutstat_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;

	data->inode = nfs_igrab_and_active(data->args.inode);
	if (!data->inode) {
		nfs42_layoutstat_release(data);
		return -EAGAIN;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

static struct nfs42_layouterror_data *
nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
{
	struct nfs42_layouterror_data *data;
	struct inode *inode = lseg->pls_layout->plh_inode;

	data = kzalloc(sizeof(*data), gfp_flags);
	if (data) {
		data->args.inode = data->inode = nfs_igrab_and_active(inode);
		if (data->inode) {
			data->lseg = pnfs_get_lseg(lseg);
			if (data->lseg)
				return data;
			nfs_iput_and_deactive(data->inode);
		}
		kfree(data);
	}
	return NULL;
}

static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
	pnfs_put_lseg(data->lseg);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}

static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
	unsigned i;

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	for (i = 0; i < data->args.num_errors; i++)
		nfs4_stateid_copy(&data->args.errors[i].stateid,
				  &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.errors[0].stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.errors[0].stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.errors[0].stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
	}

	trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
			       task->tk_status);
}

static void
nfs42_layouterror_release(void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;

	nfs42_free_layouterror_data(data);
}

static const struct rpc_call_ops nfs42_layouterror_ops = {
	.rpc_call_prepare = nfs42_layouterror_prepare,
	.rpc_call_done = nfs42_layouterror_done,
	.rpc_release = nfs42_layouterror_release,
};

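/*
 * Report up to NFS42_LAYOUTERROR_MAX errors for @lseg via an asynchronous
 * LAYOUTERROR call. The stateid in each error is filled in at transmit
 * time from the layout's current stateid.
 */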
int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
			   const struct nfs42_layout_error *errors, size_t n)
{
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct nfs42_layouterror_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
	};
	struct rpc_task_setup task_setup = {
		.rpc_message = &msg,
		.callback_ops = &nfs42_layouterror_ops,
		.flags = RPC_TASK_ASYNC,
	};
	unsigned int i;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
		return -EOPNOTSUPP;
	if (n > NFS42_LAYOUTERROR_MAX)
		return -EINVAL;
	data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
	if (!data)
		return -ENOMEM;
	for (i = 0; i < n; i++) {
		data->args.errors[i] = errors[i];
		data->args.num_errors++;
		data->res.num_errors++;
	}
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup.callback_data = data;
	task_setup.rpc_client = NFS_SERVER(inode)->client;
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);

static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
		struct file *dst_f, struct nfs_lock_context *src_lock,
		struct nfs_lock_context *dst_lock, loff_t src_offset,
		loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = server->cache_consistency_bitmask,
	};
	struct nfs42_clone_res res = {
		.server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
			src_lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}
	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
			dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);

	kfree(res.dst_fattr);
	return status;
}

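/*
 * Clone a range from @src_f into @dst_f with the CLONE operation,
 * retrying under the usual NFSv4 exception handling for both the source
 * and destination state. NFS_CAP_CLONE is cleared if the server does not
 * support CLONE.
 */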
int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)

static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_removexattrargs args = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_removexattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret;
	unsigned long timestamp = jiffies;

	ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
			     &res.seq_res, 1);
	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

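/*
 * SETXATTR: the attribute value is sent in page-sized chunks. Values
 * larger than the server's advertised maximum are rejected with -ERANGE,
 * and the cached change attribute is updated on success.
 */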
static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
				const void *buf, size_t buflen, int flags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4XATTR_MAXPAGES];
	struct nfs42_setxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_pages = pages,
		.xattr_len = buflen,
		.xattr_name = name,
		.xattr_flags = flags,
	};
	struct nfs42_setxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int ret, np;
	unsigned long timestamp = jiffies;

	if (buflen > server->sxasize)
		return -ERANGE;

	if (buflen > 0) {
		np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
		if (np < 0)
			return np;
	} else
		np = 0;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 1);

	for (; np > 0; np--)
		put_page(pages[np - 1]);

	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
				void *buf, size_t buflen, struct page **pages,
				size_t plen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_getxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_getxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	ssize_t ret;

	arg.xattr_len = plen;
	arg.xattr_pages = pages;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);
	if (ret < 0)
		return ret;

	/*
	 * Normally, the caching is done one layer up, but for successful
	 * RPCS, always cache the result here, even if the caller was
	 * just querying the length, or if the reply was too big for
	 * the caller. This avoids a second RPC in the case of the
	 * common query-alloc-retrieve cycle for xattrs.
	 *
	 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
	 */

	nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);

	if (buflen) {
		if (res.xattr_len > buflen)
			return -ERANGE;
		_copy_from_pages(buf, pages, 0, res.xattr_len);
	}

	return res.xattr_len;
}

static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
				size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page **pages;
	struct nfs42_listxattrsargs arg = {
		.fh = NFS_FH(inode),
		.cookie = *cookiep,
	};
	struct nfs42_listxattrsres res = {
		.eof = false,
		.xattr_buf = buf,
		.xattr_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	u32 xdrlen;
	int ret, np, i;

	ret = -ENOMEM;
	res.scratch = alloc_page(GFP_KERNEL);
	if (!res.scratch)
		goto out;

	xdrlen = nfs42_listxattr_xdrsize(buflen);
	if (xdrlen > server->lxasize)
		xdrlen = server->lxasize;
	np = xdrlen / PAGE_SIZE + 1;

	pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_free_scratch;
	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free_pages;
	}

	arg.xattr_pages = pages;
	arg.count = xdrlen;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);

	if (ret >= 0) {
		ret = res.copied;
		*cookiep = res.cookie;
		*eofp = res.eof;
	}

out_free_pages:
	while (--np >= 0) {
		if (pages[np])
			__free_page(pages[np]);
	}
	kfree(pages);
out_free_scratch:
	__free_page(res.scratch);
out:
	return ret;
}

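/*
 * GETXATTR: allocate enough pages up front for the largest value the
 * caller could get back (XATTR_SIZE_MAX when only probing the length),
 * so the reply can be cached even when it does not fit in the caller's
 * buffer.
 */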
ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
			    void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	ssize_t err, np, i;
	struct page **pages;

	np = nfs_page_array_len(0, buflen ?: XATTR_SIZE_MAX);
	pages = kmalloc_array(np, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			/* Free only the pages that were actually allocated */
			np = i;
			err = -ENOMEM;
			goto out;
		}
	}

	/*
	 * The GETXATTR op has no length field in the call, and the
	 * xattr data is at the end of the reply.
	 *
	 * There is no downside in using the page-aligned length. It will
	 * allow receiving and caching xattrs that are too large for the
	 * caller but still fit in the page-rounded value.
	 */
	do {
		err = _nfs42_proc_getxattr(inode, name, buf, buflen,
			pages, np * PAGE_SIZE);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
				&exception);
	} while (exception.retry);

out:
	while (--np >= 0)
		__free_page(pages[np]);
	kfree(pages);

	return err;
}

int nfs42_proc_setxattr(struct inode *inode, const char *name,
			const void *buf, size_t buflen, int flags)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
				&exception);
	} while (exception.retry);

	return err;
}

ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
			      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs4_exception exception = { };
	ssize_t err;

	do {
		err = _nfs42_proc_listxattrs(inode, buf, buflen,
			cookiep, eofp);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
				&exception);
	} while (exception.retry);

	return err;
}

int nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_removexattr(inode, name);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
				&exception);
	} while (exception.retry);

	return err;
}