// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
#include <linux/fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs42.h"
#include "iostat.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);

static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
	struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
	unsigned short port = 2049;

	rcu_read_lock();
	naddr->netid_len = scnprintf(naddr->netid,
				     sizeof(naddr->netid), "%s",
				     rpc_peeraddr2str(clp->cl_rpcclient,
						      RPC_DISPLAY_NETID));
	naddr->addr_len = scnprintf(naddr->addr,
				    sizeof(naddr->addr),
				    "%s.%u.%u",
				    rpc_peeraddr2str(clp->cl_rpcclient,
						     RPC_DISPLAY_ADDR),
				    port >> 8, port & 255);
	rcu_read_unlock();
}

static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				 struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	u32 bitmask[3];
	struct nfs42_falloc_args args = {
		.falloc_fh = NFS_FH(inode),
		.falloc_offset = offset,
		.falloc_length = len,
		.falloc_bitmask = bitmask,
	};
	struct nfs42_falloc_res res = {
		.falloc_server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
				     lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	memcpy(bitmask, server->cache_consistency_bitmask, sizeof(bitmask));
	if (server->attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)
		bitmask[1] |= FATTR4_WORD1_SPACE_USED;

	res.falloc_fattr = nfs_alloc_fattr();
	if (!res.falloc_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode_force_wcc(inode,
							    res.falloc_fattr);

	kfree(res.falloc_fattr);
	return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	int err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = inode;
	exception.state = lock->open_context->state;

	err = nfs_sync_inode(inode);
	if (err)
		goto out;

	do {
		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	nfs_put_lock_context(lock);
	return err;
}

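/**
 * nfs42_proc_allocate - preallocate a range in a file via NFSv4.2 ALLOCATE
 * @filep: file to allocate space for
 * @offset: start of the range
 * @len: length of the range
 *
 * Takes the inode lock around the operation.  If the server reports that
 * ALLOCATE is unsupported, the NFS_CAP_ALLOCATE capability is cleared so
 * that later callers fail fast with -EOPNOTSUPP.
 */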
int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

	inode_unlock(inode);
	return err;
}

int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0)
		truncate_pagecache_range(inode, offset, (offset + len) - 1);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;

	inode_unlock(inode);
	return err;
}

static int handle_async_copy(struct nfs42_copy_res *res,
			     struct nfs_server *dst_server,
			     struct nfs_server *src_server,
			     struct file *src,
			     struct file *dst,
			     nfs4_stateid *src_stateid,
			     bool *restart)
{
	struct nfs4_copy_state *copy, *tmp_copy;
	int status = NFS4_OK;
	bool found_pending = false;
	struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
	struct nfs_open_context *src_ctx = nfs_file_open_context(src);

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return -ENOMEM;

	spin_lock(&dst_server->nfs_client->cl_lock);
	list_for_each_entry(tmp_copy,
			    &dst_server->nfs_client->pending_cb_stateids,
			    copies) {
		if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
			   NFS4_STATEID_SIZE))
			continue;
		found_pending = true;
		list_del(&tmp_copy->copies);
		break;
	}
	if (found_pending) {
		spin_unlock(&dst_server->nfs_client->cl_lock);
		kfree(copy);
		copy = tmp_copy;
		goto out;
	}

	memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
	init_completion(&copy->completion);
	copy->parent_dst_state = dst_ctx->state;
	copy->parent_src_state = src_ctx->state;

	list_add_tail(&copy->copies, &dst_server->ss_copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);

	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_add_tail(&copy->src_copies, &src_server->ss_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}

	status = wait_for_completion_interruptible(&copy->completion);
	spin_lock(&dst_server->nfs_client->cl_lock);
	list_del_init(&copy->copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);
	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_del_init(&copy->src_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}
	if (status == -ERESTARTSYS) {
		goto out_cancel;
	} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
		status = -EAGAIN;
		*restart = true;
		goto out_cancel;
	}
out:
	res->write_res.count = copy->count;
	memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
	status = -copy->error;

out_free:
	kfree(copy);
	return status;
out_cancel:
	nfs42_do_offload_cancel_async(dst, &copy->stateid);
	if (!nfs42_files_from_same_server(src, dst))
		nfs42_do_offload_cancel_async(src, src_stateid);
	goto out_free;
}

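/*
 * After an asynchronous COPY completes with an unstable write verifier,
 * commit the copied range on the destination and compare the COPY verifier
 * with the COMMIT verifier.  A mismatch means the data may not have been
 * made durable, so return -EAGAIN and let the caller redrive the copy.
 */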
static int process_copy_commit(struct file *dst, loff_t pos_dst,
			       struct nfs42_copy_res *res)
{
	struct nfs_commitres cres;
	int status = -ENOMEM;

	cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
	if (!cres.verf)
		goto out;

	status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
	if (status)
		goto out_free;
	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &cres.verf->verifier)) {
		dprintk("commit verf differs from copy verf\n");
		status = -EAGAIN;
	}
out_free:
	kfree(cres.verf);
out:
	return status;
}

/**
 * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
 * @inode: pointer to destination inode
 * @pos: destination offset
 * @len: copy length
 *
 * Punch a hole in the inode page cache, so that the NFS client will
 * know to retrieve new data.
 * Update the file size if necessary, and then mark the inode as having
 * invalid cached values for change attribute, ctime, mtime and space used.
 */
static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
{
	loff_t newsize = pos + len;
	loff_t end = newsize - 1;

	truncate_pagecache_range(inode, pos, end);
	spin_lock(&inode->i_lock);
	if (newsize > i_size_read(inode))
		i_size_write(inode, newsize);
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_MTIME |
				     NFS_INO_INVALID_BLOCKS);
	spin_unlock(&inode->i_lock);
}

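/*
 * Issue a single COPY request.  Dirty source pages are flushed and the
 * destination inode is synced first so that the offloaded copy sees stable
 * data.  Asynchronous copies are handed off to handle_async_copy(), and
 * results that are not NFS_FILE_SYNC are committed via process_copy_commit()
 * before the byte count is returned.
 */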
static ssize_t _nfs42_proc_copy(struct file *src,
				struct nfs_lock_context *src_lock,
				struct file *dst,
				struct nfs_lock_context *dst_lock,
				struct nfs42_copy_args *args,
				struct nfs42_copy_res *res,
				struct nl4_server *nss,
				nfs4_stateid *cnr_stateid,
				bool *restart)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	struct inode *dst_inode = file_inode(dst);
	struct inode *src_inode = file_inode(src);
	struct nfs_server *dst_server = NFS_SERVER(dst_inode);
	struct nfs_server *src_server = NFS_SERVER(src_inode);
	loff_t pos_src = args->src_pos;
	loff_t pos_dst = args->dst_pos;
	size_t count = args->count;
	ssize_t status;

	if (nss) {
		args->cp_src = nss;
		nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
	} else {
		status = nfs4_set_rw_stateid(&args->src_stateid,
				src_lock->open_context, src_lock, FMODE_READ);
		if (status) {
			if (status == -EAGAIN)
				status = -NFS4ERR_BAD_STATEID;
			return status;
		}
	}
	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
			pos_src, pos_src + (loff_t)count - 1);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_sync_inode(dst_inode);
	if (status)
		return status;

	res->commit_res.verf = NULL;
	if (args->sync) {
		res->commit_res.verf =
			kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
		if (!res->commit_res.verf)
			return -ENOMEM;
	}
	set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
		&src_lock->open_context->state->flags);
	set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
		&dst_lock->open_context->state->flags);

	status = nfs4_call_sync(dst_server->client, dst_server, &msg,
				&args->seq_args, &res->seq_res, 0);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_COPY;
	if (status)
		goto out;

	if (args->sync &&
	    nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &res->commit_res.verf->verifier)) {
		status = -EAGAIN;
		goto out;
	}

	if (!res->synchronous) {
		status = handle_async_copy(res, dst_server, src_server, src,
					   dst, &args->src_stateid, restart);
		if (status)
			goto out;
	}

	if ((!res->synchronous || !args->sync) &&
	    res->write_res.verifier.committed != NFS_FILE_SYNC) {
		status = process_copy_commit(dst, pos_dst, res);
		if (status)
			goto out;
	}

	nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
	nfs_invalidate_atime(src_inode);
	status = res->write_res.count;
out:
	if (args->sync)
		kfree(res->commit_res.verf);
	return status;
}

ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
			struct file *dst, loff_t pos_dst, size_t count,
			struct nl4_server *nss,
			nfs4_stateid *cnr_stateid, bool sync)
{
	struct nfs_server *server = NFS_SERVER(file_inode(dst));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs42_copy_args args = {
		.src_fh = NFS_FH(file_inode(src)),
		.src_pos = pos_src,
		.dst_fh = NFS_FH(file_inode(dst)),
		.dst_pos = pos_dst,
		.count = count,
		.sync = sync,
	};
	struct nfs42_copy_res res;
	struct nfs4_exception src_exception = {
		.inode = file_inode(src),
		.stateid = &args.src_stateid,
	};
	struct nfs4_exception dst_exception = {
		.inode = file_inode(dst),
		.stateid = &args.dst_stateid,
	};
	ssize_t err, err2;
	bool restart = false;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.state = dst_lock->open_context->state;

	do {
		inode_lock(file_inode(dst));
		err = _nfs42_proc_copy(src, src_lock,
				       dst, dst_lock,
				       &args, &res,
				       nss, cnr_stateid, &restart);
		inode_unlock(file_inode(dst));

		if (err >= 0)
			break;
		if (err == -ENOTSUPP &&
		    nfs42_files_from_same_server(src, dst)) {
			err = -EOPNOTSUPP;
			break;
		} else if (err == -EAGAIN) {
			if (!restart) {
				dst_exception.retry = 1;
				continue;
			}
			break;
		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
			args.sync = true;
			dst_exception.retry = 1;
			continue;
		} else if ((err == -ESTALE ||
			    err == -NFS4ERR_OFFLOAD_DENIED ||
			    err == -ENOTSUPP) &&
			   !nfs42_files_from_same_server(src, dst)) {
			nfs42_do_offload_cancel_async(src, &args.src_stateid);
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

struct nfs42_offloadcancel_data {
	struct nfs_server *seq_server;
	struct nfs42_offload_status_args args;
	struct nfs42_offload_status_res res;
};

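/*
 * OFFLOAD_CANCEL is sent as an asynchronous RPC task: the prepare callback
 * sets up the NFSv4.1 session sequence, the done callback retries on
 * transient session errors, and the release callback frees the call data.
 */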
static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    &data->args.osa_seq_args,
			    &data->res.osr_seq_res, task);
}

static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs41_sequence_done(task, &data->res.osr_seq_res);
	if (task->tk_status &&
	    nfs4_async_handle_error(task, data->seq_server, NULL,
				    NULL) == -EAGAIN)
		rpc_restart_call_prepare(task);
}

static void nfs42_free_offloadcancel_data(void *data)
{
	kfree(data);
}

static const struct rpc_call_ops nfs42_offload_cancel_ops = {
	.rpc_call_prepare = nfs42_offload_cancel_prepare,
	.rpc_call_done = nfs42_offload_cancel_done,
	.rpc_release = nfs42_free_offloadcancel_data,
};

static int nfs42_do_offload_cancel_async(struct file *dst,
					 nfs4_stateid *stateid)
{
	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
	struct nfs42_offloadcancel_data *data = NULL;
	struct nfs_open_context *ctx = nfs_file_open_context(dst);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = dst_server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_offload_cancel_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
		return -EOPNOTSUPP;

	data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;

	data->seq_server = dst_server;
	data->args.osa_src_fh = NFS_FH(file_inode(dst));
	memcpy(&data->args.osa_stateid, stateid,
	       sizeof(data->args.osa_stateid));
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
			   1, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
	rpc_put_task(task);
	return status;
}

static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
				   struct nfs42_copy_notify_args *args,
				   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	int status;
	struct nfs_open_context *ctx;
	struct nfs_lock_context *l_ctx;

	ctx = get_nfs_open_context(nfs_file_open_context(src));
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx))
		return PTR_ERR(l_ctx);

	status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
				     FMODE_READ);
	nfs_put_lock_context(l_ctx);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs4_call_sync(src_server->client, src_server, &msg,
				&args->cna_seq_args, &res->cnr_seq_res, 0);
	if (status == -ENOTSUPP)
		src_server->caps &= ~NFS_CAP_COPY_NOTIFY;

	put_nfs_open_context(nfs_file_open_context(src));
	return status;
}

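/**
 * nfs42_proc_copy_notify - ask the source server to allow an offloaded copy
 * @src: source file for the intended inter-server copy
 * @dst: destination file, whose address is sent as the COPY_NOTIFY target
 * @res: result, including the stateid to use for the subsequent COPY
 *
 * Sends COPY_NOTIFY to the source server, retrying recoverable NFSv4
 * errors.  Returns -EOPNOTSUPP if the source server does not support the
 * operation.
 */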
int nfs42_proc_copy_notify(struct file *src, struct file *dst,
			   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct nfs42_copy_notify_args *args;
	struct nfs4_exception exception = {
		.inode = file_inode(src),
	};
	int status;

	if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
		return -EOPNOTSUPP;

	args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
	if (args == NULL)
		return -ENOMEM;

	args->cna_src_fh = NFS_FH(file_inode(src));
	args->cna_dst.nl4_type = NL4_NETADDR;
	nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
	exception.stateid = &args->cna_src_stateid;

	do {
		status = _nfs42_proc_copy_notify(src, dst, args, res);
		if (status == -ENOTSUPP) {
			status = -EOPNOTSUPP;
			goto out;
		}
		status = nfs4_handle_exception(src_server, status, &exception);
	} while (exception.retry);

out:
	kfree(args);
	return status;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
				 struct nfs_lock_context *lock, loff_t offset, int whence)
{
	struct inode *inode = file_inode(filep);
	struct nfs42_seek_args args = {
		.sa_fh = NFS_FH(inode),
		.sa_offset = offset,
		.sa_what = (whence == SEEK_HOLE) ?
			NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
	};
	struct nfs42_seek_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
		return -ENOTSUPP;

	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
				     lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
						  offset, LLONG_MAX);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_SEEK;
	if (status)
		return status;

	if (whence == SEEK_DATA && res.sr_eof)
		return -NFS4ERR_NXIO;
	else
		return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	loff_t err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_llseek(filep, lock, offset, whence);
		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}

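/*
 * LAYOUTSTATS is sent as an asynchronous RPC.  The prepare callback copies
 * the current layout stateid (bailing out if the layout is no longer
 * valid), and the done callback tears down or retries the layout state
 * depending on which NFS4ERR_* the server returned.
 */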
static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
	}

	trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
			devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}

	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
	smp_mb__after_atomic();
	nfs_iput_and_deactive(data->inode);
	kfree(data->args.devinfo);
	kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
	.rpc_call_prepare = nfs42_layoutstat_prepare,
	.rpc_call_done = nfs42_layoutstat_done,
	.rpc_release = nfs42_layoutstat_release,
};

int nfs42_proc_layoutstats_generic(struct nfs_server *server,
				   struct nfs42_layoutstat_data *data)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_layoutstat_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;

	data->inode = nfs_igrab_and_active(data->args.inode);
	if (!data->inode) {
		nfs42_layoutstat_release(data);
		return -EAGAIN;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

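/*
 * LAYOUTERROR uses the same asynchronous pattern as LAYOUTSTATS.  The
 * helpers below allocate and free the per-call data, taking references on
 * the inode and the layout segment for the lifetime of the RPC.
 */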
static struct nfs42_layouterror_data *
nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
{
	struct nfs42_layouterror_data *data;
	struct inode *inode = lseg->pls_layout->plh_inode;

	data = kzalloc(sizeof(*data), gfp_flags);
	if (data) {
		data->args.inode = data->inode = nfs_igrab_and_active(inode);
		if (data->inode) {
			data->lseg = pnfs_get_lseg(lseg);
			if (data->lseg)
				return data;
			nfs_iput_and_deactive(data->inode);
		}
		kfree(data);
	}
	return NULL;
}

static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
	pnfs_put_lseg(data->lseg);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}

static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
	unsigned i;

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	for (i = 0; i < data->args.num_errors; i++)
		nfs4_stateid_copy(&data->args.errors[i].stateid,
				  &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

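/*
 * Completion handling mirrors nfs42_layoutstat_done(): stale or revoked
 * stateids invalidate the layout, NFS4ERR_OLD_STATEID triggers a resend
 * with the current stateid, and an unsupported operation clears
 * NFS_CAP_LAYOUTERROR.
 */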
static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.errors[0].stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.errors[0].stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.errors[0].stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
	}

	trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
			       task->tk_status);
}

static void
nfs42_layouterror_release(void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;

	nfs42_free_layouterror_data(data);
}

static const struct rpc_call_ops nfs42_layouterror_ops = {
	.rpc_call_prepare = nfs42_layouterror_prepare,
	.rpc_call_done = nfs42_layouterror_done,
	.rpc_release = nfs42_layouterror_release,
};

int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
			   const struct nfs42_layout_error *errors, size_t n)
{
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct nfs42_layouterror_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
	};
	struct rpc_task_setup task_setup = {
		.rpc_message = &msg,
		.callback_ops = &nfs42_layouterror_ops,
		.flags = RPC_TASK_ASYNC,
	};
	unsigned int i;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
		return -EOPNOTSUPP;
	if (n > NFS42_LAYOUTERROR_MAX)
		return -EINVAL;
	data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
	if (!data)
		return -ENOMEM;
	for (i = 0; i < n; i++) {
		data->args.errors[i] = errors[i];
		data->args.num_errors++;
		data->res.num_errors++;
	}
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup.callback_data = data;
	task_setup.rpc_client = NFS_SERVER(inode)->client;
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);

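/*
 * CLONE shares the inode-cache handling with COPY: on success the cloned
 * range is purged from the destination page cache via nfs42_copy_dest_done()
 * and the destination attributes are refreshed from the post-op fattr.
 */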
static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
			     struct file *dst_f, struct nfs_lock_context *src_lock,
			     struct nfs_lock_context *dst_lock, loff_t src_offset,
			     loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = server->cache_consistency_bitmask,
	};
	struct nfs42_clone_res res = {
		.server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
				     src_lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}
	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0) {
		nfs42_copy_dest_done(dst_inode, dst_offset, count);
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
	}

	kfree(res.dst_fattr);
	return status;
}

int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)

static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_removexattrargs args = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_removexattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret;
	unsigned long timestamp = jiffies;

	ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
			     &res.seq_res, 1);
	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

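/*
 * SETXATTR passes the attribute value to the XDR layer as a page array
 * built by nfs4_buf_to_pages_noslab().  Values larger than the server's
 * advertised maximum (server->sxasize) are rejected up front with -ERANGE.
 */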
static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
				const void *buf, size_t buflen, int flags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4XATTR_MAXPAGES];
	struct nfs42_setxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_pages = pages,
		.xattr_len = buflen,
		.xattr_name = name,
		.xattr_flags = flags,
	};
	struct nfs42_setxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int ret, np;
	unsigned long timestamp = jiffies;

	if (buflen > server->sxasize)
		return -ERANGE;

	if (buflen > 0) {
		np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
		if (np < 0)
			return np;
	} else
		np = 0;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 1);

	for (; np > 0; np--)
		put_page(pages[np - 1]);

	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
				    void *buf, size_t buflen, struct page **pages,
				    size_t plen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_getxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_getxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	ssize_t ret;

	arg.xattr_len = plen;
	arg.xattr_pages = pages;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);
	if (ret < 0)
		return ret;

	/*
	 * Normally, the caching is done one layer up, but for successful
	 * RPCs, always cache the result here, even if the caller was
	 * just querying the length, or if the reply was too big for
	 * the caller. This avoids a second RPC in the case of the
	 * common query-alloc-retrieve cycle for xattrs.
	 *
	 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
	 */

	nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);

	if (buflen) {
		if (res.xattr_len > buflen)
			return -ERANGE;
		_copy_from_pages(buf, pages, 0, res.xattr_len);
	}

	return res.xattr_len;
}

static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
				      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page **pages;
	struct nfs42_listxattrsargs arg = {
		.fh = NFS_FH(inode),
		.cookie = *cookiep,
	};
	struct nfs42_listxattrsres res = {
		.eof = false,
		.xattr_buf = buf,
		.xattr_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	u32 xdrlen;
	int ret, np, i;

	ret = -ENOMEM;
	res.scratch = alloc_page(GFP_KERNEL);
	if (!res.scratch)
		goto out;

	xdrlen = nfs42_listxattr_xdrsize(buflen);
	if (xdrlen > server->lxasize)
		xdrlen = server->lxasize;
	np = xdrlen / PAGE_SIZE + 1;

	pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_free_scratch;
	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free_pages;
	}

	arg.xattr_pages = pages;
	arg.count = xdrlen;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);

	if (ret >= 0) {
		ret = res.copied;
		*cookiep = res.cookie;
		*eofp = res.eof;
	}

out_free_pages:
	while (--np >= 0) {
		if (pages[np])
			__free_page(pages[np]);
	}
	kfree(pages);
out_free_scratch:
	__free_page(res.scratch);
out:
	return ret;
}

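/*
 * The wrappers below retry the raw xattr operations through
 * nfs4_handle_exception() until the error is either fatal or the server
 * state has been recovered.  nfs42_proc_getxattr() sizes its page array
 * for the caller's buffer, or for XATTR_SIZE_MAX when only the attribute
 * length is being probed (buflen == 0).
 */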
ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
			    void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	ssize_t err, np, i;
	struct page **pages;

	np = nfs_page_array_len(0, buflen ?: XATTR_SIZE_MAX);
	pages = kmalloc_array(np, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			np = i + 1;
			err = -ENOMEM;
			goto out;
		}
	}

	/*
	 * The GETXATTR op has no length field in the call, and the
	 * xattr data is at the end of the reply.
	 *
	 * There is no downside in using the page-aligned length. It will
	 * allow receiving and caching xattrs that are too large for the
	 * caller but still fit in the page-rounded value.
	 */
	do {
		err = _nfs42_proc_getxattr(inode, name, buf, buflen,
					   pages, np * PAGE_SIZE);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

out:
	while (--np >= 0)
		__free_page(pages[np]);
	kfree(pages);

	return err;
}

int nfs42_proc_setxattr(struct inode *inode, const char *name,
			const void *buf, size_t buflen, int flags)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}

ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
			      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs4_exception exception = { };
	ssize_t err;

	do {
		err = _nfs42_proc_listxattrs(inode, buf, buflen,
					     cookiep, eofp);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}

int nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_removexattr(inode, name);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}