// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
#include <linux/fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs42.h"
#include "iostat.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);

static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
	struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
	unsigned short port = 2049;

	rcu_read_lock();
	naddr->netid_len = scnprintf(naddr->netid,
				     sizeof(naddr->netid), "%s",
				     rpc_peeraddr2str(clp->cl_rpcclient,
						      RPC_DISPLAY_NETID));
	naddr->addr_len = scnprintf(naddr->addr,
				    sizeof(naddr->addr),
				    "%s.%u.%u",
				    rpc_peeraddr2str(clp->cl_rpcclient,
						     RPC_DISPLAY_ADDR),
				    port >> 8, port & 255);
	rcu_read_unlock();
}

static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	u32 bitmask[3];
	struct nfs42_falloc_args args = {
		.falloc_fh = NFS_FH(inode),
		.falloc_offset = offset,
		.falloc_length = len,
		.falloc_bitmask = bitmask,
	};
	struct nfs42_falloc_res res = {
		.falloc_server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
			lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	memcpy(bitmask, server->cache_consistency_bitmask, sizeof(bitmask));
	if (server->attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)
		bitmask[1] |= FATTR4_WORD1_SPACE_USED;

	res.falloc_fattr = nfs_alloc_fattr();
	if (!res.falloc_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode_force_wcc(inode,
							    res.falloc_fattr);

	if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
		trace_nfs4_fallocate(inode, &args, status);
	else
		trace_nfs4_deallocate(inode, &args, status);
	kfree(res.falloc_fattr);
	return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	int err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = inode;
	exception.state = lock->open_context->state;

	err = nfs_sync_inode(inode);
	if (err)
		goto out;

	do {
		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	nfs_put_lock_context(lock);
	return err;
}

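/*
 * Preallocate (ALLOCATE) or punch (DEALLOCATE) a byte range on the server.
 * Both entry points below serialize against other writers by taking the
 * inode lock, and drop the corresponding NFS_CAP_* bit if the server turns
 * out not to support the operation.
 */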
int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

	inode_unlock(inode);
	return err;
}

int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0)
		truncate_pagecache_range(inode, offset, (offset + len) - 1);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;

	inode_unlock(inode);
	return err;
}

static int handle_async_copy(struct nfs42_copy_res *res,
			     struct nfs_server *dst_server,
			     struct nfs_server *src_server,
			     struct file *src,
			     struct file *dst,
			     nfs4_stateid *src_stateid,
			     bool *restart)
{
	struct nfs4_copy_state *copy, *tmp_copy;
	int status = NFS4_OK;
	bool found_pending = false;
	struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
	struct nfs_open_context *src_ctx = nfs_file_open_context(src);

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return -ENOMEM;

	spin_lock(&dst_server->nfs_client->cl_lock);
	list_for_each_entry(tmp_copy,
			    &dst_server->nfs_client->pending_cb_stateids,
			    copies) {
		if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
			   NFS4_STATEID_SIZE))
			continue;
		found_pending = true;
		list_del(&tmp_copy->copies);
		break;
	}
	if (found_pending) {
		spin_unlock(&dst_server->nfs_client->cl_lock);
		kfree(copy);
		copy = tmp_copy;
		goto out;
	}

	memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
	init_completion(&copy->completion);
	copy->parent_dst_state = dst_ctx->state;
	copy->parent_src_state = src_ctx->state;

	list_add_tail(&copy->copies, &dst_server->ss_copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);

	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_add_tail(&copy->src_copies, &src_server->ss_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}

	status = wait_for_completion_interruptible(&copy->completion);
	spin_lock(&dst_server->nfs_client->cl_lock);
	list_del_init(&copy->copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);
	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_del_init(&copy->src_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}
	if (status == -ERESTARTSYS) {
		goto out_cancel;
	} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
		status = -EAGAIN;
		*restart = true;
		goto out_cancel;
	}
out:
	res->write_res.count = copy->count;
	memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
	status = -copy->error;

out_free:
	kfree(copy);
	return status;
out_cancel:
	nfs42_do_offload_cancel_async(dst, &copy->stateid);
	if (!nfs42_files_from_same_server(src, dst))
		nfs42_do_offload_cancel_async(src, src_stateid);
	goto out_free;
}

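/*
 * Commit data written by an asynchronous COPY and check that the commit
 * verifier matches the verifier returned in the COPY reply; a mismatch
 * means the copied data must be resent, so -EAGAIN is returned.
 */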
static int process_copy_commit(struct file *dst, loff_t pos_dst,
			       struct nfs42_copy_res *res)
{
	struct nfs_commitres cres;
	int status = -ENOMEM;

	cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
	if (!cres.verf)
		goto out;

	status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
	if (status)
		goto out_free;
	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &cres.verf->verifier)) {
		dprintk("commit verf differs from copy verf\n");
		status = -EAGAIN;
	}
out_free:
	kfree(cres.verf);
out:
	return status;
}

/**
 * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
 * @inode: pointer to destination inode
 * @pos: destination offset
 * @len: copy length
 *
 * Punch a hole in the inode page cache, so that the NFS client will
 * know to retrieve new data.
 * Update the file size if necessary, and then mark the inode as having
 * invalid cached values for change attribute, ctime, mtime and space used.
 */
static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
{
	loff_t newsize = pos + len;
	loff_t end = newsize - 1;

	WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
				pos >> PAGE_SHIFT, end >> PAGE_SHIFT));

	spin_lock(&inode->i_lock);
	if (newsize > i_size_read(inode))
		i_size_write(inode, newsize);
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
					NFS_INO_INVALID_CTIME |
					NFS_INO_INVALID_MTIME |
					NFS_INO_INVALID_BLOCKS);
	spin_unlock(&inode->i_lock);
}

static ssize_t _nfs42_proc_copy(struct file *src,
				struct nfs_lock_context *src_lock,
				struct file *dst,
				struct nfs_lock_context *dst_lock,
				struct nfs42_copy_args *args,
				struct nfs42_copy_res *res,
				struct nl4_server *nss,
				nfs4_stateid *cnr_stateid,
				bool *restart)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	struct inode *dst_inode = file_inode(dst);
	struct inode *src_inode = file_inode(src);
	struct nfs_server *dst_server = NFS_SERVER(dst_inode);
	struct nfs_server *src_server = NFS_SERVER(src_inode);
	loff_t pos_src = args->src_pos;
	loff_t pos_dst = args->dst_pos;
	size_t count = args->count;
	ssize_t status;

	if (nss) {
		args->cp_src = nss;
		nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
	} else {
		status = nfs4_set_rw_stateid(&args->src_stateid,
				src_lock->open_context, src_lock, FMODE_READ);
		if (status) {
			if (status == -EAGAIN)
				status = -NFS4ERR_BAD_STATEID;
			return status;
		}
	}
	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
			pos_src, pos_src + (loff_t)count - 1);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_sync_inode(dst_inode);
	if (status)
		return status;

	res->commit_res.verf = NULL;
	if (args->sync) {
		res->commit_res.verf =
			kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
		if (!res->commit_res.verf)
			return -ENOMEM;
	}
	set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
		&src_lock->open_context->state->flags);
	set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
		&dst_lock->open_context->state->flags);

	status = nfs4_call_sync(dst_server->client, dst_server, &msg,
				&args->seq_args, &res->seq_res, 0);
	trace_nfs4_copy(src_inode, dst_inode, args, res, nss, status);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_COPY;
	if (status)
		goto out;

	if (args->sync &&
	    nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &res->commit_res.verf->verifier)) {
		status = -EAGAIN;
		goto out;
	}

	if (!res->synchronous) {
		status = handle_async_copy(res, dst_server, src_server, src,
				dst, &args->src_stateid, restart);
		if (status)
			goto out;
	}

	if ((!res->synchronous || !args->sync) &&
			res->write_res.verifier.committed != NFS_FILE_SYNC) {
		status = process_copy_commit(dst, pos_dst, res);
		if (status)
			goto out;
	}

	nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
	nfs_invalidate_atime(src_inode);
	status = res->write_res.count;
out:
	if (args->sync)
		kfree(res->commit_res.verf);
	return status;
}

ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
			struct file *dst, loff_t pos_dst, size_t count,
			struct nl4_server *nss,
			nfs4_stateid *cnr_stateid, bool sync)
{
	struct nfs_server *server = NFS_SERVER(file_inode(dst));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs42_copy_args args = {
		.src_fh = NFS_FH(file_inode(src)),
		.src_pos = pos_src,
		.dst_fh = NFS_FH(file_inode(dst)),
		.dst_pos = pos_dst,
		.count = count,
		.sync = sync,
	};
	struct nfs42_copy_res res;
	struct nfs4_exception src_exception = {
		.inode = file_inode(src),
		.stateid = &args.src_stateid,
	};
	struct nfs4_exception dst_exception = {
		.inode = file_inode(dst),
		.stateid = &args.dst_stateid,
	};
	ssize_t err, err2;
	bool restart = false;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.state = dst_lock->open_context->state;

	do {
		inode_lock(file_inode(dst));
		err = _nfs42_proc_copy(src, src_lock,
				dst, dst_lock,
				&args, &res,
				nss, cnr_stateid, &restart);
		inode_unlock(file_inode(dst));

		if (err >= 0)
			break;
		if (err == -ENOTSUPP &&
				nfs42_files_from_same_server(src, dst)) {
			err = -EOPNOTSUPP;
			break;
		} else if (err == -EAGAIN) {
			if (!restart) {
				dst_exception.retry = 1;
				continue;
			}
			break;
		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
			args.sync = true;
			dst_exception.retry = 1;
			continue;
		} else if ((err == -ESTALE ||
				err == -NFS4ERR_OFFLOAD_DENIED ||
				err == -ENOTSUPP) &&
				!nfs42_files_from_same_server(src, dst)) {
			nfs42_do_offload_cancel_async(src, &args.src_stateid);
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

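/*
 * OFFLOAD_CANCEL is sent as an asynchronous RPC task; the caller only waits
 * for the task to complete, and NFS_CAP_OFFLOAD_CANCEL is cleared if the
 * server reports that it does not support the operation.
 */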
struct nfs42_offloadcancel_data {
	struct nfs_server *seq_server;
	struct nfs42_offload_status_args args;
	struct nfs42_offload_status_res res;
};

static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    &data->args.osa_seq_args,
			    &data->res.osr_seq_res, task);
}

static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	trace_nfs4_offload_cancel(&data->args, task->tk_status);
	nfs41_sequence_done(task, &data->res.osr_seq_res);
	if (task->tk_status &&
	    nfs4_async_handle_error(task, data->seq_server, NULL,
				    NULL) == -EAGAIN)
		rpc_restart_call_prepare(task);
}

static void nfs42_free_offloadcancel_data(void *data)
{
	kfree(data);
}

static const struct rpc_call_ops nfs42_offload_cancel_ops = {
	.rpc_call_prepare = nfs42_offload_cancel_prepare,
	.rpc_call_done = nfs42_offload_cancel_done,
	.rpc_release = nfs42_free_offloadcancel_data,
};

static int nfs42_do_offload_cancel_async(struct file *dst,
					 nfs4_stateid *stateid)
{
	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
	struct nfs42_offloadcancel_data *data = NULL;
	struct nfs_open_context *ctx = nfs_file_open_context(dst);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = dst_server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_offload_cancel_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
		return -EOPNOTSUPP;

	data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;

	data->seq_server = dst_server;
	data->args.osa_src_fh = NFS_FH(file_inode(dst));
	memcpy(&data->args.osa_stateid, stateid,
	       sizeof(data->args.osa_stateid));
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
			   1, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
	rpc_put_task(task);
	return status;
}

static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
				   struct nfs42_copy_notify_args *args,
				   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	int status;
	struct nfs_open_context *ctx;
	struct nfs_lock_context *l_ctx;

	ctx = get_nfs_open_context(nfs_file_open_context(src));
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx))
		return PTR_ERR(l_ctx);

	status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
				     FMODE_READ);
	nfs_put_lock_context(l_ctx);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs4_call_sync(src_server->client, src_server, &msg,
				&args->cna_seq_args, &res->cnr_seq_res, 0);
	trace_nfs4_copy_notify(file_inode(src), args, res, status);
	if (status == -ENOTSUPP)
		src_server->caps &= ~NFS_CAP_COPY_NOTIFY;

	put_nfs_open_context(nfs_file_open_context(src));
	return status;
}

int nfs42_proc_copy_notify(struct file *src, struct file *dst,
			   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct nfs42_copy_notify_args *args;
	struct nfs4_exception exception = {
		.inode = file_inode(src),
	};
	int status;

	if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
		return -EOPNOTSUPP;

	args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
	if (args == NULL)
		return -ENOMEM;

	args->cna_src_fh = NFS_FH(file_inode(src)),
	args->cna_dst.nl4_type = NL4_NETADDR;
	nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
	exception.stateid = &args->cna_src_stateid;

	do {
		status = _nfs42_proc_copy_notify(src, dst, args, res);
		if (status == -ENOTSUPP) {
			status = -EOPNOTSUPP;
			goto out;
		}
		status = nfs4_handle_exception(src_server, status, &exception);
	} while (exception.retry);

out:
	kfree(args);
	return status;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, int whence)
{
	struct inode *inode = file_inode(filep);
	struct nfs42_seek_args args = {
		.sa_fh = NFS_FH(inode),
		.sa_offset = offset,
		.sa_what = (whence == SEEK_HOLE) ?
			NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
	};
	struct nfs42_seek_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
		return -ENOTSUPP;

	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
			lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
			offset, LLONG_MAX);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	trace_nfs4_llseek(inode, &args, &res, status);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_SEEK;
	if (status)
		return status;

	if (whence == SEEK_DATA && res.sr_eof)
		return -NFS4ERR_NXIO;
	else
		return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	loff_t err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_llseek(filep, lock, offset, whence);
		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}

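/*
 * LAYOUTSTATS: the prepare callback samples the current layout stateid
 * under the inode lock and exits the RPC early once the layout is no
 * longer valid; the done callback retires or retries the call depending
 * on the stateid-related error returned by the server.
 */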
static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
	}

	trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
			devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}

	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
	smp_mb__after_atomic();
	nfs_iput_and_deactive(data->inode);
	kfree(data->args.devinfo);
	kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
	.rpc_call_prepare = nfs42_layoutstat_prepare,
	.rpc_call_done = nfs42_layoutstat_done,
	.rpc_release = nfs42_layoutstat_release,
};

int nfs42_proc_layoutstats_generic(struct nfs_server *server,
				   struct nfs42_layoutstat_data *data)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_layoutstat_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;

	data->inode = nfs_igrab_and_active(data->args.inode);
	if (!data->inode) {
		nfs42_layoutstat_release(data);
		return -EAGAIN;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

static struct nfs42_layouterror_data *
nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
{
	struct nfs42_layouterror_data *data;
	struct inode *inode = lseg->pls_layout->plh_inode;

	data = kzalloc(sizeof(*data), gfp_flags);
	if (data) {
		data->args.inode = data->inode = nfs_igrab_and_active(inode);
		if (data->inode) {
			data->lseg = pnfs_get_lseg(lseg);
			if (data->lseg)
				return data;
			nfs_iput_and_deactive(data->inode);
		}
		kfree(data);
	}
	return NULL;
}

static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
	pnfs_put_lseg(data->lseg);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}

static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
	unsigned i;

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	for (i = 0; i < data->args.num_errors; i++)
		nfs4_stateid_copy(&data->args.errors[i].stateid,
				  &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}
static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.errors[0].stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.errors[0].stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.errors[0].stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
	}

	trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
			       task->tk_status);
}

static void
nfs42_layouterror_release(void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;

	nfs42_free_layouterror_data(data);
}

static const struct rpc_call_ops nfs42_layouterror_ops = {
	.rpc_call_prepare = nfs42_layouterror_prepare,
	.rpc_call_done = nfs42_layouterror_done,
	.rpc_release = nfs42_layouterror_release,
};

int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
			   const struct nfs42_layout_error *errors, size_t n)
{
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct nfs42_layouterror_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
	};
	struct rpc_task_setup task_setup = {
		.rpc_message = &msg,
		.callback_ops = &nfs42_layouterror_ops,
		.flags = RPC_TASK_ASYNC,
	};
	unsigned int i;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
		return -EOPNOTSUPP;
	if (n > NFS42_LAYOUTERROR_MAX)
		return -EINVAL;
	data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
	if (!data)
		return -ENOMEM;
	for (i = 0; i < n; i++) {
		data->args.errors[i] = errors[i];
		data->args.num_errors++;
		data->res.num_errors++;
	}
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup.callback_data = data;
	task_setup.rpc_client = NFS_SERVER(inode)->client;
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);

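/*
 * CLONE uses a read stateid for the source and a write stateid for the
 * destination; on success the destination page cache is invalidated and
 * the inode attributes are refreshed from the returned post-op fattr.
 */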
static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
		struct file *dst_f, struct nfs_lock_context *src_lock,
		struct nfs_lock_context *dst_lock, loff_t src_offset,
		loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = server->cache_consistency_bitmask,
	};
	struct nfs42_clone_res res = {
		.server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
			src_lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}
	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
			dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	trace_nfs4_clone(src_inode, dst_inode, &args, status);
	if (status == 0) {
		nfs42_copy_dest_done(dst_inode, dst_offset, count);
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
	}

	kfree(res.dst_fattr);
	return status;
}

int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)

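/*
 * Helpers for the NFSv4.2 *XATTR operations. The mutating calls pass the
 * returned change_info to nfs4_update_changeattr() so that the cached
 * change attribute stays consistent.
 */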
static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_removexattrargs args = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_removexattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret;
	unsigned long timestamp = jiffies;

	ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
			     &res.seq_res, 1);
	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
				const void *buf, size_t buflen, int flags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4XATTR_MAXPAGES];
	struct nfs42_setxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_pages = pages,
		.xattr_len = buflen,
		.xattr_name = name,
		.xattr_flags = flags,
	};
	struct nfs42_setxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int ret, np;
	unsigned long timestamp = jiffies;

	if (buflen > server->sxasize)
		return -ERANGE;

	if (buflen > 0) {
		np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
		if (np < 0)
			return np;
	} else
		np = 0;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 1);

	for (; np > 0; np--)
		put_page(pages[np - 1]);

	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
				    void *buf, size_t buflen,
				    struct page **pages, size_t plen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_getxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_getxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	ssize_t ret;

	arg.xattr_len = plen;
	arg.xattr_pages = pages;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);
	if (ret < 0)
		return ret;

	/*
	 * Normally, the caching is done one layer up, but for successful
	 * RPCS, always cache the result here, even if the caller was
	 * just querying the length, or if the reply was too big for
	 * the caller. This avoids a second RPC in the case of the
	 * common query-alloc-retrieve cycle for xattrs.
	 *
	 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
	 */

	nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);

	if (buflen) {
		if (res.xattr_len > buflen)
			return -ERANGE;
		_copy_from_pages(buf, pages, 0, res.xattr_len);
	}

	return res.xattr_len;
}

static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
				      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page **pages;
	struct nfs42_listxattrsargs arg = {
		.fh = NFS_FH(inode),
		.cookie = *cookiep,
	};
	struct nfs42_listxattrsres res = {
		.eof = false,
		.xattr_buf = buf,
		.xattr_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	u32 xdrlen;
	int ret, np, i;

	ret = -ENOMEM;
	res.scratch = alloc_page(GFP_KERNEL);
	if (!res.scratch)
		goto out;

	xdrlen = nfs42_listxattr_xdrsize(buflen);
	if (xdrlen > server->lxasize)
		xdrlen = server->lxasize;
	np = xdrlen / PAGE_SIZE + 1;

	pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_free_scratch;
	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free_pages;
	}

	arg.xattr_pages = pages;
	arg.count = xdrlen;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);

	if (ret >= 0) {
		ret = res.copied;
		*cookiep = res.cookie;
		*eofp = res.eof;
	}

out_free_pages:
	while (--np >= 0) {
		if (pages[np])
			__free_page(pages[np]);
	}
	kfree(pages);
out_free_scratch:
	__free_page(res.scratch);
out:
	return ret;
}

ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
			    void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	ssize_t err, np, i;
	struct page **pages;

	np = nfs_page_array_len(0, buflen ?: XATTR_SIZE_MAX);
	pages = kmalloc_array(np, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			np = i + 1;
			err = -ENOMEM;
			goto out;
		}
	}

	/*
	 * The GETXATTR op has no length field in the call, and the
	 * xattr data is at the end of the reply.
	 *
	 * There is no downside in using the page-aligned length. It will
	 * allow receiving and caching xattrs that are too large for the
	 * caller but still fit in the page-rounded value.
	 */
	do {
		err = _nfs42_proc_getxattr(inode, name, buf, buflen,
					   pages, np * PAGE_SIZE);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

out:
	while (--np >= 0)
		__free_page(pages[np]);
	kfree(pages);

	return err;
}

int nfs42_proc_setxattr(struct inode *inode, const char *name,
			const void *buf, size_t buflen, int flags)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}

ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
			      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs4_exception exception = { };
	ssize_t err;

	do {
		err = _nfs42_proc_listxattrs(inode, buf, buflen,
					     cookiep, eofp);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}

int nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_removexattr(inode, name);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}