// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
#include <linux/fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs42.h"
#include "iostat.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);

static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
        struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
        unsigned short port = 2049;

        rcu_read_lock();
        naddr->netid_len = scnprintf(naddr->netid,
                                     sizeof(naddr->netid), "%s",
                                     rpc_peeraddr2str(clp->cl_rpcclient,
                                                      RPC_DISPLAY_NETID));
        naddr->addr_len = scnprintf(naddr->addr,
                                    sizeof(naddr->addr),
                                    "%s.%u.%u",
                                    rpc_peeraddr2str(clp->cl_rpcclient,
                                                     RPC_DISPLAY_ADDR),
                                    port >> 8, port & 255);
        rcu_read_unlock();
}

static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
                struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
        struct inode *inode = file_inode(filep);
        struct nfs_server *server = NFS_SERVER(inode);
        u32 bitmask[3];
        struct nfs42_falloc_args args = {
                .falloc_fh      = NFS_FH(inode),
                .falloc_offset  = offset,
                .falloc_length  = len,
                .falloc_bitmask = bitmask,
        };
        struct nfs42_falloc_res res = {
                .falloc_server  = server,
        };
        int status;

        msg->rpc_argp = &args;
        msg->rpc_resp = &res;

        status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
                        lock, FMODE_WRITE);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }

        memcpy(bitmask, server->cache_consistency_bitmask, sizeof(bitmask));
        if (server->attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)
                bitmask[1] |= FATTR4_WORD1_SPACE_USED;

        res.falloc_fattr = nfs_alloc_fattr();
        if (!res.falloc_fattr)
                return -ENOMEM;

        status = nfs4_call_sync(server->client, server, msg,
                                &args.seq_args, &res.seq_res, 0);
        if (status == 0)
                status = nfs_post_op_update_inode_force_wcc(inode,
                                                            res.falloc_fattr);

        if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
                trace_nfs4_fallocate(inode, &args, status);
        else
                trace_nfs4_deallocate(inode, &args, status);
        kfree(res.falloc_fattr);
        return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
                                loff_t offset, loff_t len)
{
        struct inode *inode = file_inode(filep);
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs4_exception exception = { };
        struct nfs_lock_context *lock;
        int err;

        lock = nfs_get_lock_context(nfs_file_open_context(filep));
        if (IS_ERR(lock))
                return PTR_ERR(lock);

        exception.inode = inode;
        exception.state = lock->open_context->state;

        err = nfs_sync_inode(inode);
        if (err)
                goto out;

        do {
                err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
                if (err == -ENOTSUPP) {
                        err = -EOPNOTSUPP;
                        break;
                }
                err = nfs4_handle_exception(server, err, &exception);
        } while (exception.retry);
out:
        nfs_put_lock_context(lock);
        return err;
}

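/*
 * ALLOCATE preallocates a byte range on the server; DEALLOCATE frees one
 * and drops the corresponding range from the page cache.  Both take the
 * inode lock for the duration of the RPC and clear the capability bit if
 * the server turns out not to support the operation.
 */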
int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
        };
        struct inode *inode = file_inode(filep);
        int err;

        if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
                return -EOPNOTSUPP;

        inode_lock(inode);

        err = nfs42_proc_fallocate(&msg, filep, offset, len);
        if (err == -EOPNOTSUPP)
                NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

        inode_unlock(inode);
        return err;
}

int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
        };
        struct inode *inode = file_inode(filep);
        int err;

        if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
                return -EOPNOTSUPP;

        inode_lock(inode);

        err = nfs42_proc_fallocate(&msg, filep, offset, len);
        if (err == 0)
                truncate_pagecache_range(inode, offset, (offset + len) - 1);
        if (err == -EOPNOTSUPP)
                NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;

        inode_unlock(inode);
        return err;
}

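/*
 * Wait for an asynchronous COPY to finish.  The CB_OFFLOAD callback has
 * either already queued a matching stateid on pending_cb_stateids, or it
 * will complete the nfs4_copy_state added to ss_copies below.  If the
 * wait is interrupted, or the copy must be restarted (e.g. after
 * NFS4ERR_PARTNER_NO_AUTH), the offload is cancelled on the destination
 * and, for an inter-server copy, on the source as well.
 */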
static int handle_async_copy(struct nfs42_copy_res *res,
                             struct nfs_server *dst_server,
                             struct nfs_server *src_server,
                             struct file *src,
                             struct file *dst,
                             nfs4_stateid *src_stateid,
                             bool *restart)
{
        struct nfs4_copy_state *copy, *tmp_copy;
        int status = NFS4_OK;
        bool found_pending = false;
        struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
        struct nfs_open_context *src_ctx = nfs_file_open_context(src);

        copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
        if (!copy)
                return -ENOMEM;

        spin_lock(&dst_server->nfs_client->cl_lock);
        list_for_each_entry(tmp_copy,
                            &dst_server->nfs_client->pending_cb_stateids,
                            copies) {
                if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
                           NFS4_STATEID_SIZE))
                        continue;
                found_pending = true;
                list_del(&tmp_copy->copies);
                break;
        }
        if (found_pending) {
                spin_unlock(&dst_server->nfs_client->cl_lock);
                kfree(copy);
                copy = tmp_copy;
                goto out;
        }

        memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
        init_completion(&copy->completion);
        copy->parent_dst_state = dst_ctx->state;
        copy->parent_src_state = src_ctx->state;

        list_add_tail(&copy->copies, &dst_server->ss_copies);
        spin_unlock(&dst_server->nfs_client->cl_lock);

        if (dst_server != src_server) {
                spin_lock(&src_server->nfs_client->cl_lock);
                list_add_tail(&copy->src_copies, &src_server->ss_copies);
                spin_unlock(&src_server->nfs_client->cl_lock);
        }

        status = wait_for_completion_interruptible(&copy->completion);
        spin_lock(&dst_server->nfs_client->cl_lock);
        list_del_init(&copy->copies);
        spin_unlock(&dst_server->nfs_client->cl_lock);
        if (dst_server != src_server) {
                spin_lock(&src_server->nfs_client->cl_lock);
                list_del_init(&copy->src_copies);
                spin_unlock(&src_server->nfs_client->cl_lock);
        }
        if (status == -ERESTARTSYS) {
                goto out_cancel;
        } else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
                status = -EAGAIN;
                *restart = true;
                goto out_cancel;
        }
out:
        res->write_res.count = copy->count;
        memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
        status = -copy->error;

out_free:
        kfree(copy);
        return status;
out_cancel:
        nfs42_do_offload_cancel_async(dst, &copy->stateid);
        if (!nfs42_files_from_same_server(src, dst))
                nfs42_do_offload_cancel_async(src, src_stateid);
        goto out_free;
}

static int process_copy_commit(struct file *dst, loff_t pos_dst,
                               struct nfs42_copy_res *res)
{
        struct nfs_commitres cres;
        int status = -ENOMEM;

        cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
        if (!cres.verf)
                goto out;

        status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
        if (status)
                goto out_free;
        if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
                                   &cres.verf->verifier)) {
                dprintk("commit verf differs from copy verf\n");
                status = -EAGAIN;
        }
out_free:
        kfree(cres.verf);
out:
        return status;
}

/**
 * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
 * @inode: pointer to destination inode
 * @pos: destination offset
 * @len: copy length
 *
 * Punch a hole in the inode page cache, so that the NFS client will
 * know to retrieve new data.
 * Update the file size if necessary, and then mark the inode as having
 * invalid cached values for change attribute, ctime, mtime and space used.
 */
static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
{
        loff_t newsize = pos + len;
        loff_t end = newsize - 1;

        truncate_pagecache_range(inode, pos, end);
        spin_lock(&inode->i_lock);
        if (newsize > i_size_read(inode))
                i_size_write(inode, newsize);
        nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
                                     NFS_INO_INVALID_CTIME |
                                     NFS_INO_INVALID_MTIME |
                                     NFS_INO_INVALID_BLOCKS);
        spin_unlock(&inode->i_lock);
}

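/*
 * Issue a single COPY request.  Dirty source pages are flushed and the
 * destination inode is synced first; an asynchronous copy is completed
 * via handle_async_copy(), and a COMMIT is sent when the returned write
 * verifier is not NFS_FILE_SYNC.
 */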
static ssize_t _nfs42_proc_copy(struct file *src,
                                struct nfs_lock_context *src_lock,
                                struct file *dst,
                                struct nfs_lock_context *dst_lock,
                                struct nfs42_copy_args *args,
                                struct nfs42_copy_res *res,
                                struct nl4_server *nss,
                                nfs4_stateid *cnr_stateid,
                                bool *restart)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
                .rpc_argp = args,
                .rpc_resp = res,
        };
        struct inode *dst_inode = file_inode(dst);
        struct inode *src_inode = file_inode(src);
        struct nfs_server *dst_server = NFS_SERVER(dst_inode);
        struct nfs_server *src_server = NFS_SERVER(src_inode);
        loff_t pos_src = args->src_pos;
        loff_t pos_dst = args->dst_pos;
        size_t count = args->count;
        ssize_t status;

        if (nss) {
                args->cp_src = nss;
                nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
        } else {
                status = nfs4_set_rw_stateid(&args->src_stateid,
                                src_lock->open_context, src_lock, FMODE_READ);
                if (status) {
                        if (status == -EAGAIN)
                                status = -NFS4ERR_BAD_STATEID;
                        return status;
                }
        }
        status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
                        pos_src, pos_src + (loff_t)count - 1);
        if (status)
                return status;

        status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
                                     dst_lock, FMODE_WRITE);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }

        status = nfs_sync_inode(dst_inode);
        if (status)
                return status;

        res->commit_res.verf = NULL;
        if (args->sync) {
                res->commit_res.verf =
                        kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
                if (!res->commit_res.verf)
                        return -ENOMEM;
        }
        set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
                &src_lock->open_context->state->flags);
        set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
                &dst_lock->open_context->state->flags);

        status = nfs4_call_sync(dst_server->client, dst_server, &msg,
                                &args->seq_args, &res->seq_res, 0);
        trace_nfs4_copy(src_inode, dst_inode, args, res, nss, status);
        if (status == -ENOTSUPP)
                dst_server->caps &= ~NFS_CAP_COPY;
        if (status)
                goto out;

        if (args->sync &&
            nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
                                   &res->commit_res.verf->verifier)) {
                status = -EAGAIN;
                goto out;
        }

        if (!res->synchronous) {
                status = handle_async_copy(res, dst_server, src_server, src,
                                           dst, &args->src_stateid, restart);
                if (status)
                        goto out;
        }

        if ((!res->synchronous || !args->sync) &&
            res->write_res.verifier.committed != NFS_FILE_SYNC) {
                status = process_copy_commit(dst, pos_dst, res);
                if (status)
                        goto out;
        }

        nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
        nfs_invalidate_atime(src_inode);
        status = res->write_res.count;
out:
        if (args->sync)
                kfree(res->commit_res.verf);
        return status;
}

ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
                        struct file *dst, loff_t pos_dst, size_t count,
                        struct nl4_server *nss,
                        nfs4_stateid *cnr_stateid, bool sync)
{
        struct nfs_server *server = NFS_SERVER(file_inode(dst));
        struct nfs_lock_context *src_lock;
        struct nfs_lock_context *dst_lock;
        struct nfs42_copy_args args = {
                .src_fh         = NFS_FH(file_inode(src)),
                .src_pos        = pos_src,
                .dst_fh         = NFS_FH(file_inode(dst)),
                .dst_pos        = pos_dst,
                .count          = count,
                .sync           = sync,
        };
        struct nfs42_copy_res res;
        struct nfs4_exception src_exception = {
                .inode          = file_inode(src),
                .stateid        = &args.src_stateid,
        };
        struct nfs4_exception dst_exception = {
                .inode          = file_inode(dst),
                .stateid        = &args.dst_stateid,
        };
        ssize_t err, err2;
        bool restart = false;

        src_lock = nfs_get_lock_context(nfs_file_open_context(src));
        if (IS_ERR(src_lock))
                return PTR_ERR(src_lock);

        src_exception.state = src_lock->open_context->state;

        dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
        if (IS_ERR(dst_lock)) {
                err = PTR_ERR(dst_lock);
                goto out_put_src_lock;
        }

        dst_exception.state = dst_lock->open_context->state;

        do {
                inode_lock(file_inode(dst));
                err = _nfs42_proc_copy(src, src_lock,
                                       dst, dst_lock,
                                       &args, &res,
                                       nss, cnr_stateid, &restart);
                inode_unlock(file_inode(dst));

                if (err >= 0)
                        break;
                if (err == -ENOTSUPP &&
                    nfs42_files_from_same_server(src, dst)) {
                        err = -EOPNOTSUPP;
                        break;
                } else if (err == -EAGAIN) {
                        if (!restart) {
                                dst_exception.retry = 1;
                                continue;
                        }
                        break;
                } else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
                        args.sync = true;
                        dst_exception.retry = 1;
                        continue;
                } else if ((err == -ESTALE ||
                            err == -NFS4ERR_OFFLOAD_DENIED ||
                            err == -ENOTSUPP) &&
                           !nfs42_files_from_same_server(src, dst)) {
                        nfs42_do_offload_cancel_async(src, &args.src_stateid);
                        err = -EOPNOTSUPP;
                        break;
                }

                err2 = nfs4_handle_exception(server, err, &src_exception);
                err = nfs4_handle_exception(server, err, &dst_exception);
                if (!err)
                        err = err2;
        } while (src_exception.retry || dst_exception.retry);

        nfs_put_lock_context(dst_lock);
out_put_src_lock:
        nfs_put_lock_context(src_lock);
        return err;
}

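/*
 * Asynchronous OFFLOAD_CANCEL, used when the client abandons an offloaded
 * copy.  The RPC runs on the nfsiod workqueue; the caller only waits for
 * the task to complete and clears NFS_CAP_OFFLOAD_CANCEL if the server
 * does not support the operation.
 */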
struct nfs42_offloadcancel_data {
        struct nfs_server *seq_server;
        struct nfs42_offload_status_args args;
        struct nfs42_offload_status_res res;
};

static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs42_offloadcancel_data *data = calldata;

        nfs4_setup_sequence(data->seq_server->nfs_client,
                            &data->args.osa_seq_args,
                            &data->res.osr_seq_res, task);
}

static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
        struct nfs42_offloadcancel_data *data = calldata;

        trace_nfs4_offload_cancel(&data->args, task->tk_status);
        nfs41_sequence_done(task, &data->res.osr_seq_res);
        if (task->tk_status &&
            nfs4_async_handle_error(task, data->seq_server, NULL,
                                    NULL) == -EAGAIN)
                rpc_restart_call_prepare(task);
}

static void nfs42_free_offloadcancel_data(void *data)
{
        kfree(data);
}

static const struct rpc_call_ops nfs42_offload_cancel_ops = {
        .rpc_call_prepare = nfs42_offload_cancel_prepare,
        .rpc_call_done = nfs42_offload_cancel_done,
        .rpc_release = nfs42_free_offloadcancel_data,
};

static int nfs42_do_offload_cancel_async(struct file *dst,
                                         nfs4_stateid *stateid)
{
        struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
        struct nfs42_offloadcancel_data *data = NULL;
        struct nfs_open_context *ctx = nfs_file_open_context(dst);
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
                .rpc_cred = ctx->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_client = dst_server->client,
                .rpc_message = &msg,
                .callback_ops = &nfs42_offload_cancel_ops,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC,
        };
        int status;

        if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
                return -EOPNOTSUPP;

        data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
        if (data == NULL)
                return -ENOMEM;

        data->seq_server = dst_server;
        data->args.osa_src_fh = NFS_FH(file_inode(dst));
        memcpy(&data->args.osa_stateid, stateid,
               sizeof(data->args.osa_stateid));
        msg.rpc_argp = &data->args;
        msg.rpc_resp = &data->res;
        task_setup_data.callback_data = data;
        nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
                           1, 0);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        status = rpc_wait_for_completion_task(task);
        if (status == -ENOTSUPP)
                dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
        rpc_put_task(task);
        return status;
}

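/*
 * COPY_NOTIFY: ask the source server to allow a copy on behalf of the
 * destination, which is identified by the network address derived from
 * the destination file's RPC transport in nfs42_set_netaddr().
 */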
static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
                                   struct nfs42_copy_notify_args *args,
                                   struct nfs42_copy_notify_res *res)
{
        struct nfs_server *src_server = NFS_SERVER(file_inode(src));
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
                .rpc_argp = args,
                .rpc_resp = res,
        };
        int status;
        struct nfs_open_context *ctx;
        struct nfs_lock_context *l_ctx;

        ctx = get_nfs_open_context(nfs_file_open_context(src));
        l_ctx = nfs_get_lock_context(ctx);
        if (IS_ERR(l_ctx))
                return PTR_ERR(l_ctx);

        status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
                                     FMODE_READ);
        nfs_put_lock_context(l_ctx);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }

        status = nfs4_call_sync(src_server->client, src_server, &msg,
                                &args->cna_seq_args, &res->cnr_seq_res, 0);
        trace_nfs4_copy_notify(file_inode(src), args, res, status);
        if (status == -ENOTSUPP)
                src_server->caps &= ~NFS_CAP_COPY_NOTIFY;

        put_nfs_open_context(nfs_file_open_context(src));
        return status;
}

int nfs42_proc_copy_notify(struct file *src, struct file *dst,
                           struct nfs42_copy_notify_res *res)
{
        struct nfs_server *src_server = NFS_SERVER(file_inode(src));
        struct nfs42_copy_notify_args *args;
        struct nfs4_exception exception = {
                .inode = file_inode(src),
        };
        int status;

        if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
                return -EOPNOTSUPP;

        args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
        if (args == NULL)
                return -ENOMEM;

        args->cna_src_fh = NFS_FH(file_inode(src)),
        args->cna_dst.nl4_type = NL4_NETADDR;
        nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
        exception.stateid = &args->cna_src_stateid;

        do {
                status = _nfs42_proc_copy_notify(src, dst, args, res);
                if (status == -ENOTSUPP) {
                        status = -EOPNOTSUPP;
                        goto out;
                }
                status = nfs4_handle_exception(src_server, status, &exception);
        } while (exception.retry);

out:
        kfree(args);
        return status;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
                struct nfs_lock_context *lock, loff_t offset, int whence)
{
        struct inode *inode = file_inode(filep);
        struct nfs42_seek_args args = {
                .sa_fh          = NFS_FH(inode),
                .sa_offset      = offset,
                .sa_what        = (whence == SEEK_HOLE) ?
                                        NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
        };
        struct nfs42_seek_res res;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
        struct nfs_server *server = NFS_SERVER(inode);
        int status;

        if (!nfs_server_capable(inode, NFS_CAP_SEEK))
                return -ENOTSUPP;

        status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
                                     lock, FMODE_READ);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }

        status = nfs_filemap_write_and_wait_range(inode->i_mapping,
                                                  offset, LLONG_MAX);
        if (status)
                return status;

        status = nfs4_call_sync(server->client, server, &msg,
                                &args.seq_args, &res.seq_res, 0);
        trace_nfs4_llseek(inode, &args, &res, status);
        if (status == -ENOTSUPP)
                server->caps &= ~NFS_CAP_SEEK;
        if (status)
                return status;

        if (whence == SEEK_DATA && res.sr_eof)
                return -NFS4ERR_NXIO;
        else
                return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
        struct nfs_server *server = NFS_SERVER(file_inode(filep));
        struct nfs4_exception exception = { };
        struct nfs_lock_context *lock;
        loff_t err;

        lock = nfs_get_lock_context(nfs_file_open_context(filep));
        if (IS_ERR(lock))
                return PTR_ERR(lock);

        exception.inode = file_inode(filep);
        exception.state = lock->open_context->state;

        do {
                err = _nfs42_proc_llseek(filep, lock, offset, whence);
                if (err >= 0)
                        break;
                if (err == -ENOTSUPP) {
                        err = -EOPNOTSUPP;
                        break;
                }
                err = nfs4_handle_exception(server, err, &exception);
        } while (exception.retry);

        nfs_put_lock_context(lock);
        return err;
}

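/*
 * LAYOUTSTATS is sent asynchronously.  The callbacks below attach the
 * current layout stateid at prepare time and, on completion, handle
 * stateid and layout errors, clearing NFS_CAP_LAYOUTSTATS when the
 * server does not support the operation.
 */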
static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs42_layoutstat_data *data = calldata;
        struct inode *inode = data->inode;
        struct nfs_server *server = NFS_SERVER(inode);
        struct pnfs_layout_hdr *lo;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (!pnfs_layout_is_valid(lo)) {
                spin_unlock(&inode->i_lock);
                rpc_exit(task, 0);
                return;
        }
        nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
        spin_unlock(&inode->i_lock);
        nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
                            &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
        struct nfs42_layoutstat_data *data = calldata;
        struct inode *inode = data->inode;
        struct pnfs_layout_hdr *lo;

        if (!nfs4_sequence_done(task, &data->res.seq_res))
                return;

        switch (task->tk_status) {
        case 0:
                return;
        case -NFS4ERR_BADHANDLE:
        case -ESTALE:
                pnfs_destroy_layout(NFS_I(inode));
                break;
        case -NFS4ERR_EXPIRED:
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_BAD_STATEID:
                spin_lock(&inode->i_lock);
                lo = NFS_I(inode)->layout;
                if (pnfs_layout_is_valid(lo) &&
                    nfs4_stateid_match(&data->args.stateid,
                                       &lo->plh_stateid)) {
                        LIST_HEAD(head);

                        /*
                         * Mark the bad layout state as invalid, then retry
                         * with the current stateid.
                         */
                        pnfs_mark_layout_stateid_invalid(lo, &head);
                        spin_unlock(&inode->i_lock);
                        pnfs_free_lseg_list(&head);
                        nfs_commit_inode(inode, 0);
                } else
                        spin_unlock(&inode->i_lock);
                break;
        case -NFS4ERR_OLD_STATEID:
                spin_lock(&inode->i_lock);
                lo = NFS_I(inode)->layout;
                if (pnfs_layout_is_valid(lo) &&
                    nfs4_stateid_match_other(&data->args.stateid,
                                             &lo->plh_stateid)) {
                        /* Do we need to delay before resending? */
                        if (!nfs4_stateid_is_newer(&lo->plh_stateid,
                                                   &data->args.stateid))
                                rpc_delay(task, HZ);
                        rpc_restart_call_prepare(task);
                }
                spin_unlock(&inode->i_lock);
                break;
        case -ENOTSUPP:
        case -EOPNOTSUPP:
                NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
        }

        trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
        struct nfs42_layoutstat_data *data = calldata;
        struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
        int i;

        for (i = 0; i < data->args.num_dev; i++) {
                if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
                        devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
        }

        pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
        smp_mb__before_atomic();
        clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
        smp_mb__after_atomic();
        nfs_iput_and_deactive(data->inode);
        kfree(data->args.devinfo);
        kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
        .rpc_call_prepare = nfs42_layoutstat_prepare,
        .rpc_call_done = nfs42_layoutstat_done,
        .rpc_release = nfs42_layoutstat_release,
};

int nfs42_proc_layoutstats_generic(struct nfs_server *server,
                                   struct nfs42_layoutstat_data *data)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
        };
        struct rpc_task_setup task_setup = {
                .rpc_client = server->client,
                .rpc_message = &msg,
                .callback_ops = &nfs42_layoutstat_ops,
                .callback_data = data,
                .flags = RPC_TASK_ASYNC,
        };
        struct rpc_task *task;

        data->inode = nfs_igrab_and_active(data->args.inode);
        if (!data->inode) {
                nfs42_layoutstat_release(data);
                return -EAGAIN;
        }
        nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
        task = rpc_run_task(&task_setup);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}

static struct nfs42_layouterror_data *
nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
{
        struct nfs42_layouterror_data *data;
        struct inode *inode = lseg->pls_layout->plh_inode;

        data = kzalloc(sizeof(*data), gfp_flags);
        if (data) {
                data->args.inode = data->inode = nfs_igrab_and_active(inode);
                if (data->inode) {
                        data->lseg = pnfs_get_lseg(lseg);
                        if (data->lseg)
                                return data;
                        nfs_iput_and_deactive(data->inode);
                }
                kfree(data);
        }
        return NULL;
}

static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
        pnfs_put_lseg(data->lseg);
        nfs_iput_and_deactive(data->inode);
        kfree(data);
}

static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs42_layouterror_data *data = calldata;
        struct inode *inode = data->inode;
        struct nfs_server *server = NFS_SERVER(inode);
        struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
        unsigned i;

        spin_lock(&inode->i_lock);
        if (!pnfs_layout_is_valid(lo)) {
                spin_unlock(&inode->i_lock);
                rpc_exit(task, 0);
                return;
        }
        for (i = 0; i < data->args.num_errors; i++)
                nfs4_stateid_copy(&data->args.errors[i].stateid,
                                  &lo->plh_stateid);
        spin_unlock(&inode->i_lock);
        nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
                            &data->res.seq_res, task);
}

static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
        struct nfs42_layouterror_data *data = calldata;
        struct inode *inode = data->inode;
        struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

        if (!nfs4_sequence_done(task, &data->res.seq_res))
                return;

        switch (task->tk_status) {
        case 0:
                return;
        case -NFS4ERR_BADHANDLE:
        case -ESTALE:
                pnfs_destroy_layout(NFS_I(inode));
                break;
        case -NFS4ERR_EXPIRED:
        case -NFS4ERR_ADMIN_REVOKED:
        case -NFS4ERR_DELEG_REVOKED:
        case -NFS4ERR_STALE_STATEID:
        case -NFS4ERR_BAD_STATEID:
                spin_lock(&inode->i_lock);
                if (pnfs_layout_is_valid(lo) &&
                    nfs4_stateid_match(&data->args.errors[0].stateid,
                                       &lo->plh_stateid)) {
                        LIST_HEAD(head);

                        /*
                         * Mark the bad layout state as invalid, then retry
                         * with the current stateid.
                         */
                        pnfs_mark_layout_stateid_invalid(lo, &head);
                        spin_unlock(&inode->i_lock);
                        pnfs_free_lseg_list(&head);
                        nfs_commit_inode(inode, 0);
                } else
                        spin_unlock(&inode->i_lock);
                break;
        case -NFS4ERR_OLD_STATEID:
                spin_lock(&inode->i_lock);
                if (pnfs_layout_is_valid(lo) &&
                    nfs4_stateid_match_other(&data->args.errors[0].stateid,
                                             &lo->plh_stateid)) {
                        /* Do we need to delay before resending? */
                        if (!nfs4_stateid_is_newer(&lo->plh_stateid,
                                                   &data->args.errors[0].stateid))
                                rpc_delay(task, HZ);
                        rpc_restart_call_prepare(task);
                }
                spin_unlock(&inode->i_lock);
                break;
        case -ENOTSUPP:
        case -EOPNOTSUPP:
                NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
        }

        trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
                               task->tk_status);
}

static void
nfs42_layouterror_release(void *calldata)
{
        struct nfs42_layouterror_data *data = calldata;

        nfs42_free_layouterror_data(data);
}

static const struct rpc_call_ops nfs42_layouterror_ops = {
        .rpc_call_prepare = nfs42_layouterror_prepare,
        .rpc_call_done = nfs42_layouterror_done,
        .rpc_release = nfs42_layouterror_release,
};

int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
                           const struct nfs42_layout_error *errors, size_t n)
{
        struct inode *inode = lseg->pls_layout->plh_inode;
        struct nfs42_layouterror_data *data;
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
        };
        struct rpc_task_setup task_setup = {
                .rpc_message = &msg,
                .callback_ops = &nfs42_layouterror_ops,
                .flags = RPC_TASK_ASYNC,
        };
        unsigned int i;

        if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
                return -EOPNOTSUPP;
        if (n > NFS42_LAYOUTERROR_MAX)
                return -EINVAL;
        data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
        if (!data)
                return -ENOMEM;
        for (i = 0; i < n; i++) {
                data->args.errors[i] = errors[i];
                data->args.num_errors++;
                data->res.num_errors++;
        }
        msg.rpc_argp = &data->args;
        msg.rpc_resp = &data->res;
        task_setup.callback_data = data;
        task_setup.rpc_client = NFS_SERVER(inode)->client;
        nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
        task = rpc_run_task(&task_setup);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);

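/*
 * CLONE: ask the server to reflink a byte range from the source file into
 * the destination.  On success the destination's page cache is invalidated
 * over the cloned range and its cached attributes are refreshed.
 */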
static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
                struct file *dst_f, struct nfs_lock_context *src_lock,
                struct nfs_lock_context *dst_lock, loff_t src_offset,
                loff_t dst_offset, loff_t count)
{
        struct inode *src_inode = file_inode(src_f);
        struct inode *dst_inode = file_inode(dst_f);
        struct nfs_server *server = NFS_SERVER(dst_inode);
        struct nfs42_clone_args args = {
                .src_fh = NFS_FH(src_inode),
                .dst_fh = NFS_FH(dst_inode),
                .src_offset = src_offset,
                .dst_offset = dst_offset,
                .count = count,
                .dst_bitmask = server->cache_consistency_bitmask,
        };
        struct nfs42_clone_res res = {
                .server = server,
        };
        int status;

        msg->rpc_argp = &args;
        msg->rpc_resp = &res;

        status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
                                     src_lock, FMODE_READ);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }
        status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
                                     dst_lock, FMODE_WRITE);
        if (status) {
                if (status == -EAGAIN)
                        status = -NFS4ERR_BAD_STATEID;
                return status;
        }

        res.dst_fattr = nfs_alloc_fattr();
        if (!res.dst_fattr)
                return -ENOMEM;

        status = nfs4_call_sync(server->client, server, msg,
                                &args.seq_args, &res.seq_res, 0);
        trace_nfs4_clone(src_inode, dst_inode, &args, status);
        if (status == 0) {
                nfs42_copy_dest_done(dst_inode, dst_offset, count);
                status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
        }

        kfree(res.dst_fattr);
        return status;
}

int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
                     loff_t src_offset, loff_t dst_offset, loff_t count)
{
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
        };
        struct inode *inode = file_inode(src_f);
        struct nfs_server *server = NFS_SERVER(file_inode(src_f));
        struct nfs_lock_context *src_lock;
        struct nfs_lock_context *dst_lock;
        struct nfs4_exception src_exception = { };
        struct nfs4_exception dst_exception = { };
        int err, err2;

        if (!nfs_server_capable(inode, NFS_CAP_CLONE))
                return -EOPNOTSUPP;

        src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
        if (IS_ERR(src_lock))
                return PTR_ERR(src_lock);

        src_exception.inode = file_inode(src_f);
        src_exception.state = src_lock->open_context->state;

        dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
        if (IS_ERR(dst_lock)) {
                err = PTR_ERR(dst_lock);
                goto out_put_src_lock;
        }

        dst_exception.inode = file_inode(dst_f);
        dst_exception.state = dst_lock->open_context->state;

        do {
                err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
                                        src_offset, dst_offset, count);
                if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
                        NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
                        err = -EOPNOTSUPP;
                        break;
                }

                err2 = nfs4_handle_exception(server, err, &src_exception);
                err = nfs4_handle_exception(server, err, &dst_exception);
                if (!err)
                        err = err2;
        } while (src_exception.retry || dst_exception.retry);

        nfs_put_lock_context(dst_lock);
out_put_src_lock:
        nfs_put_lock_context(src_lock);
        return err;
}

#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)

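/*
 * NFSv4.2 extended attribute operations: SETXATTR, GETXATTR, LISTXATTRS
 * and REMOVEXATTR.  xattr values are carried in page arrays; SETXATTR and
 * REMOVEXATTR update the cached change attribute on success so that
 * attribute caching stays consistent.
 */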
static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs42_removexattrargs args = {
                .fh = NFS_FH(inode),
                .xattr_name = name,
        };
        struct nfs42_removexattrres res;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
        int ret;
        unsigned long timestamp = jiffies;

        ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
                             &res.seq_res, 1);
        if (!ret)
                nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

        return ret;
}

static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
                                const void *buf, size_t buflen, int flags)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct page *pages[NFS4XATTR_MAXPAGES];
        struct nfs42_setxattrargs arg = {
                .fh = NFS_FH(inode),
                .xattr_pages = pages,
                .xattr_len = buflen,
                .xattr_name = name,
                .xattr_flags = flags,
        };
        struct nfs42_setxattrres res;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
                .rpc_argp = &arg,
                .rpc_resp = &res,
        };
        int ret, np;
        unsigned long timestamp = jiffies;

        if (buflen > server->sxasize)
                return -ERANGE;

        if (buflen > 0) {
                np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
                if (np < 0)
                        return np;
        } else
                np = 0;

        ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
                             &res.seq_res, 1);

        for (; np > 0; np--)
                put_page(pages[np - 1]);

        if (!ret)
                nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

        return ret;
}

static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
                                    void *buf, size_t buflen, struct page **pages,
                                    size_t plen)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs42_getxattrargs arg = {
                .fh = NFS_FH(inode),
                .xattr_name = name,
        };
        struct nfs42_getxattrres res;
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
                .rpc_argp = &arg,
                .rpc_resp = &res,
        };
        ssize_t ret;

        arg.xattr_len = plen;
        arg.xattr_pages = pages;

        ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
                             &res.seq_res, 0);
        if (ret < 0)
                return ret;

        /*
         * Normally, the caching is done one layer up, but for successful
         * RPCs, always cache the result here, even if the caller was
         * just querying the length, or if the reply was too big for
         * the caller. This avoids a second RPC in the case of the
         * common query-alloc-retrieve cycle for xattrs.
         *
         * Note that xattr_len is always capped to XATTR_SIZE_MAX.
         */

        nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);

        if (buflen) {
                if (res.xattr_len > buflen)
                        return -ERANGE;
                _copy_from_pages(buf, pages, 0, res.xattr_len);
        }

        return res.xattr_len;
}

static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
                                      size_t buflen, u64 *cookiep, bool *eofp)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct page **pages;
        struct nfs42_listxattrsargs arg = {
                .fh = NFS_FH(inode),
                .cookie = *cookiep,
        };
        struct nfs42_listxattrsres res = {
                .eof = false,
                .xattr_buf = buf,
                .xattr_len = buflen,
        };
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
                .rpc_argp = &arg,
                .rpc_resp = &res,
        };
        u32 xdrlen;
        int ret, np, i;


        ret = -ENOMEM;
        res.scratch = alloc_page(GFP_KERNEL);
        if (!res.scratch)
                goto out;

        xdrlen = nfs42_listxattr_xdrsize(buflen);
        if (xdrlen > server->lxasize)
                xdrlen = server->lxasize;
        np = xdrlen / PAGE_SIZE + 1;

        pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                goto out_free_scratch;
        for (i = 0; i < np; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto out_free_pages;
        }

        arg.xattr_pages = pages;
        arg.count = xdrlen;

        ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
                             &res.seq_res, 0);

        if (ret >= 0) {
                ret = res.copied;
                *cookiep = res.cookie;
                *eofp = res.eof;
        }

out_free_pages:
        while (--np >= 0) {
                if (pages[np])
                        __free_page(pages[np]);
        }
        kfree(pages);
out_free_scratch:
        __free_page(res.scratch);
out:
        return ret;

}

ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
                            void *buf, size_t buflen)
{
        struct nfs4_exception exception = { };
        ssize_t err, np, i;
        struct page **pages;

        np = nfs_page_array_len(0, buflen ?: XATTR_SIZE_MAX);
        pages = kmalloc_array(np, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        for (i = 0; i < np; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i]) {
                        np = i;         /* only pages[0..i-1] were allocated */
                        err = -ENOMEM;
                        goto out;
                }
        }

        /*
         * The GETXATTR op has no length field in the call, and the
         * xattr data is at the end of the reply.
         *
         * There is no downside in using the page-aligned length. It will
         * allow receiving and caching xattrs that are too large for the
         * caller but still fit in the page-rounded value.
         */
        do {
                err = _nfs42_proc_getxattr(inode, name, buf, buflen,
                                           pages, np * PAGE_SIZE);
                if (err >= 0)
                        break;
                err = nfs4_handle_exception(NFS_SERVER(inode), err,
                                            &exception);
        } while (exception.retry);

out:
        while (--np >= 0)
                __free_page(pages[np]);
        kfree(pages);

        return err;
}

int nfs42_proc_setxattr(struct inode *inode, const char *name,
                        const void *buf, size_t buflen, int flags)
{
        struct nfs4_exception exception = { };
        int err;

        do {
                err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
                if (!err)
                        break;
                err = nfs4_handle_exception(NFS_SERVER(inode), err,
                                            &exception);
        } while (exception.retry);

        return err;
}

ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
                              size_t buflen, u64 *cookiep, bool *eofp)
{
        struct nfs4_exception exception = { };
        ssize_t err;

        do {
                err = _nfs42_proc_listxattrs(inode, buf, buflen,
                                             cookiep, eofp);
                if (err >= 0)
                        break;
                err = nfs4_handle_exception(NFS_SERVER(inode), err,
                                            &exception);
        } while (exception.retry);

        return err;
}

int nfs42_proc_removexattr(struct inode *inode, const char *name)
{
        struct nfs4_exception exception = { };
        int err;

        do {
                err = _nfs42_proc_removexattr(inode, name);
                if (!err)
                        break;
                err = nfs4_handle_exception(NFS_SERVER(inode), err,
                                            &exception);
        } while (exception.retry);

        return err;
}