// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
#include <linux/fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs42.h"
#include "iostat.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);

static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
	struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
	unsigned short port = 2049;

	rcu_read_lock();
	naddr->netid_len = scnprintf(naddr->netid,
				     sizeof(naddr->netid), "%s",
				     rpc_peeraddr2str(clp->cl_rpcclient,
						      RPC_DISPLAY_NETID));
	naddr->addr_len = scnprintf(naddr->addr,
				    sizeof(naddr->addr),
				    "%s.%u.%u",
				    rpc_peeraddr2str(clp->cl_rpcclient,
						     RPC_DISPLAY_ADDR),
				    port >> 8, port & 255);
	rcu_read_unlock();
}

static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	u32 bitmask[NFS_BITMASK_SZ];
	struct nfs42_falloc_args args = {
		.falloc_fh = NFS_FH(inode),
		.falloc_offset = offset,
		.falloc_length = len,
		.falloc_bitmask = bitmask,
	};
	struct nfs42_falloc_res res = {
		.falloc_server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
				     lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask, inode,
			 NFS_INO_INVALID_BLOCKS);

	res.falloc_fattr = nfs_alloc_fattr();
	if (!res.falloc_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode_force_wcc(inode,
							    res.falloc_fattr);

	if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
		trace_nfs4_fallocate(inode, &args, status);
	else
		trace_nfs4_deallocate(inode, &args, status);
	kfree(res.falloc_fattr);
	return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	int err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = inode;
	exception.state = lock->open_context->state;

	err = nfs_sync_inode(inode);
	if (err)
		goto out;

	do {
		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	nfs_put_lock_context(lock);
	return err;
}

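/*
 * nfs42_proc_allocate - preallocate storage for a file range with the
 * NFSv4.2 ALLOCATE operation. The inode lock serialises against other
 * modifications, and NFS_CAP_ALLOCATE is cleared if the server reports
 * that the operation is unsupported.
 */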
int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

	inode_unlock(inode);
	return err;
}

int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0)
		truncate_pagecache_range(inode, offset, (offset + len) - 1);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;

	inode_unlock(inode);
	return err;
}

static int handle_async_copy(struct nfs42_copy_res *res,
			     struct nfs_server *dst_server,
			     struct nfs_server *src_server,
			     struct file *src,
			     struct file *dst,
			     nfs4_stateid *src_stateid,
			     bool *restart)
{
	struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter;
	int status = NFS4_OK;
	struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
	struct nfs_open_context *src_ctx = nfs_file_open_context(src);

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;

	spin_lock(&dst_server->nfs_client->cl_lock);
	list_for_each_entry(iter,
			    &dst_server->nfs_client->pending_cb_stateids,
			    copies) {
		if (memcmp(&res->write_res.stateid, &iter->stateid,
			   NFS4_STATEID_SIZE))
			continue;
		tmp_copy = iter;
		list_del(&iter->copies);
		break;
	}
	if (tmp_copy) {
		spin_unlock(&dst_server->nfs_client->cl_lock);
		kfree(copy);
		copy = tmp_copy;
		goto out;
	}

	memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
	init_completion(&copy->completion);
	copy->parent_dst_state = dst_ctx->state;
	copy->parent_src_state = src_ctx->state;

	list_add_tail(&copy->copies, &dst_server->ss_copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);

	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_add_tail(&copy->src_copies, &src_server->ss_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}

	status = wait_for_completion_interruptible(&copy->completion);
	spin_lock(&dst_server->nfs_client->cl_lock);
	list_del_init(&copy->copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);
	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_del_init(&copy->src_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}
	if (status == -ERESTARTSYS) {
		goto out_cancel;
	} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
		status = -EAGAIN;
		*restart = true;
		goto out_cancel;
	}
out:
	res->write_res.count = copy->count;
	memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
	status = -copy->error;

out_free:
	kfree(copy);
	return status;
out_cancel:
	nfs42_do_offload_cancel_async(dst, &copy->stateid);
	if (!nfs42_files_from_same_server(src, dst))
		nfs42_do_offload_cancel_async(src, src_stateid);
	goto out_free;
}

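/*
 * Commit data written by an asynchronous COPY and compare the COMMIT
 * write verifier against the one returned in the COPY result. A
 * mismatch is reported as -EAGAIN so the caller can redrive the copy.
 */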
static int process_copy_commit(struct file *dst, loff_t pos_dst,
			       struct nfs42_copy_res *res)
{
	struct nfs_commitres cres;
	int status = -ENOMEM;

	cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
	if (!cres.verf)
		goto out;

	status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
	if (status)
		goto out_free;
	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &cres.verf->verifier)) {
		dprintk("commit verf differs from copy verf\n");
		status = -EAGAIN;
	}
out_free:
	kfree(cres.verf);
out:
	return status;
}

/**
 * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
 * @inode: pointer to destination inode
 * @pos: destination offset
 * @len: copy length
 *
 * Punch a hole in the inode page cache, so that the NFS client will
 * know to retrieve new data.
 * Update the file size if necessary, and then mark the inode as having
 * invalid cached values for change attribute, ctime, mtime and space used.
 */
static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
{
	loff_t newsize = pos + len;
	loff_t end = newsize - 1;

	WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
				pos >> PAGE_SHIFT, end >> PAGE_SHIFT));

	spin_lock(&inode->i_lock);
	if (newsize > i_size_read(inode))
		i_size_write(inode, newsize);
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_MTIME |
				     NFS_INO_INVALID_BLOCKS);
	spin_unlock(&inode->i_lock);
}

static ssize_t _nfs42_proc_copy(struct file *src,
				struct nfs_lock_context *src_lock,
				struct file *dst,
				struct nfs_lock_context *dst_lock,
				struct nfs42_copy_args *args,
				struct nfs42_copy_res *res,
				struct nl4_server *nss,
				nfs4_stateid *cnr_stateid,
				bool *restart)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	struct inode *dst_inode = file_inode(dst);
	struct inode *src_inode = file_inode(src);
	struct nfs_server *dst_server = NFS_SERVER(dst_inode);
	struct nfs_server *src_server = NFS_SERVER(src_inode);
	loff_t pos_src = args->src_pos;
	loff_t pos_dst = args->dst_pos;
	size_t count = args->count;
	ssize_t status;

	if (nss) {
		args->cp_src = nss;
		nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
	} else {
		status = nfs4_set_rw_stateid(&args->src_stateid,
				src_lock->open_context, src_lock, FMODE_READ);
		if (status) {
			if (status == -EAGAIN)
				status = -NFS4ERR_BAD_STATEID;
			return status;
		}
	}
	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
			pos_src, pos_src + (loff_t)count - 1);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_sync_inode(dst_inode);
	if (status)
		return status;

	res->commit_res.verf = NULL;
	if (args->sync) {
		res->commit_res.verf =
			kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
		if (!res->commit_res.verf)
			return -ENOMEM;
	}
	set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
		&src_lock->open_context->state->flags);
	set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
		&dst_lock->open_context->state->flags);

	status = nfs4_call_sync(dst_server->client, dst_server, &msg,
				&args->seq_args, &res->seq_res, 0);
	trace_nfs4_copy(src_inode, dst_inode, args, res, nss, status);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_COPY;
	if (status)
		goto out;

	if (args->sync &&
	    nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &res->commit_res.verf->verifier)) {
		status = -EAGAIN;
		goto out;
	}

	if (!res->synchronous) {
		status = handle_async_copy(res, dst_server, src_server, src,
					   dst, &args->src_stateid, restart);
		if (status)
			goto out;
	}

	if ((!res->synchronous || !args->sync) &&
	    res->write_res.verifier.committed != NFS_FILE_SYNC) {
		status = process_copy_commit(dst, pos_dst, res);
		if (status)
			goto out;
	}

	nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
	nfs_invalidate_atime(src_inode);
	status = res->write_res.count;
out:
	if (args->sync)
		kfree(res->commit_res.verf);
	return status;
}

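/*
 * nfs42_proc_copy - offload a copy of @count bytes from @src to @dst
 * using the NFSv4.2 COPY operation. The request is retried under the
 * standard NFSv4 exception handling; -NFS4ERR_OFFLOAD_NO_REQS causes a
 * retry as a synchronous copy, and unsupported or denied inter-server
 * copies are cancelled and converted to -EOPNOTSUPP.
 */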
ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
			struct file *dst, loff_t pos_dst, size_t count,
			struct nl4_server *nss,
			nfs4_stateid *cnr_stateid, bool sync)
{
	struct nfs_server *server = NFS_SERVER(file_inode(dst));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs42_copy_args args = {
		.src_fh = NFS_FH(file_inode(src)),
		.src_pos = pos_src,
		.dst_fh = NFS_FH(file_inode(dst)),
		.dst_pos = pos_dst,
		.count = count,
		.sync = sync,
	};
	struct nfs42_copy_res res;
	struct nfs4_exception src_exception = {
		.inode = file_inode(src),
		.stateid = &args.src_stateid,
	};
	struct nfs4_exception dst_exception = {
		.inode = file_inode(dst),
		.stateid = &args.dst_stateid,
	};
	ssize_t err, err2;
	bool restart = false;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.state = dst_lock->open_context->state;

	do {
		inode_lock(file_inode(dst));
		err = _nfs42_proc_copy(src, src_lock,
				       dst, dst_lock,
				       &args, &res,
				       nss, cnr_stateid, &restart);
		inode_unlock(file_inode(dst));

		if (err >= 0)
			break;
		if (err == -ENOTSUPP &&
		    nfs42_files_from_same_server(src, dst)) {
			err = -EOPNOTSUPP;
			break;
		} else if (err == -EAGAIN) {
			if (!restart) {
				dst_exception.retry = 1;
				continue;
			}
			break;
		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
			args.sync = true;
			dst_exception.retry = 1;
			continue;
		} else if ((err == -ESTALE ||
			    err == -NFS4ERR_OFFLOAD_DENIED ||
			    err == -ENOTSUPP) &&
			   !nfs42_files_from_same_server(src, dst)) {
			nfs42_do_offload_cancel_async(src, &args.src_stateid);
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

struct nfs42_offloadcancel_data {
	struct nfs_server *seq_server;
	struct nfs42_offload_status_args args;
	struct nfs42_offload_status_res res;
};

static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    &data->args.osa_seq_args,
			    &data->res.osr_seq_res, task);
}

static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	trace_nfs4_offload_cancel(&data->args, task->tk_status);
	nfs41_sequence_done(task, &data->res.osr_seq_res);
	if (task->tk_status &&
	    nfs4_async_handle_error(task, data->seq_server, NULL,
				    NULL) == -EAGAIN)
		rpc_restart_call_prepare(task);
}

static void nfs42_free_offloadcancel_data(void *data)
{
	kfree(data);
}

static const struct rpc_call_ops nfs42_offload_cancel_ops = {
	.rpc_call_prepare = nfs42_offload_cancel_prepare,
	.rpc_call_done = nfs42_offload_cancel_done,
	.rpc_release = nfs42_free_offloadcancel_data,
};

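/*
 * Send an asynchronous OFFLOAD_CANCEL for @stateid to the destination
 * server. The RPC runs on the nfsiod workqueue, and NFS_CAP_OFFLOAD_CANCEL
 * is cleared if the server does not support the operation.
 */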
static int nfs42_do_offload_cancel_async(struct file *dst,
					 nfs4_stateid *stateid)
{
	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
	struct nfs42_offloadcancel_data *data = NULL;
	struct nfs_open_context *ctx = nfs_file_open_context(dst);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = dst_server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_offload_cancel_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
		return -EOPNOTSUPP;

	data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	data->seq_server = dst_server;
	data->args.osa_src_fh = NFS_FH(file_inode(dst));
	memcpy(&data->args.osa_stateid, stateid,
	       sizeof(data->args.osa_stateid));
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
			   1, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
	rpc_put_task(task);
	return status;
}

static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
				   struct nfs42_copy_notify_args *args,
				   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	int status;
	struct nfs_open_context *ctx;
	struct nfs_lock_context *l_ctx;

	ctx = get_nfs_open_context(nfs_file_open_context(src));
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		status = PTR_ERR(l_ctx);
		goto out;
	}

	status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
				     FMODE_READ);
	nfs_put_lock_context(l_ctx);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		goto out;
	}

	status = nfs4_call_sync(src_server->client, src_server, &msg,
				&args->cna_seq_args, &res->cnr_seq_res, 0);
	trace_nfs4_copy_notify(file_inode(src), args, res, status);
	if (status == -ENOTSUPP)
		src_server->caps &= ~NFS_CAP_COPY_NOTIFY;

out:
	put_nfs_open_context(nfs_file_open_context(src));
	return status;
}

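/*
 * nfs42_proc_copy_notify - send COPY_NOTIFY to the source server to
 * authorise a server-to-server copy to the network address of @dst.
 * Returns -EOPNOTSUPP if the source server does not support COPY_NOTIFY.
 */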
int nfs42_proc_copy_notify(struct file *src, struct file *dst,
			   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct nfs42_copy_notify_args *args;
	struct nfs4_exception exception = {
		.inode = file_inode(src),
	};
	int status;

	if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
		return -EOPNOTSUPP;

	args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_KERNEL);
	if (args == NULL)
		return -ENOMEM;

	args->cna_src_fh = NFS_FH(file_inode(src));
	args->cna_dst.nl4_type = NL4_NETADDR;
	nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
	exception.stateid = &args->cna_src_stateid;

	do {
		status = _nfs42_proc_copy_notify(src, dst, args, res);
		if (status == -ENOTSUPP) {
			status = -EOPNOTSUPP;
			goto out;
		}
		status = nfs4_handle_exception(src_server, status, &exception);
	} while (exception.retry);

out:
	kfree(args);
	return status;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, int whence)
{
	struct inode *inode = file_inode(filep);
	struct nfs42_seek_args args = {
		.sa_fh = NFS_FH(inode),
		.sa_offset = offset,
		.sa_what = (whence == SEEK_HOLE) ?
			NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
	};
	struct nfs42_seek_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
		return -ENOTSUPP;

	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
				     lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
						  offset, LLONG_MAX);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	trace_nfs4_llseek(inode, &args, &res, status);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_SEEK;
	if (status)
		return status;

	if (whence == SEEK_DATA && res.sr_eof)
		return -NFS4ERR_NXIO;
	else
		return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

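/*
 * nfs42_proc_llseek - implement SEEK_HOLE/SEEK_DATA with the NFSv4.2
 * SEEK operation, retrying under the standard NFSv4 exception handling
 * and mapping -ENOTSUPP to -EOPNOTSUPP for callers.
 */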
loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	loff_t err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_llseek(filep, lock, offset, whence);
		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}


static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
	}

	trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
			devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}

	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
	smp_mb__after_atomic();
	nfs_iput_and_deactive(data->inode);
	kfree(data->args.devinfo);
	kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
	.rpc_call_prepare = nfs42_layoutstat_prepare,
	.rpc_call_done = nfs42_layoutstat_done,
	.rpc_release = nfs42_layoutstat_release,
};

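/*
 * nfs42_proc_layoutstats_generic - send a LAYOUTSTATS report for
 * @data->args.inode as an asynchronous RPC. The inode reference taken
 * here is dropped by nfs42_layoutstat_release() when the task completes.
 */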
int nfs42_proc_layoutstats_generic(struct nfs_server *server,
				   struct nfs42_layoutstat_data *data)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_layoutstat_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;

	data->inode = nfs_igrab_and_active(data->args.inode);
	if (!data->inode) {
		nfs42_layoutstat_release(data);
		return -EAGAIN;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

static struct nfs42_layouterror_data *
nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
{
	struct nfs42_layouterror_data *data;
	struct inode *inode = lseg->pls_layout->plh_inode;

	data = kzalloc(sizeof(*data), gfp_flags);
	if (data) {
		data->args.inode = data->inode = nfs_igrab_and_active(inode);
		if (data->inode) {
			data->lseg = pnfs_get_lseg(lseg);
			if (data->lseg)
				return data;
			nfs_iput_and_deactive(data->inode);
		}
		kfree(data);
	}
	return NULL;
}

static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
	pnfs_put_lseg(data->lseg);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}

static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
	unsigned i;

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	for (i = 0; i < data->args.num_errors; i++)
		nfs4_stateid_copy(&data->args.errors[i].stateid,
				  &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.errors[0].stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.errors[0].stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.errors[0].stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
	}

	trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
			       task->tk_status);
}

static void
nfs42_layouterror_release(void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;

	nfs42_free_layouterror_data(data);
}

static const struct rpc_call_ops nfs42_layouterror_ops = {
	.rpc_call_prepare = nfs42_layouterror_prepare,
	.rpc_call_done = nfs42_layouterror_done,
	.rpc_release = nfs42_layouterror_release,
};

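/*
 * nfs42_proc_layouterror - report up to NFS42_LAYOUTERROR_MAX layout
 * errors against @lseg with the LAYOUTERROR operation. The call is
 * asynchronous; the per-call data is released by the rpc_release
 * callback.
 */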
int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
			   const struct nfs42_layout_error *errors, size_t n)
{
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct nfs42_layouterror_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
	};
	struct rpc_task_setup task_setup = {
		.rpc_message = &msg,
		.callback_ops = &nfs42_layouterror_ops,
		.flags = RPC_TASK_ASYNC,
	};
	unsigned int i;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
		return -EOPNOTSUPP;
	if (n > NFS42_LAYOUTERROR_MAX)
		return -EINVAL;
	data = nfs42_alloc_layouterror_data(lseg, nfs_io_gfp_mask());
	if (!data)
		return -ENOMEM;
	for (i = 0; i < n; i++) {
		data->args.errors[i] = errors[i];
		data->args.num_errors++;
		data->res.num_errors++;
	}
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup.callback_data = data;
	task_setup.rpc_client = NFS_SERVER(inode)->client;
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);

static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
		struct file *dst_f, struct nfs_lock_context *src_lock,
		struct nfs_lock_context *dst_lock, loff_t src_offset,
		loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	__u32 dst_bitmask[NFS_BITMASK_SZ];
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = dst_bitmask,
	};
	struct nfs42_clone_res res = {
		.server	= server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
			src_lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}
	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
			dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;

	nfs4_bitmask_set(dst_bitmask, server->cache_consistency_bitmask,
			 dst_inode, NFS_INO_INVALID_BLOCKS);

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	trace_nfs4_clone(src_inode, dst_inode, &args, status);
	if (status == 0) {
		nfs42_copy_dest_done(dst_inode, dst_offset, count);
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
	}

	kfree(res.dst_fattr);
	return status;
}

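/*
 * nfs42_proc_clone - clone a range from @src_f to @dst_f using the
 * NFSv4.2 CLONE operation, retrying under the standard NFSv4 exception
 * handling. NFS_CAP_CLONE is cleared and -EOPNOTSUPP returned if the
 * server does not support CLONE.
 */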
int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)

static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_removexattrargs args = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_removexattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret;
	unsigned long timestamp = jiffies;

	ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
			     &res.seq_res, 1);
	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

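/*
 * Issue a SETXATTR request. The attribute value is mapped into a page
 * vector for the XDR layer; values larger than the server's advertised
 * maximum xattr size (server->sxasize) are rejected with -ERANGE.
 */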
static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
				const void *buf, size_t buflen, int flags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4XATTR_MAXPAGES];
	struct nfs42_setxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_pages = pages,
		.xattr_len = buflen,
		.xattr_name = name,
		.xattr_flags = flags,
	};
	struct nfs42_setxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int ret, np;
	unsigned long timestamp = jiffies;

	if (buflen > server->sxasize)
		return -ERANGE;

	if (buflen > 0) {
		np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
		if (np < 0)
			return np;
	} else
		np = 0;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 1);

	for (; np > 0; np--)
		put_page(pages[np - 1]);

	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
				void *buf, size_t buflen, struct page **pages,
				size_t plen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_getxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_getxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	ssize_t ret;

	arg.xattr_len = plen;
	arg.xattr_pages = pages;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);
	if (ret < 0)
		return ret;

	/*
	 * Normally, the caching is done one layer up, but for successful
	 * RPCs, always cache the result here, even if the caller was
	 * just querying the length, or if the reply was too big for
	 * the caller. This avoids a second RPC in the case of the
	 * common query-alloc-retrieve cycle for xattrs.
	 *
	 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
	 */

	nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);

	if (buflen) {
		if (res.xattr_len > buflen)
			return -ERANGE;
		_copy_from_pages(buf, pages, 0, res.xattr_len);
	}

	return res.xattr_len;
}

static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
				      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page **pages;
	struct nfs42_listxattrsargs arg = {
		.fh = NFS_FH(inode),
		.cookie = *cookiep,
	};
	struct nfs42_listxattrsres res = {
		.eof = false,
		.xattr_buf = buf,
		.xattr_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	u32 xdrlen;
	int ret, np, i;


	ret = -ENOMEM;
	res.scratch = alloc_page(GFP_KERNEL);
	if (!res.scratch)
		goto out;

	xdrlen = nfs42_listxattr_xdrsize(buflen);
	if (xdrlen > server->lxasize)
		xdrlen = server->lxasize;
	np = xdrlen / PAGE_SIZE + 1;

	pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_free_scratch;
	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free_pages;
	}

	arg.xattr_pages = pages;
	arg.count = xdrlen;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);

	if (ret >= 0) {
		ret = res.copied;
		*cookiep = res.cookie;
		*eofp = res.eof;
	}

out_free_pages:
	while (--np >= 0) {
		if (pages[np])
			__free_page(pages[np]);
	}
	kfree(pages);
out_free_scratch:
	__free_page(res.scratch);
out:
	return ret;

}

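/*
 * nfs42_proc_getxattr - retrieve the value of an extended attribute.
 * A page array large enough for @buflen (or XATTR_SIZE_MAX when only
 * the length is being probed) is allocated up front, and the request
 * is retried under the standard NFSv4 exception handling.
 */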
ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
			    void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	ssize_t err, np, i;
	struct page **pages;

	np = nfs_page_array_len(0, buflen ?: XATTR_SIZE_MAX);
	pages = kmalloc_array(np, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			np = i + 1;
			err = -ENOMEM;
			goto out;
		}
	}

	/*
	 * The GETXATTR op has no length field in the call, and the
	 * xattr data is at the end of the reply.
	 *
	 * There is no downside in using the page-aligned length. It will
	 * allow receiving and caching xattrs that are too large for the
	 * caller but still fit in the page-rounded value.
	 */
	do {
		err = _nfs42_proc_getxattr(inode, name, buf, buflen,
					   pages, np * PAGE_SIZE);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

out:
	while (--np >= 0)
		__free_page(pages[np]);
	kfree(pages);

	return err;
}

int nfs42_proc_setxattr(struct inode *inode, const char *name,
			const void *buf, size_t buflen, int flags)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}

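/*
 * nfs42_proc_listxattrs - fetch one batch of extended attribute names
 * with LISTXATTRS, updating the caller's @cookiep and @eofp for the
 * next call. Retried under the standard NFSv4 exception handling.
 */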
ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
			      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs4_exception exception = { };
	ssize_t err;

	do {
		err = _nfs42_proc_listxattrs(inode, buf, buflen,
					     cookiep, eofp);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}

int nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_removexattr(inode, name);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}