// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
#include <linux/fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs42.h"
#include "iostat.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);

static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
	struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
	unsigned short port = 2049;

	rcu_read_lock();
	naddr->netid_len = scnprintf(naddr->netid,
				     sizeof(naddr->netid), "%s",
				     rpc_peeraddr2str(clp->cl_rpcclient,
						      RPC_DISPLAY_NETID));
	naddr->addr_len = scnprintf(naddr->addr,
				    sizeof(naddr->addr),
				    "%s.%u.%u",
				    rpc_peeraddr2str(clp->cl_rpcclient,
						     RPC_DISPLAY_ADDR),
				    port >> 8, port & 255);
	rcu_read_unlock();
}

static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	u32 bitmask[NFS_BITMASK_SZ];
	struct nfs42_falloc_args args = {
		.falloc_fh = NFS_FH(inode),
		.falloc_offset = offset,
		.falloc_length = len,
		.falloc_bitmask = bitmask,
	};
	struct nfs42_falloc_res res = {
		.falloc_server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
			lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask, inode,
			 NFS_INO_INVALID_BLOCKS);

	res.falloc_fattr = nfs_alloc_fattr();
	if (!res.falloc_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode_force_wcc(inode,
							    res.falloc_fattr);

	if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
		trace_nfs4_fallocate(inode, &args, status);
	else
		trace_nfs4_deallocate(inode, &args, status);
	kfree(res.falloc_fattr);
	return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	int err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = inode;
	exception.state = lock->open_context->state;

	err = nfs_sync_inode(inode);
	if (err)
		goto out;

	do {
		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	nfs_put_lock_context(lock);
	return err;
}

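/*
 * fallocate(2) entry points. ALLOCATE preallocates space in the given
 * range, DEALLOCATE punches a hole. Both serialize against other inode
 * modifications with the inode lock, and clear the corresponding
 * capability flag if the server reports the operation as unsupported.
 */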
int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

	inode_unlock(inode);
	return err;
}

int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0)
		truncate_pagecache_range(inode, offset, (offset + len) - 1);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;

	inode_unlock(inode);
	return err;
}

static int handle_async_copy(struct nfs42_copy_res *res,
			     struct nfs_server *dst_server,
			     struct nfs_server *src_server,
			     struct file *src,
			     struct file *dst,
			     nfs4_stateid *src_stateid,
			     bool *restart)
{
	struct nfs4_copy_state *copy, *tmp_copy;
	int status = NFS4_OK;
	bool found_pending = false;
	struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
	struct nfs_open_context *src_ctx = nfs_file_open_context(src);

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return -ENOMEM;

	spin_lock(&dst_server->nfs_client->cl_lock);
	list_for_each_entry(tmp_copy,
			    &dst_server->nfs_client->pending_cb_stateids,
			    copies) {
		if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
			   NFS4_STATEID_SIZE))
			continue;
		found_pending = true;
		list_del(&tmp_copy->copies);
		break;
	}
	if (found_pending) {
		spin_unlock(&dst_server->nfs_client->cl_lock);
		kfree(copy);
		copy = tmp_copy;
		goto out;
	}

	memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
	init_completion(&copy->completion);
	copy->parent_dst_state = dst_ctx->state;
	copy->parent_src_state = src_ctx->state;

	list_add_tail(&copy->copies, &dst_server->ss_copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);

	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_add_tail(&copy->src_copies, &src_server->ss_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}

	status = wait_for_completion_interruptible(&copy->completion);
	spin_lock(&dst_server->nfs_client->cl_lock);
	list_del_init(&copy->copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);
	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_del_init(&copy->src_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}
	if (status == -ERESTARTSYS) {
		goto out_cancel;
	} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
		status = -EAGAIN;
		*restart = true;
		goto out_cancel;
	}
out:
	res->write_res.count = copy->count;
	memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
	status = -copy->error;

out_free:
	kfree(copy);
	return status;
out_cancel:
	nfs42_do_offload_cancel_async(dst, &copy->stateid);
	if (!nfs42_files_from_same_server(src, dst))
		nfs42_do_offload_cancel_async(src, src_stateid);
	goto out_free;
}

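/*
 * A copy that was not committed to stable storage must be followed by a
 * COMMIT of the copied range. If the commit verifier no longer matches
 * the write verifier returned by the COPY, the server may have rebooted
 * and lost the data, so -EAGAIN is returned to make the caller redo the
 * copy.
 */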
static int process_copy_commit(struct file *dst, loff_t pos_dst,
			       struct nfs42_copy_res *res)
{
	struct nfs_commitres cres;
	int status = -ENOMEM;

	cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
	if (!cres.verf)
		goto out;

	status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
	if (status)
		goto out_free;
	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &cres.verf->verifier)) {
		dprintk("commit verf differs from copy verf\n");
		status = -EAGAIN;
	}
out_free:
	kfree(cres.verf);
out:
	return status;
}

/**
 * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
 * @inode: pointer to destination inode
 * @pos: destination offset
 * @len: copy length
 *
 * Punch a hole in the inode page cache, so that the NFS client will
 * know to retrieve new data.
 * Update the file size if necessary, and then mark the inode as having
 * invalid cached values for change attribute, ctime, mtime and space used.
 */
static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
{
	loff_t newsize = pos + len;
	loff_t end = newsize - 1;

	WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
				pos >> PAGE_SHIFT, end >> PAGE_SHIFT));

	spin_lock(&inode->i_lock);
	if (newsize > i_size_read(inode))
		i_size_write(inode, newsize);
	nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_CTIME |
				     NFS_INO_INVALID_MTIME |
				     NFS_INO_INVALID_BLOCKS);
	spin_unlock(&inode->i_lock);
}

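/*
 * Issue a single COPY operation. For an inter-server copy the caller
 * passes the source server's address (nss) and the stateid returned by
 * COPY_NOTIFY; otherwise the source stateid is derived from the open or
 * lock context. Dirty source pages are flushed and the destination is
 * synced before the call. Asynchronous copies are completed by
 * handle_async_copy(), and copies that were not written to stable
 * storage are followed by a COMMIT of the copied range.
 */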
static ssize_t _nfs42_proc_copy(struct file *src,
				struct nfs_lock_context *src_lock,
				struct file *dst,
				struct nfs_lock_context *dst_lock,
				struct nfs42_copy_args *args,
				struct nfs42_copy_res *res,
				struct nl4_server *nss,
				nfs4_stateid *cnr_stateid,
				bool *restart)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	struct inode *dst_inode = file_inode(dst);
	struct inode *src_inode = file_inode(src);
	struct nfs_server *dst_server = NFS_SERVER(dst_inode);
	struct nfs_server *src_server = NFS_SERVER(src_inode);
	loff_t pos_src = args->src_pos;
	loff_t pos_dst = args->dst_pos;
	size_t count = args->count;
	ssize_t status;

	if (nss) {
		args->cp_src = nss;
		nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
	} else {
		status = nfs4_set_rw_stateid(&args->src_stateid,
				src_lock->open_context, src_lock, FMODE_READ);
		if (status) {
			if (status == -EAGAIN)
				status = -NFS4ERR_BAD_STATEID;
			return status;
		}
	}
	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
			pos_src, pos_src + (loff_t)count - 1);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_sync_inode(dst_inode);
	if (status)
		return status;

	res->commit_res.verf = NULL;
	if (args->sync) {
		res->commit_res.verf =
			kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
		if (!res->commit_res.verf)
			return -ENOMEM;
	}
	set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
		&src_lock->open_context->state->flags);
	set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
		&dst_lock->open_context->state->flags);

	status = nfs4_call_sync(dst_server->client, dst_server, &msg,
				&args->seq_args, &res->seq_res, 0);
	trace_nfs4_copy(src_inode, dst_inode, args, res, nss, status);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_COPY;
	if (status)
		goto out;

	if (args->sync &&
	    nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &res->commit_res.verf->verifier)) {
		status = -EAGAIN;
		goto out;
	}

	if (!res->synchronous) {
		status = handle_async_copy(res, dst_server, src_server, src,
				dst, &args->src_stateid, restart);
		if (status)
			goto out;
	}

	if ((!res->synchronous || !args->sync) &&
			res->write_res.verifier.committed != NFS_FILE_SYNC) {
		status = process_copy_commit(dst, pos_dst, res);
		if (status)
			goto out;
	}

	nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
	nfs_invalidate_atime(src_inode);
	status = res->write_res.count;
out:
	if (args->sync)
		kfree(res->commit_res.verf);
	return status;
}

ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
			struct file *dst, loff_t pos_dst, size_t count,
			struct nl4_server *nss,
			nfs4_stateid *cnr_stateid, bool sync)
{
	struct nfs_server *server = NFS_SERVER(file_inode(dst));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs42_copy_args args = {
		.src_fh = NFS_FH(file_inode(src)),
		.src_pos = pos_src,
		.dst_fh = NFS_FH(file_inode(dst)),
		.dst_pos = pos_dst,
		.count = count,
		.sync = sync,
	};
	struct nfs42_copy_res res;
	struct nfs4_exception src_exception = {
		.inode = file_inode(src),
		.stateid = &args.src_stateid,
	};
	struct nfs4_exception dst_exception = {
		.inode = file_inode(dst),
		.stateid = &args.dst_stateid,
	};
	ssize_t err, err2;
	bool restart = false;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.state = dst_lock->open_context->state;

	do {
		inode_lock(file_inode(dst));
		err = _nfs42_proc_copy(src, src_lock,
				dst, dst_lock,
				&args, &res,
				nss, cnr_stateid, &restart);
		inode_unlock(file_inode(dst));

		if (err >= 0)
			break;
		if (err == -ENOTSUPP &&
		    nfs42_files_from_same_server(src, dst)) {
			err = -EOPNOTSUPP;
			break;
		} else if (err == -EAGAIN) {
			if (!restart) {
				dst_exception.retry = 1;
				continue;
			}
			break;
		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
			args.sync = true;
			dst_exception.retry = 1;
			continue;
		} else if ((err == -ESTALE ||
				err == -NFS4ERR_OFFLOAD_DENIED ||
				err == -ENOTSUPP) &&
				!nfs42_files_from_same_server(src, dst)) {
			nfs42_do_offload_cancel_async(src, &args.src_stateid);
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

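/*
 * OFFLOAD_CANCEL is sent as an asynchronous RPC to abort an in-progress
 * server-side copy, for example when the process waiting for an async
 * copy is interrupted or the copy has to be restarted.
 */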
struct nfs42_offloadcancel_data {
	struct nfs_server *seq_server;
	struct nfs42_offload_status_args args;
	struct nfs42_offload_status_res res;
};

static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    &data->args.osa_seq_args,
			    &data->res.osr_seq_res, task);
}

static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	trace_nfs4_offload_cancel(&data->args, task->tk_status);
	nfs41_sequence_done(task, &data->res.osr_seq_res);
	if (task->tk_status &&
	    nfs4_async_handle_error(task, data->seq_server, NULL,
				    NULL) == -EAGAIN)
		rpc_restart_call_prepare(task);
}

static void nfs42_free_offloadcancel_data(void *data)
{
	kfree(data);
}

static const struct rpc_call_ops nfs42_offload_cancel_ops = {
	.rpc_call_prepare = nfs42_offload_cancel_prepare,
	.rpc_call_done = nfs42_offload_cancel_done,
	.rpc_release = nfs42_free_offloadcancel_data,
};

static int nfs42_do_offload_cancel_async(struct file *dst,
					 nfs4_stateid *stateid)
{
	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
	struct nfs42_offloadcancel_data *data = NULL;
	struct nfs_open_context *ctx = nfs_file_open_context(dst);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = dst_server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_offload_cancel_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
		return -EOPNOTSUPP;

	data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;

	data->seq_server = dst_server;
	data->args.osa_src_fh = NFS_FH(file_inode(dst));
	memcpy(&data->args.osa_stateid, stateid,
	       sizeof(data->args.osa_stateid));
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
			   1, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
	rpc_put_task(task);
	return status;
}

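/*
 * COPY_NOTIFY tells the source server that a destination server,
 * identified by its network address, will read from this file on the
 * client's behalf. The stateid it returns is later passed to the
 * destination as the source stateid of an inter-server COPY.
 */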
static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
				   struct nfs42_copy_notify_args *args,
				   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	int status;
	struct nfs_open_context *ctx;
	struct nfs_lock_context *l_ctx;

	ctx = get_nfs_open_context(nfs_file_open_context(src));
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx))
		return PTR_ERR(l_ctx);

	status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
				     FMODE_READ);
	nfs_put_lock_context(l_ctx);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs4_call_sync(src_server->client, src_server, &msg,
				&args->cna_seq_args, &res->cnr_seq_res, 0);
	trace_nfs4_copy_notify(file_inode(src), args, res, status);
	if (status == -ENOTSUPP)
		src_server->caps &= ~NFS_CAP_COPY_NOTIFY;

	put_nfs_open_context(nfs_file_open_context(src));
	return status;
}

int nfs42_proc_copy_notify(struct file *src, struct file *dst,
			   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct nfs42_copy_notify_args *args;
	struct nfs4_exception exception = {
		.inode = file_inode(src),
	};
	int status;

	if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
		return -EOPNOTSUPP;

	args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
	if (args == NULL)
		return -ENOMEM;

	args->cna_src_fh = NFS_FH(file_inode(src));
	args->cna_dst.nl4_type = NL4_NETADDR;
	nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
	exception.stateid = &args->cna_src_stateid;

	do {
		status = _nfs42_proc_copy_notify(src, dst, args, res);
		if (status == -ENOTSUPP) {
			status = -EOPNOTSUPP;
			goto out;
		}
		status = nfs4_handle_exception(src_server, status, &exception);
	} while (exception.retry);

out:
	kfree(args);
	return status;
}

static loff_t _nfs42_proc_llseek(struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, int whence)
{
	struct inode *inode = file_inode(filep);
	struct nfs42_seek_args args = {
		.sa_fh = NFS_FH(inode),
		.sa_offset = offset,
		.sa_what = (whence == SEEK_HOLE) ?
			NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
	};
	struct nfs42_seek_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
		return -ENOTSUPP;

	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
			lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
			offset, LLONG_MAX);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	trace_nfs4_llseek(inode, &args, &res, status);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_SEEK;
	if (status)
		return status;

	if (whence == SEEK_DATA && res.sr_eof)
		return -NFS4ERR_NXIO;
	else
		return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	loff_t err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_llseek(filep, lock, offset, whence);
		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}

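/*
 * LAYOUTSTATS reports per-layout I/O statistics to the pNFS metadata
 * server as an asynchronous RPC. Errors that indicate a stale or
 * revoked layout tear down or invalidate the layout, and -EOPNOTSUPP
 * clears NFS_CAP_LAYOUTSTATS.
 */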
static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
	}

	trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
}

static void
nfs42_layoutstat_release(void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
			devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}

	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
	smp_mb__after_atomic();
	nfs_iput_and_deactive(data->inode);
	kfree(data->args.devinfo);
	kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
	.rpc_call_prepare = nfs42_layoutstat_prepare,
	.rpc_call_done = nfs42_layoutstat_done,
	.rpc_release = nfs42_layoutstat_release,
};

int nfs42_proc_layoutstats_generic(struct nfs_server *server,
				   struct nfs42_layoutstat_data *data)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_layoutstat_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;

	data->inode = nfs_igrab_and_active(data->args.inode);
	if (!data->inode) {
		nfs42_layoutstat_release(data);
		return -EAGAIN;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

static struct nfs42_layouterror_data *
nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
{
	struct nfs42_layouterror_data *data;
	struct inode *inode = lseg->pls_layout->plh_inode;

	data = kzalloc(sizeof(*data), gfp_flags);
	if (data) {
		data->args.inode = data->inode = nfs_igrab_and_active(inode);
		if (data->inode) {
			data->lseg = pnfs_get_lseg(lseg);
			if (data->lseg)
				return data;
			nfs_iput_and_deactive(data->inode);
		}
		kfree(data);
	}
	return NULL;
}

static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
	pnfs_put_lseg(data->lseg);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}

static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
	unsigned i;

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	for (i = 0; i < data->args.num_errors; i++)
		nfs4_stateid_copy(&data->args.errors[i].stateid,
				  &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		return;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.errors[0].stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.errors[0].stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.errors[0].stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
	}

	trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
			       task->tk_status);
}

static void
nfs42_layouterror_release(void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;

	nfs42_free_layouterror_data(data);
}

static const struct rpc_call_ops nfs42_layouterror_ops = {
	.rpc_call_prepare = nfs42_layouterror_prepare,
	.rpc_call_done = nfs42_layouterror_done,
	.rpc_release = nfs42_layouterror_release,
};

int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
			   const struct nfs42_layout_error *errors, size_t n)
{
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct nfs42_layouterror_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
	};
	struct rpc_task_setup task_setup = {
		.rpc_message = &msg,
		.callback_ops = &nfs42_layouterror_ops,
		.flags = RPC_TASK_ASYNC,
	};
	unsigned int i;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
		return -EOPNOTSUPP;
	if (n > NFS42_LAYOUTERROR_MAX)
		return -EINVAL;
	data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
	if (!data)
		return -ENOMEM;
	for (i = 0; i < n; i++) {
		data->args.errors[i] = errors[i];
		data->args.num_errors++;
		data->res.num_errors++;
	}
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup.callback_data = data;
	task_setup.rpc_client = NFS_SERVER(inode)->client;
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);

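/*
 * CLONE asks the server to clone a range of the source file into the
 * destination file without moving data through the client. On success
 * the destination's page cache is invalidated over the cloned range and
 * its attributes are refreshed from the returned post-operation
 * attributes.
 */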
static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
		struct file *dst_f, struct nfs_lock_context *src_lock,
		struct nfs_lock_context *dst_lock, loff_t src_offset,
		loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	__u32 dst_bitmask[NFS_BITMASK_SZ];
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = dst_bitmask,
	};
	struct nfs42_clone_res res = {
		.server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
			src_lock, FMODE_READ);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}
	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
			dst_lock, FMODE_WRITE);
	if (status) {
		if (status == -EAGAIN)
			status = -NFS4ERR_BAD_STATEID;
		return status;
	}

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;

	nfs4_bitmask_set(dst_bitmask, server->cache_consistency_bitmask,
			 dst_inode, NFS_INO_INVALID_BLOCKS);

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	trace_nfs4_clone(src_inode, dst_inode, &args, status);
	if (status == 0) {
		nfs42_copy_dest_done(dst_inode, dst_offset, count);
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
	}

	kfree(res.dst_fattr);
	return status;
}

int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

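/*
 * Extended attribute operations (RFC 8276): GETXATTR, SETXATTR,
 * LISTXATTRS and REMOVEXATTR. Attribute values are transferred in page
 * arrays, bounded by XATTR_SIZE_MAX.
 */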
#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)

static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_removexattrargs args = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_removexattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int ret;
	unsigned long timestamp = jiffies;

	ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
			     &res.seq_res, 1);
	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
				const void *buf, size_t buflen, int flags)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page *pages[NFS4XATTR_MAXPAGES];
	struct nfs42_setxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_pages = pages,
		.xattr_len = buflen,
		.xattr_name = name,
		.xattr_flags = flags,
	};
	struct nfs42_setxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	int ret, np;
	unsigned long timestamp = jiffies;

	if (buflen > server->sxasize)
		return -ERANGE;

	if (buflen > 0) {
		np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
		if (np < 0)
			return np;
	} else
		np = 0;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 1);

	for (; np > 0; np--)
		put_page(pages[np - 1]);

	if (!ret)
		nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);

	return ret;
}

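/*
 * GETXATTR has no length field in the request; the full attribute value
 * comes back at the end of the reply and is received into the
 * caller-supplied page array. On success it is added to the xattr cache
 * before being copied out to the caller.
 */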
static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
				void *buf, size_t buflen, struct page **pages,
				size_t plen)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_getxattrargs arg = {
		.fh = NFS_FH(inode),
		.xattr_name = name,
	};
	struct nfs42_getxattrres res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	ssize_t ret;

	arg.xattr_len = plen;
	arg.xattr_pages = pages;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);
	if (ret < 0)
		return ret;

	/*
	 * Normally, the caching is done one layer up, but for successful
	 * RPCs, always cache the result here, even if the caller was
	 * just querying the length, or if the reply was too big for
	 * the caller. This avoids a second RPC in the case of the
	 * common query-alloc-retrieve cycle for xattrs.
	 *
	 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
	 */

	nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);

	if (buflen) {
		if (res.xattr_len > buflen)
			return -ERANGE;
		_copy_from_pages(buf, pages, 0, res.xattr_len);
	}

	return res.xattr_len;
}

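/*
 * LISTXATTRS is a paged operation: the cookie returned by one call is
 * passed back in the next, and *eofp is set once the final batch of
 * names has been returned. The XDR reply is decoded into temporary
 * pages sized from the caller's buffer, capped at the server's limit.
 */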
static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
				 size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct page **pages;
	struct nfs42_listxattrsargs arg = {
		.fh = NFS_FH(inode),
		.cookie = *cookiep,
	};
	struct nfs42_listxattrsres res = {
		.eof = false,
		.xattr_buf = buf,
		.xattr_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
		.rpc_argp = &arg,
		.rpc_resp = &res,
	};
	u32 xdrlen;
	int ret, np, i;

	ret = -ENOMEM;
	res.scratch = alloc_page(GFP_KERNEL);
	if (!res.scratch)
		goto out;

	xdrlen = nfs42_listxattr_xdrsize(buflen);
	if (xdrlen > server->lxasize)
		xdrlen = server->lxasize;
	np = xdrlen / PAGE_SIZE + 1;

	pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_free_scratch;
	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free_pages;
	}

	arg.xattr_pages = pages;
	arg.count = xdrlen;

	ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
			     &res.seq_res, 0);

	if (ret >= 0) {
		ret = res.copied;
		*cookiep = res.cookie;
		*eofp = res.eof;
	}

out_free_pages:
	while (--np >= 0) {
		if (pages[np])
			__free_page(pages[np]);
	}
	kfree(pages);
out_free_scratch:
	__free_page(res.scratch);
out:
	return ret;
}

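/*
 * Public entry points for the xattr operations. Each one retries the
 * underlying call through the standard NFSv4 exception handling loop
 * until it either succeeds or hits a fatal error.
 */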
ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
			    void *buf, size_t buflen)
{
	struct nfs4_exception exception = { };
	ssize_t err, np, i;
	struct page **pages;

	np = nfs_page_array_len(0, buflen ?: XATTR_SIZE_MAX);
	pages = kmalloc_array(np, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < np; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i]) {
			np = i + 1;
			err = -ENOMEM;
			goto out;
		}
	}

	/*
	 * The GETXATTR op has no length field in the call, and the
	 * xattr data is at the end of the reply.
	 *
	 * There is no downside in using the page-aligned length. It will
	 * allow receiving and caching xattrs that are too large for the
	 * caller but still fit in the page-rounded value.
	 */
	do {
		err = _nfs42_proc_getxattr(inode, name, buf, buflen,
					   pages, np * PAGE_SIZE);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

out:
	while (--np >= 0)
		__free_page(pages[np]);
	kfree(pages);

	return err;
}

int nfs42_proc_setxattr(struct inode *inode, const char *name,
			const void *buf, size_t buflen, int flags)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}

ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
			      size_t buflen, u64 *cookiep, bool *eofp)
{
	struct nfs4_exception exception = { };
	ssize_t err;

	do {
		err = _nfs42_proc_listxattrs(inode, buf, buflen,
					     cookiep, eofp);
		if (err >= 0)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}

int nfs42_proc_removexattr(struct inode *inode, const char *name)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs42_proc_removexattr(inode, name);
		if (!err)
			break;
		err = nfs4_handle_exception(NFS_SERVER(inode), err,
					    &exception);
	} while (exception.retry);

	return err;
}