// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
 */
#include <linux/fs.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs42.h"
#include "iostat.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "internal.h"
#include "delegation.h"

#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);

static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
	struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
	unsigned short port = 2049;

	rcu_read_lock();
	naddr->netid_len = scnprintf(naddr->netid,
				     sizeof(naddr->netid), "%s",
				     rpc_peeraddr2str(clp->cl_rpcclient,
						      RPC_DISPLAY_NETID));
	naddr->addr_len = scnprintf(naddr->addr,
				    sizeof(naddr->addr),
				    "%s.%u.%u",
				    rpc_peeraddr2str(clp->cl_rpcclient,
						     RPC_DISPLAY_ADDR),
				    port >> 8, port & 255);
	rcu_read_unlock();
}

static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(filep);
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs42_falloc_args args = {
		.falloc_fh = NFS_FH(inode),
		.falloc_offset = offset,
		.falloc_length = len,
		.falloc_bitmask = nfs4_fattr_bitmap,
	};
	struct nfs42_falloc_res res = {
		.falloc_server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
			lock, FMODE_WRITE);
	if (status)
		return status;

	res.falloc_fattr = nfs_alloc_fattr();
	if (!res.falloc_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode(inode, res.falloc_fattr);

	kfree(res.falloc_fattr);
	return status;
}

static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
				loff_t offset, loff_t len)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	int err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}

int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;

	inode_unlock(inode);
	return err;
}

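/*
 * DEALLOCATE: punch a hole in the file.  Dirty pages are flushed first,
 * and on success the affected page cache range is truncated so reads
 * see the newly zeroed region.
 */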
int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
	};
	struct inode *inode = file_inode(filep);
	int err;

	if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
		return -EOPNOTSUPP;

	inode_lock(inode);
	err = nfs_sync_inode(inode);
	if (err)
		goto out_unlock;

	err = nfs42_proc_fallocate(&msg, filep, offset, len);
	if (err == 0)
		truncate_pagecache_range(inode, offset, (offset + len) - 1);
	if (err == -EOPNOTSUPP)
		NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
out_unlock:
	inode_unlock(inode);
	return err;
}

static int handle_async_copy(struct nfs42_copy_res *res,
			     struct nfs_server *dst_server,
			     struct nfs_server *src_server,
			     struct file *src,
			     struct file *dst,
			     nfs4_stateid *src_stateid,
			     bool *restart)
{
	struct nfs4_copy_state *copy, *tmp_copy;
	int status = NFS4_OK;
	bool found_pending = false;
	struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
	struct nfs_open_context *src_ctx = nfs_file_open_context(src);

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return -ENOMEM;

	spin_lock(&dst_server->nfs_client->cl_lock);
	list_for_each_entry(tmp_copy,
			    &dst_server->nfs_client->pending_cb_stateids,
			    copies) {
		if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
			   NFS4_STATEID_SIZE))
			continue;
		found_pending = true;
		list_del(&tmp_copy->copies);
		break;
	}
	if (found_pending) {
		spin_unlock(&dst_server->nfs_client->cl_lock);
		kfree(copy);
		copy = tmp_copy;
		goto out;
	}

	memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
	init_completion(&copy->completion);
	copy->parent_dst_state = dst_ctx->state;
	copy->parent_src_state = src_ctx->state;

	list_add_tail(&copy->copies, &dst_server->ss_copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);

	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_add_tail(&copy->src_copies, &src_server->ss_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}

	status = wait_for_completion_interruptible(&copy->completion);
	spin_lock(&dst_server->nfs_client->cl_lock);
	list_del_init(&copy->copies);
	spin_unlock(&dst_server->nfs_client->cl_lock);
	if (dst_server != src_server) {
		spin_lock(&src_server->nfs_client->cl_lock);
		list_del_init(&copy->src_copies);
		spin_unlock(&src_server->nfs_client->cl_lock);
	}
	if (status == -ERESTARTSYS) {
		goto out_cancel;
	} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
		status = -EAGAIN;
		*restart = true;
		goto out_cancel;
	}
out:
	res->write_res.count = copy->count;
	memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
	status = -copy->error;

out_free:
	kfree(copy);
	return status;
out_cancel:
	nfs42_do_offload_cancel_async(dst, &copy->stateid);
	if (!nfs42_files_from_same_server(src, dst))
		nfs42_do_offload_cancel_async(src, src_stateid);
	goto out_free;
}

static int process_copy_commit(struct file *dst, loff_t pos_dst,
			       struct nfs42_copy_res *res)
{
	struct nfs_commitres cres;
	int status = -ENOMEM;

	cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
	if (!cres.verf)
		goto out;

	status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
	if (status)
		goto out_free;
	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &cres.verf->verifier)) {
		dprintk("commit verf differs from copy verf\n");
		status = -EAGAIN;
	}
out_free:
	kfree(cres.verf);
out:
	return status;
}

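/*
 * Issue a single COPY request.  For an inter-server copy the caller
 * passes the source server address (nss) and the COPY_NOTIFY stateid;
 * otherwise the source stateid comes from the open/lock context.
 * Returns the number of bytes copied or a negative errno.
 */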
static ssize_t _nfs42_proc_copy(struct file *src,
				struct nfs_lock_context *src_lock,
				struct file *dst,
				struct nfs_lock_context *dst_lock,
				struct nfs42_copy_args *args,
				struct nfs42_copy_res *res,
				struct nl4_server *nss,
				nfs4_stateid *cnr_stateid,
				bool *restart)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	struct inode *dst_inode = file_inode(dst);
	struct inode *src_inode = file_inode(src);
	struct nfs_server *dst_server = NFS_SERVER(dst_inode);
	struct nfs_server *src_server = NFS_SERVER(src_inode);
	loff_t pos_src = args->src_pos;
	loff_t pos_dst = args->dst_pos;
	size_t count = args->count;
	ssize_t status;

	if (nss) {
		args->cp_src = nss;
		nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
	} else {
		status = nfs4_set_rw_stateid(&args->src_stateid,
				src_lock->open_context, src_lock, FMODE_READ);
		if (status)
			return status;
	}
	status = nfs_filemap_write_and_wait_range(file_inode(src)->i_mapping,
			pos_src, pos_src + (loff_t)count - 1);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
				     dst_lock, FMODE_WRITE);
	if (status)
		return status;

	status = nfs_sync_inode(dst_inode);
	if (status)
		return status;

	res->commit_res.verf = NULL;
	if (args->sync) {
		res->commit_res.verf =
			kzalloc(sizeof(struct nfs_writeverf), GFP_NOFS);
		if (!res->commit_res.verf)
			return -ENOMEM;
	}
	set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
		&src_lock->open_context->state->flags);
	set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
		&dst_lock->open_context->state->flags);

	status = nfs4_call_sync(dst_server->client, dst_server, &msg,
				&args->seq_args, &res->seq_res, 0);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_COPY;
	if (status)
		goto out;

	if (args->sync &&
	    nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &res->commit_res.verf->verifier)) {
		status = -EAGAIN;
		goto out;
	}

	if (!res->synchronous) {
		status = handle_async_copy(res, dst_server, src_server, src,
				dst, &args->src_stateid, restart);
		if (status)
			return status;
	}

	if ((!res->synchronous || !args->sync) &&
			res->write_res.verifier.committed != NFS_FILE_SYNC) {
		status = process_copy_commit(dst, pos_dst, res);
		if (status)
			return status;
	}

	truncate_pagecache_range(dst_inode, pos_dst,
				 pos_dst + res->write_res.count);

	status = res->write_res.count;
out:
	if (args->sync)
		kfree(res->commit_res.verf);
	return status;
}

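/*
 * COPY entry point: retries on recoverable errors, falls back to a
 * synchronous copy when the server rejects the asynchronous request
 * (NFS4ERR_OFFLOAD_NO_REQS), and maps ENOTSUPP to EOPNOTSUPP for the
 * caller.
 */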
ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
			struct file *dst, loff_t pos_dst, size_t count,
			struct nl4_server *nss,
			nfs4_stateid *cnr_stateid, bool sync)
{
	struct nfs_server *server = NFS_SERVER(file_inode(dst));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs42_copy_args args = {
		.src_fh = NFS_FH(file_inode(src)),
		.src_pos = pos_src,
		.dst_fh = NFS_FH(file_inode(dst)),
		.dst_pos = pos_dst,
		.count = count,
		.sync = sync,
	};
	struct nfs42_copy_res res;
	struct nfs4_exception src_exception = {
		.inode = file_inode(src),
		.stateid = &args.src_stateid,
	};
	struct nfs4_exception dst_exception = {
		.inode = file_inode(dst),
		.stateid = &args.dst_stateid,
	};
	ssize_t err, err2;
	bool restart = false;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.state = dst_lock->open_context->state;

	do {
		inode_lock(file_inode(dst));
		err = _nfs42_proc_copy(src, src_lock,
				dst, dst_lock,
				&args, &res,
				nss, cnr_stateid, &restart);
		inode_unlock(file_inode(dst));

		if (err >= 0)
			break;
		if (err == -ENOTSUPP &&
				nfs42_files_from_same_server(src, dst)) {
			err = -EOPNOTSUPP;
			break;
		} else if (err == -EAGAIN) {
			if (!restart) {
				dst_exception.retry = 1;
				continue;
			}
			break;
		} else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
			args.sync = true;
			dst_exception.retry = 1;
			continue;
		} else if ((err == -ESTALE ||
				err == -NFS4ERR_OFFLOAD_DENIED ||
				err == -ENOTSUPP) &&
				!nfs42_files_from_same_server(src, dst)) {
			nfs42_do_offload_cancel_async(src, &args.src_stateid);
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}

struct nfs42_offloadcancel_data {
	struct nfs_server *seq_server;
	struct nfs42_offload_status_args args;
	struct nfs42_offload_status_res res;
};

static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs4_setup_sequence(data->seq_server->nfs_client,
			    &data->args.osa_seq_args,
			    &data->res.osr_seq_res, task);
}

static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_offloadcancel_data *data = calldata;

	nfs41_sequence_done(task, &data->res.osr_seq_res);
	if (task->tk_status &&
	    nfs4_async_handle_error(task, data->seq_server, NULL,
				    NULL) == -EAGAIN)
		rpc_restart_call_prepare(task);
}

static void nfs42_free_offloadcancel_data(void *data)
{
	kfree(data);
}

static const struct rpc_call_ops nfs42_offload_cancel_ops = {
	.rpc_call_prepare = nfs42_offload_cancel_prepare,
	.rpc_call_done = nfs42_offload_cancel_done,
	.rpc_release = nfs42_free_offloadcancel_data,
};

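/*
 * Send an asynchronous OFFLOAD_CANCEL for the given copy stateid and
 * wait for the RPC to complete.  NFS_CAP_OFFLOAD_CANCEL is cleared if
 * the server turns out not to support the operation.
 */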
static int nfs42_do_offload_cancel_async(struct file *dst,
					 nfs4_stateid *stateid)
{
	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
	struct nfs42_offloadcancel_data *data = NULL;
	struct nfs_open_context *ctx = nfs_file_open_context(dst);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
		.rpc_cred = ctx->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = dst_server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_offload_cancel_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
		return -EOPNOTSUPP;

	data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;

	data->seq_server = dst_server;
	data->args.osa_src_fh = NFS_FH(file_inode(dst));
	memcpy(&data->args.osa_stateid, stateid,
	       sizeof(data->args.osa_stateid));
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
			   1, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = rpc_wait_for_completion_task(task);
	if (status == -ENOTSUPP)
		dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
	rpc_put_task(task);
	return status;
}

static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
				   struct nfs42_copy_notify_args *args,
				   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
		.rpc_argp = args,
		.rpc_resp = res,
	};
	int status;
	struct nfs_open_context *ctx;
	struct nfs_lock_context *l_ctx;

	ctx = get_nfs_open_context(nfs_file_open_context(src));
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx))
		return PTR_ERR(l_ctx);

	status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
				     FMODE_READ);
	nfs_put_lock_context(l_ctx);
	if (status)
		return status;

	status = nfs4_call_sync(src_server->client, src_server, &msg,
				&args->cna_seq_args, &res->cnr_seq_res, 0);
	if (status == -ENOTSUPP)
		src_server->caps &= ~NFS_CAP_COPY_NOTIFY;

	put_nfs_open_context(nfs_file_open_context(src));
	return status;
}

int nfs42_proc_copy_notify(struct file *src, struct file *dst,
			   struct nfs42_copy_notify_res *res)
{
	struct nfs_server *src_server = NFS_SERVER(file_inode(src));
	struct nfs42_copy_notify_args *args;
	struct nfs4_exception exception = {
		.inode = file_inode(src),
	};
	int status;

	if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
		return -EOPNOTSUPP;

	args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_NOFS);
	if (args == NULL)
		return -ENOMEM;

	args->cna_src_fh = NFS_FH(file_inode(src));
	args->cna_dst.nl4_type = NL4_NETADDR;
	nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
	exception.stateid = &args->cna_src_stateid;

	do {
		status = _nfs42_proc_copy_notify(src, dst, args, res);
		if (status == -ENOTSUPP) {
			status = -EOPNOTSUPP;
			goto out;
		}
		status = nfs4_handle_exception(src_server, status, &exception);
	} while (exception.retry);

out:
	kfree(args);
	return status;
}

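/*
 * SEEK: ask the server for the next hole or data region at or after
 * @offset.  Dirty pages from @offset onwards are flushed first so the
 * server's view of the file matches the client's.
 */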
static loff_t _nfs42_proc_llseek(struct file *filep,
		struct nfs_lock_context *lock, loff_t offset, int whence)
{
	struct inode *inode = file_inode(filep);
	struct nfs42_seek_args args = {
		.sa_fh = NFS_FH(inode),
		.sa_offset = offset,
		.sa_what = (whence == SEEK_HOLE) ?
				NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
	};
	struct nfs42_seek_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	struct nfs_server *server = NFS_SERVER(inode);
	int status;

	if (!nfs_server_capable(inode, NFS_CAP_SEEK))
		return -ENOTSUPP;

	status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
			lock, FMODE_READ);
	if (status)
		return status;

	status = nfs_filemap_write_and_wait_range(inode->i_mapping,
			offset, LLONG_MAX);
	if (status)
		return status;

	status = nfs4_call_sync(server->client, server, &msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == -ENOTSUPP)
		server->caps &= ~NFS_CAP_SEEK;
	if (status)
		return status;

	return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
}

loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
{
	struct nfs_server *server = NFS_SERVER(file_inode(filep));
	struct nfs4_exception exception = { };
	struct nfs_lock_context *lock;
	loff_t err;

	lock = nfs_get_lock_context(nfs_file_open_context(filep));
	if (IS_ERR(lock))
		return PTR_ERR(lock);

	exception.inode = file_inode(filep);
	exception.state = lock->open_context->state;

	do {
		err = _nfs42_proc_llseek(filep, lock, offset, whence);
		if (err >= 0)
			break;
		if (err == -ENOTSUPP) {
			err = -EOPNOTSUPP;
			break;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);

	nfs_put_lock_context(lock);
	return err;
}

static void
nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

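/*
 * LAYOUTSTATS completion: stateid errors either invalidate the layout
 * or restart the call with the current layout stateid; ENOTSUPP and
 * EOPNOTSUPP clear NFS_CAP_LAYOUTSTATS for this server.
 */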
static void
nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
	}
}

static void
nfs42_layoutstat_release(void *calldata)
{
	struct nfs42_layoutstat_data *data = calldata;
	struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
	int i;

	for (i = 0; i < data->args.num_dev; i++) {
		if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
			devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}

	pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
	smp_mb__after_atomic();
	nfs_iput_and_deactive(data->inode);
	kfree(data->args.devinfo);
	kfree(data);
}

static const struct rpc_call_ops nfs42_layoutstat_ops = {
	.rpc_call_prepare = nfs42_layoutstat_prepare,
	.rpc_call_done = nfs42_layoutstat_done,
	.rpc_release = nfs42_layoutstat_release,
};

int nfs42_proc_layoutstats_generic(struct nfs_server *server,
				   struct nfs42_layoutstat_data *data)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs42_layoutstat_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;

	data->inode = nfs_igrab_and_active(data->args.inode);
	if (!data->inode) {
		nfs42_layoutstat_release(data);
		return -EAGAIN;
	}
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

static struct nfs42_layouterror_data *
nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
{
	struct nfs42_layouterror_data *data;
	struct inode *inode = lseg->pls_layout->plh_inode;

	data = kzalloc(sizeof(*data), gfp_flags);
	if (data) {
		data->args.inode = data->inode = nfs_igrab_and_active(inode);
		if (data->inode) {
			data->lseg = pnfs_get_lseg(lseg);
			if (data->lseg)
				return data;
			nfs_iput_and_deactive(data->inode);
		}
		kfree(data);
	}
	return NULL;
}

static void
nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
{
	pnfs_put_lseg(data->lseg);
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}

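/*
 * LAYOUTERROR setup: bail out quietly if the layout is no longer
 * valid, otherwise stamp each queued error record with the current
 * layout stateid before the call is sent.
 */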
static void
nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
	unsigned i;

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		rpc_exit(task, 0);
		return;
	}
	for (i = 0; i < data->args.num_errors; i++)
		nfs4_stateid_copy(&data->args.errors[i].stateid,
				  &lo->plh_stateid);
	spin_unlock(&inode->i_lock);
	nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
			    &data->res.seq_res, task);
}

static void
nfs42_layouterror_done(struct rpc_task *task, void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;
	struct inode *inode = data->inode;
	struct pnfs_layout_hdr *lo = data->lseg->pls_layout;

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_BADHANDLE:
	case -ESTALE:
		pnfs_destroy_layout(NFS_I(inode));
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match(&data->args.errors[0].stateid,
				       &lo->plh_stateid)) {
			LIST_HEAD(head);

			/*
			 * Mark the bad layout state as invalid, then retry
			 * with the current stateid.
			 */
			pnfs_mark_layout_stateid_invalid(lo, &head);
			spin_unlock(&inode->i_lock);
			pnfs_free_lseg_list(&head);
			nfs_commit_inode(inode, 0);
		} else
			spin_unlock(&inode->i_lock);
		break;
	case -NFS4ERR_OLD_STATEID:
		spin_lock(&inode->i_lock);
		if (pnfs_layout_is_valid(lo) &&
		    nfs4_stateid_match_other(&data->args.errors[0].stateid,
					     &lo->plh_stateid)) {
			/* Do we need to delay before resending? */
			if (!nfs4_stateid_is_newer(&lo->plh_stateid,
						   &data->args.errors[0].stateid))
				rpc_delay(task, HZ);
			rpc_restart_call_prepare(task);
		}
		spin_unlock(&inode->i_lock);
		break;
	case -ENOTSUPP:
	case -EOPNOTSUPP:
		NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
	}
}

static void
nfs42_layouterror_release(void *calldata)
{
	struct nfs42_layouterror_data *data = calldata;

	nfs42_free_layouterror_data(data);
}

static const struct rpc_call_ops nfs42_layouterror_ops = {
	.rpc_call_prepare = nfs42_layouterror_prepare,
	.rpc_call_done = nfs42_layouterror_done,
	.rpc_release = nfs42_layouterror_release,
};

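/*
 * Report up to NFS42_LAYOUTERROR_MAX layout errors for @lseg to the
 * server in a single asynchronous LAYOUTERROR call.
 */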
int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
			   const struct nfs42_layout_error *errors, size_t n)
{
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct nfs42_layouterror_data *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
	};
	struct rpc_task_setup task_setup = {
		.rpc_message = &msg,
		.callback_ops = &nfs42_layouterror_ops,
		.flags = RPC_TASK_ASYNC,
	};
	unsigned int i;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
		return -EOPNOTSUPP;
	if (n > NFS42_LAYOUTERROR_MAX)
		return -EINVAL;
	data = nfs42_alloc_layouterror_data(lseg, GFP_NOFS);
	if (!data)
		return -ENOMEM;
	for (i = 0; i < n; i++) {
		data->args.errors[i] = errors[i];
		data->args.num_errors++;
		data->res.num_errors++;
	}
	msg.rpc_argp = &data->args;
	msg.rpc_resp = &data->res;
	task_setup.callback_data = data;
	task_setup.rpc_client = NFS_SERVER(inode)->client;
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);

static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
		struct file *dst_f, struct nfs_lock_context *src_lock,
		struct nfs_lock_context *dst_lock, loff_t src_offset,
		loff_t dst_offset, loff_t count)
{
	struct inode *src_inode = file_inode(src_f);
	struct inode *dst_inode = file_inode(dst_f);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	struct nfs42_clone_args args = {
		.src_fh = NFS_FH(src_inode),
		.dst_fh = NFS_FH(dst_inode),
		.src_offset = src_offset,
		.dst_offset = dst_offset,
		.count = count,
		.dst_bitmask = server->cache_consistency_bitmask,
	};
	struct nfs42_clone_res res = {
		.server = server,
	};
	int status;

	msg->rpc_argp = &args;
	msg->rpc_resp = &res;

	status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
			src_lock, FMODE_READ);
	if (status)
		return status;

	status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
			dst_lock, FMODE_WRITE);
	if (status)
		return status;

	res.dst_fattr = nfs_alloc_fattr();
	if (!res.dst_fattr)
		return -ENOMEM;

	status = nfs4_call_sync(server->client, server, msg,
				&args.seq_args, &res.seq_res, 0);
	if (status == 0)
		status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);

	kfree(res.dst_fattr);
	return status;
}

int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
		     loff_t src_offset, loff_t dst_offset, loff_t count)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
	};
	struct inode *inode = file_inode(src_f);
	struct nfs_server *server = NFS_SERVER(file_inode(src_f));
	struct nfs_lock_context *src_lock;
	struct nfs_lock_context *dst_lock;
	struct nfs4_exception src_exception = { };
	struct nfs4_exception dst_exception = { };
	int err, err2;

	if (!nfs_server_capable(inode, NFS_CAP_CLONE))
		return -EOPNOTSUPP;

	src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
	if (IS_ERR(src_lock))
		return PTR_ERR(src_lock);

	src_exception.inode = file_inode(src_f);
	src_exception.state = src_lock->open_context->state;

	dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
	if (IS_ERR(dst_lock)) {
		err = PTR_ERR(dst_lock);
		goto out_put_src_lock;
	}

	dst_exception.inode = file_inode(dst_f);
	dst_exception.state = dst_lock->open_context->state;

	do {
		err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
					src_offset, dst_offset, count);
		if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
			err = -EOPNOTSUPP;
			break;
		}

		err2 = nfs4_handle_exception(server, err, &src_exception);
		err = nfs4_handle_exception(server, err, &dst_exception);
		if (!err)
			err = err2;
	} while (src_exception.retry || dst_exception.retry);

	nfs_put_lock_context(dst_lock);
out_put_src_lock:
	nfs_put_lock_context(src_lock);
	return err;
}