/*
 *  fs/nfs/nfs4proc.c
 *
 *  Client-side procedure declarations for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

/* file attributes which can be mapped to nfs attributes */
#define NFS4_VALID_ATTRS (ATTR_MODE \
	| ATTR_UID \
	| ATTR_GID \
	| ATTR_SIZE \
	| ATTR_ATIME \
	| ATTR_MTIME \
	| ATTR_CTIME \
	| ATTR_ATIME_SET \
	| ATTR_MTIME_SET)

struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state, struct nfs4_label *ilabel,
			    struct nfs4_label *olabel);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
		struct rpc_cred *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
		struct rpc_cred *);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, (void **)&label->label, &label->len);
	if (err == 0)
		return label;

	return NULL;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	if (label)
		security_release_secctx(label->label, label->len);
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
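/*
 * Note: each bitmap array below holds the three attribute-mask words
 * (FATTR4_WORD0/WORD1/WORD2) that are sent on the wire; an attribute
 * is requested by setting its FATTR4_WORD* bit in the matching word.
 */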
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
			| FATTR4_WORD2_CLONE_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
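	/*
	 * Roughly, each faked entry below follows the readdir entry layout:
	 * a "next entry" flag, a 64-bit cookie (two XDR words), the name
	 * length and name, a one-word attribute bitmap requesting only
	 * FATTR4_WORD0_FILEID, the attribute buffer length, and the
	 * 64-bit fileid itself.
	 */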
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
		*p++ = htonl(8);			/* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);		/* bitmap */
	*p++ = htonl(8);				/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	int res = 0;

	might_sleep();

	freezable_schedule_timeout_killable_unsafe(
		nfs4_update_delay(timeout));
	if (fatal_signal_pending(current))
		res = -ERESTARTSYS;
	return res;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	const nfs4_stateid *stateid = exception->stateid;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->delay = 0;
	exception->recovering = 0;
	exception->retry = 0;
	switch(errorcode) {
		case 0:
			return 0;
		case -NFS4ERR_OPENMODE:
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
			if (inode) {
				int err;

				err = nfs_async_inode_return_delegation(inode,
						stateid);
				if (err == 0)
					goto wait_on_recovery;
				if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
					exception->retry = 1;
					break;
				}
			}
			if (state == NULL)
				break;
			ret = nfs4_schedule_stateid_recovery(server, state);
			if (ret < 0)
				break;
			goto wait_on_recovery;
		case -NFS4ERR_EXPIRED:
			if (state != NULL) {
				ret = nfs4_schedule_stateid_recovery(server, state);
				if (ret < 0)
					break;
			}
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_lease_recovery(clp);
			goto wait_on_recovery;
		case -NFS4ERR_MOVED:
			ret = nfs4_schedule_migration_recovery(server);
			if (ret < 0)
				break;
			goto wait_on_recovery;
		case -NFS4ERR_LEASE_MOVED:
			nfs4_schedule_lease_moved_recovery(clp);
			goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR: %d Reset session\n", __func__, 425 errorcode); 426 nfs4_schedule_session_recovery(clp->cl_session, errorcode); 427 goto wait_on_recovery; 428 #endif /* defined(CONFIG_NFS_V4_1) */ 429 case -NFS4ERR_FILE_OPEN: 430 if (exception->timeout > HZ) { 431 /* We have retried a decent amount, time to 432 * fail 433 */ 434 ret = -EBUSY; 435 break; 436 } 437 case -NFS4ERR_DELAY: 438 nfs_inc_server_stats(server, NFSIOS_DELAY); 439 case -NFS4ERR_GRACE: 440 case -NFS4ERR_LAYOUTTRYLATER: 441 case -NFS4ERR_RECALLCONFLICT: 442 exception->delay = 1; 443 return 0; 444 445 case -NFS4ERR_RETRY_UNCACHED_REP: 446 case -NFS4ERR_OLD_STATEID: 447 exception->retry = 1; 448 break; 449 case -NFS4ERR_BADOWNER: 450 /* The following works around a Linux server bug! */ 451 case -NFS4ERR_BADNAME: 452 if (server->caps & NFS_CAP_UIDGID_NOMAP) { 453 server->caps &= ~NFS_CAP_UIDGID_NOMAP; 454 exception->retry = 1; 455 printk(KERN_WARNING "NFS: v4 server %s " 456 "does not accept raw " 457 "uid/gids. " 458 "Reenabling the idmapper.\n", 459 server->nfs_client->cl_hostname); 460 } 461 } 462 /* We failed to handle the error */ 463 return nfs4_map_errors(ret); 464 wait_on_recovery: 465 exception->recovering = 1; 466 return 0; 467 } 468 469 /* This is the error handling routine for processes that are allowed 470 * to sleep. 471 */ 472 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) 473 { 474 struct nfs_client *clp = server->nfs_client; 475 int ret; 476 477 ret = nfs4_do_handle_exception(server, errorcode, exception); 478 if (exception->delay) { 479 ret = nfs4_delay(server->client, &exception->timeout); 480 goto out_retry; 481 } 482 if (exception->recovering) { 483 ret = nfs4_wait_clnt_recover(clp); 484 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 485 return -EIO; 486 goto out_retry; 487 } 488 return ret; 489 out_retry: 490 if (ret == 0) 491 exception->retry = 1; 492 return ret; 493 } 494 495 static int 496 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server, 497 int errorcode, struct nfs4_exception *exception) 498 { 499 struct nfs_client *clp = server->nfs_client; 500 int ret; 501 502 ret = nfs4_do_handle_exception(server, errorcode, exception); 503 if (exception->delay) { 504 rpc_delay(task, nfs4_update_delay(&exception->timeout)); 505 goto out_retry; 506 } 507 if (exception->recovering) { 508 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 509 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 510 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 511 goto out_retry; 512 } 513 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 514 ret = -EIO; 515 return ret; 516 out_retry: 517 if (ret == 0) 518 exception->retry = 1; 519 return ret; 520 } 521 522 static int 523 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server, 524 struct nfs4_state *state, long *timeout) 525 { 526 struct nfs4_exception exception = { 527 .state = state, 528 }; 529 530 if (task->tk_status >= 0) 531 return 0; 532 if (timeout) 533 exception.timeout = *timeout; 534 task->tk_status = nfs4_async_handle_exception(task, server, 535 task->tk_status, 536 &exception); 537 if (exception.delay && timeout) 538 *timeout = exception.timeout; 539 if (exception.retry) 540 return -EAGAIN; 541 return 0; 542 } 543 544 /* 545 * Return 'true' if 'clp' is using an rpc_client that is integrity protected 546 * or 'false' otherwise. 
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;

	if (flavor == RPC_AUTH_GSS_KRB5I ||
	    flavor == RPC_AUTH_GSS_KRB5P)
		return true;

	return false;
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal,timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_has_session(clp))
		do_renew_lease(clp, timestamp);
}

struct nfs4_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

void nfs4_init_sequence(struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res, int cache_reply)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = 0;

	res->sr_slot = NULL;
}

static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
{
	args->sa_privileged = 1;
}

int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
			 struct nfs4_sequence_args *args,
			 struct nfs4_sequence_res *res,
			 struct rpc_task *task)
{
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	spin_lock(&tbl->slot_tbl_lock);
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			task->tk_timeout = HZ >> 2;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	args->sa_slot = slot;
	res->sr_slot = slot;

out_start:
	rpc_call_start(task);
	return 0;

out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				NULL, RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs40_setup_sequence);

static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	if (slot == NULL)
		goto out;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);

	res->sr_slot = NULL;
out:
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot = res->sr_slot;
	bool send_new_highest_used_slotid = false;

	tbl = slot->table;
	session = tbl->session;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_user_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
	if (send_new_highest_used_slotid)
		nfs41_notify_server(session->clp);
}

int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	bool interrupted = false;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	session = slot->table->session;

	if (slot->interrupted) {
		slot->interrupted = 0;
		interrupted = true;
	}

	trace_nfs4_sequence_done(session, res);
	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++slot->seq_nr;
		clp = session->clp;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation..
		 * Mark the slot as having hosted an interrupted RPC call.
		 */
		slot->interrupted = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		/*
		 * Was the last operation on this sequence interrupted?
		 * If so, retry after bumping the sequence number.
		 */
		if (interrupted) {
			++slot->seq_nr;
			goto retry_nowait;
		}
		/*
		 * Could this slot have been previously retired?
		 * If so, then the server may be expecting seq_nr = 1!
		 */
		if (slot->seq_nr != 1) {
			slot->seq_nr = 1;
			goto retry_nowait;
		}
		break;
	case -NFS4ERR_SEQ_FALSE_RETRY:
		++slot->seq_nr;
		goto retry_nowait;
	default:
		/* Just update the slot sequence no. */
		++slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
out_noaction:
	return ret;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (!res->sr_slot->table->session)
		return nfs40_sequence_done(task, res);
	return nfs41_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

int nfs41_setup_sequence(struct nfs4_session *session,
			 struct nfs4_sequence_args *args,
			 struct nfs4_sequence_res *res,
			 struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_success;

	tbl = &session->fc_slot_table;

	task->tk_timeout = 0;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
	    !args->sa_privileged) {
		/* The state manager will wait until the slot table is empty */
		dprintk("%s session is draining\n", __func__);
		goto out_sleep;
	}

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		/* If out of memory, try again in 1/4 second */
		if (slot == ERR_PTR(-ENOMEM))
			task->tk_timeout = HZ >> 2;
		dprintk("<-- %s: no free slots\n", __func__);
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	args->sa_slot = slot;

	dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
			slot->slot_nr, slot->seq_nr);

	res->sr_slot = slot;
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
	trace_nfs4_setup_sequence(session, args);
out_success:
	rpc_call_start(task);
	return 0;
out_sleep:
	/* Privileged tasks are queued with top priority */
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				NULL, RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);

static int nfs4_setup_sequence(const struct nfs_server *server,
			       struct nfs4_sequence_args *args,
			       struct nfs4_sequence_res *res,
			       struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(server);
	int ret = 0;

	if (!session)
		return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
					    args, res, task);

	dprintk("--> %s clp %p session %p sr_slot %u\n",
		__func__, session->clp, session, res->sr_slot ?
			res->sr_slot->slot_nr : NFS4_NO_SLOT);
	ret = nfs41_setup_sequence(session, args, res, task);

	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	struct nfs4_session *session = nfs4_get_session(data->seq_server);

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

#else	/* !CONFIG_NFS_V4_1 */

static int nfs4_setup_sequence(const struct nfs_server *server,
			       struct nfs4_sequence_args *args,
			       struct nfs4_sequence_res *res,
			       struct rpc_task *task)
{
	return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
				    args, res, task);
}

int nfs4_sequence_done(struct rpc_task *task,
		       struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

#endif	/* !CONFIG_NFS_V4_1 */

static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server,
				data->seq_args, data->seq_res, task);
}

static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	int ret;
	struct rpc_task *task;
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data
	};

	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else {
		ret = task->tk_status;
		rpc_put_task(task);
	}
	return ret;
}

int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	nfs_fscache_invalidate(dir);
	spin_unlock(&dir->i_lock);
}

struct nfs4_opendata {
	struct kref kref;
	struct nfs_openargs o_arg;
	struct nfs_openres o_res;
	struct nfs_open_confirmargs c_arg;
	struct nfs_open_confirmres c_res;
	struct nfs4_string owner_name;
	struct nfs4_string group_name;
	struct nfs4_label *a_label;
	struct nfs_fattr f_attr;
	struct nfs4_label *f_label;
	struct dentry *dir;
	struct dentry *dentry;
	struct nfs4_state_owner *owner;
	struct nfs4_state *state;
	struct iattr attrs;
	unsigned long timestamp;
	unsigned int rpc_done : 1;
	unsigned int file_created : 1;
	unsigned int is_recover : 1;
	int rpc_status;
	int cancelled;
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT)
		res |= NFS4_SHARE_WANT_NO_DELEG;
out:
	return res;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.f_label = p->f_label;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		struct nfs4_label *label,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
			fmode, flags);
	/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
	 * will return permission denied for all bits until close */
	if (!(flags & O_EXCL)) {
		/* ask server to check for all possible rights as results
		 * are cached */
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	p->o_arg.label = nfs4_label_copy(p->a_label, label);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	if (attrs != NULL && attrs->ia_valid != 0) {
		__u32 verf[2];

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));

		verf[0] = jiffies;
		verf[1] = current->pid;
		memcpy(p->o_arg.u.verifier.data, verf,
				sizeof(p->o_arg.u.verifier.data));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	int ret;

	ret = rpc_wait_for_completion_task(task);
	return ret;
}

static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
		fmode_t fmode)
{
	switch(fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
		return state->n_rdwr != 0;
	case FMODE_WRITE:
		return state->n_wronly != 0;
	case FMODE_READ:
		return state->n_rdonly != 0;
	}
	WARN_ON_ONCE(1);
	return false;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
		case FMODE_READ:
			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
				&& state->n_rdonly != 0;
			break;
		case FMODE_WRITE:
			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
				&& state->n_wronly != 0;
			break;
		case FMODE_READ|FMODE_WRITE:
			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
				&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
		enum open_claim_type4 claim)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
	default:
		return 0;
	}
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
		case FMODE_WRITE:
			state->n_wronly++;
			break;
		case FMODE_READ:
			state->n_rdonly++;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

static bool nfs_need_update_open_stateid(struct nfs4_state *state,
		nfs4_stateid *stateid)
{
	if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
		return true;
	if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs_test_and_clear_all_open_stateid(state);
		return true;
	}
	if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
		return true;
	return false;
}

static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
		return;
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	set_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *arg_stateid,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle races with OPEN */
	if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
	    (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
		nfs_resync_open_stateid_locked(state);
		return;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
}

static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	switch (fmode) {
		case FMODE_READ:
			set_bit(NFS_O_RDONLY_STATE, &state->flags);
			break;
		case FMODE_WRITE:
			set_bit(NFS_O_WRONLY_STATE, &state->flags);
			break;
		case FMODE_READ|FMODE_WRITE:
			set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	if (!nfs_need_update_open_stateid(state, stateid))
		return;
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
}

static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	spin_lock(&state->owner->so_lock);
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		nfs4_stateid_copy(&state->stateid, deleg_stateid);
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
}

static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	if (rcu_dereference(nfsi->delegation) != deleg_cur ||
	   test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	rcu_read_unlock();

	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);

	return ret;
}

static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
		const nfs4_stateid *stateid)
{
	struct nfs4_state *state = lsp->ls_state;
	bool ret = false;

	spin_lock(&state->state_lock);
	if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	nfs4_stateid_copy(&lsp->ls_stateid, stateid);
	ret = true;
out_noupdate:
	spin_unlock(&state->state_lock);
	return ret;
}

static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	nfs4_inode_return_delegation(inode);
}

static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags;
	fmode_t fmode = opendata->o_arg.fmode;
	enum open_claim_type4 claim = opendata->o_arg.claim;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		spin_lock(&state->owner->so_lock);
		if (can_open_cached(state, fmode, open_mode)) {
			update_open_stateflags(state, fmode);
			spin_unlock(&state->owner->so_lock);
			goto out_return_state;
		}
		spin_unlock(&state->owner->so_lock);
		rcu_read_lock();
		delegation = rcu_dereference(nfsi->delegation);
		if (!can_open_delegated(delegation, fmode, claim)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		rcu_read_unlock();
		nfs_release_seqid(opendata->o_arg.seqid);
		if (!opendata->is_recover) {
			ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
			if (ret != 0)
				goto out;
		}
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	atomic_inc(&state->count);
	return state;
}

static void
nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
{
	struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
	struct nfs_delegation *delegation;
	int delegation_flags = 0;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation)
		delegation_flags = delegation->flags;
	rcu_read_unlock();
	switch (data->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
				   "returning a delegation for "
				   "OPEN(CLAIM_DELEGATE_CUR)\n",
				   clp->cl_hostname);
		return;
	}
	if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
		nfs_inode_set_delegation(state->inode,
					 data->owner->so_cred,
					 &data->o_res);
	else
		nfs_inode_reclaim_delegation(state->inode,
					     data->owner->so_cred,
					     &data->o_res);
}

/*
 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
 * and update the nfs4_state.
 */
static struct nfs4_state *
_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode = data->state->inode;
	struct nfs4_state *state = data->state;
	int ret;

	if (!data->rpc_done) {
		if (data->rpc_status) {
			ret = data->rpc_status;
			goto err;
		}
		/* cached opens have already been processed */
		goto update;
	}

	ret = nfs_refresh_inode(inode, &data->f_attr);
	if (ret)
		goto err;

	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
update:
	update_open_stateid(state, &data->o_res.stateid, NULL,
			    data->o_arg.fmode);
	atomic_inc(&state->count);

	return state;
err:
	return ERR_PTR(ret);

}

static struct nfs4_state *
_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode;
	struct nfs4_state *state = NULL;
	int ret;

	if (!data->rpc_done) {
		state = nfs4_try_open_cached(data);
		trace_nfs4_cached_open(data->state);
		goto out;
	}

	ret = -EAGAIN;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
		goto err;
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
	ret = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto err;
	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err_put_inode;
	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
	update_open_stateid(state, &data->o_res.stateid, NULL,
			data->o_arg.fmode);
	iput(inode);
out:
	nfs_release_seqid(data->o_arg.seqid);
	return state;
err_put_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static struct nfs4_state *
nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
		return _nfs4_opendata_reclaim_to_nfs4_state(data);
	return _nfs4_opendata_to_nfs4_state(data);
}

static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_open_context *ctx;

	spin_lock(&state->inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&state->inode->i_lock);
		return ctx;
	}
	spin_unlock(&state->inode->i_lock);
	return ERR_PTR(-ENOENT);
}

static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
		struct nfs4_state *state, enum open_claim_type4 claim)
{
	struct nfs4_opendata *opendata;

	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
			NULL, NULL, claim, GFP_NOFS);
	if (opendata == NULL)
		return ERR_PTR(-ENOMEM);
	opendata->state = state;
	atomic_inc(&state->count);
	return opendata;
}

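/*
 * Re-open the file with the given share mode during state recovery.
 * The resulting state is expected to be the one being recovered;
 * getting back a different state suggests the file changed on the
 * server, which is reported as -ESTALE.
 */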
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
		fmode_t fmode)
{
	struct nfs4_state *newstate;
	int ret;

	if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
		return 0;
	opendata->o_arg.open_flags = 0;
	opendata->o_arg.fmode = fmode;
	opendata->o_arg.share_access = nfs4_map_atomic_open_share(
			NFS_SB(opendata->dentry->d_sb),
			fmode, 0);
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	if (ret != 0)
		return ret;
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	if (newstate != opendata->state)
		ret = -ESTALE;
	nfs4_close_state(newstate, fmode);
	return ret;
}

static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	int ret;

	/* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
	smp_rmb();
	ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
	if (ret != 0)
		return ret;
	ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
	if (ret != 0)
		return ret;
	ret = nfs4_open_recover_helper(opendata, FMODE_READ);
	if (ret != 0)
		return ret;
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
	}
	return 0;
}

/*
 * OPEN_RECLAIM:
 * 	reclaim state on the server after a reboot.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_PREVIOUS);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}

static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_do_open_reclaim(ctx, state);
		trace_nfs4_open_reclaim(ctx, 0, err);
		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
			continue;
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return -EAGAIN;
	ret = nfs4_do_open_reclaim(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}

static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
{
	switch (err) {
	default:
		printk(KERN_ERR "NFS: %s: unhandled error "
			"%d.\n", __func__, err);
	case 0:
	case -ENOENT:
	case -EAGAIN:
	case -ESTALE:
		break;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
		nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
		return -EAGAIN;
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_STALE_STATEID:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	case -NFS4ERR_EXPIRED:
		/* Don't recall a delegation if it was lost */
		nfs4_schedule_lease_recovery(server->nfs_client);
		return -EAGAIN;
	case -NFS4ERR_MOVED:
		nfs4_schedule_migration_recovery(server);
		return -EAGAIN;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(server->nfs_client);
		return -EAGAIN;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_OPENMODE:
		nfs_inode_find_state_and_recover(state->inode,
				stateid);
		nfs4_schedule_stateid_recovery(server, state);
		return -EAGAIN;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
		ssleep(1);
		return -EAGAIN;
	case -ENOMEM:
	case -NFS4ERR_DENIED:
		/* kill_proc(fl->fl_pid, SIGLOST, 1); */
		return 0;
	}
	return err;
}

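/*
 * Roughly: when a delegation is being recalled, re-establish open state
 * on the server (CLAIM_DELEGATE_CUR) for each open mode currently held
 * under the delegation, so that the delegation can then be returned.
 */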
int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
		struct nfs4_state *state, const nfs4_stateid *stateid,
		fmode_t type)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_opendata *opendata;
	int err = 0;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_DELEG_CUR_FH);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
	write_seqlock(&state->seqlock);
	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
	write_sequnlock(&state->seqlock);
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	switch (type & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
	case FMODE_WRITE:
		err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
		if (err)
			break;
		err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
		if (err)
			break;
	case FMODE_READ:
		err = nfs4_open_recover_helper(opendata, FMODE_READ);
	}
	nfs4_opendata_put(opendata);
	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
}

static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
			     &data->c_arg.seq_args, &data->c_res.seq_res, task);
}

static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	nfs40_sequence_done(task, &data->c_res.seq_res);

	data->rpc_status = task->tk_status;
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
		nfs_confirm_seqid(&data->owner->so_seqid, 0);
		renew_lease(data->o_res.server, data->timestamp);
		data->rpc_done = 1;
	}
}

static void nfs4_open_confirm_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
		goto out_free;
	/* In case of error, no cleanup! */
*/ 1932 if (!data->rpc_done) 1933 goto out_free; 1934 state = nfs4_opendata_to_nfs4_state(data); 1935 if (!IS_ERR(state)) 1936 nfs4_close_state(state, data->o_arg.fmode); 1937 out_free: 1938 nfs4_opendata_put(data); 1939 } 1940 1941 static const struct rpc_call_ops nfs4_open_confirm_ops = { 1942 .rpc_call_prepare = nfs4_open_confirm_prepare, 1943 .rpc_call_done = nfs4_open_confirm_done, 1944 .rpc_release = nfs4_open_confirm_release, 1945 }; 1946 1947 /* 1948 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 1949 */ 1950 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 1951 { 1952 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 1953 struct rpc_task *task; 1954 struct rpc_message msg = { 1955 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 1956 .rpc_argp = &data->c_arg, 1957 .rpc_resp = &data->c_res, 1958 .rpc_cred = data->owner->so_cred, 1959 }; 1960 struct rpc_task_setup task_setup_data = { 1961 .rpc_client = server->client, 1962 .rpc_message = &msg, 1963 .callback_ops = &nfs4_open_confirm_ops, 1964 .callback_data = data, 1965 .workqueue = nfsiod_workqueue, 1966 .flags = RPC_TASK_ASYNC, 1967 }; 1968 int status; 1969 1970 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1); 1971 kref_get(&data->kref); 1972 data->rpc_done = 0; 1973 data->rpc_status = 0; 1974 data->timestamp = jiffies; 1975 if (data->is_recover) 1976 nfs4_set_sequence_privileged(&data->c_arg.seq_args); 1977 task = rpc_run_task(&task_setup_data); 1978 if (IS_ERR(task)) 1979 return PTR_ERR(task); 1980 status = nfs4_wait_for_completion_rpc_task(task); 1981 if (status != 0) { 1982 data->cancelled = 1; 1983 smp_wmb(); 1984 } else 1985 status = data->rpc_status; 1986 rpc_put_task(task); 1987 return status; 1988 } 1989 1990 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 1991 { 1992 struct nfs4_opendata *data = calldata; 1993 struct nfs4_state_owner *sp = data->owner; 1994 struct nfs_client *clp = sp->so_server->nfs_client; 1995 enum open_claim_type4 claim = data->o_arg.claim; 1996 1997 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 1998 goto out_wait; 1999 /* 2000 * Check if we still need to send an OPEN call, or if we can use 2001 * a delegation instead. 2002 */ 2003 if (data->state != NULL) { 2004 struct nfs_delegation *delegation; 2005 2006 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) 2007 goto out_no_action; 2008 rcu_read_lock(); 2009 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); 2010 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 2011 goto unlock_no_action; 2012 rcu_read_unlock(); 2013 } 2014 /* Update client id. 
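 * The clientid may have changed if the lease had to be re-established
 * (e.g. after a server reboot), so refresh it from the nfs_client
 * before the OPEN goes on the wire.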
*/ 2015 data->o_arg.clientid = clp->cl_clientid; 2016 switch (claim) { 2017 default: 2018 break; 2019 case NFS4_OPEN_CLAIM_PREVIOUS: 2020 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2021 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2022 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2023 case NFS4_OPEN_CLAIM_FH: 2024 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2025 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); 2026 } 2027 data->timestamp = jiffies; 2028 if (nfs4_setup_sequence(data->o_arg.server, 2029 &data->o_arg.seq_args, 2030 &data->o_res.seq_res, 2031 task) != 0) 2032 nfs_release_seqid(data->o_arg.seqid); 2033 2034 /* Set the create mode (note dependency on the session type) */ 2035 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2036 if (data->o_arg.open_flags & O_EXCL) { 2037 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2038 if (nfs4_has_persistent_session(clp)) 2039 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2040 else if (clp->cl_mvops->minor_version > 0) 2041 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2042 } 2043 return; 2044 unlock_no_action: 2045 trace_nfs4_cached_open(data->state); 2046 rcu_read_unlock(); 2047 out_no_action: 2048 task->tk_action = NULL; 2049 out_wait: 2050 nfs4_sequence_done(task, &data->o_res.seq_res); 2051 } 2052 2053 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2054 { 2055 struct nfs4_opendata *data = calldata; 2056 2057 data->rpc_status = task->tk_status; 2058 2059 if (!nfs4_sequence_done(task, &data->o_res.seq_res)) 2060 return; 2061 2062 if (task->tk_status == 0) { 2063 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2064 switch (data->o_res.f_attr->mode & S_IFMT) { 2065 case S_IFREG: 2066 break; 2067 case S_IFLNK: 2068 data->rpc_status = -ELOOP; 2069 break; 2070 case S_IFDIR: 2071 data->rpc_status = -EISDIR; 2072 break; 2073 default: 2074 data->rpc_status = -ENOTDIR; 2075 } 2076 } 2077 renew_lease(data->o_res.server, data->timestamp); 2078 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2079 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2080 } 2081 data->rpc_done = 1; 2082 } 2083 2084 static void nfs4_open_release(void *calldata) 2085 { 2086 struct nfs4_opendata *data = calldata; 2087 struct nfs4_state *state = NULL; 2088 2089 /* If this request hasn't been cancelled, do nothing */ 2090 if (data->cancelled == 0) 2091 goto out_free; 2092 /* In case of error, no cleanup! */ 2093 if (data->rpc_status != 0 || !data->rpc_done) 2094 goto out_free; 2095 /* In case we need an open_confirm, no cleanup! 
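 * An unconfirmed stateid cannot be used for CLOSE; the server is
 * expected to discard the unconfirmed open on its own.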
*/ 2096 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2097 goto out_free; 2098 state = nfs4_opendata_to_nfs4_state(data); 2099 if (!IS_ERR(state)) 2100 nfs4_close_state(state, data->o_arg.fmode); 2101 out_free: 2102 nfs4_opendata_put(data); 2103 } 2104 2105 static const struct rpc_call_ops nfs4_open_ops = { 2106 .rpc_call_prepare = nfs4_open_prepare, 2107 .rpc_call_done = nfs4_open_done, 2108 .rpc_release = nfs4_open_release, 2109 }; 2110 2111 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) 2112 { 2113 struct inode *dir = d_inode(data->dir); 2114 struct nfs_server *server = NFS_SERVER(dir); 2115 struct nfs_openargs *o_arg = &data->o_arg; 2116 struct nfs_openres *o_res = &data->o_res; 2117 struct rpc_task *task; 2118 struct rpc_message msg = { 2119 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2120 .rpc_argp = o_arg, 2121 .rpc_resp = o_res, 2122 .rpc_cred = data->owner->so_cred, 2123 }; 2124 struct rpc_task_setup task_setup_data = { 2125 .rpc_client = server->client, 2126 .rpc_message = &msg, 2127 .callback_ops = &nfs4_open_ops, 2128 .callback_data = data, 2129 .workqueue = nfsiod_workqueue, 2130 .flags = RPC_TASK_ASYNC, 2131 }; 2132 int status; 2133 2134 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); 2135 kref_get(&data->kref); 2136 data->rpc_done = 0; 2137 data->rpc_status = 0; 2138 data->cancelled = 0; 2139 data->is_recover = 0; 2140 if (isrecover) { 2141 nfs4_set_sequence_privileged(&o_arg->seq_args); 2142 data->is_recover = 1; 2143 } 2144 task = rpc_run_task(&task_setup_data); 2145 if (IS_ERR(task)) 2146 return PTR_ERR(task); 2147 status = nfs4_wait_for_completion_rpc_task(task); 2148 if (status != 0) { 2149 data->cancelled = 1; 2150 smp_wmb(); 2151 } else 2152 status = data->rpc_status; 2153 rpc_put_task(task); 2154 2155 return status; 2156 } 2157 2158 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2159 { 2160 struct inode *dir = d_inode(data->dir); 2161 struct nfs_openres *o_res = &data->o_res; 2162 int status; 2163 2164 status = nfs4_run_open_task(data, 1); 2165 if (status != 0 || !data->rpc_done) 2166 return status; 2167 2168 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2169 2170 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2171 status = _nfs4_proc_open_confirm(data); 2172 if (status != 0) 2173 return status; 2174 } 2175 2176 return status; 2177 } 2178 2179 /* 2180 * Additional permission checks in order to distinguish between an 2181 * open for read, and an open for execute. This works around the 2182 * fact that NFSv4 OPEN treats read and execute permissions as being 2183 * the same. 2184 * Note that in the non-execute case, we want to turn off permission 2185 * checking if we just created a new file (POSIX open() semantics). 2186 */ 2187 static int nfs4_opendata_access(struct rpc_cred *cred, 2188 struct nfs4_opendata *opendata, 2189 struct nfs4_state *state, fmode_t fmode, 2190 int openflags) 2191 { 2192 struct nfs_access_entry cache; 2193 u32 mask; 2194 2195 /* access call failed or for some reason the server doesn't 2196 * support any access modes -- defer access call until later */ 2197 if (opendata->o_res.access_supported == 0) 2198 return 0; 2199 2200 mask = 0; 2201 /* 2202 * Use openflags to check for exec, because fmode won't 2203 * always have FMODE_EXEC set when file open for exec. 
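 * (the ACCESS results piggybacked on the OPEN reply are added to the
 * access cache below either way)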
2204 */ 2205 if (openflags & __FMODE_EXEC) { 2206 /* ONLY check for exec rights */ 2207 mask = MAY_EXEC; 2208 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2209 mask = MAY_READ; 2210 2211 cache.cred = cred; 2212 cache.jiffies = jiffies; 2213 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2214 nfs_access_add_cache(state->inode, &cache); 2215 2216 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 2217 return 0; 2218 2219 /* even though OPEN succeeded, access is denied. Close the file */ 2220 nfs4_close_state(state, fmode); 2221 return -EACCES; 2222 } 2223 2224 /* 2225 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2226 */ 2227 static int _nfs4_proc_open(struct nfs4_opendata *data) 2228 { 2229 struct inode *dir = d_inode(data->dir); 2230 struct nfs_server *server = NFS_SERVER(dir); 2231 struct nfs_openargs *o_arg = &data->o_arg; 2232 struct nfs_openres *o_res = &data->o_res; 2233 int status; 2234 2235 status = nfs4_run_open_task(data, 0); 2236 if (!data->rpc_done) 2237 return status; 2238 if (status != 0) { 2239 if (status == -NFS4ERR_BADNAME && 2240 !(o_arg->open_flags & O_CREAT)) 2241 return -ENOENT; 2242 return status; 2243 } 2244 2245 nfs_fattr_map_and_free_names(server, &data->f_attr); 2246 2247 if (o_arg->open_flags & O_CREAT) { 2248 update_changeattr(dir, &o_res->cinfo); 2249 if (o_arg->open_flags & O_EXCL) 2250 data->file_created = 1; 2251 else if (o_res->cinfo.before != o_res->cinfo.after) 2252 data->file_created = 1; 2253 } 2254 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2255 server->caps &= ~NFS_CAP_POSIX_LOCK; 2256 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2257 status = _nfs4_proc_open_confirm(data); 2258 if (status != 0) 2259 return status; 2260 } 2261 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 2262 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label); 2263 return 0; 2264 } 2265 2266 static int nfs4_recover_expired_lease(struct nfs_server *server) 2267 { 2268 return nfs4_client_recover_expired_lease(server->nfs_client); 2269 } 2270 2271 /* 2272 * OPEN_EXPIRED: 2273 * reclaim state on the server after a network partition. 
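 * The open is simply replayed with CLAIM_FH; a stale filehandle
 * causes the dentry to be dropped.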
2274 * Assumes caller holds the appropriate lock 2275 */ 2276 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2277 { 2278 struct nfs4_opendata *opendata; 2279 int ret; 2280 2281 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2282 NFS4_OPEN_CLAIM_FH); 2283 if (IS_ERR(opendata)) 2284 return PTR_ERR(opendata); 2285 ret = nfs4_open_recover(opendata, state); 2286 if (ret == -ESTALE) 2287 d_drop(ctx->dentry); 2288 nfs4_opendata_put(opendata); 2289 return ret; 2290 } 2291 2292 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2293 { 2294 struct nfs_server *server = NFS_SERVER(state->inode); 2295 struct nfs4_exception exception = { }; 2296 int err; 2297 2298 do { 2299 err = _nfs4_open_expired(ctx, state); 2300 trace_nfs4_open_expired(ctx, 0, err); 2301 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2302 continue; 2303 switch (err) { 2304 default: 2305 goto out; 2306 case -NFS4ERR_GRACE: 2307 case -NFS4ERR_DELAY: 2308 nfs4_handle_exception(server, err, &exception); 2309 err = 0; 2310 } 2311 } while (exception.retry); 2312 out: 2313 return err; 2314 } 2315 2316 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2317 { 2318 struct nfs_open_context *ctx; 2319 int ret; 2320 2321 ctx = nfs4_state_find_open_context(state); 2322 if (IS_ERR(ctx)) 2323 return -EAGAIN; 2324 ret = nfs4_do_open_expired(ctx, state); 2325 put_nfs_open_context(ctx); 2326 return ret; 2327 } 2328 2329 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state) 2330 { 2331 nfs_remove_bad_delegation(state->inode); 2332 write_seqlock(&state->seqlock); 2333 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2334 write_sequnlock(&state->seqlock); 2335 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2336 } 2337 2338 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2339 { 2340 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2341 nfs_finish_clear_delegation_stateid(state); 2342 } 2343 2344 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2345 { 2346 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2347 nfs40_clear_delegation_stateid(state); 2348 return nfs4_open_expired(sp, state); 2349 } 2350 2351 #if defined(CONFIG_NFS_V4_1) 2352 static void nfs41_check_delegation_stateid(struct nfs4_state *state) 2353 { 2354 struct nfs_server *server = NFS_SERVER(state->inode); 2355 nfs4_stateid stateid; 2356 struct nfs_delegation *delegation; 2357 struct rpc_cred *cred; 2358 int status; 2359 2360 /* Get the delegation credential for use by test/free_stateid */ 2361 rcu_read_lock(); 2362 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2363 if (delegation == NULL) { 2364 rcu_read_unlock(); 2365 return; 2366 } 2367 2368 nfs4_stateid_copy(&stateid, &delegation->stateid); 2369 cred = get_rpccred(delegation->cred); 2370 rcu_read_unlock(); 2371 status = nfs41_test_stateid(server, &stateid, cred); 2372 trace_nfs4_test_delegation_stateid(state, NULL, status); 2373 2374 if (status != NFS_OK) { 2375 /* Free the stateid unless the server explicitly 2376 * informs us the stateid is unrecognized. 
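 * (an NFS4ERR_BAD_STATEID reply means the server no longer knows
 * this stateid, so a FREE_STATEID would be pointless)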
 */
		if (status != -NFS4ERR_BAD_STATEID)
			nfs41_free_stateid(server, &stateid, cred);
		nfs_finish_clear_delegation_stateid(state);
	}

	put_rpccred(cred);
}

/**
 * nfs41_check_open_stateid - possibly free an open stateid
 *
 * @state: NFSv4 state for an inode
 *
 * Returns NFS_OK if recovery for this stateid is now finished.
 * Otherwise a negative NFS4ERR value is returned.
 */
static int nfs41_check_open_stateid(struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	nfs4_stateid *stateid = &state->open_stateid;
	struct rpc_cred *cred = state->owner->so_cred;
	int status;

	/* If a state reset has been done, test_stateid is unneeded */
	if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
	    (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
	    (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
		return -NFS4ERR_BAD_STATEID;

	status = nfs41_test_stateid(server, stateid, cred);
	trace_nfs4_test_open_stateid(state, NULL, status);
	if (status != NFS_OK) {
		/* Free the stateid unless the server explicitly
		 * informs us the stateid is unrecognized. */
		if (status != -NFS4ERR_BAD_STATEID)
			nfs41_free_stateid(server, stateid, cred);

		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	return status;
}

static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	int status;

	nfs41_check_delegation_stateid(state);
	status = nfs41_check_open_stateid(state);
	if (status != NFS_OK)
		status = nfs4_open_expired(sp, state);
	return status;
}
#endif

/*
 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
 * fields corresponding to the attributes that were used to store the verifier.
 * Make sure we clobber those fields in the later setattr call.
 */
static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
				struct iattr *sattr, struct nfs4_label **label)
{
	const u32 *attrset = opendata->o_res.attrset;

	if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
	    !(sattr->ia_valid & ATTR_ATIME_SET))
		sattr->ia_valid |= ATTR_ATIME;

	if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
	    !(sattr->ia_valid & ATTR_MTIME_SET))
		sattr->ia_valid |= ATTR_MTIME;

	/* Setting most attributes twice is harmless; MODE is the exception,
	 * so don't resend it if the server has already applied it.
*/ 2453 if ((attrset[1] & FATTR4_WORD1_MODE)) 2454 sattr->ia_valid &= ~ATTR_MODE; 2455 2456 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) 2457 *label = NULL; 2458 } 2459 2460 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 2461 fmode_t fmode, 2462 int flags, 2463 struct nfs_open_context *ctx) 2464 { 2465 struct nfs4_state_owner *sp = opendata->owner; 2466 struct nfs_server *server = sp->so_server; 2467 struct dentry *dentry; 2468 struct nfs4_state *state; 2469 unsigned int seq; 2470 int ret; 2471 2472 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 2473 2474 ret = _nfs4_proc_open(opendata); 2475 if (ret != 0) 2476 goto out; 2477 2478 state = nfs4_opendata_to_nfs4_state(opendata); 2479 ret = PTR_ERR(state); 2480 if (IS_ERR(state)) 2481 goto out; 2482 if (server->caps & NFS_CAP_POSIX_LOCK) 2483 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 2484 2485 dentry = opendata->dentry; 2486 if (d_really_is_negative(dentry)) { 2487 struct dentry *alias; 2488 d_drop(dentry); 2489 alias = d_exact_alias(dentry, state->inode); 2490 if (!alias) 2491 alias = d_splice_alias(igrab(state->inode), dentry); 2492 /* d_splice_alias() can't fail here - it's a non-directory */ 2493 if (alias) { 2494 dput(ctx->dentry); 2495 ctx->dentry = dentry = alias; 2496 } 2497 nfs_set_verifier(dentry, 2498 nfs_save_change_attribute(d_inode(opendata->dir))); 2499 } 2500 2501 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags); 2502 if (ret != 0) 2503 goto out; 2504 2505 ctx->state = state; 2506 if (d_inode(dentry) == state->inode) { 2507 nfs_inode_attach_open_context(ctx); 2508 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 2509 nfs4_schedule_stateid_recovery(server, state); 2510 } 2511 out: 2512 return ret; 2513 } 2514 2515 /* 2516 * Returns a referenced nfs4_state 2517 */ 2518 static int _nfs4_do_open(struct inode *dir, 2519 struct nfs_open_context *ctx, 2520 int flags, 2521 struct iattr *sattr, 2522 struct nfs4_label *label, 2523 int *opened) 2524 { 2525 struct nfs4_state_owner *sp; 2526 struct nfs4_state *state = NULL; 2527 struct nfs_server *server = NFS_SERVER(dir); 2528 struct nfs4_opendata *opendata; 2529 struct dentry *dentry = ctx->dentry; 2530 struct rpc_cred *cred = ctx->cred; 2531 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 2532 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC); 2533 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 2534 struct nfs4_label *olabel = NULL; 2535 int status; 2536 2537 /* Protect against reboot recovery conflicts */ 2538 status = -ENOMEM; 2539 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 2540 if (sp == NULL) { 2541 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 2542 goto out_err; 2543 } 2544 status = nfs4_recover_expired_lease(server); 2545 if (status != 0) 2546 goto err_put_state_owner; 2547 if (d_really_is_positive(dentry)) 2548 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 2549 status = -ENOMEM; 2550 if (d_really_is_positive(dentry)) 2551 claim = NFS4_OPEN_CLAIM_FH; 2552 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, 2553 label, claim, GFP_KERNEL); 2554 if (opendata == NULL) 2555 goto err_put_state_owner; 2556 2557 if (label) { 2558 olabel = nfs4_label_alloc(server, GFP_KERNEL); 2559 if (IS_ERR(olabel)) { 2560 status = PTR_ERR(olabel); 2561 goto err_opendata_put; 2562 } 2563 } 2564 2565 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 2566 if (!opendata->f_attr.mdsthreshold) { 2567 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 2568 if 
(!opendata->f_attr.mdsthreshold) 2569 goto err_free_label; 2570 } 2571 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 2572 } 2573 if (d_really_is_positive(dentry)) 2574 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 2575 2576 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx); 2577 if (status != 0) 2578 goto err_free_label; 2579 state = ctx->state; 2580 2581 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 2582 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 2583 nfs4_exclusive_attrset(opendata, sattr, &label); 2584 /* 2585 * send create attributes which was not set by open 2586 * with an extra setattr. 2587 */ 2588 if (sattr->ia_valid & NFS4_VALID_ATTRS) { 2589 nfs_fattr_init(opendata->o_res.f_attr); 2590 status = nfs4_do_setattr(state->inode, cred, 2591 opendata->o_res.f_attr, sattr, 2592 state, label, olabel); 2593 if (status == 0) { 2594 nfs_setattr_update_inode(state->inode, sattr, 2595 opendata->o_res.f_attr); 2596 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2597 } 2598 } 2599 } 2600 if (opened && opendata->file_created) 2601 *opened |= FILE_CREATED; 2602 2603 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 2604 *ctx_th = opendata->f_attr.mdsthreshold; 2605 opendata->f_attr.mdsthreshold = NULL; 2606 } 2607 2608 nfs4_label_free(olabel); 2609 2610 nfs4_opendata_put(opendata); 2611 nfs4_put_state_owner(sp); 2612 return 0; 2613 err_free_label: 2614 nfs4_label_free(olabel); 2615 err_opendata_put: 2616 nfs4_opendata_put(opendata); 2617 err_put_state_owner: 2618 nfs4_put_state_owner(sp); 2619 out_err: 2620 return status; 2621 } 2622 2623 2624 static struct nfs4_state *nfs4_do_open(struct inode *dir, 2625 struct nfs_open_context *ctx, 2626 int flags, 2627 struct iattr *sattr, 2628 struct nfs4_label *label, 2629 int *opened) 2630 { 2631 struct nfs_server *server = NFS_SERVER(dir); 2632 struct nfs4_exception exception = { }; 2633 struct nfs4_state *res; 2634 int status; 2635 2636 do { 2637 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened); 2638 res = ctx->state; 2639 trace_nfs4_open_file(ctx, flags, status); 2640 if (status == 0) 2641 break; 2642 /* NOTE: BAD_SEQID means the server and client disagree about the 2643 * book-keeping w.r.t. state-changing operations 2644 * (OPEN/CLOSE/LOCK/LOCKU...) 2645 * It is actually a sign of a bug on the client or on the server. 2646 * 2647 * If we receive a BAD_SEQID error in the particular case of 2648 * doing an OPEN, we assume that nfs_increment_open_seqid() will 2649 * have unhashed the old state_owner for us, and that we can 2650 * therefore safely retry using a new one. We should still warn 2651 * the user though... 2652 */ 2653 if (status == -NFS4ERR_BAD_SEQID) { 2654 pr_warn_ratelimited("NFS: v4 server %s " 2655 " returned a bad sequence-id error!\n", 2656 NFS_SERVER(dir)->nfs_client->cl_hostname); 2657 exception.retry = 1; 2658 continue; 2659 } 2660 /* 2661 * BAD_STATEID on OPEN means that the server cancelled our 2662 * state before it received the OPEN_CONFIRM. 2663 * Recover by retrying the request as per the discussion 2664 * on Page 181 of RFC3530. 
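 * Unlike the BAD_SEQID case above, the same state owner can be
 * reused; we simply retry the OPEN.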
2665 */ 2666 if (status == -NFS4ERR_BAD_STATEID) { 2667 exception.retry = 1; 2668 continue; 2669 } 2670 if (status == -EAGAIN) { 2671 /* We must have found a delegation */ 2672 exception.retry = 1; 2673 continue; 2674 } 2675 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 2676 continue; 2677 res = ERR_PTR(nfs4_handle_exception(server, 2678 status, &exception)); 2679 } while (exception.retry); 2680 return res; 2681 } 2682 2683 static int _nfs4_do_setattr(struct inode *inode, 2684 struct nfs_setattrargs *arg, 2685 struct nfs_setattrres *res, 2686 struct rpc_cred *cred, 2687 struct nfs4_state *state) 2688 { 2689 struct nfs_server *server = NFS_SERVER(inode); 2690 struct rpc_message msg = { 2691 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2692 .rpc_argp = arg, 2693 .rpc_resp = res, 2694 .rpc_cred = cred, 2695 }; 2696 struct rpc_cred *delegation_cred = NULL; 2697 unsigned long timestamp = jiffies; 2698 fmode_t fmode; 2699 bool truncate; 2700 int status; 2701 2702 nfs_fattr_init(res->fattr); 2703 2704 /* Servers should only apply open mode checks for file size changes */ 2705 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false; 2706 fmode = truncate ? FMODE_WRITE : FMODE_READ; 2707 2708 if (nfs4_copy_delegation_stateid(inode, fmode, &arg->stateid, &delegation_cred)) { 2709 /* Use that stateid */ 2710 } else if (truncate && state != NULL) { 2711 struct nfs_lockowner lockowner = { 2712 .l_owner = current->files, 2713 .l_pid = current->tgid, 2714 }; 2715 if (!nfs4_valid_open_stateid(state)) 2716 return -EBADF; 2717 if (nfs4_select_rw_stateid(state, FMODE_WRITE, &lockowner, 2718 &arg->stateid, &delegation_cred) == -EIO) 2719 return -EBADF; 2720 } else 2721 nfs4_stateid_copy(&arg->stateid, &zero_stateid); 2722 if (delegation_cred) 2723 msg.rpc_cred = delegation_cred; 2724 2725 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1); 2726 2727 put_rpccred(delegation_cred); 2728 if (status == 0 && state != NULL) 2729 renew_lease(server, timestamp); 2730 trace_nfs4_setattr(inode, &arg->stateid, status); 2731 return status; 2732 } 2733 2734 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2735 struct nfs_fattr *fattr, struct iattr *sattr, 2736 struct nfs4_state *state, struct nfs4_label *ilabel, 2737 struct nfs4_label *olabel) 2738 { 2739 struct nfs_server *server = NFS_SERVER(inode); 2740 struct nfs_setattrargs arg = { 2741 .fh = NFS_FH(inode), 2742 .iap = sattr, 2743 .server = server, 2744 .bitmask = server->attr_bitmask, 2745 .label = ilabel, 2746 }; 2747 struct nfs_setattrres res = { 2748 .fattr = fattr, 2749 .label = olabel, 2750 .server = server, 2751 }; 2752 struct nfs4_exception exception = { 2753 .state = state, 2754 .inode = inode, 2755 .stateid = &arg.stateid, 2756 }; 2757 int err; 2758 2759 arg.bitmask = nfs4_bitmask(server, ilabel); 2760 if (ilabel) 2761 arg.bitmask = nfs4_bitmask(server, olabel); 2762 2763 do { 2764 err = _nfs4_do_setattr(inode, &arg, &res, cred, state); 2765 switch (err) { 2766 case -NFS4ERR_OPENMODE: 2767 if (!(sattr->ia_valid & ATTR_SIZE)) { 2768 pr_warn_once("NFSv4: server %s is incorrectly " 2769 "applying open mode checks to " 2770 "a SETATTR that is not " 2771 "changing file size.\n", 2772 server->nfs_client->cl_hostname); 2773 } 2774 if (state && !(state->state & FMODE_WRITE)) { 2775 err = -EBADF; 2776 if (sattr->ia_valid & ATTR_OPEN) 2777 err = -EACCES; 2778 goto out; 2779 } 2780 } 2781 err = nfs4_handle_exception(server, err, &exception); 2782 } while (exception.retry); 2783 out: 
2784 return err; 2785 } 2786 2787 static bool 2788 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 2789 { 2790 if (inode == NULL || !nfs_have_layout(inode)) 2791 return false; 2792 2793 return pnfs_wait_on_layoutreturn(inode, task); 2794 } 2795 2796 struct nfs4_closedata { 2797 struct inode *inode; 2798 struct nfs4_state *state; 2799 struct nfs_closeargs arg; 2800 struct nfs_closeres res; 2801 struct nfs_fattr fattr; 2802 unsigned long timestamp; 2803 bool roc; 2804 u32 roc_barrier; 2805 }; 2806 2807 static void nfs4_free_closedata(void *data) 2808 { 2809 struct nfs4_closedata *calldata = data; 2810 struct nfs4_state_owner *sp = calldata->state->owner; 2811 struct super_block *sb = calldata->state->inode->i_sb; 2812 2813 if (calldata->roc) 2814 pnfs_roc_release(calldata->state->inode); 2815 nfs4_put_open_state(calldata->state); 2816 nfs_free_seqid(calldata->arg.seqid); 2817 nfs4_put_state_owner(sp); 2818 nfs_sb_deactive(sb); 2819 kfree(calldata); 2820 } 2821 2822 static void nfs4_close_done(struct rpc_task *task, void *data) 2823 { 2824 struct nfs4_closedata *calldata = data; 2825 struct nfs4_state *state = calldata->state; 2826 struct nfs_server *server = NFS_SERVER(calldata->inode); 2827 nfs4_stateid *res_stateid = NULL; 2828 2829 dprintk("%s: begin!\n", __func__); 2830 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2831 return; 2832 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 2833 /* hmm. we are done with the inode, and in the process of freeing 2834 * the state_owner. we keep this around to process errors 2835 */ 2836 switch (task->tk_status) { 2837 case 0: 2838 res_stateid = &calldata->res.stateid; 2839 if (calldata->roc) 2840 pnfs_roc_set_barrier(state->inode, 2841 calldata->roc_barrier); 2842 renew_lease(server, calldata->timestamp); 2843 break; 2844 case -NFS4ERR_ADMIN_REVOKED: 2845 case -NFS4ERR_STALE_STATEID: 2846 case -NFS4ERR_OLD_STATEID: 2847 case -NFS4ERR_BAD_STATEID: 2848 case -NFS4ERR_EXPIRED: 2849 if (!nfs4_stateid_match(&calldata->arg.stateid, 2850 &state->open_stateid)) { 2851 rpc_restart_call_prepare(task); 2852 goto out_release; 2853 } 2854 if (calldata->arg.fmode == 0) 2855 break; 2856 default: 2857 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) { 2858 rpc_restart_call_prepare(task); 2859 goto out_release; 2860 } 2861 } 2862 nfs_clear_open_stateid(state, &calldata->arg.stateid, 2863 res_stateid, calldata->arg.fmode); 2864 out_release: 2865 nfs_release_seqid(calldata->arg.seqid); 2866 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2867 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2868 } 2869 2870 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2871 { 2872 struct nfs4_closedata *calldata = data; 2873 struct nfs4_state *state = calldata->state; 2874 struct inode *inode = calldata->inode; 2875 bool is_rdonly, is_wronly, is_rdwr; 2876 int call_close = 0; 2877 2878 dprintk("%s: begin!\n", __func__); 2879 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2880 goto out_wait; 2881 2882 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2883 spin_lock(&state->owner->so_lock); 2884 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 2885 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 2886 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 2887 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid); 2888 /* Calculate the change in open mode */ 2889 calldata->arg.fmode = 0; 2890 if (state->n_rdwr == 0) { 
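		/* No more O_RDWR openers: keep FMODE_READ and/or FMODE_WRITE
		 * only while read-only/write-only openers remain, and
		 * downgrade or close whatever share mode is no longer
		 * needed. */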
2891 if (state->n_rdonly == 0) 2892 call_close |= is_rdonly; 2893 else if (is_rdonly) 2894 calldata->arg.fmode |= FMODE_READ; 2895 if (state->n_wronly == 0) 2896 call_close |= is_wronly; 2897 else if (is_wronly) 2898 calldata->arg.fmode |= FMODE_WRITE; 2899 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) 2900 call_close |= is_rdwr; 2901 } else if (is_rdwr) 2902 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 2903 2904 if (!nfs4_valid_open_stateid(state)) 2905 call_close = 0; 2906 spin_unlock(&state->owner->so_lock); 2907 2908 if (!call_close) { 2909 /* Note: exit _without_ calling nfs4_close_done */ 2910 goto out_no_action; 2911 } 2912 2913 if (nfs4_wait_on_layoutreturn(inode, task)) { 2914 nfs_release_seqid(calldata->arg.seqid); 2915 goto out_wait; 2916 } 2917 2918 if (calldata->arg.fmode == 0) 2919 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2920 if (calldata->roc) 2921 pnfs_roc_get_barrier(inode, &calldata->roc_barrier); 2922 2923 calldata->arg.share_access = 2924 nfs4_map_atomic_open_share(NFS_SERVER(inode), 2925 calldata->arg.fmode, 0); 2926 2927 nfs_fattr_init(calldata->res.fattr); 2928 calldata->timestamp = jiffies; 2929 if (nfs4_setup_sequence(NFS_SERVER(inode), 2930 &calldata->arg.seq_args, 2931 &calldata->res.seq_res, 2932 task) != 0) 2933 nfs_release_seqid(calldata->arg.seqid); 2934 dprintk("%s: done!\n", __func__); 2935 return; 2936 out_no_action: 2937 task->tk_action = NULL; 2938 out_wait: 2939 nfs4_sequence_done(task, &calldata->res.seq_res); 2940 } 2941 2942 static const struct rpc_call_ops nfs4_close_ops = { 2943 .rpc_call_prepare = nfs4_close_prepare, 2944 .rpc_call_done = nfs4_close_done, 2945 .rpc_release = nfs4_free_closedata, 2946 }; 2947 2948 static bool nfs4_roc(struct inode *inode) 2949 { 2950 if (!nfs_have_layout(inode)) 2951 return false; 2952 return pnfs_roc(inode); 2953 } 2954 2955 /* 2956 * It is possible for data to be read/written from a mem-mapped file 2957 * after the sys_close call (which hits the vfs layer as a flush). 2958 * This means that we can't safely call nfsv4 close on a file until 2959 * the inode is cleared. This in turn means that we are not good 2960 * NFSv4 citizens - we do not indicate to the server to update the file's 2961 * share state even when we are done with one of the three share 2962 * stateid's in the inode. 2963 * 2964 * NOTE: Caller must be holding the sp->so_owner semaphore! 
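 * The 'wait' argument only controls whether we block waiting for the
 * reply; the CLOSE itself always runs as an asynchronous RPC on the
 * nfsiod workqueue.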
2965 */ 2966 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 2967 { 2968 struct nfs_server *server = NFS_SERVER(state->inode); 2969 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 2970 struct nfs4_closedata *calldata; 2971 struct nfs4_state_owner *sp = state->owner; 2972 struct rpc_task *task; 2973 struct rpc_message msg = { 2974 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2975 .rpc_cred = state->owner->so_cred, 2976 }; 2977 struct rpc_task_setup task_setup_data = { 2978 .rpc_client = server->client, 2979 .rpc_message = &msg, 2980 .callback_ops = &nfs4_close_ops, 2981 .workqueue = nfsiod_workqueue, 2982 .flags = RPC_TASK_ASYNC, 2983 }; 2984 int status = -ENOMEM; 2985 2986 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 2987 &task_setup_data.rpc_client, &msg); 2988 2989 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2990 if (calldata == NULL) 2991 goto out; 2992 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2993 calldata->inode = state->inode; 2994 calldata->state = state; 2995 calldata->arg.fh = NFS_FH(state->inode); 2996 /* Serialization for the sequence id */ 2997 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 2998 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 2999 if (IS_ERR(calldata->arg.seqid)) 3000 goto out_free_calldata; 3001 calldata->arg.fmode = 0; 3002 calldata->arg.bitmask = server->cache_consistency_bitmask; 3003 calldata->res.fattr = &calldata->fattr; 3004 calldata->res.seqid = calldata->arg.seqid; 3005 calldata->res.server = server; 3006 calldata->roc = nfs4_roc(state->inode); 3007 nfs_sb_active(calldata->inode->i_sb); 3008 3009 msg.rpc_argp = &calldata->arg; 3010 msg.rpc_resp = &calldata->res; 3011 task_setup_data.callback_data = calldata; 3012 task = rpc_run_task(&task_setup_data); 3013 if (IS_ERR(task)) 3014 return PTR_ERR(task); 3015 status = 0; 3016 if (wait) 3017 status = rpc_wait_for_completion_task(task); 3018 rpc_put_task(task); 3019 return status; 3020 out_free_calldata: 3021 kfree(calldata); 3022 out: 3023 nfs4_put_open_state(state); 3024 nfs4_put_state_owner(sp); 3025 return status; 3026 } 3027 3028 static struct inode * 3029 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3030 int open_flags, struct iattr *attr, int *opened) 3031 { 3032 struct nfs4_state *state; 3033 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; 3034 3035 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3036 3037 /* Protect against concurrent sillydeletes */ 3038 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3039 3040 nfs4_label_release_security(label); 3041 3042 if (IS_ERR(state)) 3043 return ERR_CAST(state); 3044 return state->inode; 3045 } 3046 3047 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3048 { 3049 if (ctx->state == NULL) 3050 return; 3051 if (is_sync) 3052 nfs4_close_sync(ctx->state, ctx->mode); 3053 else 3054 nfs4_close_state(ctx->state, ctx->mode); 3055 } 3056 3057 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3058 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3059 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL) 3060 3061 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3062 { 3063 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion; 3064 struct nfs4_server_caps_arg args = { 3065 .fhandle = fhandle, 3066 .bitmask = bitmask, 3067 }; 3068 
struct nfs4_server_caps_res res = {}; 3069 struct rpc_message msg = { 3070 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3071 .rpc_argp = &args, 3072 .rpc_resp = &res, 3073 }; 3074 int status; 3075 3076 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3077 FATTR4_WORD0_FH_EXPIRE_TYPE | 3078 FATTR4_WORD0_LINK_SUPPORT | 3079 FATTR4_WORD0_SYMLINK_SUPPORT | 3080 FATTR4_WORD0_ACLSUPPORT; 3081 if (minorversion) 3082 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3083 3084 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3085 if (status == 0) { 3086 /* Sanity check the server answers */ 3087 switch (minorversion) { 3088 case 0: 3089 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 3090 res.attr_bitmask[2] = 0; 3091 break; 3092 case 1: 3093 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 3094 break; 3095 case 2: 3096 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 3097 } 3098 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 3099 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 3100 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 3101 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 3102 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 3103 NFS_CAP_CTIME|NFS_CAP_MTIME| 3104 NFS_CAP_SECURITY_LABEL); 3105 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 3106 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3107 server->caps |= NFS_CAP_ACLS; 3108 if (res.has_links != 0) 3109 server->caps |= NFS_CAP_HARDLINKS; 3110 if (res.has_symlinks != 0) 3111 server->caps |= NFS_CAP_SYMLINKS; 3112 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 3113 server->caps |= NFS_CAP_FILEID; 3114 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 3115 server->caps |= NFS_CAP_MODE; 3116 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 3117 server->caps |= NFS_CAP_NLINK; 3118 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 3119 server->caps |= NFS_CAP_OWNER; 3120 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 3121 server->caps |= NFS_CAP_OWNER_GROUP; 3122 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 3123 server->caps |= NFS_CAP_ATIME; 3124 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 3125 server->caps |= NFS_CAP_CTIME; 3126 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 3127 server->caps |= NFS_CAP_MTIME; 3128 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 3129 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 3130 server->caps |= NFS_CAP_SECURITY_LABEL; 3131 #endif 3132 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 3133 sizeof(server->attr_bitmask)); 3134 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 3135 3136 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 3137 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 3138 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 3139 server->cache_consistency_bitmask[2] = 0; 3140 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 3141 sizeof(server->exclcreat_bitmask)); 3142 server->acl_bitmask = res.acl_bitmask; 3143 server->fh_expire_type = res.fh_expire_type; 3144 } 3145 3146 return status; 3147 } 3148 3149 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3150 { 3151 struct nfs4_exception exception = { }; 3152 int err; 3153 do { 3154 err = nfs4_handle_exception(server, 3155 _nfs4_server_capabilities(server, fhandle), 3156 &exception); 3157 } while (exception.retry); 3158 return err; 3159 } 3160 3161 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh 
*fhandle, 3162 struct nfs_fsinfo *info) 3163 { 3164 u32 bitmask[3]; 3165 struct nfs4_lookup_root_arg args = { 3166 .bitmask = bitmask, 3167 }; 3168 struct nfs4_lookup_res res = { 3169 .server = server, 3170 .fattr = info->fattr, 3171 .fh = fhandle, 3172 }; 3173 struct rpc_message msg = { 3174 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 3175 .rpc_argp = &args, 3176 .rpc_resp = &res, 3177 }; 3178 3179 bitmask[0] = nfs4_fattr_bitmap[0]; 3180 bitmask[1] = nfs4_fattr_bitmap[1]; 3181 /* 3182 * Process the label in the upcoming getfattr 3183 */ 3184 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 3185 3186 nfs_fattr_init(info->fattr); 3187 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3188 } 3189 3190 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 3191 struct nfs_fsinfo *info) 3192 { 3193 struct nfs4_exception exception = { }; 3194 int err; 3195 do { 3196 err = _nfs4_lookup_root(server, fhandle, info); 3197 trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 3198 switch (err) { 3199 case 0: 3200 case -NFS4ERR_WRONGSEC: 3201 goto out; 3202 default: 3203 err = nfs4_handle_exception(server, err, &exception); 3204 } 3205 } while (exception.retry); 3206 out: 3207 return err; 3208 } 3209 3210 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3211 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 3212 { 3213 struct rpc_auth_create_args auth_args = { 3214 .pseudoflavor = flavor, 3215 }; 3216 struct rpc_auth *auth; 3217 int ret; 3218 3219 auth = rpcauth_create(&auth_args, server->client); 3220 if (IS_ERR(auth)) { 3221 ret = -EACCES; 3222 goto out; 3223 } 3224 ret = nfs4_lookup_root(server, fhandle, info); 3225 out: 3226 return ret; 3227 } 3228 3229 /* 3230 * Retry pseudoroot lookup with various security flavors. We do this when: 3231 * 3232 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 3233 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 3234 * 3235 * Returns zero on success, or a negative NFS4ERR value, or a 3236 * negative errno value. 3237 */ 3238 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3239 struct nfs_fsinfo *info) 3240 { 3241 /* Per 3530bis 15.33.5 */ 3242 static const rpc_authflavor_t flav_array[] = { 3243 RPC_AUTH_GSS_KRB5P, 3244 RPC_AUTH_GSS_KRB5I, 3245 RPC_AUTH_GSS_KRB5, 3246 RPC_AUTH_UNIX, /* courtesy */ 3247 RPC_AUTH_NULL, 3248 }; 3249 int status = -EPERM; 3250 size_t i; 3251 3252 if (server->auth_info.flavor_len > 0) { 3253 /* try each flavor specified by user */ 3254 for (i = 0; i < server->auth_info.flavor_len; i++) { 3255 status = nfs4_lookup_root_sec(server, fhandle, info, 3256 server->auth_info.flavors[i]); 3257 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 3258 continue; 3259 break; 3260 } 3261 } else { 3262 /* no flavors specified by user, try default list */ 3263 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 3264 status = nfs4_lookup_root_sec(server, fhandle, info, 3265 flav_array[i]); 3266 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 3267 continue; 3268 break; 3269 } 3270 } 3271 3272 /* 3273 * -EACCESS could mean that the user doesn't have correct permissions 3274 * to access the mount. It could also mean that we tried to mount 3275 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 3276 * existing mount programs don't handle -EACCES very well so it should 3277 * be mapped to -EPERM instead. 
3278 */ 3279 if (status == -EACCES) 3280 status = -EPERM; 3281 return status; 3282 } 3283 3284 /** 3285 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 3286 * @server: initialized nfs_server handle 3287 * @fhandle: we fill in the pseudo-fs root file handle 3288 * @info: we fill in an FSINFO struct 3289 * @auth_probe: probe the auth flavours 3290 * 3291 * Returns zero on success, or a negative errno. 3292 */ 3293 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 3294 struct nfs_fsinfo *info, 3295 bool auth_probe) 3296 { 3297 int status = 0; 3298 3299 if (!auth_probe) 3300 status = nfs4_lookup_root(server, fhandle, info); 3301 3302 if (auth_probe || status == NFS4ERR_WRONGSEC) 3303 status = server->nfs_client->cl_mvops->find_root_sec(server, 3304 fhandle, info); 3305 3306 if (status == 0) 3307 status = nfs4_server_capabilities(server, fhandle); 3308 if (status == 0) 3309 status = nfs4_do_fsinfo(server, fhandle, info); 3310 3311 return nfs4_map_errors(status); 3312 } 3313 3314 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 3315 struct nfs_fsinfo *info) 3316 { 3317 int error; 3318 struct nfs_fattr *fattr = info->fattr; 3319 struct nfs4_label *label = NULL; 3320 3321 error = nfs4_server_capabilities(server, mntfh); 3322 if (error < 0) { 3323 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 3324 return error; 3325 } 3326 3327 label = nfs4_label_alloc(server, GFP_KERNEL); 3328 if (IS_ERR(label)) 3329 return PTR_ERR(label); 3330 3331 error = nfs4_proc_getattr(server, mntfh, fattr, label); 3332 if (error < 0) { 3333 dprintk("nfs4_get_root: getattr error = %d\n", -error); 3334 goto err_free_label; 3335 } 3336 3337 if (fattr->valid & NFS_ATTR_FATTR_FSID && 3338 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 3339 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 3340 3341 err_free_label: 3342 nfs4_label_free(label); 3343 3344 return error; 3345 } 3346 3347 /* 3348 * Get locations and (maybe) other attributes of a referral. 3349 * Note that we'll actually follow the referral later when 3350 * we detect fsid mismatch in inode revalidation 3351 */ 3352 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 3353 const struct qstr *name, struct nfs_fattr *fattr, 3354 struct nfs_fh *fhandle) 3355 { 3356 int status = -ENOMEM; 3357 struct page *page = NULL; 3358 struct nfs4_fs_locations *locations = NULL; 3359 3360 page = alloc_page(GFP_KERNEL); 3361 if (page == NULL) 3362 goto out; 3363 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 3364 if (locations == NULL) 3365 goto out; 3366 3367 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 3368 if (status != 0) 3369 goto out; 3370 3371 /* 3372 * If the fsid didn't change, this is a migration event, not a 3373 * referral. Cause us to drop into the exception handler, which 3374 * will kick off migration recovery. 
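 * Returning -NFS4ERR_MOVED from here is what pushes the caller into
 * that exception handling.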
 */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
		dprintk("%s: server did not return a different fsid for"
			" a referral at %s\n", __func__, name->name);
		status = -NFS4ERR_MOVED;
		goto out;
	}
	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
	nfs_fixup_referral_attributes(&locations->fattr);

	/* replace the lookup nfs_fattr with the locations nfs_fattr */
	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}

static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
				struct nfs_fattr *fattr, struct nfs4_label *label)
{
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.label = label,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	args.bitmask = nfs4_bitmask(server, label);

	nfs_fattr_init(fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
				struct nfs_fattr *fattr, struct nfs4_label *label)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_proc_getattr(server, fhandle, fattr, label);
		trace_nfs4_getattr(server, fhandle, fattr, err);
		err = nfs4_handle_exception(server, err,
				&exception);
	} while (exception.retry);
	return err;
}

/*
 * The file is not closed if it was opened due to a request to change
 * the size of the file. The open call will not be needed once the
 * VFS layer lookup-intents are implemented.
 *
 * Close is called when the inode is destroyed.
 * If we haven't opened the file for O_WRONLY, we need to do so in the
 * size-change case in order to obtain a stateid.
 *
 * Got race?
 * Because OPEN is always done by name in NFSv4, it is
 * possible that we opened a different file by the same
 * name. We can recognize this race condition, but we
 * can't do anything about it besides returning an error.
 *
 * This will be fixed with VFS changes (lookup-intent).
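 * Until then, a size-changing SETATTR issued without an attached open
 * context falls back to the zero stateid unless a delegation stateid
 * can be used (see _nfs4_do_setattr).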
3449 */ 3450 static int 3451 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 3452 struct iattr *sattr) 3453 { 3454 struct inode *inode = d_inode(dentry); 3455 struct rpc_cred *cred = NULL; 3456 struct nfs4_state *state = NULL; 3457 struct nfs4_label *label = NULL; 3458 int status; 3459 3460 if (pnfs_ld_layoutret_on_setattr(inode) && 3461 sattr->ia_valid & ATTR_SIZE && 3462 sattr->ia_size < i_size_read(inode)) 3463 pnfs_commit_and_return_layout(inode); 3464 3465 nfs_fattr_init(fattr); 3466 3467 /* Deal with open(O_TRUNC) */ 3468 if (sattr->ia_valid & ATTR_OPEN) 3469 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 3470 3471 /* Optimization: if the end result is no change, don't RPC */ 3472 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 3473 return 0; 3474 3475 /* Search for an existing open(O_WRITE) file */ 3476 if (sattr->ia_valid & ATTR_FILE) { 3477 struct nfs_open_context *ctx; 3478 3479 ctx = nfs_file_open_context(sattr->ia_file); 3480 if (ctx) { 3481 cred = ctx->cred; 3482 state = ctx->state; 3483 } 3484 } 3485 3486 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 3487 if (IS_ERR(label)) 3488 return PTR_ERR(label); 3489 3490 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label); 3491 if (status == 0) { 3492 nfs_setattr_update_inode(inode, sattr, fattr); 3493 nfs_setsecurity(inode, fattr, label); 3494 } 3495 nfs4_label_free(label); 3496 return status; 3497 } 3498 3499 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 3500 const struct qstr *name, struct nfs_fh *fhandle, 3501 struct nfs_fattr *fattr, struct nfs4_label *label) 3502 { 3503 struct nfs_server *server = NFS_SERVER(dir); 3504 int status; 3505 struct nfs4_lookup_arg args = { 3506 .bitmask = server->attr_bitmask, 3507 .dir_fh = NFS_FH(dir), 3508 .name = name, 3509 }; 3510 struct nfs4_lookup_res res = { 3511 .server = server, 3512 .fattr = fattr, 3513 .label = label, 3514 .fh = fhandle, 3515 }; 3516 struct rpc_message msg = { 3517 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 3518 .rpc_argp = &args, 3519 .rpc_resp = &res, 3520 }; 3521 3522 args.bitmask = nfs4_bitmask(server, label); 3523 3524 nfs_fattr_init(fattr); 3525 3526 dprintk("NFS call lookup %s\n", name->name); 3527 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 3528 dprintk("NFS reply lookup: %d\n", status); 3529 return status; 3530 } 3531 3532 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 3533 { 3534 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 3535 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 3536 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 3537 fattr->nlink = 2; 3538 } 3539 3540 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 3541 struct qstr *name, struct nfs_fh *fhandle, 3542 struct nfs_fattr *fattr, struct nfs4_label *label) 3543 { 3544 struct nfs4_exception exception = { }; 3545 struct rpc_clnt *client = *clnt; 3546 int err; 3547 do { 3548 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label); 3549 trace_nfs4_lookup(dir, name, err); 3550 switch (err) { 3551 case -NFS4ERR_BADNAME: 3552 err = -ENOENT; 3553 goto out; 3554 case -NFS4ERR_MOVED: 3555 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 3556 if (err == -NFS4ERR_MOVED) 3557 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3558 goto out; 3559 case -NFS4ERR_WRONGSEC: 3560 err = -EPERM; 3561 if (client != *clnt) 3562 goto out; 3563 client = nfs4_negotiate_security(client, dir, name); 3564 if 
(IS_ERR(client)) 3565 return PTR_ERR(client); 3566 3567 exception.retry = 1; 3568 break; 3569 default: 3570 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3571 } 3572 } while (exception.retry); 3573 3574 out: 3575 if (err == 0) 3576 *clnt = client; 3577 else if (client != *clnt) 3578 rpc_shutdown_client(client); 3579 3580 return err; 3581 } 3582 3583 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 3584 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 3585 struct nfs4_label *label) 3586 { 3587 int status; 3588 struct rpc_clnt *client = NFS_CLIENT(dir); 3589 3590 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label); 3591 if (client != NFS_CLIENT(dir)) { 3592 rpc_shutdown_client(client); 3593 nfs_fixup_secinfo_attributes(fattr); 3594 } 3595 return status; 3596 } 3597 3598 struct rpc_clnt * 3599 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 3600 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 3601 { 3602 struct rpc_clnt *client = NFS_CLIENT(dir); 3603 int status; 3604 3605 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL); 3606 if (status < 0) 3607 return ERR_PTR(status); 3608 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client; 3609 } 3610 3611 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3612 { 3613 struct nfs_server *server = NFS_SERVER(inode); 3614 struct nfs4_accessargs args = { 3615 .fh = NFS_FH(inode), 3616 .bitmask = server->cache_consistency_bitmask, 3617 }; 3618 struct nfs4_accessres res = { 3619 .server = server, 3620 }; 3621 struct rpc_message msg = { 3622 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 3623 .rpc_argp = &args, 3624 .rpc_resp = &res, 3625 .rpc_cred = entry->cred, 3626 }; 3627 int mode = entry->mask; 3628 int status = 0; 3629 3630 /* 3631 * Determine which access bits we want to ask for... 3632 */ 3633 if (mode & MAY_READ) 3634 args.access |= NFS4_ACCESS_READ; 3635 if (S_ISDIR(inode->i_mode)) { 3636 if (mode & MAY_WRITE) 3637 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 3638 if (mode & MAY_EXEC) 3639 args.access |= NFS4_ACCESS_LOOKUP; 3640 } else { 3641 if (mode & MAY_WRITE) 3642 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 3643 if (mode & MAY_EXEC) 3644 args.access |= NFS4_ACCESS_EXECUTE; 3645 } 3646 3647 res.fattr = nfs_alloc_fattr(); 3648 if (res.fattr == NULL) 3649 return -ENOMEM; 3650 3651 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3652 if (!status) { 3653 nfs_access_set_mask(entry, res.access); 3654 nfs_refresh_inode(inode, res.fattr); 3655 } 3656 nfs_free_fattr(res.fattr); 3657 return status; 3658 } 3659 3660 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3661 { 3662 struct nfs4_exception exception = { }; 3663 int err; 3664 do { 3665 err = _nfs4_proc_access(inode, entry); 3666 trace_nfs4_access(inode, err); 3667 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3668 &exception); 3669 } while (exception.retry); 3670 return err; 3671 } 3672 3673 /* 3674 * TODO: For the time being, we don't try to get any attributes 3675 * along with any of the zero-copy operations READ, READDIR, 3676 * READLINK, WRITE. 3677 * 3678 * In the case of the first three, we want to put the GETATTR 3679 * after the read-type operation -- this is because it is hard 3680 * to predict the length of a GETATTR response in v4, and thus 3681 * align the READ data correctly. 
This means that the GETATTR 3682 * may end up partially falling into the page cache, and we should 3683 * shift it into the 'tail' of the xdr_buf before processing. 3684 * To do this efficiently, we need to know the total length 3685 * of data received, which doesn't seem to be available outside 3686 * of the RPC layer. 3687 * 3688 * In the case of WRITE, we also want to put the GETATTR after 3689 * the operation -- in this case because we want to make sure 3690 * we get the post-operation mtime and size. 3691 * 3692 * Both of these changes to the XDR layer would in fact be quite 3693 * minor, but I decided to leave them for a subsequent patch. 3694 */ 3695 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 3696 unsigned int pgbase, unsigned int pglen) 3697 { 3698 struct nfs4_readlink args = { 3699 .fh = NFS_FH(inode), 3700 .pgbase = pgbase, 3701 .pglen = pglen, 3702 .pages = &page, 3703 }; 3704 struct nfs4_readlink_res res; 3705 struct rpc_message msg = { 3706 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 3707 .rpc_argp = &args, 3708 .rpc_resp = &res, 3709 }; 3710 3711 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 3712 } 3713 3714 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 3715 unsigned int pgbase, unsigned int pglen) 3716 { 3717 struct nfs4_exception exception = { }; 3718 int err; 3719 do { 3720 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 3721 trace_nfs4_readlink(inode, err); 3722 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3723 &exception); 3724 } while (exception.retry); 3725 return err; 3726 } 3727 3728 /* 3729 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 3730 */ 3731 static int 3732 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 3733 int flags) 3734 { 3735 struct nfs4_label l, *ilabel = NULL; 3736 struct nfs_open_context *ctx; 3737 struct nfs4_state *state; 3738 int status = 0; 3739 3740 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 3741 if (IS_ERR(ctx)) 3742 return PTR_ERR(ctx); 3743 3744 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 3745 3746 sattr->ia_mode &= ~current_umask(); 3747 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 3748 if (IS_ERR(state)) { 3749 status = PTR_ERR(state); 3750 goto out; 3751 } 3752 out: 3753 nfs4_label_release_security(ilabel); 3754 put_nfs_open_context(ctx); 3755 return status; 3756 } 3757 3758 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 3759 { 3760 struct nfs_server *server = NFS_SERVER(dir); 3761 struct nfs_removeargs args = { 3762 .fh = NFS_FH(dir), 3763 .name = *name, 3764 }; 3765 struct nfs_removeres res = { 3766 .server = server, 3767 }; 3768 struct rpc_message msg = { 3769 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 3770 .rpc_argp = &args, 3771 .rpc_resp = &res, 3772 }; 3773 int status; 3774 3775 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 3776 if (status == 0) 3777 update_changeattr(dir, &res.cinfo); 3778 return status; 3779 } 3780 3781 static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 3782 { 3783 struct nfs4_exception exception = { }; 3784 int err; 3785 do { 3786 err = _nfs4_proc_remove(dir, name); 3787 trace_nfs4_remove(dir, name, err); 3788 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3789 &exception); 3790 } while (exception.retry); 3791 return err; 3792 } 3793 3794 static void nfs4_proc_unlink_setup(struct rpc_message 
*msg, struct inode *dir) 3795 { 3796 struct nfs_server *server = NFS_SERVER(dir); 3797 struct nfs_removeargs *args = msg->rpc_argp; 3798 struct nfs_removeres *res = msg->rpc_resp; 3799 3800 res->server = server; 3801 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 3802 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1); 3803 3804 nfs_fattr_init(res->dir_attr); 3805 } 3806 3807 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 3808 { 3809 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb), 3810 &data->args.seq_args, 3811 &data->res.seq_res, 3812 task); 3813 } 3814 3815 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 3816 { 3817 struct nfs_unlinkdata *data = task->tk_calldata; 3818 struct nfs_removeres *res = &data->res; 3819 3820 if (!nfs4_sequence_done(task, &res->seq_res)) 3821 return 0; 3822 if (nfs4_async_handle_error(task, res->server, NULL, 3823 &data->timeout) == -EAGAIN) 3824 return 0; 3825 update_changeattr(dir, &res->cinfo); 3826 return 1; 3827 } 3828 3829 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 3830 { 3831 struct nfs_server *server = NFS_SERVER(dir); 3832 struct nfs_renameargs *arg = msg->rpc_argp; 3833 struct nfs_renameres *res = msg->rpc_resp; 3834 3835 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 3836 res->server = server; 3837 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1); 3838 } 3839 3840 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 3841 { 3842 nfs4_setup_sequence(NFS_SERVER(data->old_dir), 3843 &data->args.seq_args, 3844 &data->res.seq_res, 3845 task); 3846 } 3847 3848 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 3849 struct inode *new_dir) 3850 { 3851 struct nfs_renamedata *data = task->tk_calldata; 3852 struct nfs_renameres *res = &data->res; 3853 3854 if (!nfs4_sequence_done(task, &res->seq_res)) 3855 return 0; 3856 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 3857 return 0; 3858 3859 update_changeattr(old_dir, &res->old_cinfo); 3860 update_changeattr(new_dir, &res->new_cinfo); 3861 return 1; 3862 } 3863 3864 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3865 { 3866 struct nfs_server *server = NFS_SERVER(inode); 3867 struct nfs4_link_arg arg = { 3868 .fh = NFS_FH(inode), 3869 .dir_fh = NFS_FH(dir), 3870 .name = name, 3871 .bitmask = server->attr_bitmask, 3872 }; 3873 struct nfs4_link_res res = { 3874 .server = server, 3875 .label = NULL, 3876 }; 3877 struct rpc_message msg = { 3878 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3879 .rpc_argp = &arg, 3880 .rpc_resp = &res, 3881 }; 3882 int status = -ENOMEM; 3883 3884 res.fattr = nfs_alloc_fattr(); 3885 if (res.fattr == NULL) 3886 goto out; 3887 3888 res.label = nfs4_label_alloc(server, GFP_KERNEL); 3889 if (IS_ERR(res.label)) { 3890 status = PTR_ERR(res.label); 3891 goto out; 3892 } 3893 arg.bitmask = nfs4_bitmask(server, res.label); 3894 3895 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3896 if (!status) { 3897 update_changeattr(dir, &res.cinfo); 3898 status = nfs_post_op_update_inode(inode, res.fattr); 3899 if (!status) 3900 nfs_setsecurity(inode, res.fattr, res.label); 3901 } 3902 3903 3904 nfs4_label_free(res.label); 3905 3906 out: 3907 nfs_free_fattr(res.fattr); 3908 return status; 3909 } 3910 3911 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 
3912 { 3913 struct nfs4_exception exception = { }; 3914 int err; 3915 do { 3916 err = nfs4_handle_exception(NFS_SERVER(inode), 3917 _nfs4_proc_link(inode, dir, name), 3918 &exception); 3919 } while (exception.retry); 3920 return err; 3921 } 3922 3923 struct nfs4_createdata { 3924 struct rpc_message msg; 3925 struct nfs4_create_arg arg; 3926 struct nfs4_create_res res; 3927 struct nfs_fh fh; 3928 struct nfs_fattr fattr; 3929 struct nfs4_label *label; 3930 }; 3931 3932 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3933 struct qstr *name, struct iattr *sattr, u32 ftype) 3934 { 3935 struct nfs4_createdata *data; 3936 3937 data = kzalloc(sizeof(*data), GFP_KERNEL); 3938 if (data != NULL) { 3939 struct nfs_server *server = NFS_SERVER(dir); 3940 3941 data->label = nfs4_label_alloc(server, GFP_KERNEL); 3942 if (IS_ERR(data->label)) 3943 goto out_free; 3944 3945 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3946 data->msg.rpc_argp = &data->arg; 3947 data->msg.rpc_resp = &data->res; 3948 data->arg.dir_fh = NFS_FH(dir); 3949 data->arg.server = server; 3950 data->arg.name = name; 3951 data->arg.attrs = sattr; 3952 data->arg.ftype = ftype; 3953 data->arg.bitmask = nfs4_bitmask(server, data->label); 3954 data->res.server = server; 3955 data->res.fh = &data->fh; 3956 data->res.fattr = &data->fattr; 3957 data->res.label = data->label; 3958 nfs_fattr_init(data->res.fattr); 3959 } 3960 return data; 3961 out_free: 3962 kfree(data); 3963 return NULL; 3964 } 3965 3966 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3967 { 3968 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3969 &data->arg.seq_args, &data->res.seq_res, 1); 3970 if (status == 0) { 3971 update_changeattr(dir, &data->res.dir_cinfo); 3972 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); 3973 } 3974 return status; 3975 } 3976 3977 static void nfs4_free_createdata(struct nfs4_createdata *data) 3978 { 3979 nfs4_label_free(data->label); 3980 kfree(data); 3981 } 3982 3983 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3984 struct page *page, unsigned int len, struct iattr *sattr, 3985 struct nfs4_label *label) 3986 { 3987 struct nfs4_createdata *data; 3988 int status = -ENAMETOOLONG; 3989 3990 if (len > NFS4_MAXPATHLEN) 3991 goto out; 3992 3993 status = -ENOMEM; 3994 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3995 if (data == NULL) 3996 goto out; 3997 3998 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3999 data->arg.u.symlink.pages = &page; 4000 data->arg.u.symlink.len = len; 4001 data->arg.label = label; 4002 4003 status = nfs4_do_create(dir, dentry, data); 4004 4005 nfs4_free_createdata(data); 4006 out: 4007 return status; 4008 } 4009 4010 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 4011 struct page *page, unsigned int len, struct iattr *sattr) 4012 { 4013 struct nfs4_exception exception = { }; 4014 struct nfs4_label l, *label = NULL; 4015 int err; 4016 4017 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4018 4019 do { 4020 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label); 4021 trace_nfs4_symlink(dir, &dentry->d_name, err); 4022 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4023 &exception); 4024 } while (exception.retry); 4025 4026 nfs4_label_release_security(label); 4027 return err; 4028 } 4029 4030 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 4031 
struct iattr *sattr, struct nfs4_label *label) 4032 { 4033 struct nfs4_createdata *data; 4034 int status = -ENOMEM; 4035 4036 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 4037 if (data == NULL) 4038 goto out; 4039 4040 data->arg.label = label; 4041 status = nfs4_do_create(dir, dentry, data); 4042 4043 nfs4_free_createdata(data); 4044 out: 4045 return status; 4046 } 4047 4048 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 4049 struct iattr *sattr) 4050 { 4051 struct nfs4_exception exception = { }; 4052 struct nfs4_label l, *label = NULL; 4053 int err; 4054 4055 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4056 4057 sattr->ia_mode &= ~current_umask(); 4058 do { 4059 err = _nfs4_proc_mkdir(dir, dentry, sattr, label); 4060 trace_nfs4_mkdir(dir, &dentry->d_name, err); 4061 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4062 &exception); 4063 } while (exception.retry); 4064 nfs4_label_release_security(label); 4065 4066 return err; 4067 } 4068 4069 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 4070 u64 cookie, struct page **pages, unsigned int count, int plus) 4071 { 4072 struct inode *dir = d_inode(dentry); 4073 struct nfs4_readdir_arg args = { 4074 .fh = NFS_FH(dir), 4075 .pages = pages, 4076 .pgbase = 0, 4077 .count = count, 4078 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask, 4079 .plus = plus, 4080 }; 4081 struct nfs4_readdir_res res; 4082 struct rpc_message msg = { 4083 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 4084 .rpc_argp = &args, 4085 .rpc_resp = &res, 4086 .rpc_cred = cred, 4087 }; 4088 int status; 4089 4090 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__, 4091 dentry, 4092 (unsigned long long)cookie); 4093 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 4094 res.pgbase = args.pgbase; 4095 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 4096 if (status >= 0) { 4097 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 4098 status += args.pgbase; 4099 } 4100 4101 nfs_invalidate_atime(dir); 4102 4103 dprintk("%s: returns %d\n", __func__, status); 4104 return status; 4105 } 4106 4107 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 4108 u64 cookie, struct page **pages, unsigned int count, int plus) 4109 { 4110 struct nfs4_exception exception = { }; 4111 int err; 4112 do { 4113 err = _nfs4_proc_readdir(dentry, cred, cookie, 4114 pages, count, plus); 4115 trace_nfs4_readdir(d_inode(dentry), err); 4116 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err, 4117 &exception); 4118 } while (exception.retry); 4119 return err; 4120 } 4121 4122 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 4123 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 4124 { 4125 struct nfs4_createdata *data; 4126 int mode = sattr->ia_mode; 4127 int status = -ENOMEM; 4128 4129 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 4130 if (data == NULL) 4131 goto out; 4132 4133 if (S_ISFIFO(mode)) 4134 data->arg.ftype = NF4FIFO; 4135 else if (S_ISBLK(mode)) { 4136 data->arg.ftype = NF4BLK; 4137 data->arg.u.device.specdata1 = MAJOR(rdev); 4138 data->arg.u.device.specdata2 = MINOR(rdev); 4139 } 4140 else if (S_ISCHR(mode)) { 4141 data->arg.ftype = NF4CHR; 4142 data->arg.u.device.specdata1 = MAJOR(rdev); 4143 data->arg.u.device.specdata2 = MINOR(rdev); 4144 } else if (!S_ISSOCK(mode)) { 4145 status = -EINVAL; 4146 goto out_free; 4147 } 4148 
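	/* At this point the NFSv4 file type has been settled: FIFOs and
	 * block/character devices override the NF4SOCK default chosen in
	 * nfs4_alloc_createdata() (splitting the device number into
	 * specdata1/specdata2), and anything else that is not a socket has
	 * already been rejected with -EINVAL above. */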
4149 data->arg.label = label; 4150 status = nfs4_do_create(dir, dentry, data); 4151 out_free: 4152 nfs4_free_createdata(data); 4153 out: 4154 return status; 4155 } 4156 4157 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 4158 struct iattr *sattr, dev_t rdev) 4159 { 4160 struct nfs4_exception exception = { }; 4161 struct nfs4_label l, *label = NULL; 4162 int err; 4163 4164 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4165 4166 sattr->ia_mode &= ~current_umask(); 4167 do { 4168 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 4169 trace_nfs4_mknod(dir, &dentry->d_name, err); 4170 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4171 &exception); 4172 } while (exception.retry); 4173 4174 nfs4_label_release_security(label); 4175 4176 return err; 4177 } 4178 4179 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 4180 struct nfs_fsstat *fsstat) 4181 { 4182 struct nfs4_statfs_arg args = { 4183 .fh = fhandle, 4184 .bitmask = server->attr_bitmask, 4185 }; 4186 struct nfs4_statfs_res res = { 4187 .fsstat = fsstat, 4188 }; 4189 struct rpc_message msg = { 4190 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 4191 .rpc_argp = &args, 4192 .rpc_resp = &res, 4193 }; 4194 4195 nfs_fattr_init(fsstat->fattr); 4196 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4197 } 4198 4199 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 4200 { 4201 struct nfs4_exception exception = { }; 4202 int err; 4203 do { 4204 err = nfs4_handle_exception(server, 4205 _nfs4_proc_statfs(server, fhandle, fsstat), 4206 &exception); 4207 } while (exception.retry); 4208 return err; 4209 } 4210 4211 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 4212 struct nfs_fsinfo *fsinfo) 4213 { 4214 struct nfs4_fsinfo_arg args = { 4215 .fh = fhandle, 4216 .bitmask = server->attr_bitmask, 4217 }; 4218 struct nfs4_fsinfo_res res = { 4219 .fsinfo = fsinfo, 4220 }; 4221 struct rpc_message msg = { 4222 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 4223 .rpc_argp = &args, 4224 .rpc_resp = &res, 4225 }; 4226 4227 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4228 } 4229 4230 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4231 { 4232 struct nfs4_exception exception = { }; 4233 unsigned long now = jiffies; 4234 int err; 4235 4236 do { 4237 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 4238 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 4239 if (err == 0) { 4240 struct nfs_client *clp = server->nfs_client; 4241 4242 spin_lock(&clp->cl_lock); 4243 clp->cl_lease_time = fsinfo->lease_time * HZ; 4244 clp->cl_last_renewal = now; 4245 spin_unlock(&clp->cl_lock); 4246 break; 4247 } 4248 err = nfs4_handle_exception(server, err, &exception); 4249 } while (exception.retry); 4250 return err; 4251 } 4252 4253 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4254 { 4255 int error; 4256 4257 nfs_fattr_init(fsinfo->fattr); 4258 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 4259 if (error == 0) { 4260 /* block layout checks this! 
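 * (the server-reported blksize must be stored in server->pnfs_blksize
 * before set_pnfs_layoutdriver() runs, because the block layout driver
 * consults it when it attaches)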
*/ 4261 server->pnfs_blksize = fsinfo->blksize; 4262 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 4263 } 4264 4265 return error; 4266 } 4267 4268 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4269 struct nfs_pathconf *pathconf) 4270 { 4271 struct nfs4_pathconf_arg args = { 4272 .fh = fhandle, 4273 .bitmask = server->attr_bitmask, 4274 }; 4275 struct nfs4_pathconf_res res = { 4276 .pathconf = pathconf, 4277 }; 4278 struct rpc_message msg = { 4279 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 4280 .rpc_argp = &args, 4281 .rpc_resp = &res, 4282 }; 4283 4284 /* None of the pathconf attributes are mandatory to implement */ 4285 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 4286 memset(pathconf, 0, sizeof(*pathconf)); 4287 return 0; 4288 } 4289 4290 nfs_fattr_init(pathconf->fattr); 4291 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4292 } 4293 4294 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4295 struct nfs_pathconf *pathconf) 4296 { 4297 struct nfs4_exception exception = { }; 4298 int err; 4299 4300 do { 4301 err = nfs4_handle_exception(server, 4302 _nfs4_proc_pathconf(server, fhandle, pathconf), 4303 &exception); 4304 } while (exception.retry); 4305 return err; 4306 } 4307 4308 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 4309 const struct nfs_open_context *ctx, 4310 const struct nfs_lock_context *l_ctx, 4311 fmode_t fmode) 4312 { 4313 const struct nfs_lockowner *lockowner = NULL; 4314 4315 if (l_ctx != NULL) 4316 lockowner = &l_ctx->lockowner; 4317 return nfs4_select_rw_stateid(ctx->state, fmode, lockowner, stateid, NULL); 4318 } 4319 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 4320 4321 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 4322 const struct nfs_open_context *ctx, 4323 const struct nfs_lock_context *l_ctx, 4324 fmode_t fmode) 4325 { 4326 nfs4_stateid current_stateid; 4327 4328 /* If the current stateid represents a lost lock, then exit */ 4329 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO) 4330 return true; 4331 return nfs4_stateid_match(stateid, &current_stateid); 4332 } 4333 4334 static bool nfs4_error_stateid_expired(int err) 4335 { 4336 switch (err) { 4337 case -NFS4ERR_DELEG_REVOKED: 4338 case -NFS4ERR_ADMIN_REVOKED: 4339 case -NFS4ERR_BAD_STATEID: 4340 case -NFS4ERR_STALE_STATEID: 4341 case -NFS4ERR_OLD_STATEID: 4342 case -NFS4ERR_OPENMODE: 4343 case -NFS4ERR_EXPIRED: 4344 return true; 4345 } 4346 return false; 4347 } 4348 4349 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr) 4350 { 4351 nfs_invalidate_atime(hdr->inode); 4352 } 4353 4354 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 4355 { 4356 struct nfs_server *server = NFS_SERVER(hdr->inode); 4357 4358 trace_nfs4_read(hdr, task->tk_status); 4359 if (nfs4_async_handle_error(task, server, 4360 hdr->args.context->state, 4361 NULL) == -EAGAIN) { 4362 rpc_restart_call_prepare(task); 4363 return -EAGAIN; 4364 } 4365 4366 __nfs4_read_done_cb(hdr); 4367 if (task->tk_status > 0) 4368 renew_lease(server, hdr->timestamp); 4369 return 0; 4370 } 4371 4372 static bool nfs4_read_stateid_changed(struct rpc_task *task, 4373 struct nfs_pgio_args *args) 4374 { 4375 4376 if (!nfs4_error_stateid_expired(task->tk_status) || 4377 nfs4_stateid_is_current(&args->stateid, 4378 args->context, 4379 args->lock_context, 4380 FMODE_READ)) 4381 return false; 4382 rpc_restart_call_prepare(task); 4383 return true; 4384 } 4385 4386 static int
nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4387 { 4388 4389 dprintk("--> %s\n", __func__); 4390 4391 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4392 return -EAGAIN; 4393 if (nfs4_read_stateid_changed(task, &hdr->args)) 4394 return -EAGAIN; 4395 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 4396 nfs4_read_done_cb(task, hdr); 4397 } 4398 4399 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 4400 struct rpc_message *msg) 4401 { 4402 hdr->timestamp = jiffies; 4403 if (!hdr->pgio_done_cb) 4404 hdr->pgio_done_cb = nfs4_read_done_cb; 4405 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 4406 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0); 4407 } 4408 4409 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 4410 struct nfs_pgio_header *hdr) 4411 { 4412 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode), 4413 &hdr->args.seq_args, 4414 &hdr->res.seq_res, 4415 task)) 4416 return 0; 4417 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 4418 hdr->args.lock_context, 4419 hdr->rw_ops->rw_mode) == -EIO) 4420 return -EIO; 4421 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 4422 return -EIO; 4423 return 0; 4424 } 4425 4426 static int nfs4_write_done_cb(struct rpc_task *task, 4427 struct nfs_pgio_header *hdr) 4428 { 4429 struct inode *inode = hdr->inode; 4430 4431 trace_nfs4_write(hdr, task->tk_status); 4432 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4433 hdr->args.context->state, 4434 NULL) == -EAGAIN) { 4435 rpc_restart_call_prepare(task); 4436 return -EAGAIN; 4437 } 4438 if (task->tk_status >= 0) { 4439 renew_lease(NFS_SERVER(inode), hdr->timestamp); 4440 nfs_writeback_update_inode(hdr); 4441 } 4442 return 0; 4443 } 4444 4445 static bool nfs4_write_stateid_changed(struct rpc_task *task, 4446 struct nfs_pgio_args *args) 4447 { 4448 4449 if (!nfs4_error_stateid_expired(task->tk_status) || 4450 nfs4_stateid_is_current(&args->stateid, 4451 args->context, 4452 args->lock_context, 4453 FMODE_WRITE)) 4454 return false; 4455 rpc_restart_call_prepare(task); 4456 return true; 4457 } 4458 4459 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4460 { 4461 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4462 return -EAGAIN; 4463 if (nfs4_write_stateid_changed(task, &hdr->args)) 4464 return -EAGAIN; 4465 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 4466 nfs4_write_done_cb(task, hdr); 4467 } 4468 4469 static 4470 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 4471 { 4472 /* Don't request attributes for pNFS or O_DIRECT writes */ 4473 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 4474 return false; 4475 /* Otherwise, request attributes if and only if we don't hold 4476 * a delegation 4477 */ 4478 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 4479 } 4480 4481 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 4482 struct rpc_message *msg) 4483 { 4484 struct nfs_server *server = NFS_SERVER(hdr->inode); 4485 4486 if (!nfs4_write_need_cache_consistency_data(hdr)) { 4487 hdr->args.bitmask = NULL; 4488 hdr->res.fattr = NULL; 4489 } else 4490 hdr->args.bitmask = server->cache_consistency_bitmask; 4491 4492 if (!hdr->pgio_done_cb) 4493 hdr->pgio_done_cb = nfs4_write_done_cb; 4494 hdr->res.server = server; 4495 hdr->timestamp = jiffies; 4496 4497 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 4498 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1); 4499 } 4500 4501 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 4502 { 4503 nfs4_setup_sequence(NFS_SERVER(data->inode), 4504 &data->args.seq_args, 4505 &data->res.seq_res, 4506 task); 4507 } 4508 4509 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 4510 { 4511 struct inode *inode = data->inode; 4512 4513 trace_nfs4_commit(data, task->tk_status); 4514 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4515 NULL, NULL) == -EAGAIN) { 4516 rpc_restart_call_prepare(task); 4517 return -EAGAIN; 4518 } 4519 return 0; 4520 } 4521 4522 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 4523 { 4524 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4525 return -EAGAIN; 4526 return data->commit_done_cb(task, data); 4527 } 4528 4529 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 4530 { 4531 struct nfs_server *server = NFS_SERVER(data->inode); 4532 4533 if (data->commit_done_cb == NULL) 4534 data->commit_done_cb = nfs4_commit_done_cb; 4535 data->res.server = server; 4536 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 4537 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4538 } 4539 4540 struct nfs4_renewdata { 4541 struct nfs_client *client; 4542 unsigned long timestamp; 4543 }; 4544 4545 /* 4546 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 4547 * standalone procedure for queueing an asynchronous RENEW. 4548 */ 4549 static void nfs4_renew_release(void *calldata) 4550 { 4551 struct nfs4_renewdata *data = calldata; 4552 struct nfs_client *clp = data->client; 4553 4554 if (atomic_read(&clp->cl_count) > 1) 4555 nfs4_schedule_state_renewal(clp); 4556 nfs_put_client(clp); 4557 kfree(data); 4558 } 4559 4560 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 4561 { 4562 struct nfs4_renewdata *data = calldata; 4563 struct nfs_client *clp = data->client; 4564 unsigned long timestamp = data->timestamp; 4565 4566 trace_nfs4_renew_async(clp, task->tk_status); 4567 switch (task->tk_status) { 4568 case 0: 4569 break; 4570 case -NFS4ERR_LEASE_MOVED: 4571 nfs4_schedule_lease_moved_recovery(clp); 4572 break; 4573 default: 4574 /* Unless we're shutting down, schedule state recovery! 
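 * A RENEW failure other than NFS4ERR_CB_PATH_DOWN means the lease
 * itself is in doubt, so full lease recovery is scheduled; if the
 * server only reported CB_PATH_DOWN, the lease is fine and we merely
 * re-establish the callback path.  If NFS_CS_RENEWD has already been
 * cleared we are shutting down and do nothing.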
*/ 4575 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 4576 return; 4577 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 4578 nfs4_schedule_lease_recovery(clp); 4579 return; 4580 } 4581 nfs4_schedule_path_down_recovery(clp); 4582 } 4583 do_renew_lease(clp, timestamp); 4584 } 4585 4586 static const struct rpc_call_ops nfs4_renew_ops = { 4587 .rpc_call_done = nfs4_renew_done, 4588 .rpc_release = nfs4_renew_release, 4589 }; 4590 4591 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 4592 { 4593 struct rpc_message msg = { 4594 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4595 .rpc_argp = clp, 4596 .rpc_cred = cred, 4597 }; 4598 struct nfs4_renewdata *data; 4599 4600 if (renew_flags == 0) 4601 return 0; 4602 if (!atomic_inc_not_zero(&clp->cl_count)) 4603 return -EIO; 4604 data = kmalloc(sizeof(*data), GFP_NOFS); 4605 if (data == NULL) 4606 return -ENOMEM; 4607 data->client = clp; 4608 data->timestamp = jiffies; 4609 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 4610 &nfs4_renew_ops, data); 4611 } 4612 4613 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 4614 { 4615 struct rpc_message msg = { 4616 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4617 .rpc_argp = clp, 4618 .rpc_cred = cred, 4619 }; 4620 unsigned long now = jiffies; 4621 int status; 4622 4623 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4624 if (status < 0) 4625 return status; 4626 do_renew_lease(clp, now); 4627 return 0; 4628 } 4629 4630 static inline int nfs4_server_supports_acls(struct nfs_server *server) 4631 { 4632 return server->caps & NFS_CAP_ACLS; 4633 } 4634 4635 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 4636 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 4637 * the stack. 
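 * For example, with the usual XATTR_SIZE_MAX of 64KB and 4KB pages,
 * NFS4ACL_MAXPAGES below works out to 16, i.e. sixteen page pointers
 * (128 bytes on a 64-bit build) per on-stack pages[] array.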
4638 */ 4639 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 4640 4641 static int buf_to_pages_noslab(const void *buf, size_t buflen, 4642 struct page **pages) 4643 { 4644 struct page *newpage, **spages; 4645 int rc = 0; 4646 size_t len; 4647 spages = pages; 4648 4649 do { 4650 len = min_t(size_t, PAGE_SIZE, buflen); 4651 newpage = alloc_page(GFP_KERNEL); 4652 4653 if (newpage == NULL) 4654 goto unwind; 4655 memcpy(page_address(newpage), buf, len); 4656 buf += len; 4657 buflen -= len; 4658 *pages++ = newpage; 4659 rc++; 4660 } while (buflen != 0); 4661 4662 return rc; 4663 4664 unwind: 4665 for(; rc > 0; rc--) 4666 __free_page(spages[rc-1]); 4667 return -ENOMEM; 4668 } 4669 4670 struct nfs4_cached_acl { 4671 int cached; 4672 size_t len; 4673 char data[0]; 4674 }; 4675 4676 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 4677 { 4678 struct nfs_inode *nfsi = NFS_I(inode); 4679 4680 spin_lock(&inode->i_lock); 4681 kfree(nfsi->nfs4_acl); 4682 nfsi->nfs4_acl = acl; 4683 spin_unlock(&inode->i_lock); 4684 } 4685 4686 static void nfs4_zap_acl_attr(struct inode *inode) 4687 { 4688 nfs4_set_cached_acl(inode, NULL); 4689 } 4690 4691 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 4692 { 4693 struct nfs_inode *nfsi = NFS_I(inode); 4694 struct nfs4_cached_acl *acl; 4695 int ret = -ENOENT; 4696 4697 spin_lock(&inode->i_lock); 4698 acl = nfsi->nfs4_acl; 4699 if (acl == NULL) 4700 goto out; 4701 if (buf == NULL) /* user is just asking for length */ 4702 goto out_len; 4703 if (acl->cached == 0) 4704 goto out; 4705 ret = -ERANGE; /* see getxattr(2) man page */ 4706 if (acl->len > buflen) 4707 goto out; 4708 memcpy(buf, acl->data, acl->len); 4709 out_len: 4710 ret = acl->len; 4711 out: 4712 spin_unlock(&inode->i_lock); 4713 return ret; 4714 } 4715 4716 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 4717 { 4718 struct nfs4_cached_acl *acl; 4719 size_t buflen = sizeof(*acl) + acl_len; 4720 4721 if (buflen <= PAGE_SIZE) { 4722 acl = kmalloc(buflen, GFP_KERNEL); 4723 if (acl == NULL) 4724 goto out; 4725 acl->cached = 1; 4726 _copy_from_pages(acl->data, pages, pgbase, acl_len); 4727 } else { 4728 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 4729 if (acl == NULL) 4730 goto out; 4731 acl->cached = 0; 4732 } 4733 acl->len = acl_len; 4734 out: 4735 nfs4_set_cached_acl(inode, acl); 4736 } 4737 4738 /* 4739 * The getxattr API returns the required buffer length when called with a 4740 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 4741 * the required buf. On a NULL buf, we send a page of data to the server 4742 * guessing that the ACL request can be serviced by a page. If so, we cache 4743 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 4744 * the cache. If not so, we throw away the page, and cache the required 4745 * length. The next getxattr call will then produce another round trip to 4746 * the server, this time with the input buf of the required size. 
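 *
 * A typical consumer (e.g. the nfs4-acl tools) therefore ends up doing
 * roughly the following from userspace -- an illustrative sketch only,
 * with error handling omitted:
 *
 *	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	char *buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 *
 * The second call is then normally satisfied from the cache that the
 * first one populated.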
4747 */ 4748 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4749 { 4750 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 4751 struct nfs_getaclargs args = { 4752 .fh = NFS_FH(inode), 4753 .acl_pages = pages, 4754 .acl_len = buflen, 4755 }; 4756 struct nfs_getaclres res = { 4757 .acl_len = buflen, 4758 }; 4759 struct rpc_message msg = { 4760 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 4761 .rpc_argp = &args, 4762 .rpc_resp = &res, 4763 }; 4764 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4765 int ret = -ENOMEM, i; 4766 4767 /* As long as we're doing a round trip to the server anyway, 4768 * let's be prepared for a page of acl data. */ 4769 if (npages == 0) 4770 npages = 1; 4771 if (npages > ARRAY_SIZE(pages)) 4772 return -ERANGE; 4773 4774 for (i = 0; i < npages; i++) { 4775 pages[i] = alloc_page(GFP_KERNEL); 4776 if (!pages[i]) 4777 goto out_free; 4778 } 4779 4780 /* for decoding across pages */ 4781 res.acl_scratch = alloc_page(GFP_KERNEL); 4782 if (!res.acl_scratch) 4783 goto out_free; 4784 4785 args.acl_len = npages * PAGE_SIZE; 4786 4787 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 4788 __func__, buf, buflen, npages, args.acl_len); 4789 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 4790 &msg, &args.seq_args, &res.seq_res, 0); 4791 if (ret) 4792 goto out_free; 4793 4794 /* Handle the case where the passed-in buffer is too short */ 4795 if (res.acl_flags & NFS4_ACL_TRUNC) { 4796 /* Did the user only issue a request for the acl length? */ 4797 if (buf == NULL) 4798 goto out_ok; 4799 ret = -ERANGE; 4800 goto out_free; 4801 } 4802 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 4803 if (buf) { 4804 if (res.acl_len > buflen) { 4805 ret = -ERANGE; 4806 goto out_free; 4807 } 4808 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 4809 } 4810 out_ok: 4811 ret = res.acl_len; 4812 out_free: 4813 for (i = 0; i < npages; i++) 4814 if (pages[i]) 4815 __free_page(pages[i]); 4816 if (res.acl_scratch) 4817 __free_page(res.acl_scratch); 4818 return ret; 4819 } 4820 4821 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4822 { 4823 struct nfs4_exception exception = { }; 4824 ssize_t ret; 4825 do { 4826 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 4827 trace_nfs4_get_acl(inode, ret); 4828 if (ret >= 0) 4829 break; 4830 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 4831 } while (exception.retry); 4832 return ret; 4833 } 4834 4835 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 4836 { 4837 struct nfs_server *server = NFS_SERVER(inode); 4838 int ret; 4839 4840 if (!nfs4_server_supports_acls(server)) 4841 return -EOPNOTSUPP; 4842 ret = nfs_revalidate_inode(server, inode); 4843 if (ret < 0) 4844 return ret; 4845 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 4846 nfs_zap_acl_cache(inode); 4847 ret = nfs4_read_cached_acl(inode, buf, buflen); 4848 if (ret != -ENOENT) 4849 /* -ENOENT is returned if there is no ACL or if there is an ACL 4850 * but no cached acl data, just the acl length */ 4851 return ret; 4852 return nfs4_get_acl_uncached(inode, buf, buflen); 4853 } 4854 4855 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4856 { 4857 struct nfs_server *server = NFS_SERVER(inode); 4858 struct page *pages[NFS4ACL_MAXPAGES]; 4859 struct nfs_setaclargs arg = { 4860 .fh = NFS_FH(inode), 4861 .acl_pages = pages, 4862 .acl_len = buflen, 4863 }; 4864 struct 
nfs_setaclres res; 4865 struct rpc_message msg = { 4866 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 4867 .rpc_argp = &arg, 4868 .rpc_resp = &res, 4869 }; 4870 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4871 int ret, i; 4872 4873 if (!nfs4_server_supports_acls(server)) 4874 return -EOPNOTSUPP; 4875 if (npages > ARRAY_SIZE(pages)) 4876 return -ERANGE; 4877 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages); 4878 if (i < 0) 4879 return i; 4880 nfs4_inode_return_delegation(inode); 4881 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4882 4883 /* 4884 * Free each page after tx, so the only ref left is 4885 * held by the network stack 4886 */ 4887 for (; i > 0; i--) 4888 put_page(pages[i-1]); 4889 4890 /* 4891 * Acl update can result in inode attribute update. 4892 * so mark the attribute cache invalid. 4893 */ 4894 spin_lock(&inode->i_lock); 4895 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 4896 spin_unlock(&inode->i_lock); 4897 nfs_access_zap_cache(inode); 4898 nfs_zap_acl_cache(inode); 4899 return ret; 4900 } 4901 4902 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4903 { 4904 struct nfs4_exception exception = { }; 4905 int err; 4906 do { 4907 err = __nfs4_proc_set_acl(inode, buf, buflen); 4908 trace_nfs4_set_acl(inode, err); 4909 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4910 &exception); 4911 } while (exception.retry); 4912 return err; 4913 } 4914 4915 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 4916 static int _nfs4_get_security_label(struct inode *inode, void *buf, 4917 size_t buflen) 4918 { 4919 struct nfs_server *server = NFS_SERVER(inode); 4920 struct nfs_fattr fattr; 4921 struct nfs4_label label = {0, 0, buflen, buf}; 4922 4923 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4924 struct nfs4_getattr_arg arg = { 4925 .fh = NFS_FH(inode), 4926 .bitmask = bitmask, 4927 }; 4928 struct nfs4_getattr_res res = { 4929 .fattr = &fattr, 4930 .label = &label, 4931 .server = server, 4932 }; 4933 struct rpc_message msg = { 4934 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4935 .rpc_argp = &arg, 4936 .rpc_resp = &res, 4937 }; 4938 int ret; 4939 4940 nfs_fattr_init(&fattr); 4941 4942 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 4943 if (ret) 4944 return ret; 4945 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 4946 return -ENOENT; 4947 if (buflen < label.len) 4948 return -ERANGE; 4949 return 0; 4950 } 4951 4952 static int nfs4_get_security_label(struct inode *inode, void *buf, 4953 size_t buflen) 4954 { 4955 struct nfs4_exception exception = { }; 4956 int err; 4957 4958 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 4959 return -EOPNOTSUPP; 4960 4961 do { 4962 err = _nfs4_get_security_label(inode, buf, buflen); 4963 trace_nfs4_get_security_label(inode, err); 4964 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4965 &exception); 4966 } while (exception.retry); 4967 return err; 4968 } 4969 4970 static int _nfs4_do_set_security_label(struct inode *inode, 4971 struct nfs4_label *ilabel, 4972 struct nfs_fattr *fattr, 4973 struct nfs4_label *olabel) 4974 { 4975 4976 struct iattr sattr = {0}; 4977 struct nfs_server *server = NFS_SERVER(inode); 4978 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4979 struct nfs_setattrargs arg = { 4980 .fh = NFS_FH(inode), 4981 .iap = &sattr, 4982 .server = server, 4983 .bitmask = bitmask, 4984 .label = ilabel, 4985 }; 4986 struct nfs_setattrres res = { 4987 .fattr = fattr, 4988 
.label = olabel, 4989 .server = server, 4990 }; 4991 struct rpc_message msg = { 4992 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 4993 .rpc_argp = &arg, 4994 .rpc_resp = &res, 4995 }; 4996 int status; 4997 4998 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 4999 5000 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5001 if (status) 5002 dprintk("%s failed: %d\n", __func__, status); 5003 5004 return status; 5005 } 5006 5007 static int nfs4_do_set_security_label(struct inode *inode, 5008 struct nfs4_label *ilabel, 5009 struct nfs_fattr *fattr, 5010 struct nfs4_label *olabel) 5011 { 5012 struct nfs4_exception exception = { }; 5013 int err; 5014 5015 do { 5016 err = _nfs4_do_set_security_label(inode, ilabel, 5017 fattr, olabel); 5018 trace_nfs4_set_security_label(inode, err); 5019 err = nfs4_handle_exception(NFS_SERVER(inode), err, 5020 &exception); 5021 } while (exception.retry); 5022 return err; 5023 } 5024 5025 static int 5026 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 5027 { 5028 struct nfs4_label ilabel, *olabel = NULL; 5029 struct nfs_fattr fattr; 5030 struct rpc_cred *cred; 5031 int status; 5032 5033 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 5034 return -EOPNOTSUPP; 5035 5036 nfs_fattr_init(&fattr); 5037 5038 ilabel.pi = 0; 5039 ilabel.lfs = 0; 5040 ilabel.label = (char *)buf; 5041 ilabel.len = buflen; 5042 5043 cred = rpc_lookup_cred(); 5044 if (IS_ERR(cred)) 5045 return PTR_ERR(cred); 5046 5047 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 5048 if (IS_ERR(olabel)) { 5049 status = -PTR_ERR(olabel); 5050 goto out; 5051 } 5052 5053 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel); 5054 if (status == 0) 5055 nfs_setsecurity(inode, &fattr, olabel); 5056 5057 nfs4_label_free(olabel); 5058 out: 5059 put_rpccred(cred); 5060 return status; 5061 } 5062 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 5063 5064 5065 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 5066 nfs4_verifier *bootverf) 5067 { 5068 __be32 verf[2]; 5069 5070 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 5071 /* An impossible timestamp guarantees this value 5072 * will never match a generated boot time. */ 5073 verf[0] = 0; 5074 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1); 5075 } else { 5076 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 5077 verf[0] = cpu_to_be32(nn->boot_time.tv_sec); 5078 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec); 5079 } 5080 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 5081 } 5082 5083 static int 5084 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 5085 { 5086 size_t len; 5087 char *str; 5088 5089 if (clp->cl_owner_id != NULL) 5090 return 0; 5091 5092 rcu_read_lock(); 5093 len = 14 + strlen(clp->cl_ipaddr) + 1 + 5094 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 5095 1 + 5096 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) + 5097 1; 5098 rcu_read_unlock(); 5099 5100 if (len > NFS4_OPAQUE_LIMIT + 1) 5101 return -EINVAL; 5102 5103 /* 5104 * Since this string is allocated at mount time, and held until the 5105 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5106 * about a memory-reclaim deadlock. 
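 *
 * The identifier built below has the fixed form
 * "Linux NFSv4.0 <client ip>/<server address> <transport>", e.g.
 * "Linux NFSv4.0 192.0.2.10/192.0.2.1 tcp" (addresses purely
 * illustrative).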
5107 */ 5108 str = kmalloc(len, GFP_KERNEL); 5109 if (!str) 5110 return -ENOMEM; 5111 5112 rcu_read_lock(); 5113 scnprintf(str, len, "Linux NFSv4.0 %s/%s %s", 5114 clp->cl_ipaddr, 5115 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR), 5116 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)); 5117 rcu_read_unlock(); 5118 5119 clp->cl_owner_id = str; 5120 return 0; 5121 } 5122 5123 static int 5124 nfs4_init_uniquifier_client_string(struct nfs_client *clp) 5125 { 5126 size_t len; 5127 char *str; 5128 5129 len = 10 + 10 + 1 + 10 + 1 + 5130 strlen(nfs4_client_id_uniquifier) + 1 + 5131 strlen(clp->cl_rpcclient->cl_nodename) + 1; 5132 5133 if (len > NFS4_OPAQUE_LIMIT + 1) 5134 return -EINVAL; 5135 5136 /* 5137 * Since this string is allocated at mount time, and held until the 5138 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5139 * about a memory-reclaim deadlock. 5140 */ 5141 str = kmalloc(len, GFP_KERNEL); 5142 if (!str) 5143 return -ENOMEM; 5144 5145 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 5146 clp->rpc_ops->version, clp->cl_minorversion, 5147 nfs4_client_id_uniquifier, 5148 clp->cl_rpcclient->cl_nodename); 5149 clp->cl_owner_id = str; 5150 return 0; 5151 } 5152 5153 static int 5154 nfs4_init_uniform_client_string(struct nfs_client *clp) 5155 { 5156 size_t len; 5157 char *str; 5158 5159 if (clp->cl_owner_id != NULL) 5160 return 0; 5161 5162 if (nfs4_client_id_uniquifier[0] != '\0') 5163 return nfs4_init_uniquifier_client_string(clp); 5164 5165 len = 10 + 10 + 1 + 10 + 1 + 5166 strlen(clp->cl_rpcclient->cl_nodename) + 1; 5167 5168 if (len > NFS4_OPAQUE_LIMIT + 1) 5169 return -EINVAL; 5170 5171 /* 5172 * Since this string is allocated at mount time, and held until the 5173 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5174 * about a memory-reclaim deadlock. 5175 */ 5176 str = kmalloc(len, GFP_KERNEL); 5177 if (!str) 5178 return -ENOMEM; 5179 5180 scnprintf(str, len, "Linux NFSv%u.%u %s", 5181 clp->rpc_ops->version, clp->cl_minorversion, 5182 clp->cl_rpcclient->cl_nodename); 5183 clp->cl_owner_id = str; 5184 return 0; 5185 } 5186 5187 /* 5188 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 5189 * services. Advertise one based on the address family of the 5190 * clientaddr. 5191 */ 5192 static unsigned int 5193 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 5194 { 5195 if (strchr(clp->cl_ipaddr, ':') != NULL) 5196 return scnprintf(buf, len, "tcp6"); 5197 else 5198 return scnprintf(buf, len, "tcp"); 5199 } 5200 5201 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 5202 { 5203 struct nfs4_setclientid *sc = calldata; 5204 5205 if (task->tk_status == 0) 5206 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 5207 } 5208 5209 static const struct rpc_call_ops nfs4_setclientid_ops = { 5210 .rpc_call_done = nfs4_setclientid_done, 5211 }; 5212 5213 /** 5214 * nfs4_proc_setclientid - Negotiate client ID 5215 * @clp: state data structure 5216 * @program: RPC program for NFSv4 callback service 5217 * @port: IP port number for NFS4 callback service 5218 * @cred: RPC credential to use for this call 5219 * @res: where to place the result 5220 * 5221 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
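 *
 * The callback service is advertised as an RPC universal address of the
 * form "<clientaddr>.<port-hi>.<port-lo>"; with an illustrative
 * clientaddr of 192.0.2.10 and callback port 2049 the uaddr sent is
 * "192.0.2.10.8.1", since 2049 == 8 * 256 + 1.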
5222 */ 5223 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 5224 unsigned short port, struct rpc_cred *cred, 5225 struct nfs4_setclientid_res *res) 5226 { 5227 nfs4_verifier sc_verifier; 5228 struct nfs4_setclientid setclientid = { 5229 .sc_verifier = &sc_verifier, 5230 .sc_prog = program, 5231 .sc_clnt = clp, 5232 }; 5233 struct rpc_message msg = { 5234 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 5235 .rpc_argp = &setclientid, 5236 .rpc_resp = res, 5237 .rpc_cred = cred, 5238 }; 5239 struct rpc_task *task; 5240 struct rpc_task_setup task_setup_data = { 5241 .rpc_client = clp->cl_rpcclient, 5242 .rpc_message = &msg, 5243 .callback_ops = &nfs4_setclientid_ops, 5244 .callback_data = &setclientid, 5245 .flags = RPC_TASK_TIMEOUT, 5246 }; 5247 int status; 5248 5249 /* nfs_client_id4 */ 5250 nfs4_init_boot_verifier(clp, &sc_verifier); 5251 5252 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 5253 status = nfs4_init_uniform_client_string(clp); 5254 else 5255 status = nfs4_init_nonuniform_client_string(clp); 5256 5257 if (status) 5258 goto out; 5259 5260 /* cb_client4 */ 5261 setclientid.sc_netid_len = 5262 nfs4_init_callback_netid(clp, 5263 setclientid.sc_netid, 5264 sizeof(setclientid.sc_netid)); 5265 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 5266 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 5267 clp->cl_ipaddr, port >> 8, port & 255); 5268 5269 dprintk("NFS call setclientid auth=%s, '%s'\n", 5270 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5271 clp->cl_owner_id); 5272 task = rpc_run_task(&task_setup_data); 5273 if (IS_ERR(task)) { 5274 status = PTR_ERR(task); 5275 goto out; 5276 } 5277 status = task->tk_status; 5278 if (setclientid.sc_cred) { 5279 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 5280 put_rpccred(setclientid.sc_cred); 5281 } 5282 rpc_put_task(task); 5283 out: 5284 trace_nfs4_setclientid(clp, status); 5285 dprintk("NFS reply setclientid: %d\n", status); 5286 return status; 5287 } 5288 5289 /** 5290 * nfs4_proc_setclientid_confirm - Confirm client ID 5291 * @clp: state data structure 5292 * @res: result of a previous SETCLIENTID 5293 * @cred: RPC credential to use for this call 5294 * 5295 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
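 *
 * This is the second half of the two-step client establishment: it
 * echoes back the clientid and confirmation verifier the server
 * returned from SETCLIENTID, after which the client ID may be used.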
5296 */ 5297 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 5298 struct nfs4_setclientid_res *arg, 5299 struct rpc_cred *cred) 5300 { 5301 struct rpc_message msg = { 5302 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 5303 .rpc_argp = arg, 5304 .rpc_cred = cred, 5305 }; 5306 int status; 5307 5308 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 5309 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5310 clp->cl_clientid); 5311 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5312 trace_nfs4_setclientid_confirm(clp, status); 5313 dprintk("NFS reply setclientid_confirm: %d\n", status); 5314 return status; 5315 } 5316 5317 struct nfs4_delegreturndata { 5318 struct nfs4_delegreturnargs args; 5319 struct nfs4_delegreturnres res; 5320 struct nfs_fh fh; 5321 nfs4_stateid stateid; 5322 unsigned long timestamp; 5323 struct nfs_fattr fattr; 5324 int rpc_status; 5325 struct inode *inode; 5326 bool roc; 5327 u32 roc_barrier; 5328 }; 5329 5330 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 5331 { 5332 struct nfs4_delegreturndata *data = calldata; 5333 5334 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5335 return; 5336 5337 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 5338 switch (task->tk_status) { 5339 case 0: 5340 renew_lease(data->res.server, data->timestamp); 5341 case -NFS4ERR_ADMIN_REVOKED: 5342 case -NFS4ERR_DELEG_REVOKED: 5343 case -NFS4ERR_BAD_STATEID: 5344 case -NFS4ERR_OLD_STATEID: 5345 case -NFS4ERR_STALE_STATEID: 5346 case -NFS4ERR_EXPIRED: 5347 task->tk_status = 0; 5348 if (data->roc) 5349 pnfs_roc_set_barrier(data->inode, data->roc_barrier); 5350 break; 5351 default: 5352 if (nfs4_async_handle_error(task, data->res.server, 5353 NULL, NULL) == -EAGAIN) { 5354 rpc_restart_call_prepare(task); 5355 return; 5356 } 5357 } 5358 data->rpc_status = task->tk_status; 5359 } 5360 5361 static void nfs4_delegreturn_release(void *calldata) 5362 { 5363 struct nfs4_delegreturndata *data = calldata; 5364 struct inode *inode = data->inode; 5365 5366 if (inode) { 5367 if (data->roc) 5368 pnfs_roc_release(inode); 5369 nfs_iput_and_deactive(inode); 5370 } 5371 kfree(calldata); 5372 } 5373 5374 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 5375 { 5376 struct nfs4_delegreturndata *d_data; 5377 5378 d_data = (struct nfs4_delegreturndata *)data; 5379 5380 if (nfs4_wait_on_layoutreturn(d_data->inode, task)) 5381 return; 5382 5383 if (d_data->roc) 5384 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier); 5385 5386 nfs4_setup_sequence(d_data->res.server, 5387 &d_data->args.seq_args, 5388 &d_data->res.seq_res, 5389 task); 5390 } 5391 5392 static const struct rpc_call_ops nfs4_delegreturn_ops = { 5393 .rpc_call_prepare = nfs4_delegreturn_prepare, 5394 .rpc_call_done = nfs4_delegreturn_done, 5395 .rpc_release = nfs4_delegreturn_release, 5396 }; 5397 5398 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5399 { 5400 struct nfs4_delegreturndata *data; 5401 struct nfs_server *server = NFS_SERVER(inode); 5402 struct rpc_task *task; 5403 struct rpc_message msg = { 5404 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 5405 .rpc_cred = cred, 5406 }; 5407 struct rpc_task_setup task_setup_data = { 5408 .rpc_client = server->client, 5409 .rpc_message = &msg, 5410 .callback_ops = &nfs4_delegreturn_ops, 5411 .flags = RPC_TASK_ASYNC, 5412 }; 5413 int status = 0; 5414 5415 data = kzalloc(sizeof(*data), 
GFP_NOFS); 5416 if (data == NULL) 5417 return -ENOMEM; 5418 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 5419 5420 nfs4_state_protect(server->nfs_client, 5421 NFS_SP4_MACH_CRED_CLEANUP, 5422 &task_setup_data.rpc_client, &msg); 5423 5424 data->args.fhandle = &data->fh; 5425 data->args.stateid = &data->stateid; 5426 data->args.bitmask = server->cache_consistency_bitmask; 5427 nfs_copy_fh(&data->fh, NFS_FH(inode)); 5428 nfs4_stateid_copy(&data->stateid, stateid); 5429 data->res.fattr = &data->fattr; 5430 data->res.server = server; 5431 nfs_fattr_init(data->res.fattr); 5432 data->timestamp = jiffies; 5433 data->rpc_status = 0; 5434 data->inode = nfs_igrab_and_active(inode); 5435 if (data->inode) 5436 data->roc = nfs4_roc(inode); 5437 5438 task_setup_data.callback_data = data; 5439 msg.rpc_argp = &data->args; 5440 msg.rpc_resp = &data->res; 5441 task = rpc_run_task(&task_setup_data); 5442 if (IS_ERR(task)) 5443 return PTR_ERR(task); 5444 if (!issync) 5445 goto out; 5446 status = nfs4_wait_for_completion_rpc_task(task); 5447 if (status != 0) 5448 goto out; 5449 status = data->rpc_status; 5450 if (status == 0) 5451 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 5452 else 5453 nfs_refresh_inode(inode, &data->fattr); 5454 out: 5455 rpc_put_task(task); 5456 return status; 5457 } 5458 5459 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5460 { 5461 struct nfs_server *server = NFS_SERVER(inode); 5462 struct nfs4_exception exception = { }; 5463 int err; 5464 do { 5465 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 5466 trace_nfs4_delegreturn(inode, stateid, err); 5467 switch (err) { 5468 case -NFS4ERR_STALE_STATEID: 5469 case -NFS4ERR_EXPIRED: 5470 case 0: 5471 return 0; 5472 } 5473 err = nfs4_handle_exception(server, err, &exception); 5474 } while (exception.retry); 5475 return err; 5476 } 5477 5478 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 5479 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 5480 5481 /* 5482 * sleep, with exponential backoff, and retry the LOCK operation. 
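 * The wait starts at NFS4_LOCK_MINTIMEOUT (one second) and doubles on
 * every pass, so a contended lock is retried after roughly 1s, 2s, 4s,
 * 8s, 16s and then every NFS4_LOCK_MAXTIMEOUT (30s) thereafter.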
5483 */ 5484 static unsigned long 5485 nfs4_set_lock_task_retry(unsigned long timeout) 5486 { 5487 freezable_schedule_timeout_killable_unsafe(timeout); 5488 timeout <<= 1; 5489 if (timeout > NFS4_LOCK_MAXTIMEOUT) 5490 return NFS4_LOCK_MAXTIMEOUT; 5491 return timeout; 5492 } 5493 5494 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5495 { 5496 struct inode *inode = state->inode; 5497 struct nfs_server *server = NFS_SERVER(inode); 5498 struct nfs_client *clp = server->nfs_client; 5499 struct nfs_lockt_args arg = { 5500 .fh = NFS_FH(inode), 5501 .fl = request, 5502 }; 5503 struct nfs_lockt_res res = { 5504 .denied = request, 5505 }; 5506 struct rpc_message msg = { 5507 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 5508 .rpc_argp = &arg, 5509 .rpc_resp = &res, 5510 .rpc_cred = state->owner->so_cred, 5511 }; 5512 struct nfs4_lock_state *lsp; 5513 int status; 5514 5515 arg.lock_owner.clientid = clp->cl_clientid; 5516 status = nfs4_set_lock_state(state, request); 5517 if (status != 0) 5518 goto out; 5519 lsp = request->fl_u.nfs4_fl.owner; 5520 arg.lock_owner.id = lsp->ls_seqid.owner_id; 5521 arg.lock_owner.s_dev = server->s_dev; 5522 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5523 switch (status) { 5524 case 0: 5525 request->fl_type = F_UNLCK; 5526 break; 5527 case -NFS4ERR_DENIED: 5528 status = 0; 5529 } 5530 request->fl_ops->fl_release_private(request); 5531 request->fl_ops = NULL; 5532 out: 5533 return status; 5534 } 5535 5536 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5537 { 5538 struct nfs4_exception exception = { }; 5539 int err; 5540 5541 do { 5542 err = _nfs4_proc_getlk(state, cmd, request); 5543 trace_nfs4_get_lock(request, state, cmd, err); 5544 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 5545 &exception); 5546 } while (exception.retry); 5547 return err; 5548 } 5549 5550 static int do_vfs_lock(struct inode *inode, struct file_lock *fl) 5551 { 5552 return locks_lock_inode_wait(inode, fl); 5553 } 5554 5555 struct nfs4_unlockdata { 5556 struct nfs_locku_args arg; 5557 struct nfs_locku_res res; 5558 struct nfs4_lock_state *lsp; 5559 struct nfs_open_context *ctx; 5560 struct file_lock fl; 5561 struct nfs_server *server; 5562 unsigned long timestamp; 5563 }; 5564 5565 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 5566 struct nfs_open_context *ctx, 5567 struct nfs4_lock_state *lsp, 5568 struct nfs_seqid *seqid) 5569 { 5570 struct nfs4_unlockdata *p; 5571 struct inode *inode = lsp->ls_state->inode; 5572 5573 p = kzalloc(sizeof(*p), GFP_NOFS); 5574 if (p == NULL) 5575 return NULL; 5576 p->arg.fh = NFS_FH(inode); 5577 p->arg.fl = &p->fl; 5578 p->arg.seqid = seqid; 5579 p->res.seqid = seqid; 5580 p->lsp = lsp; 5581 atomic_inc(&lsp->ls_count); 5582 /* Ensure we don't close file until we're done freeing locks! 
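 * Taking a reference on the open context keeps the open state pinned
 * until nfs4_locku_release_calldata() drops it after the LOCKU
 * completes.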
*/ 5583 p->ctx = get_nfs_open_context(ctx); 5584 memcpy(&p->fl, fl, sizeof(p->fl)); 5585 p->server = NFS_SERVER(inode); 5586 return p; 5587 } 5588 5589 static void nfs4_locku_release_calldata(void *data) 5590 { 5591 struct nfs4_unlockdata *calldata = data; 5592 nfs_free_seqid(calldata->arg.seqid); 5593 nfs4_put_lock_state(calldata->lsp); 5594 put_nfs_open_context(calldata->ctx); 5595 kfree(calldata); 5596 } 5597 5598 static void nfs4_locku_done(struct rpc_task *task, void *data) 5599 { 5600 struct nfs4_unlockdata *calldata = data; 5601 5602 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 5603 return; 5604 switch (task->tk_status) { 5605 case 0: 5606 renew_lease(calldata->server, calldata->timestamp); 5607 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl); 5608 if (nfs4_update_lock_stateid(calldata->lsp, 5609 &calldata->res.stateid)) 5610 break; 5611 case -NFS4ERR_BAD_STATEID: 5612 case -NFS4ERR_OLD_STATEID: 5613 case -NFS4ERR_STALE_STATEID: 5614 case -NFS4ERR_EXPIRED: 5615 if (!nfs4_stateid_match(&calldata->arg.stateid, 5616 &calldata->lsp->ls_stateid)) 5617 rpc_restart_call_prepare(task); 5618 break; 5619 default: 5620 if (nfs4_async_handle_error(task, calldata->server, 5621 NULL, NULL) == -EAGAIN) 5622 rpc_restart_call_prepare(task); 5623 } 5624 nfs_release_seqid(calldata->arg.seqid); 5625 } 5626 5627 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 5628 { 5629 struct nfs4_unlockdata *calldata = data; 5630 5631 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 5632 goto out_wait; 5633 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid); 5634 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 5635 /* Note: exit _without_ running nfs4_locku_done */ 5636 goto out_no_action; 5637 } 5638 calldata->timestamp = jiffies; 5639 if (nfs4_setup_sequence(calldata->server, 5640 &calldata->arg.seq_args, 5641 &calldata->res.seq_res, 5642 task) != 0) 5643 nfs_release_seqid(calldata->arg.seqid); 5644 return; 5645 out_no_action: 5646 task->tk_action = NULL; 5647 out_wait: 5648 nfs4_sequence_done(task, &calldata->res.seq_res); 5649 } 5650 5651 static const struct rpc_call_ops nfs4_locku_ops = { 5652 .rpc_call_prepare = nfs4_locku_prepare, 5653 .rpc_call_done = nfs4_locku_done, 5654 .rpc_release = nfs4_locku_release_calldata, 5655 }; 5656 5657 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 5658 struct nfs_open_context *ctx, 5659 struct nfs4_lock_state *lsp, 5660 struct nfs_seqid *seqid) 5661 { 5662 struct nfs4_unlockdata *data; 5663 struct rpc_message msg = { 5664 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 5665 .rpc_cred = ctx->cred, 5666 }; 5667 struct rpc_task_setup task_setup_data = { 5668 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 5669 .rpc_message = &msg, 5670 .callback_ops = &nfs4_locku_ops, 5671 .workqueue = nfsiod_workqueue, 5672 .flags = RPC_TASK_ASYNC, 5673 }; 5674 5675 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 5676 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 5677 5678 /* Ensure this is an unlock - when canceling a lock, the 5679 * canceled lock is passed in, and it won't be an unlock. 
5680 */ 5681 fl->fl_type = F_UNLCK; 5682 5683 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 5684 if (data == NULL) { 5685 nfs_free_seqid(seqid); 5686 return ERR_PTR(-ENOMEM); 5687 } 5688 5689 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5690 msg.rpc_argp = &data->arg; 5691 msg.rpc_resp = &data->res; 5692 task_setup_data.callback_data = data; 5693 return rpc_run_task(&task_setup_data); 5694 } 5695 5696 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 5697 { 5698 struct inode *inode = state->inode; 5699 struct nfs4_state_owner *sp = state->owner; 5700 struct nfs_inode *nfsi = NFS_I(inode); 5701 struct nfs_seqid *seqid; 5702 struct nfs4_lock_state *lsp; 5703 struct rpc_task *task; 5704 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5705 int status = 0; 5706 unsigned char fl_flags = request->fl_flags; 5707 5708 status = nfs4_set_lock_state(state, request); 5709 /* Unlock _before_ we do the RPC call */ 5710 request->fl_flags |= FL_EXISTS; 5711 /* Exclude nfs_delegation_claim_locks() */ 5712 mutex_lock(&sp->so_delegreturn_mutex); 5713 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 5714 down_read(&nfsi->rwsem); 5715 if (do_vfs_lock(inode, request) == -ENOENT) { 5716 up_read(&nfsi->rwsem); 5717 mutex_unlock(&sp->so_delegreturn_mutex); 5718 goto out; 5719 } 5720 up_read(&nfsi->rwsem); 5721 mutex_unlock(&sp->so_delegreturn_mutex); 5722 if (status != 0) 5723 goto out; 5724 /* Is this a delegated lock? */ 5725 lsp = request->fl_u.nfs4_fl.owner; 5726 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 5727 goto out; 5728 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 5729 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 5730 status = -ENOMEM; 5731 if (IS_ERR(seqid)) 5732 goto out; 5733 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 5734 status = PTR_ERR(task); 5735 if (IS_ERR(task)) 5736 goto out; 5737 status = nfs4_wait_for_completion_rpc_task(task); 5738 rpc_put_task(task); 5739 out: 5740 request->fl_flags = fl_flags; 5741 trace_nfs4_unlock(request, state, F_SETLK, status); 5742 return status; 5743 } 5744 5745 struct nfs4_lockdata { 5746 struct nfs_lock_args arg; 5747 struct nfs_lock_res res; 5748 struct nfs4_lock_state *lsp; 5749 struct nfs_open_context *ctx; 5750 struct file_lock fl; 5751 unsigned long timestamp; 5752 int rpc_status; 5753 int cancelled; 5754 struct nfs_server *server; 5755 }; 5756 5757 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 5758 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 5759 gfp_t gfp_mask) 5760 { 5761 struct nfs4_lockdata *p; 5762 struct inode *inode = lsp->ls_state->inode; 5763 struct nfs_server *server = NFS_SERVER(inode); 5764 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5765 5766 p = kzalloc(sizeof(*p), gfp_mask); 5767 if (p == NULL) 5768 return NULL; 5769 5770 p->arg.fh = NFS_FH(inode); 5771 p->arg.fl = &p->fl; 5772 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 5773 if (IS_ERR(p->arg.open_seqid)) 5774 goto out_free; 5775 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 5776 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 5777 if (IS_ERR(p->arg.lock_seqid)) 5778 goto out_free_seqid; 5779 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 5780 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 5781 p->arg.lock_owner.s_dev = server->s_dev; 5782 p->res.lock_seqid = p->arg.lock_seqid; 5783 
p->lsp = lsp; 5784 p->server = server; 5785 atomic_inc(&lsp->ls_count); 5786 p->ctx = get_nfs_open_context(ctx); 5787 get_file(fl->fl_file); 5788 memcpy(&p->fl, fl, sizeof(p->fl)); 5789 return p; 5790 out_free_seqid: 5791 nfs_free_seqid(p->arg.open_seqid); 5792 out_free: 5793 kfree(p); 5794 return NULL; 5795 } 5796 5797 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 5798 { 5799 struct nfs4_lockdata *data = calldata; 5800 struct nfs4_state *state = data->lsp->ls_state; 5801 5802 dprintk("%s: begin!\n", __func__); 5803 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 5804 goto out_wait; 5805 /* Do we need to do an open_to_lock_owner? */ 5806 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 5807 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 5808 goto out_release_lock_seqid; 5809 } 5810 nfs4_stateid_copy(&data->arg.open_stateid, 5811 &state->open_stateid); 5812 data->arg.new_lock_owner = 1; 5813 data->res.open_seqid = data->arg.open_seqid; 5814 } else { 5815 data->arg.new_lock_owner = 0; 5816 nfs4_stateid_copy(&data->arg.lock_stateid, 5817 &data->lsp->ls_stateid); 5818 } 5819 if (!nfs4_valid_open_stateid(state)) { 5820 data->rpc_status = -EBADF; 5821 task->tk_action = NULL; 5822 goto out_release_open_seqid; 5823 } 5824 data->timestamp = jiffies; 5825 if (nfs4_setup_sequence(data->server, 5826 &data->arg.seq_args, 5827 &data->res.seq_res, 5828 task) == 0) 5829 return; 5830 out_release_open_seqid: 5831 nfs_release_seqid(data->arg.open_seqid); 5832 out_release_lock_seqid: 5833 nfs_release_seqid(data->arg.lock_seqid); 5834 out_wait: 5835 nfs4_sequence_done(task, &data->res.seq_res); 5836 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 5837 } 5838 5839 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 5840 { 5841 struct nfs4_lockdata *data = calldata; 5842 struct nfs4_lock_state *lsp = data->lsp; 5843 5844 dprintk("%s: begin!\n", __func__); 5845 5846 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5847 return; 5848 5849 data->rpc_status = task->tk_status; 5850 switch (task->tk_status) { 5851 case 0: 5852 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 5853 data->timestamp); 5854 if (data->arg.new_lock) { 5855 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 5856 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) { 5857 rpc_restart_call_prepare(task); 5858 break; 5859 } 5860 } 5861 if (data->arg.new_lock_owner != 0) { 5862 nfs_confirm_seqid(&lsp->ls_seqid, 0); 5863 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 5864 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 5865 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 5866 rpc_restart_call_prepare(task); 5867 break; 5868 case -NFS4ERR_BAD_STATEID: 5869 case -NFS4ERR_OLD_STATEID: 5870 case -NFS4ERR_STALE_STATEID: 5871 case -NFS4ERR_EXPIRED: 5872 if (data->arg.new_lock_owner != 0) { 5873 if (!nfs4_stateid_match(&data->arg.open_stateid, 5874 &lsp->ls_state->open_stateid)) 5875 rpc_restart_call_prepare(task); 5876 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 5877 &lsp->ls_stateid)) 5878 rpc_restart_call_prepare(task); 5879 } 5880 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 5881 } 5882 5883 static void nfs4_lock_release(void *calldata) 5884 { 5885 struct nfs4_lockdata *data = calldata; 5886 5887 dprintk("%s: begin!\n", __func__); 5888 nfs_free_seqid(data->arg.open_seqid); 5889 if (data->cancelled != 0) { 5890 struct rpc_task *task; 5891 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 5892 
data->arg.lock_seqid); 5893 if (!IS_ERR(task)) 5894 rpc_put_task_async(task); 5895 dprintk("%s: cancelling lock!\n", __func__); 5896 } else 5897 nfs_free_seqid(data->arg.lock_seqid); 5898 nfs4_put_lock_state(data->lsp); 5899 put_nfs_open_context(data->ctx); 5900 fput(data->fl.fl_file); 5901 kfree(data); 5902 dprintk("%s: done!\n", __func__); 5903 } 5904 5905 static const struct rpc_call_ops nfs4_lock_ops = { 5906 .rpc_call_prepare = nfs4_lock_prepare, 5907 .rpc_call_done = nfs4_lock_done, 5908 .rpc_release = nfs4_lock_release, 5909 }; 5910 5911 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 5912 { 5913 switch (error) { 5914 case -NFS4ERR_ADMIN_REVOKED: 5915 case -NFS4ERR_BAD_STATEID: 5916 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5917 if (new_lock_owner != 0 || 5918 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 5919 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 5920 break; 5921 case -NFS4ERR_STALE_STATEID: 5922 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5923 case -NFS4ERR_EXPIRED: 5924 nfs4_schedule_lease_recovery(server->nfs_client); 5925 }; 5926 } 5927 5928 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 5929 { 5930 struct nfs4_lockdata *data; 5931 struct rpc_task *task; 5932 struct rpc_message msg = { 5933 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 5934 .rpc_cred = state->owner->so_cred, 5935 }; 5936 struct rpc_task_setup task_setup_data = { 5937 .rpc_client = NFS_CLIENT(state->inode), 5938 .rpc_message = &msg, 5939 .callback_ops = &nfs4_lock_ops, 5940 .workqueue = nfsiod_workqueue, 5941 .flags = RPC_TASK_ASYNC, 5942 }; 5943 int ret; 5944 5945 dprintk("%s: begin!\n", __func__); 5946 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 5947 fl->fl_u.nfs4_fl.owner, 5948 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 5949 if (data == NULL) 5950 return -ENOMEM; 5951 if (IS_SETLKW(cmd)) 5952 data->arg.block = 1; 5953 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5954 msg.rpc_argp = &data->arg; 5955 msg.rpc_resp = &data->res; 5956 task_setup_data.callback_data = data; 5957 if (recovery_type > NFS_LOCK_NEW) { 5958 if (recovery_type == NFS_LOCK_RECLAIM) 5959 data->arg.reclaim = NFS_LOCK_RECLAIM; 5960 nfs4_set_sequence_privileged(&data->arg.seq_args); 5961 } else 5962 data->arg.new_lock = 1; 5963 task = rpc_run_task(&task_setup_data); 5964 if (IS_ERR(task)) 5965 return PTR_ERR(task); 5966 ret = nfs4_wait_for_completion_rpc_task(task); 5967 if (ret == 0) { 5968 ret = data->rpc_status; 5969 if (ret) 5970 nfs4_handle_setlk_error(data->server, data->lsp, 5971 data->arg.new_lock_owner, ret); 5972 } else 5973 data->cancelled = 1; 5974 rpc_put_task(task); 5975 dprintk("%s: done, ret = %d!\n", __func__, ret); 5976 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 5977 return ret; 5978 } 5979 5980 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 5981 { 5982 struct nfs_server *server = NFS_SERVER(state->inode); 5983 struct nfs4_exception exception = { 5984 .inode = state->inode, 5985 }; 5986 int err; 5987 5988 do { 5989 /* Cache the lock if possible... 
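	 * (while a delegation is held the lock only needs to be cached
	 *  locally, so the NFS_DELEGATED_STATE test below can return
	 *  without going to the server)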
*/ 5990 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5991 return 0; 5992 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 5993 if (err != -NFS4ERR_DELAY) 5994 break; 5995 nfs4_handle_exception(server, err, &exception); 5996 } while (exception.retry); 5997 return err; 5998 } 5999 6000 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 6001 { 6002 struct nfs_server *server = NFS_SERVER(state->inode); 6003 struct nfs4_exception exception = { 6004 .inode = state->inode, 6005 }; 6006 int err; 6007 6008 err = nfs4_set_lock_state(state, request); 6009 if (err != 0) 6010 return err; 6011 if (!recover_lost_locks) { 6012 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 6013 return 0; 6014 } 6015 do { 6016 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 6017 return 0; 6018 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 6019 switch (err) { 6020 default: 6021 goto out; 6022 case -NFS4ERR_GRACE: 6023 case -NFS4ERR_DELAY: 6024 nfs4_handle_exception(server, err, &exception); 6025 err = 0; 6026 } 6027 } while (exception.retry); 6028 out: 6029 return err; 6030 } 6031 6032 #if defined(CONFIG_NFS_V4_1) 6033 /** 6034 * nfs41_check_expired_locks - possibly free a lock stateid 6035 * 6036 * @state: NFSv4 state for an inode 6037 * 6038 * Returns NFS_OK if recovery for this stateid is now finished. 6039 * Otherwise a negative NFS4ERR value is returned. 6040 */ 6041 static int nfs41_check_expired_locks(struct nfs4_state *state) 6042 { 6043 int status, ret = -NFS4ERR_BAD_STATEID; 6044 struct nfs4_lock_state *lsp; 6045 struct nfs_server *server = NFS_SERVER(state->inode); 6046 6047 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 6048 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 6049 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 6050 6051 status = nfs41_test_stateid(server, 6052 &lsp->ls_stateid, 6053 cred); 6054 trace_nfs4_test_lock_stateid(state, lsp, status); 6055 if (status != NFS_OK) { 6056 /* Free the stateid unless the server 6057 * informs us the stateid is unrecognized. */ 6058 if (status != -NFS4ERR_BAD_STATEID) 6059 nfs41_free_stateid(server, 6060 &lsp->ls_stateid, 6061 cred); 6062 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 6063 ret = status; 6064 } 6065 } 6066 }; 6067 6068 return ret; 6069 } 6070 6071 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 6072 { 6073 int status = NFS_OK; 6074 6075 if (test_bit(LK_STATE_IN_USE, &state->flags)) 6076 status = nfs41_check_expired_locks(state); 6077 if (status != NFS_OK) 6078 status = nfs4_lock_expired(state, request); 6079 return status; 6080 } 6081 #endif 6082 6083 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6084 { 6085 struct nfs_inode *nfsi = NFS_I(state->inode); 6086 struct nfs4_state_owner *sp = state->owner; 6087 unsigned char fl_flags = request->fl_flags; 6088 int status = -ENOLCK; 6089 6090 if ((fl_flags & FL_POSIX) && 6091 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 6092 goto out; 6093 /* Is this a delegated open? */ 6094 status = nfs4_set_lock_state(state, request); 6095 if (status != 0) 6096 goto out; 6097 request->fl_flags |= FL_ACCESS; 6098 status = do_vfs_lock(state->inode, request); 6099 if (status < 0) 6100 goto out; 6101 mutex_lock(&sp->so_delegreturn_mutex); 6102 down_read(&nfsi->rwsem); 6103 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 6104 /* Yes: cache locks! */ 6105 /* ...but avoid races with delegation recall... 
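	 * (FL_SLEEP is cleared below, so the local lock call cannot block
	 *  on a conflicting lock while so_delegreturn_mutex and the
	 *  inode's rwsem are held)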
*/ 6106 request->fl_flags = fl_flags & ~FL_SLEEP; 6107 status = do_vfs_lock(state->inode, request); 6108 up_read(&nfsi->rwsem); 6109 mutex_unlock(&sp->so_delegreturn_mutex); 6110 goto out; 6111 } 6112 up_read(&nfsi->rwsem); 6113 mutex_unlock(&sp->so_delegreturn_mutex); 6114 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 6115 out: 6116 request->fl_flags = fl_flags; 6117 return status; 6118 } 6119 6120 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6121 { 6122 struct nfs4_exception exception = { 6123 .state = state, 6124 .inode = state->inode, 6125 }; 6126 int err; 6127 6128 do { 6129 err = _nfs4_proc_setlk(state, cmd, request); 6130 if (err == -NFS4ERR_DENIED) 6131 err = -EAGAIN; 6132 err = nfs4_handle_exception(NFS_SERVER(state->inode), 6133 err, &exception); 6134 } while (exception.retry); 6135 return err; 6136 } 6137 6138 static int 6139 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 6140 { 6141 struct nfs_open_context *ctx; 6142 struct nfs4_state *state; 6143 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 6144 int status; 6145 6146 /* verify open state */ 6147 ctx = nfs_file_open_context(filp); 6148 state = ctx->state; 6149 6150 if (request->fl_start < 0 || request->fl_end < 0) 6151 return -EINVAL; 6152 6153 if (IS_GETLK(cmd)) { 6154 if (state != NULL) 6155 return nfs4_proc_getlk(state, F_GETLK, request); 6156 return 0; 6157 } 6158 6159 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 6160 return -EINVAL; 6161 6162 if (request->fl_type == F_UNLCK) { 6163 if (state != NULL) 6164 return nfs4_proc_unlck(state, cmd, request); 6165 return 0; 6166 } 6167 6168 if (state == NULL) 6169 return -ENOLCK; 6170 /* 6171 * Don't rely on the VFS having checked the file open mode, 6172 * since it won't do this for flock() locks. 
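 * The switch below therefore rejects read locks on files that were not
 * opened for reading, and write locks on files that were not opened
 * for writing, with -EBADF.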
6173 */ 6174 switch (request->fl_type) { 6175 case F_RDLCK: 6176 if (!(filp->f_mode & FMODE_READ)) 6177 return -EBADF; 6178 break; 6179 case F_WRLCK: 6180 if (!(filp->f_mode & FMODE_WRITE)) 6181 return -EBADF; 6182 } 6183 6184 do { 6185 status = nfs4_proc_setlk(state, cmd, request); 6186 if ((status != -EAGAIN) || IS_SETLK(cmd)) 6187 break; 6188 timeout = nfs4_set_lock_task_retry(timeout); 6189 status = -ERESTARTSYS; 6190 if (signalled()) 6191 break; 6192 } while(status < 0); 6193 return status; 6194 } 6195 6196 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 6197 { 6198 struct nfs_server *server = NFS_SERVER(state->inode); 6199 int err; 6200 6201 err = nfs4_set_lock_state(state, fl); 6202 if (err != 0) 6203 return err; 6204 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 6205 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 6206 } 6207 6208 struct nfs_release_lockowner_data { 6209 struct nfs4_lock_state *lsp; 6210 struct nfs_server *server; 6211 struct nfs_release_lockowner_args args; 6212 struct nfs_release_lockowner_res res; 6213 unsigned long timestamp; 6214 }; 6215 6216 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 6217 { 6218 struct nfs_release_lockowner_data *data = calldata; 6219 struct nfs_server *server = data->server; 6220 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 6221 &data->args.seq_args, &data->res.seq_res, task); 6222 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6223 data->timestamp = jiffies; 6224 } 6225 6226 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 6227 { 6228 struct nfs_release_lockowner_data *data = calldata; 6229 struct nfs_server *server = data->server; 6230 6231 nfs40_sequence_done(task, &data->res.seq_res); 6232 6233 switch (task->tk_status) { 6234 case 0: 6235 renew_lease(server, data->timestamp); 6236 break; 6237 case -NFS4ERR_STALE_CLIENTID: 6238 case -NFS4ERR_EXPIRED: 6239 nfs4_schedule_lease_recovery(server->nfs_client); 6240 break; 6241 case -NFS4ERR_LEASE_MOVED: 6242 case -NFS4ERR_DELAY: 6243 if (nfs4_async_handle_error(task, server, 6244 NULL, NULL) == -EAGAIN) 6245 rpc_restart_call_prepare(task); 6246 } 6247 } 6248 6249 static void nfs4_release_lockowner_release(void *calldata) 6250 { 6251 struct nfs_release_lockowner_data *data = calldata; 6252 nfs4_free_lock_state(data->server, data->lsp); 6253 kfree(calldata); 6254 } 6255 6256 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 6257 .rpc_call_prepare = nfs4_release_lockowner_prepare, 6258 .rpc_call_done = nfs4_release_lockowner_done, 6259 .rpc_release = nfs4_release_lockowner_release, 6260 }; 6261 6262 static void 6263 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 6264 { 6265 struct nfs_release_lockowner_data *data; 6266 struct rpc_message msg = { 6267 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 6268 }; 6269 6270 if (server->nfs_client->cl_mvops->minor_version != 0) 6271 return; 6272 6273 data = kmalloc(sizeof(*data), GFP_NOFS); 6274 if (!data) 6275 return; 6276 data->lsp = lsp; 6277 data->server = server; 6278 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6279 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 6280 data->args.lock_owner.s_dev = server->s_dev; 6281 6282 msg.rpc_argp = &data->args; 6283 msg.rpc_resp = &data->res; 6284 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 6285 
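	/*
	 * Fire-and-forget: the call runs asynchronously, and the rpc_release
	 * callback (nfs4_release_lockowner_release above) frees the lock
	 * state and this calldata, so there is nothing to wait for here.
	 * RELEASE_LOCKOWNER only exists in NFSv4.0, hence the minor_version
	 * check above.
	 */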
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 6286 } 6287 6288 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 6289 6290 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 6291 struct dentry *unused, struct inode *inode, 6292 const char *key, const void *buf, 6293 size_t buflen, int flags) 6294 { 6295 return nfs4_proc_set_acl(inode, buf, buflen); 6296 } 6297 6298 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 6299 struct dentry *unused, struct inode *inode, 6300 const char *key, void *buf, size_t buflen) 6301 { 6302 return nfs4_proc_get_acl(inode, buf, buflen); 6303 } 6304 6305 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 6306 { 6307 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))); 6308 } 6309 6310 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6311 6312 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 6313 struct dentry *unused, struct inode *inode, 6314 const char *key, const void *buf, 6315 size_t buflen, int flags) 6316 { 6317 if (security_ismaclabel(key)) 6318 return nfs4_set_security_label(inode, buf, buflen); 6319 6320 return -EOPNOTSUPP; 6321 } 6322 6323 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 6324 struct dentry *unused, struct inode *inode, 6325 const char *key, void *buf, size_t buflen) 6326 { 6327 if (security_ismaclabel(key)) 6328 return nfs4_get_security_label(inode, buf, buflen); 6329 return -EOPNOTSUPP; 6330 } 6331 6332 static ssize_t 6333 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 6334 { 6335 int len = 0; 6336 6337 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 6338 len = security_inode_listsecurity(inode, list, list_len); 6339 if (list_len && len > list_len) 6340 return -ERANGE; 6341 } 6342 return len; 6343 } 6344 6345 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 6346 .prefix = XATTR_SECURITY_PREFIX, 6347 .get = nfs4_xattr_get_nfs4_label, 6348 .set = nfs4_xattr_set_nfs4_label, 6349 }; 6350 6351 #else 6352 6353 static ssize_t 6354 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 6355 { 6356 return 0; 6357 } 6358 6359 #endif 6360 6361 /* 6362 * nfs_fhget will use either the mounted_on_fileid or the fileid 6363 */ 6364 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 6365 { 6366 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 6367 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 6368 (fattr->valid & NFS_ATTR_FATTR_FSID) && 6369 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 6370 return; 6371 6372 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 6373 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 6374 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 6375 fattr->nlink = 2; 6376 } 6377 6378 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6379 const struct qstr *name, 6380 struct nfs4_fs_locations *fs_locations, 6381 struct page *page) 6382 { 6383 struct nfs_server *server = NFS_SERVER(dir); 6384 u32 bitmask[3] = { 6385 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6386 }; 6387 struct nfs4_fs_locations_arg args = { 6388 .dir_fh = NFS_FH(dir), 6389 .name = name, 6390 .page = page, 6391 .bitmask = bitmask, 6392 }; 6393 struct nfs4_fs_locations_res res = { 6394 .fs_locations = fs_locations, 6395 }; 6396 struct rpc_message msg = { 6397 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6398 .rpc_argp = &args, 6399 .rpc_resp = &res, 6400 }; 6401 int status; 6402 6403 
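	/*
	 * The bitmask starts out requesting only fsid and fs_locations;
	 * which fileid attribute gets added depends on whether the server
	 * advertises mounted_on_fileid, see below.
	 */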
dprintk("%s: start\n", __func__); 6404 6405 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 6406 * is not supported */ 6407 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 6408 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 6409 else 6410 bitmask[0] |= FATTR4_WORD0_FILEID; 6411 6412 nfs_fattr_init(&fs_locations->fattr); 6413 fs_locations->server = server; 6414 fs_locations->nlocations = 0; 6415 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 6416 dprintk("%s: returned status = %d\n", __func__, status); 6417 return status; 6418 } 6419 6420 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6421 const struct qstr *name, 6422 struct nfs4_fs_locations *fs_locations, 6423 struct page *page) 6424 { 6425 struct nfs4_exception exception = { }; 6426 int err; 6427 do { 6428 err = _nfs4_proc_fs_locations(client, dir, name, 6429 fs_locations, page); 6430 trace_nfs4_get_fs_locations(dir, name, err); 6431 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6432 &exception); 6433 } while (exception.retry); 6434 return err; 6435 } 6436 6437 /* 6438 * This operation also signals the server that this client is 6439 * performing migration recovery. The server can stop returning 6440 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 6441 * appended to this compound to identify the client ID which is 6442 * performing recovery. 6443 */ 6444 static int _nfs40_proc_get_locations(struct inode *inode, 6445 struct nfs4_fs_locations *locations, 6446 struct page *page, struct rpc_cred *cred) 6447 { 6448 struct nfs_server *server = NFS_SERVER(inode); 6449 struct rpc_clnt *clnt = server->client; 6450 u32 bitmask[2] = { 6451 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6452 }; 6453 struct nfs4_fs_locations_arg args = { 6454 .clientid = server->nfs_client->cl_clientid, 6455 .fh = NFS_FH(inode), 6456 .page = page, 6457 .bitmask = bitmask, 6458 .migration = 1, /* skip LOOKUP */ 6459 .renew = 1, /* append RENEW */ 6460 }; 6461 struct nfs4_fs_locations_res res = { 6462 .fs_locations = locations, 6463 .migration = 1, 6464 .renew = 1, 6465 }; 6466 struct rpc_message msg = { 6467 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6468 .rpc_argp = &args, 6469 .rpc_resp = &res, 6470 .rpc_cred = cred, 6471 }; 6472 unsigned long now = jiffies; 6473 int status; 6474 6475 nfs_fattr_init(&locations->fattr); 6476 locations->server = server; 6477 locations->nlocations = 0; 6478 6479 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6480 nfs4_set_sequence_privileged(&args.seq_args); 6481 status = nfs4_call_sync_sequence(clnt, server, &msg, 6482 &args.seq_args, &res.seq_res); 6483 if (status) 6484 return status; 6485 6486 renew_lease(server, now); 6487 return 0; 6488 } 6489 6490 #ifdef CONFIG_NFS_V4_1 6491 6492 /* 6493 * This operation also signals the server that this client is 6494 * performing migration recovery. The server can stop asserting 6495 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 6496 * performing this operation is identified in the SEQUENCE 6497 * operation in this compound. 6498 * 6499 * When the client supports GETATTR(fs_locations_info), it can 6500 * be plumbed in here. 
6501 */ 6502 static int _nfs41_proc_get_locations(struct inode *inode, 6503 struct nfs4_fs_locations *locations, 6504 struct page *page, struct rpc_cred *cred) 6505 { 6506 struct nfs_server *server = NFS_SERVER(inode); 6507 struct rpc_clnt *clnt = server->client; 6508 u32 bitmask[2] = { 6509 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6510 }; 6511 struct nfs4_fs_locations_arg args = { 6512 .fh = NFS_FH(inode), 6513 .page = page, 6514 .bitmask = bitmask, 6515 .migration = 1, /* skip LOOKUP */ 6516 }; 6517 struct nfs4_fs_locations_res res = { 6518 .fs_locations = locations, 6519 .migration = 1, 6520 }; 6521 struct rpc_message msg = { 6522 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6523 .rpc_argp = &args, 6524 .rpc_resp = &res, 6525 .rpc_cred = cred, 6526 }; 6527 int status; 6528 6529 nfs_fattr_init(&locations->fattr); 6530 locations->server = server; 6531 locations->nlocations = 0; 6532 6533 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6534 nfs4_set_sequence_privileged(&args.seq_args); 6535 status = nfs4_call_sync_sequence(clnt, server, &msg, 6536 &args.seq_args, &res.seq_res); 6537 if (status == NFS4_OK && 6538 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6539 status = -NFS4ERR_LEASE_MOVED; 6540 return status; 6541 } 6542 6543 #endif /* CONFIG_NFS_V4_1 */ 6544 6545 /** 6546 * nfs4_proc_get_locations - discover locations for a migrated FSID 6547 * @inode: inode on FSID that is migrating 6548 * @locations: result of query 6549 * @page: buffer 6550 * @cred: credential to use for this operation 6551 * 6552 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 6553 * operation failed, or a negative errno if a local error occurred. 6554 * 6555 * On success, "locations" is filled in, but if the server has 6556 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 6557 * asserted. 6558 * 6559 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 6560 * from this client that require migration recovery. 6561 */ 6562 int nfs4_proc_get_locations(struct inode *inode, 6563 struct nfs4_fs_locations *locations, 6564 struct page *page, struct rpc_cred *cred) 6565 { 6566 struct nfs_server *server = NFS_SERVER(inode); 6567 struct nfs_client *clp = server->nfs_client; 6568 const struct nfs4_mig_recovery_ops *ops = 6569 clp->cl_mvops->mig_recovery_ops; 6570 struct nfs4_exception exception = { }; 6571 int status; 6572 6573 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6574 (unsigned long long)server->fsid.major, 6575 (unsigned long long)server->fsid.minor, 6576 clp->cl_hostname); 6577 nfs_display_fhandle(NFS_FH(inode), __func__); 6578 6579 do { 6580 status = ops->get_locations(inode, locations, page, cred); 6581 if (status != -NFS4ERR_DELAY) 6582 break; 6583 nfs4_handle_exception(server, status, &exception); 6584 } while (exception.retry); 6585 return status; 6586 } 6587 6588 /* 6589 * This operation also signals the server that this client is 6590 * performing "lease moved" recovery. The server can stop 6591 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 6592 * is appended to this compound to identify the client ID which is 6593 * performing recovery. 
6594 */ 6595 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6596 { 6597 struct nfs_server *server = NFS_SERVER(inode); 6598 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 6599 struct rpc_clnt *clnt = server->client; 6600 struct nfs4_fsid_present_arg args = { 6601 .fh = NFS_FH(inode), 6602 .clientid = clp->cl_clientid, 6603 .renew = 1, /* append RENEW */ 6604 }; 6605 struct nfs4_fsid_present_res res = { 6606 .renew = 1, 6607 }; 6608 struct rpc_message msg = { 6609 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6610 .rpc_argp = &args, 6611 .rpc_resp = &res, 6612 .rpc_cred = cred, 6613 }; 6614 unsigned long now = jiffies; 6615 int status; 6616 6617 res.fh = nfs_alloc_fhandle(); 6618 if (res.fh == NULL) 6619 return -ENOMEM; 6620 6621 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6622 nfs4_set_sequence_privileged(&args.seq_args); 6623 status = nfs4_call_sync_sequence(clnt, server, &msg, 6624 &args.seq_args, &res.seq_res); 6625 nfs_free_fhandle(res.fh); 6626 if (status) 6627 return status; 6628 6629 do_renew_lease(clp, now); 6630 return 0; 6631 } 6632 6633 #ifdef CONFIG_NFS_V4_1 6634 6635 /* 6636 * This operation also signals the server that this client is 6637 * performing "lease moved" recovery. The server can stop asserting 6638 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 6639 * this operation is identified in the SEQUENCE operation in this 6640 * compound. 6641 */ 6642 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6643 { 6644 struct nfs_server *server = NFS_SERVER(inode); 6645 struct rpc_clnt *clnt = server->client; 6646 struct nfs4_fsid_present_arg args = { 6647 .fh = NFS_FH(inode), 6648 }; 6649 struct nfs4_fsid_present_res res = { 6650 }; 6651 struct rpc_message msg = { 6652 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6653 .rpc_argp = &args, 6654 .rpc_resp = &res, 6655 .rpc_cred = cred, 6656 }; 6657 int status; 6658 6659 res.fh = nfs_alloc_fhandle(); 6660 if (res.fh == NULL) 6661 return -ENOMEM; 6662 6663 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6664 nfs4_set_sequence_privileged(&args.seq_args); 6665 status = nfs4_call_sync_sequence(clnt, server, &msg, 6666 &args.seq_args, &res.seq_res); 6667 nfs_free_fhandle(res.fh); 6668 if (status == NFS4_OK && 6669 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6670 status = -NFS4ERR_LEASE_MOVED; 6671 return status; 6672 } 6673 6674 #endif /* CONFIG_NFS_V4_1 */ 6675 6676 /** 6677 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 6678 * @inode: inode on FSID to check 6679 * @cred: credential to use for this operation 6680 * 6681 * Server indicates whether the FSID is present, moved, or not 6682 * recognized. This operation is necessary to clear a LEASE_MOVED 6683 * condition for this client ID. 6684 * 6685 * Returns NFS4_OK if the FSID is present on this server, 6686 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 6687 * NFS4ERR code if some error occurred on the server, or a 6688 * negative errno if a local failure occurred. 
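 *
 * The minor version specific implementation is retried below for as
 * long as the server keeps returning NFS4ERR_DELAY.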
6689 */ 6690 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6691 { 6692 struct nfs_server *server = NFS_SERVER(inode); 6693 struct nfs_client *clp = server->nfs_client; 6694 const struct nfs4_mig_recovery_ops *ops = 6695 clp->cl_mvops->mig_recovery_ops; 6696 struct nfs4_exception exception = { }; 6697 int status; 6698 6699 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6700 (unsigned long long)server->fsid.major, 6701 (unsigned long long)server->fsid.minor, 6702 clp->cl_hostname); 6703 nfs_display_fhandle(NFS_FH(inode), __func__); 6704 6705 do { 6706 status = ops->fsid_present(inode, cred); 6707 if (status != -NFS4ERR_DELAY) 6708 break; 6709 nfs4_handle_exception(server, status, &exception); 6710 } while (exception.retry); 6711 return status; 6712 } 6713 6714 /** 6715 * If 'use_integrity' is true and the state managment nfs_client 6716 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 6717 * and the machine credential as per RFC3530bis and RFC5661 Security 6718 * Considerations sections. Otherwise, just use the user cred with the 6719 * filesystem's rpc_client. 6720 */ 6721 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 6722 { 6723 int status; 6724 struct nfs4_secinfo_arg args = { 6725 .dir_fh = NFS_FH(dir), 6726 .name = name, 6727 }; 6728 struct nfs4_secinfo_res res = { 6729 .flavors = flavors, 6730 }; 6731 struct rpc_message msg = { 6732 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 6733 .rpc_argp = &args, 6734 .rpc_resp = &res, 6735 }; 6736 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 6737 struct rpc_cred *cred = NULL; 6738 6739 if (use_integrity) { 6740 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient; 6741 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client); 6742 msg.rpc_cred = cred; 6743 } 6744 6745 dprintk("NFS call secinfo %s\n", name->name); 6746 6747 nfs4_state_protect(NFS_SERVER(dir)->nfs_client, 6748 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 6749 6750 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args, 6751 &res.seq_res, 0); 6752 dprintk("NFS reply secinfo: %d\n", status); 6753 6754 if (cred) 6755 put_rpccred(cred); 6756 6757 return status; 6758 } 6759 6760 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 6761 struct nfs4_secinfo_flavors *flavors) 6762 { 6763 struct nfs4_exception exception = { }; 6764 int err; 6765 do { 6766 err = -NFS4ERR_WRONGSEC; 6767 6768 /* try to use integrity protection with machine cred */ 6769 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 6770 err = _nfs4_proc_secinfo(dir, name, flavors, true); 6771 6772 /* 6773 * if unable to use integrity protection, or SECINFO with 6774 * integrity protection returns NFS4ERR_WRONGSEC (which is 6775 * disallowed by spec, but exists in deployed servers) use 6776 * the current filesystem's rpc_client and the user cred. 6777 */ 6778 if (err == -NFS4ERR_WRONGSEC) 6779 err = _nfs4_proc_secinfo(dir, name, flavors, false); 6780 6781 trace_nfs4_secinfo(dir, name, err); 6782 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6783 &exception); 6784 } while (exception.retry); 6785 return err; 6786 } 6787 6788 #ifdef CONFIG_NFS_V4_1 6789 /* 6790 * Check the exchange flags returned by the server for invalid flags, having 6791 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 6792 * DS flags set. 
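 *
 * In other words the server must not set bits outside
 * EXCHGID4_FLAG_MASK_R, must not claim to be both a pNFS metadata
 * server and a non-pNFS server, and must set at least one of the
 * NON_PNFS/PNFS_MDS/PNFS_DS role flags. For example USE_PNFS_MDS
 * together with USE_PNFS_DS is not rejected, while USE_PNFS_MDS
 * together with USE_NON_PNFS is.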
6793 */ 6794 static int nfs4_check_cl_exchange_flags(u32 flags) 6795 { 6796 if (flags & ~EXCHGID4_FLAG_MASK_R) 6797 goto out_inval; 6798 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 6799 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 6800 goto out_inval; 6801 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 6802 goto out_inval; 6803 return NFS_OK; 6804 out_inval: 6805 return -NFS4ERR_INVAL; 6806 } 6807 6808 static bool 6809 nfs41_same_server_scope(struct nfs41_server_scope *a, 6810 struct nfs41_server_scope *b) 6811 { 6812 if (a->server_scope_sz == b->server_scope_sz && 6813 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 6814 return true; 6815 6816 return false; 6817 } 6818 6819 static void 6820 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 6821 { 6822 } 6823 6824 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 6825 .rpc_call_done = &nfs4_bind_one_conn_to_session_done, 6826 }; 6827 6828 /* 6829 * nfs4_proc_bind_one_conn_to_session() 6830 * 6831 * The 4.1 client currently uses the same TCP connection for the 6832 * fore and backchannel. 6833 */ 6834 static 6835 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 6836 struct rpc_xprt *xprt, 6837 struct nfs_client *clp, 6838 struct rpc_cred *cred) 6839 { 6840 int status; 6841 struct nfs41_bind_conn_to_session_args args = { 6842 .client = clp, 6843 .dir = NFS4_CDFC4_FORE_OR_BOTH, 6844 }; 6845 struct nfs41_bind_conn_to_session_res res; 6846 struct rpc_message msg = { 6847 .rpc_proc = 6848 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 6849 .rpc_argp = &args, 6850 .rpc_resp = &res, 6851 .rpc_cred = cred, 6852 }; 6853 struct rpc_task_setup task_setup_data = { 6854 .rpc_client = clnt, 6855 .rpc_xprt = xprt, 6856 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 6857 .rpc_message = &msg, 6858 .flags = RPC_TASK_TIMEOUT, 6859 }; 6860 struct rpc_task *task; 6861 6862 dprintk("--> %s\n", __func__); 6863 6864 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 6865 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 6866 args.dir = NFS4_CDFC4_FORE; 6867 6868 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 6869 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 6870 args.dir = NFS4_CDFC4_FORE; 6871 6872 task = rpc_run_task(&task_setup_data); 6873 if (!IS_ERR(task)) { 6874 status = task->tk_status; 6875 rpc_put_task(task); 6876 } else 6877 status = PTR_ERR(task); 6878 trace_nfs4_bind_conn_to_session(clp, status); 6879 if (status == 0) { 6880 if (memcmp(res.sessionid.data, 6881 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 6882 dprintk("NFS: %s: Session ID mismatch\n", __func__); 6883 status = -EIO; 6884 goto out; 6885 } 6886 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 6887 dprintk("NFS: %s: Unexpected direction from server\n", 6888 __func__); 6889 status = -EIO; 6890 goto out; 6891 } 6892 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 6893 dprintk("NFS: %s: Server returned RDMA mode = true\n", 6894 __func__); 6895 status = -EIO; 6896 goto out; 6897 } 6898 } 6899 out: 6900 dprintk("<-- %s status= %d\n", __func__, status); 6901 return status; 6902 } 6903 6904 struct rpc_bind_conn_calldata { 6905 struct nfs_client *clp; 6906 struct rpc_cred *cred; 6907 }; 6908 6909 static int 6910 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 6911 struct rpc_xprt *xprt, 6912 void *calldata) 6913 { 6914 struct rpc_bind_conn_calldata *p = calldata; 6915 6916 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, 
p->cred); 6917 } 6918 6919 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 6920 { 6921 struct rpc_bind_conn_calldata data = { 6922 .clp = clp, 6923 .cred = cred, 6924 }; 6925 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 6926 nfs4_proc_bind_conn_to_session_callback, &data); 6927 } 6928 6929 /* 6930 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 6931 * and operations we'd like to see to enable certain features in the allow map 6932 */ 6933 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 6934 .how = SP4_MACH_CRED, 6935 .enforce.u.words = { 6936 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6937 1 << (OP_EXCHANGE_ID - 32) | 6938 1 << (OP_CREATE_SESSION - 32) | 6939 1 << (OP_DESTROY_SESSION - 32) | 6940 1 << (OP_DESTROY_CLIENTID - 32) 6941 }, 6942 .allow.u.words = { 6943 [0] = 1 << (OP_CLOSE) | 6944 1 << (OP_OPEN_DOWNGRADE) | 6945 1 << (OP_LOCKU) | 6946 1 << (OP_DELEGRETURN) | 6947 1 << (OP_COMMIT), 6948 [1] = 1 << (OP_SECINFO - 32) | 6949 1 << (OP_SECINFO_NO_NAME - 32) | 6950 1 << (OP_LAYOUTRETURN - 32) | 6951 1 << (OP_TEST_STATEID - 32) | 6952 1 << (OP_FREE_STATEID - 32) | 6953 1 << (OP_WRITE - 32) 6954 } 6955 }; 6956 6957 /* 6958 * Select the state protection mode for client `clp' given the server results 6959 * from exchange_id in `sp'. 6960 * 6961 * Returns 0 on success, negative errno otherwise. 6962 */ 6963 static int nfs4_sp4_select_mode(struct nfs_client *clp, 6964 struct nfs41_state_protection *sp) 6965 { 6966 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 6967 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6968 1 << (OP_EXCHANGE_ID - 32) | 6969 1 << (OP_CREATE_SESSION - 32) | 6970 1 << (OP_DESTROY_SESSION - 32) | 6971 1 << (OP_DESTROY_CLIENTID - 32) 6972 }; 6973 unsigned int i; 6974 6975 if (sp->how == SP4_MACH_CRED) { 6976 /* Print state protect result */ 6977 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 6978 for (i = 0; i <= LAST_NFS4_OP; i++) { 6979 if (test_bit(i, sp->enforce.u.longs)) 6980 dfprintk(MOUNT, " enforce op %d\n", i); 6981 if (test_bit(i, sp->allow.u.longs)) 6982 dfprintk(MOUNT, " allow op %d\n", i); 6983 } 6984 6985 /* make sure nothing is on enforce list that isn't supported */ 6986 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 6987 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 6988 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 6989 return -EINVAL; 6990 } 6991 } 6992 6993 /* 6994 * Minimal mode - state operations are allowed to use machine 6995 * credential. Note this already happens by default, so the 6996 * client doesn't have to do anything more than the negotiation. 6997 * 6998 * NOTE: we don't care if EXCHANGE_ID is in the list - 6999 * we're already using the machine cred for exchange_id 7000 * and will never use a different cred. 
7001 */ 7002 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 7003 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 7004 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 7005 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 7006 dfprintk(MOUNT, "sp4_mach_cred:\n"); 7007 dfprintk(MOUNT, " minimal mode enabled\n"); 7008 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags); 7009 } else { 7010 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 7011 return -EINVAL; 7012 } 7013 7014 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 7015 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 7016 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 7017 test_bit(OP_LOCKU, sp->allow.u.longs)) { 7018 dfprintk(MOUNT, " cleanup mode enabled\n"); 7019 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags); 7020 } 7021 7022 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 7023 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 7024 set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, 7025 &clp->cl_sp4_flags); 7026 } 7027 7028 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 7029 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 7030 dfprintk(MOUNT, " secinfo mode enabled\n"); 7031 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags); 7032 } 7033 7034 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 7035 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 7036 dfprintk(MOUNT, " stateid mode enabled\n"); 7037 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags); 7038 } 7039 7040 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 7041 dfprintk(MOUNT, " write mode enabled\n"); 7042 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags); 7043 } 7044 7045 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 7046 dfprintk(MOUNT, " commit mode enabled\n"); 7047 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags); 7048 } 7049 } 7050 7051 return 0; 7052 } 7053 7054 /* 7055 * _nfs4_proc_exchange_id() 7056 * 7057 * Wrapper for EXCHANGE_ID operation. 
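 *
 * 'sp4_how' selects the state protection mode that is requested
 * (SP4_NONE or SP4_MACH_CRED); nfs4_proc_exchange_id() below tries
 * SP4_MACH_CRED first when a krb5i/krb5p credential is in use and
 * falls back to SP4_NONE.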
7058 */ 7059 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, 7060 u32 sp4_how) 7061 { 7062 nfs4_verifier verifier; 7063 struct nfs41_exchange_id_args args = { 7064 .verifier = &verifier, 7065 .client = clp, 7066 #ifdef CONFIG_NFS_V4_1_MIGRATION 7067 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 7068 EXCHGID4_FLAG_BIND_PRINC_STATEID | 7069 EXCHGID4_FLAG_SUPP_MOVED_MIGR, 7070 #else 7071 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 7072 EXCHGID4_FLAG_BIND_PRINC_STATEID, 7073 #endif 7074 }; 7075 struct nfs41_exchange_id_res res = { 7076 0 7077 }; 7078 int status; 7079 struct rpc_message msg = { 7080 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 7081 .rpc_argp = &args, 7082 .rpc_resp = &res, 7083 .rpc_cred = cred, 7084 }; 7085 7086 nfs4_init_boot_verifier(clp, &verifier); 7087 7088 status = nfs4_init_uniform_client_string(clp); 7089 if (status) 7090 goto out; 7091 7092 dprintk("NFS call exchange_id auth=%s, '%s'\n", 7093 clp->cl_rpcclient->cl_auth->au_ops->au_name, 7094 clp->cl_owner_id); 7095 7096 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 7097 GFP_NOFS); 7098 if (unlikely(res.server_owner == NULL)) { 7099 status = -ENOMEM; 7100 goto out; 7101 } 7102 7103 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 7104 GFP_NOFS); 7105 if (unlikely(res.server_scope == NULL)) { 7106 status = -ENOMEM; 7107 goto out_server_owner; 7108 } 7109 7110 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 7111 if (unlikely(res.impl_id == NULL)) { 7112 status = -ENOMEM; 7113 goto out_server_scope; 7114 } 7115 7116 switch (sp4_how) { 7117 case SP4_NONE: 7118 args.state_protect.how = SP4_NONE; 7119 break; 7120 7121 case SP4_MACH_CRED: 7122 args.state_protect = nfs4_sp4_mach_cred_request; 7123 break; 7124 7125 default: 7126 /* unsupported! 
*/ 7127 WARN_ON_ONCE(1); 7128 status = -EINVAL; 7129 goto out_impl_id; 7130 } 7131 7132 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7133 trace_nfs4_exchange_id(clp, status); 7134 if (status == 0) 7135 status = nfs4_check_cl_exchange_flags(res.flags); 7136 7137 if (status == 0) 7138 status = nfs4_sp4_select_mode(clp, &res.state_protect); 7139 7140 if (status == 0) { 7141 clp->cl_clientid = res.clientid; 7142 clp->cl_exchange_flags = res.flags; 7143 /* Client ID is not confirmed */ 7144 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) { 7145 clear_bit(NFS4_SESSION_ESTABLISHED, 7146 &clp->cl_session->session_state); 7147 clp->cl_seqid = res.seqid; 7148 } 7149 7150 kfree(clp->cl_serverowner); 7151 clp->cl_serverowner = res.server_owner; 7152 res.server_owner = NULL; 7153 7154 /* use the most recent implementation id */ 7155 kfree(clp->cl_implid); 7156 clp->cl_implid = res.impl_id; 7157 res.impl_id = NULL; 7158 7159 if (clp->cl_serverscope != NULL && 7160 !nfs41_same_server_scope(clp->cl_serverscope, 7161 res.server_scope)) { 7162 dprintk("%s: server_scope mismatch detected\n", 7163 __func__); 7164 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 7165 kfree(clp->cl_serverscope); 7166 clp->cl_serverscope = NULL; 7167 } 7168 7169 if (clp->cl_serverscope == NULL) { 7170 clp->cl_serverscope = res.server_scope; 7171 res.server_scope = NULL; 7172 } 7173 } 7174 7175 out_impl_id: 7176 kfree(res.impl_id); 7177 out_server_scope: 7178 kfree(res.server_scope); 7179 out_server_owner: 7180 kfree(res.server_owner); 7181 out: 7182 if (clp->cl_implid != NULL) 7183 dprintk("NFS reply exchange_id: Server Implementation ID: " 7184 "domain: %s, name: %s, date: %llu,%u\n", 7185 clp->cl_implid->domain, clp->cl_implid->name, 7186 clp->cl_implid->date.seconds, 7187 clp->cl_implid->date.nseconds); 7188 dprintk("NFS reply exchange_id: %d\n", status); 7189 return status; 7190 } 7191 7192 /* 7193 * nfs4_proc_exchange_id() 7194 * 7195 * Returns zero, a negative errno, or a negative NFS4ERR status code. 7196 * 7197 * Since the clientid has expired, all compounds using sessions 7198 * associated with the stale clientid will be returning 7199 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 7200 * be in some phase of session reset. 7201 * 7202 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
7203 */ 7204 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 7205 { 7206 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 7207 int status; 7208 7209 /* try SP4_MACH_CRED if krb5i/p */ 7210 if (authflavor == RPC_AUTH_GSS_KRB5I || 7211 authflavor == RPC_AUTH_GSS_KRB5P) { 7212 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 7213 if (!status) 7214 return 0; 7215 } 7216 7217 /* try SP4_NONE */ 7218 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 7219 } 7220 7221 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 7222 struct rpc_cred *cred) 7223 { 7224 struct rpc_message msg = { 7225 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 7226 .rpc_argp = clp, 7227 .rpc_cred = cred, 7228 }; 7229 int status; 7230 7231 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7232 trace_nfs4_destroy_clientid(clp, status); 7233 if (status) 7234 dprintk("NFS: Got error %d from the server %s on " 7235 "DESTROY_CLIENTID.", status, clp->cl_hostname); 7236 return status; 7237 } 7238 7239 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 7240 struct rpc_cred *cred) 7241 { 7242 unsigned int loop; 7243 int ret; 7244 7245 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 7246 ret = _nfs4_proc_destroy_clientid(clp, cred); 7247 switch (ret) { 7248 case -NFS4ERR_DELAY: 7249 case -NFS4ERR_CLIENTID_BUSY: 7250 ssleep(1); 7251 break; 7252 default: 7253 return ret; 7254 } 7255 } 7256 return 0; 7257 } 7258 7259 int nfs4_destroy_clientid(struct nfs_client *clp) 7260 { 7261 struct rpc_cred *cred; 7262 int ret = 0; 7263 7264 if (clp->cl_mvops->minor_version < 1) 7265 goto out; 7266 if (clp->cl_exchange_flags == 0) 7267 goto out; 7268 if (clp->cl_preserve_clid) 7269 goto out; 7270 cred = nfs4_get_clid_cred(clp); 7271 ret = nfs4_proc_destroy_clientid(clp, cred); 7272 if (cred) 7273 put_rpccred(cred); 7274 switch (ret) { 7275 case 0: 7276 case -NFS4ERR_STALE_CLIENTID: 7277 clp->cl_exchange_flags = 0; 7278 } 7279 out: 7280 return ret; 7281 } 7282 7283 struct nfs4_get_lease_time_data { 7284 struct nfs4_get_lease_time_args *args; 7285 struct nfs4_get_lease_time_res *res; 7286 struct nfs_client *clp; 7287 }; 7288 7289 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 7290 void *calldata) 7291 { 7292 struct nfs4_get_lease_time_data *data = 7293 (struct nfs4_get_lease_time_data *)calldata; 7294 7295 dprintk("--> %s\n", __func__); 7296 /* just setup sequence, do not trigger session recovery 7297 since we're invoked within one */ 7298 nfs41_setup_sequence(data->clp->cl_session, 7299 &data->args->la_seq_args, 7300 &data->res->lr_seq_res, 7301 task); 7302 dprintk("<-- %s\n", __func__); 7303 } 7304 7305 /* 7306 * Called from nfs4_state_manager thread for session setup, so don't recover 7307 * from sequence operation or clientid errors. 
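 *
 * NFS4ERR_DELAY and NFS4ERR_GRACE are simply retried after a short
 * delay (see below); other errors are left for the caller to handle.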
7308 */ 7309 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 7310 { 7311 struct nfs4_get_lease_time_data *data = 7312 (struct nfs4_get_lease_time_data *)calldata; 7313 7314 dprintk("--> %s\n", __func__); 7315 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 7316 return; 7317 switch (task->tk_status) { 7318 case -NFS4ERR_DELAY: 7319 case -NFS4ERR_GRACE: 7320 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 7321 rpc_delay(task, NFS4_POLL_RETRY_MIN); 7322 task->tk_status = 0; 7323 /* fall through */ 7324 case -NFS4ERR_RETRY_UNCACHED_REP: 7325 rpc_restart_call_prepare(task); 7326 return; 7327 } 7328 dprintk("<-- %s\n", __func__); 7329 } 7330 7331 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 7332 .rpc_call_prepare = nfs4_get_lease_time_prepare, 7333 .rpc_call_done = nfs4_get_lease_time_done, 7334 }; 7335 7336 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 7337 { 7338 struct rpc_task *task; 7339 struct nfs4_get_lease_time_args args; 7340 struct nfs4_get_lease_time_res res = { 7341 .lr_fsinfo = fsinfo, 7342 }; 7343 struct nfs4_get_lease_time_data data = { 7344 .args = &args, 7345 .res = &res, 7346 .clp = clp, 7347 }; 7348 struct rpc_message msg = { 7349 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 7350 .rpc_argp = &args, 7351 .rpc_resp = &res, 7352 }; 7353 struct rpc_task_setup task_setup = { 7354 .rpc_client = clp->cl_rpcclient, 7355 .rpc_message = &msg, 7356 .callback_ops = &nfs4_get_lease_time_ops, 7357 .callback_data = &data, 7358 .flags = RPC_TASK_TIMEOUT, 7359 }; 7360 int status; 7361 7362 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 7363 nfs4_set_sequence_privileged(&args.la_seq_args); 7364 dprintk("--> %s\n", __func__); 7365 task = rpc_run_task(&task_setup); 7366 7367 if (IS_ERR(task)) 7368 status = PTR_ERR(task); 7369 else { 7370 status = task->tk_status; 7371 rpc_put_task(task); 7372 } 7373 dprintk("<-- %s return %d\n", __func__, status); 7374 7375 return status; 7376 } 7377 7378 /* 7379 * Initialize the values to be used by the client in CREATE_SESSION 7380 * If nfs4_init_session set the fore channel request and response sizes, 7381 * use them. 7382 * 7383 * Set the back channel max_resp_sz_cached to zero to force the client to 7384 * always set csa_cachethis to FALSE because the current implementation 7385 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
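 *
 * The fore channel request and response sizes are sized for a maximal
 * READ/WRITE payload (NFS_MAX_FILE_IO_SIZE) plus the fixed compound
 * overhead, while the back channel is limited to whatever the RPC
 * transport can carry for callbacks (rpc_max_bc_payload()).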
7386 */ 7387 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args, 7388 struct rpc_clnt *clnt) 7389 { 7390 unsigned int max_rqst_sz, max_resp_sz; 7391 unsigned int max_bc_payload = rpc_max_bc_payload(clnt); 7392 7393 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 7394 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 7395 7396 /* Fore channel attributes */ 7397 args->fc_attrs.max_rqst_sz = max_rqst_sz; 7398 args->fc_attrs.max_resp_sz = max_resp_sz; 7399 args->fc_attrs.max_ops = NFS4_MAX_OPS; 7400 args->fc_attrs.max_reqs = max_session_slots; 7401 7402 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 7403 "max_ops=%u max_reqs=%u\n", 7404 __func__, 7405 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 7406 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 7407 7408 /* Back channel attributes */ 7409 args->bc_attrs.max_rqst_sz = max_bc_payload; 7410 args->bc_attrs.max_resp_sz = max_bc_payload; 7411 args->bc_attrs.max_resp_sz_cached = 0; 7412 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 7413 args->bc_attrs.max_reqs = NFS41_BC_MAX_CALLBACKS; 7414 7415 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 7416 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 7417 __func__, 7418 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 7419 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 7420 args->bc_attrs.max_reqs); 7421 } 7422 7423 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 7424 struct nfs41_create_session_res *res) 7425 { 7426 struct nfs4_channel_attrs *sent = &args->fc_attrs; 7427 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 7428 7429 if (rcvd->max_resp_sz > sent->max_resp_sz) 7430 return -EINVAL; 7431 /* 7432 * Our requested max_ops is the minimum we need; we're not 7433 * prepared to break up compounds into smaller pieces than that. 
7434 * So, no point even trying to continue if the server won't 7435 * cooperate: 7436 */ 7437 if (rcvd->max_ops < sent->max_ops) 7438 return -EINVAL; 7439 if (rcvd->max_reqs == 0) 7440 return -EINVAL; 7441 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 7442 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 7443 return 0; 7444 } 7445 7446 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 7447 struct nfs41_create_session_res *res) 7448 { 7449 struct nfs4_channel_attrs *sent = &args->bc_attrs; 7450 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 7451 7452 if (!(res->flags & SESSION4_BACK_CHAN)) 7453 goto out; 7454 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 7455 return -EINVAL; 7456 if (rcvd->max_resp_sz < sent->max_resp_sz) 7457 return -EINVAL; 7458 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 7459 return -EINVAL; 7460 /* These would render the backchannel useless: */ 7461 if (rcvd->max_ops != sent->max_ops) 7462 return -EINVAL; 7463 if (rcvd->max_reqs != sent->max_reqs) 7464 return -EINVAL; 7465 out: 7466 return 0; 7467 } 7468 7469 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 7470 struct nfs41_create_session_res *res) 7471 { 7472 int ret; 7473 7474 ret = nfs4_verify_fore_channel_attrs(args, res); 7475 if (ret) 7476 return ret; 7477 return nfs4_verify_back_channel_attrs(args, res); 7478 } 7479 7480 static void nfs4_update_session(struct nfs4_session *session, 7481 struct nfs41_create_session_res *res) 7482 { 7483 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 7484 /* Mark client id and session as being confirmed */ 7485 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 7486 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 7487 session->flags = res->flags; 7488 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 7489 if (res->flags & SESSION4_BACK_CHAN) 7490 memcpy(&session->bc_attrs, &res->bc_attrs, 7491 sizeof(session->bc_attrs)); 7492 } 7493 7494 static int _nfs4_proc_create_session(struct nfs_client *clp, 7495 struct rpc_cred *cred) 7496 { 7497 struct nfs4_session *session = clp->cl_session; 7498 struct nfs41_create_session_args args = { 7499 .client = clp, 7500 .clientid = clp->cl_clientid, 7501 .seqid = clp->cl_seqid, 7502 .cb_program = NFS4_CALLBACK, 7503 }; 7504 struct nfs41_create_session_res res; 7505 7506 struct rpc_message msg = { 7507 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 7508 .rpc_argp = &args, 7509 .rpc_resp = &res, 7510 .rpc_cred = cred, 7511 }; 7512 int status; 7513 7514 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 7515 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 7516 7517 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7518 trace_nfs4_create_session(clp, status); 7519 7520 if (!status) { 7521 /* Verify the session's negotiated channel_attrs values */ 7522 status = nfs4_verify_channel_attrs(&args, &res); 7523 /* Increment the clientid slot sequence id */ 7524 if (clp->cl_seqid == res.seqid) 7525 clp->cl_seqid++; 7526 if (status) 7527 goto out; 7528 nfs4_update_session(session, &res); 7529 } 7530 out: 7531 return status; 7532 } 7533 7534 /* 7535 * Issues a CREATE_SESSION operation to the server. 7536 * It is the responsibility of the caller to verify the session is 7537 * expired before calling this routine. 
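 *
 * On success the fore and back channel slot tables are initialised
 * (or reset) from the negotiated channel attributes, see below.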
7538 */ 7539 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred) 7540 { 7541 int status; 7542 unsigned *ptr; 7543 struct nfs4_session *session = clp->cl_session; 7544 7545 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 7546 7547 status = _nfs4_proc_create_session(clp, cred); 7548 if (status) 7549 goto out; 7550 7551 /* Init or reset the session slot tables */ 7552 status = nfs4_setup_session_slot_tables(session); 7553 dprintk("slot table setup returned %d\n", status); 7554 if (status) 7555 goto out; 7556 7557 ptr = (unsigned *)&session->sess_id.data[0]; 7558 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 7559 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 7560 out: 7561 dprintk("<-- %s\n", __func__); 7562 return status; 7563 } 7564 7565 /* 7566 * Issue the over-the-wire RPC DESTROY_SESSION. 7567 * The caller must serialize access to this routine. 7568 */ 7569 int nfs4_proc_destroy_session(struct nfs4_session *session, 7570 struct rpc_cred *cred) 7571 { 7572 struct rpc_message msg = { 7573 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 7574 .rpc_argp = session, 7575 .rpc_cred = cred, 7576 }; 7577 int status = 0; 7578 7579 dprintk("--> nfs4_proc_destroy_session\n"); 7580 7581 /* session is still being setup */ 7582 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 7583 return 0; 7584 7585 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7586 trace_nfs4_destroy_session(session->clp, status); 7587 7588 if (status) 7589 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 7590 "Session has been destroyed regardless...\n", status); 7591 7592 dprintk("<-- nfs4_proc_destroy_session\n"); 7593 return status; 7594 } 7595 7596 /* 7597 * Renew the cl_session lease. 
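 *
 * NFSv4.1 has no RENEW operation; the lease is kept alive by sending
 * an otherwise empty SEQUENCE compound, which is what the machinery
 * below implements.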
7598 */ 7599 struct nfs4_sequence_data { 7600 struct nfs_client *clp; 7601 struct nfs4_sequence_args args; 7602 struct nfs4_sequence_res res; 7603 }; 7604 7605 static void nfs41_sequence_release(void *data) 7606 { 7607 struct nfs4_sequence_data *calldata = data; 7608 struct nfs_client *clp = calldata->clp; 7609 7610 if (atomic_read(&clp->cl_count) > 1) 7611 nfs4_schedule_state_renewal(clp); 7612 nfs_put_client(clp); 7613 kfree(calldata); 7614 } 7615 7616 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7617 { 7618 switch(task->tk_status) { 7619 case -NFS4ERR_DELAY: 7620 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7621 return -EAGAIN; 7622 default: 7623 nfs4_schedule_lease_recovery(clp); 7624 } 7625 return 0; 7626 } 7627 7628 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 7629 { 7630 struct nfs4_sequence_data *calldata = data; 7631 struct nfs_client *clp = calldata->clp; 7632 7633 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 7634 return; 7635 7636 trace_nfs4_sequence(clp, task->tk_status); 7637 if (task->tk_status < 0) { 7638 dprintk("%s ERROR %d\n", __func__, task->tk_status); 7639 if (atomic_read(&clp->cl_count) == 1) 7640 goto out; 7641 7642 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 7643 rpc_restart_call_prepare(task); 7644 return; 7645 } 7646 } 7647 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 7648 out: 7649 dprintk("<-- %s\n", __func__); 7650 } 7651 7652 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 7653 { 7654 struct nfs4_sequence_data *calldata = data; 7655 struct nfs_client *clp = calldata->clp; 7656 struct nfs4_sequence_args *args; 7657 struct nfs4_sequence_res *res; 7658 7659 args = task->tk_msg.rpc_argp; 7660 res = task->tk_msg.rpc_resp; 7661 7662 nfs41_setup_sequence(clp->cl_session, args, res, task); 7663 } 7664 7665 static const struct rpc_call_ops nfs41_sequence_ops = { 7666 .rpc_call_done = nfs41_sequence_call_done, 7667 .rpc_call_prepare = nfs41_sequence_prepare, 7668 .rpc_release = nfs41_sequence_release, 7669 }; 7670 7671 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 7672 struct rpc_cred *cred, 7673 bool is_privileged) 7674 { 7675 struct nfs4_sequence_data *calldata; 7676 struct rpc_message msg = { 7677 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 7678 .rpc_cred = cred, 7679 }; 7680 struct rpc_task_setup task_setup_data = { 7681 .rpc_client = clp->cl_rpcclient, 7682 .rpc_message = &msg, 7683 .callback_ops = &nfs41_sequence_ops, 7684 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 7685 }; 7686 7687 if (!atomic_inc_not_zero(&clp->cl_count)) 7688 return ERR_PTR(-EIO); 7689 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7690 if (calldata == NULL) { 7691 nfs_put_client(clp); 7692 return ERR_PTR(-ENOMEM); 7693 } 7694 nfs4_init_sequence(&calldata->args, &calldata->res, 0); 7695 if (is_privileged) 7696 nfs4_set_sequence_privileged(&calldata->args); 7697 msg.rpc_argp = &calldata->args; 7698 msg.rpc_resp = &calldata->res; 7699 calldata->clp = clp; 7700 task_setup_data.callback_data = calldata; 7701 7702 return rpc_run_task(&task_setup_data); 7703 } 7704 7705 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 7706 { 7707 struct rpc_task *task; 7708 int ret = 0; 7709 7710 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 7711 return -EAGAIN; 7712 task = _nfs41_proc_sequence(clp, cred, false); 7713 if (IS_ERR(task)) 7714 ret = PTR_ERR(task); 7715 else 7716 rpc_put_task_async(task); 
7717 dprintk("<-- %s status=%d\n", __func__, ret); 7718 return ret; 7719 } 7720 7721 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 7722 { 7723 struct rpc_task *task; 7724 int ret; 7725 7726 task = _nfs41_proc_sequence(clp, cred, true); 7727 if (IS_ERR(task)) { 7728 ret = PTR_ERR(task); 7729 goto out; 7730 } 7731 ret = rpc_wait_for_completion_task(task); 7732 if (!ret) 7733 ret = task->tk_status; 7734 rpc_put_task(task); 7735 out: 7736 dprintk("<-- %s status=%d\n", __func__, ret); 7737 return ret; 7738 } 7739 7740 struct nfs4_reclaim_complete_data { 7741 struct nfs_client *clp; 7742 struct nfs41_reclaim_complete_args arg; 7743 struct nfs41_reclaim_complete_res res; 7744 }; 7745 7746 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 7747 { 7748 struct nfs4_reclaim_complete_data *calldata = data; 7749 7750 nfs41_setup_sequence(calldata->clp->cl_session, 7751 &calldata->arg.seq_args, 7752 &calldata->res.seq_res, 7753 task); 7754 } 7755 7756 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7757 { 7758 switch(task->tk_status) { 7759 case 0: 7760 case -NFS4ERR_COMPLETE_ALREADY: 7761 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 7762 break; 7763 case -NFS4ERR_DELAY: 7764 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7765 /* fall through */ 7766 case -NFS4ERR_RETRY_UNCACHED_REP: 7767 return -EAGAIN; 7768 default: 7769 nfs4_schedule_lease_recovery(clp); 7770 } 7771 return 0; 7772 } 7773 7774 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 7775 { 7776 struct nfs4_reclaim_complete_data *calldata = data; 7777 struct nfs_client *clp = calldata->clp; 7778 struct nfs4_sequence_res *res = &calldata->res.seq_res; 7779 7780 dprintk("--> %s\n", __func__); 7781 if (!nfs41_sequence_done(task, res)) 7782 return; 7783 7784 trace_nfs4_reclaim_complete(clp, task->tk_status); 7785 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 7786 rpc_restart_call_prepare(task); 7787 return; 7788 } 7789 dprintk("<-- %s\n", __func__); 7790 } 7791 7792 static void nfs4_free_reclaim_complete_data(void *data) 7793 { 7794 struct nfs4_reclaim_complete_data *calldata = data; 7795 7796 kfree(calldata); 7797 } 7798 7799 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 7800 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 7801 .rpc_call_done = nfs4_reclaim_complete_done, 7802 .rpc_release = nfs4_free_reclaim_complete_data, 7803 }; 7804 7805 /* 7806 * Issue a global reclaim complete. 
7807 */
7808 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7809 struct rpc_cred *cred)
7810 {
7811 struct nfs4_reclaim_complete_data *calldata;
7812 struct rpc_task *task;
7813 struct rpc_message msg = {
7814 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7815 .rpc_cred = cred,
7816 };
7817 struct rpc_task_setup task_setup_data = {
7818 .rpc_client = clp->cl_rpcclient,
7819 .rpc_message = &msg,
7820 .callback_ops = &nfs4_reclaim_complete_call_ops,
7821 .flags = RPC_TASK_ASYNC,
7822 };
7823 int status = -ENOMEM;
7824
7825 dprintk("--> %s\n", __func__);
7826 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7827 if (calldata == NULL)
7828 goto out;
7829 calldata->clp = clp;
7830 calldata->arg.one_fs = 0;
7831
7832 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7833 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7834 msg.rpc_argp = &calldata->arg;
7835 msg.rpc_resp = &calldata->res;
7836 task_setup_data.callback_data = calldata;
7837 task = rpc_run_task(&task_setup_data);
7838 if (IS_ERR(task)) {
7839 status = PTR_ERR(task);
7840 goto out;
7841 }
7842 status = nfs4_wait_for_completion_rpc_task(task);
7843 if (status == 0)
7844 status = task->tk_status;
7845 rpc_put_task(task);
7846 return 0;
7847 out:
7848 dprintk("<-- %s status=%d\n", __func__, status);
7849 return status;
7850 }
7851
7852 static void
7853 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7854 {
7855 struct nfs4_layoutget *lgp = calldata;
7856 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7857 struct nfs4_session *session = nfs4_get_session(server);
7858
7859 dprintk("--> %s\n", __func__);
7860 nfs41_setup_sequence(session, &lgp->args.seq_args,
7861 &lgp->res.seq_res, task);
7862 dprintk("<-- %s\n", __func__);
7863 }
7864
7865 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7866 {
7867 struct nfs4_layoutget *lgp = calldata;
7868
7869 dprintk("--> %s\n", __func__);
7870 nfs41_sequence_done(task, &lgp->res.seq_res);
7871 dprintk("<-- %s\n", __func__);
7872 }
7873
7874 static int
7875 nfs4_layoutget_handle_exception(struct rpc_task *task,
7876 struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
7877 {
7878 struct inode *inode = lgp->args.inode;
7879 struct nfs_server *server = NFS_SERVER(inode);
7880 struct pnfs_layout_hdr *lo;
7881 int nfs4err = task->tk_status;
7882 int err, status = 0;
7883 LIST_HEAD(head);
7884
7885 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7886
7887 switch (nfs4err) {
7888 case 0:
7889 goto out;
7890
7891 /*
7892 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
7893 * on the file. Set tk_status to -ENODATA to tell the upper layer to
7894 * retry using regular, in-band I/O through the MDS.
7895 */
7896 case -NFS4ERR_LAYOUTUNAVAILABLE:
7897 status = -ENODATA;
7898 goto out;
7899 /*
7900 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
7901 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
7902 */
7903 case -NFS4ERR_BADLAYOUT:
7904 status = -EOVERFLOW;
7905 goto out;
7906 /*
7907 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
7908 * (or clients) writing to the same RAID stripe except when
7909 * the minlength argument is 0 (see RFC5661 section 18.43.3).
7910 *
7911 * Treat it like we would RECALLCONFLICT -- we retry for a little
7912 * while, and then eventually give up.
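 *
 * The -EBUSY / -ERECALLCONFLICT values set below are not returned to
 * userspace as such; the pNFS caller of nfs4_proc_layoutget() decides,
 * based on them and on the *timeout value threaded through the
 * nfs4_exception, whether to keep retrying or to give up.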
7913 */ 7914 case -NFS4ERR_LAYOUTTRYLATER: 7915 if (lgp->args.minlength == 0) { 7916 status = -EOVERFLOW; 7917 goto out; 7918 } 7919 status = -EBUSY; 7920 break; 7921 case -NFS4ERR_RECALLCONFLICT: 7922 status = -ERECALLCONFLICT; 7923 break; 7924 case -NFS4ERR_EXPIRED: 7925 case -NFS4ERR_BAD_STATEID: 7926 exception->timeout = 0; 7927 spin_lock(&inode->i_lock); 7928 lo = NFS_I(inode)->layout; 7929 /* If the open stateid was bad, then recover it. */ 7930 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 7931 nfs4_stateid_match_other(&lgp->args.stateid, 7932 &lgp->args.ctx->state->stateid)) { 7933 spin_unlock(&inode->i_lock); 7934 exception->state = lgp->args.ctx->state; 7935 break; 7936 } 7937 7938 /* 7939 * Mark the bad layout state as invalid, then retry 7940 */ 7941 pnfs_mark_layout_stateid_invalid(lo, &head); 7942 spin_unlock(&inode->i_lock); 7943 pnfs_free_lseg_list(&head); 7944 status = -EAGAIN; 7945 goto out; 7946 } 7947 7948 err = nfs4_handle_exception(server, nfs4err, exception); 7949 if (!status) { 7950 if (exception->retry) 7951 status = -EAGAIN; 7952 else 7953 status = err; 7954 } 7955 out: 7956 dprintk("<-- %s\n", __func__); 7957 return status; 7958 } 7959 7960 static size_t max_response_pages(struct nfs_server *server) 7961 { 7962 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 7963 return nfs_page_array_len(0, max_resp_sz); 7964 } 7965 7966 static void nfs4_free_pages(struct page **pages, size_t size) 7967 { 7968 int i; 7969 7970 if (!pages) 7971 return; 7972 7973 for (i = 0; i < size; i++) { 7974 if (!pages[i]) 7975 break; 7976 __free_page(pages[i]); 7977 } 7978 kfree(pages); 7979 } 7980 7981 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) 7982 { 7983 struct page **pages; 7984 int i; 7985 7986 pages = kcalloc(size, sizeof(struct page *), gfp_flags); 7987 if (!pages) { 7988 dprintk("%s: can't alloc array of %zu pages\n", __func__, size); 7989 return NULL; 7990 } 7991 7992 for (i = 0; i < size; i++) { 7993 pages[i] = alloc_page(gfp_flags); 7994 if (!pages[i]) { 7995 dprintk("%s: failed to allocate page\n", __func__); 7996 nfs4_free_pages(pages, size); 7997 return NULL; 7998 } 7999 } 8000 8001 return pages; 8002 } 8003 8004 static void nfs4_layoutget_release(void *calldata) 8005 { 8006 struct nfs4_layoutget *lgp = calldata; 8007 struct inode *inode = lgp->args.inode; 8008 struct nfs_server *server = NFS_SERVER(inode); 8009 size_t max_pages = max_response_pages(server); 8010 8011 dprintk("--> %s\n", __func__); 8012 nfs4_free_pages(lgp->args.layout.pages, max_pages); 8013 pnfs_put_layout_hdr(NFS_I(inode)->layout); 8014 put_nfs_open_context(lgp->args.ctx); 8015 kfree(calldata); 8016 dprintk("<-- %s\n", __func__); 8017 } 8018 8019 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 8020 .rpc_call_prepare = nfs4_layoutget_prepare, 8021 .rpc_call_done = nfs4_layoutget_done, 8022 .rpc_release = nfs4_layoutget_release, 8023 }; 8024 8025 struct pnfs_layout_segment * 8026 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags) 8027 { 8028 struct inode *inode = lgp->args.inode; 8029 struct nfs_server *server = NFS_SERVER(inode); 8030 size_t max_pages = max_response_pages(server); 8031 struct rpc_task *task; 8032 struct rpc_message msg = { 8033 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 8034 .rpc_argp = &lgp->args, 8035 .rpc_resp = &lgp->res, 8036 .rpc_cred = lgp->cred, 8037 }; 8038 struct rpc_task_setup task_setup_data = { 8039 .rpc_client = server->client, 8040 .rpc_message = &msg, 8041 
.callback_ops = &nfs4_layoutget_call_ops, 8042 .callback_data = lgp, 8043 .flags = RPC_TASK_ASYNC, 8044 }; 8045 struct pnfs_layout_segment *lseg = NULL; 8046 struct nfs4_exception exception = { 8047 .inode = inode, 8048 .timeout = *timeout, 8049 }; 8050 int status = 0; 8051 8052 dprintk("--> %s\n", __func__); 8053 8054 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ 8055 pnfs_get_layout_hdr(NFS_I(inode)->layout); 8056 8057 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); 8058 if (!lgp->args.layout.pages) { 8059 nfs4_layoutget_release(lgp); 8060 return ERR_PTR(-ENOMEM); 8061 } 8062 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 8063 8064 lgp->res.layoutp = &lgp->args.layout; 8065 lgp->res.seq_res.sr_slot = NULL; 8066 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 8067 8068 task = rpc_run_task(&task_setup_data); 8069 if (IS_ERR(task)) 8070 return ERR_CAST(task); 8071 status = nfs4_wait_for_completion_rpc_task(task); 8072 if (status == 0) { 8073 status = nfs4_layoutget_handle_exception(task, lgp, &exception); 8074 *timeout = exception.timeout; 8075 } 8076 8077 trace_nfs4_layoutget(lgp->args.ctx, 8078 &lgp->args.range, 8079 &lgp->res.range, 8080 &lgp->res.stateid, 8081 status); 8082 8083 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ 8084 if (status == 0 && lgp->res.layoutp->len) 8085 lseg = pnfs_layout_process(lgp); 8086 rpc_put_task(task); 8087 dprintk("<-- %s status=%d\n", __func__, status); 8088 if (status) 8089 return ERR_PTR(status); 8090 return lseg; 8091 } 8092 8093 static void 8094 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 8095 { 8096 struct nfs4_layoutreturn *lrp = calldata; 8097 8098 dprintk("--> %s\n", __func__); 8099 nfs41_setup_sequence(lrp->clp->cl_session, 8100 &lrp->args.seq_args, 8101 &lrp->res.seq_res, 8102 task); 8103 } 8104 8105 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 8106 { 8107 struct nfs4_layoutreturn *lrp = calldata; 8108 struct nfs_server *server; 8109 8110 dprintk("--> %s\n", __func__); 8111 8112 if (!nfs41_sequence_done(task, &lrp->res.seq_res)) 8113 return; 8114 8115 server = NFS_SERVER(lrp->args.inode); 8116 switch (task->tk_status) { 8117 default: 8118 task->tk_status = 0; 8119 case 0: 8120 break; 8121 case -NFS4ERR_DELAY: 8122 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) 8123 break; 8124 rpc_restart_call_prepare(task); 8125 return; 8126 } 8127 dprintk("<-- %s\n", __func__); 8128 } 8129 8130 static void nfs4_layoutreturn_release(void *calldata) 8131 { 8132 struct nfs4_layoutreturn *lrp = calldata; 8133 struct pnfs_layout_hdr *lo = lrp->args.layout; 8134 LIST_HEAD(freeme); 8135 8136 dprintk("--> %s\n", __func__); 8137 spin_lock(&lo->plh_inode->i_lock); 8138 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range, 8139 be32_to_cpu(lrp->args.stateid.seqid)); 8140 if (lrp->res.lrs_present && pnfs_layout_is_valid(lo)) 8141 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 8142 pnfs_clear_layoutreturn_waitbit(lo); 8143 spin_unlock(&lo->plh_inode->i_lock); 8144 pnfs_free_lseg_list(&freeme); 8145 pnfs_put_layout_hdr(lrp->args.layout); 8146 nfs_iput_and_deactive(lrp->inode); 8147 kfree(calldata); 8148 dprintk("<-- %s\n", __func__); 8149 } 8150 8151 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 8152 .rpc_call_prepare = nfs4_layoutreturn_prepare, 8153 .rpc_call_done = nfs4_layoutreturn_done, 8154 .rpc_release = nfs4_layoutreturn_release, 8155 }; 8156 8157 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn 
*lrp, bool sync) 8158 { 8159 struct rpc_task *task; 8160 struct rpc_message msg = { 8161 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 8162 .rpc_argp = &lrp->args, 8163 .rpc_resp = &lrp->res, 8164 .rpc_cred = lrp->cred, 8165 }; 8166 struct rpc_task_setup task_setup_data = { 8167 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 8168 .rpc_message = &msg, 8169 .callback_ops = &nfs4_layoutreturn_call_ops, 8170 .callback_data = lrp, 8171 }; 8172 int status = 0; 8173 8174 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 8175 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 8176 &task_setup_data.rpc_client, &msg); 8177 8178 dprintk("--> %s\n", __func__); 8179 if (!sync) { 8180 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 8181 if (!lrp->inode) { 8182 nfs4_layoutreturn_release(lrp); 8183 return -EAGAIN; 8184 } 8185 task_setup_data.flags |= RPC_TASK_ASYNC; 8186 } 8187 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); 8188 task = rpc_run_task(&task_setup_data); 8189 if (IS_ERR(task)) 8190 return PTR_ERR(task); 8191 if (sync) 8192 status = task->tk_status; 8193 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 8194 dprintk("<-- %s status=%d\n", __func__, status); 8195 rpc_put_task(task); 8196 return status; 8197 } 8198 8199 static int 8200 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 8201 struct pnfs_device *pdev, 8202 struct rpc_cred *cred) 8203 { 8204 struct nfs4_getdeviceinfo_args args = { 8205 .pdev = pdev, 8206 .notify_types = NOTIFY_DEVICEID4_CHANGE | 8207 NOTIFY_DEVICEID4_DELETE, 8208 }; 8209 struct nfs4_getdeviceinfo_res res = { 8210 .pdev = pdev, 8211 }; 8212 struct rpc_message msg = { 8213 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 8214 .rpc_argp = &args, 8215 .rpc_resp = &res, 8216 .rpc_cred = cred, 8217 }; 8218 int status; 8219 8220 dprintk("--> %s\n", __func__); 8221 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 8222 if (res.notification & ~args.notify_types) 8223 dprintk("%s: unsupported notification\n", __func__); 8224 if (res.notification != args.notify_types) 8225 pdev->nocache = 1; 8226 8227 dprintk("<-- %s status=%d\n", __func__, status); 8228 8229 return status; 8230 } 8231 8232 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 8233 struct pnfs_device *pdev, 8234 struct rpc_cred *cred) 8235 { 8236 struct nfs4_exception exception = { }; 8237 int err; 8238 8239 do { 8240 err = nfs4_handle_exception(server, 8241 _nfs4_proc_getdeviceinfo(server, pdev, cred), 8242 &exception); 8243 } while (exception.retry); 8244 return err; 8245 } 8246 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 8247 8248 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 8249 { 8250 struct nfs4_layoutcommit_data *data = calldata; 8251 struct nfs_server *server = NFS_SERVER(data->args.inode); 8252 struct nfs4_session *session = nfs4_get_session(server); 8253 8254 nfs41_setup_sequence(session, 8255 &data->args.seq_args, 8256 &data->res.seq_res, 8257 task); 8258 } 8259 8260 static void 8261 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 8262 { 8263 struct nfs4_layoutcommit_data *data = calldata; 8264 struct nfs_server *server = NFS_SERVER(data->args.inode); 8265 8266 if (!nfs41_sequence_done(task, &data->res.seq_res)) 8267 return; 8268 8269 switch (task->tk_status) { /* Just ignore these failures */ 8270 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 8271 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 8272 case -NFS4ERR_BADLAYOUT: /* no layout 
*/
8273 case -NFS4ERR_GRACE: /* loca_reclaim always false */
8274 task->tk_status = 0;
8275 case 0:
8276 break;
8277 default:
8278 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8279 rpc_restart_call_prepare(task);
8280 return;
8281 }
8282 }
8283 }
8284
8285 static void nfs4_layoutcommit_release(void *calldata)
8286 {
8287 struct nfs4_layoutcommit_data *data = calldata;
8288
8289 pnfs_cleanup_layoutcommit(data);
8290 nfs_post_op_update_inode_force_wcc(data->args.inode,
8291 data->res.fattr);
8292 put_rpccred(data->cred);
8293 nfs_iput_and_deactive(data->inode);
8294 kfree(data);
8295 }
8296
8297 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8298 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8299 .rpc_call_done = nfs4_layoutcommit_done,
8300 .rpc_release = nfs4_layoutcommit_release,
8301 };
8302
8303 int
8304 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8305 {
8306 struct rpc_message msg = {
8307 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8308 .rpc_argp = &data->args,
8309 .rpc_resp = &data->res,
8310 .rpc_cred = data->cred,
8311 };
8312 struct rpc_task_setup task_setup_data = {
8313 .task = &data->task,
8314 .rpc_client = NFS_CLIENT(data->args.inode),
8315 .rpc_message = &msg,
8316 .callback_ops = &nfs4_layoutcommit_ops,
8317 .callback_data = data,
8318 };
8319 struct rpc_task *task;
8320 int status = 0;
8321
8322 dprintk("NFS: initiating layoutcommit call. sync %d "
8323 "lbw: %llu inode %lu\n", sync,
8324 data->args.lastbytewritten,
8325 data->args.inode->i_ino);
8326
8327 if (!sync) {
8328 data->inode = nfs_igrab_and_active(data->args.inode);
8329 if (data->inode == NULL) {
8330 nfs4_layoutcommit_release(data);
8331 return -EAGAIN;
8332 }
8333 task_setup_data.flags = RPC_TASK_ASYNC;
8334 }
8335 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8336 task = rpc_run_task(&task_setup_data);
8337 if (IS_ERR(task))
8338 return PTR_ERR(task);
8339 if (sync)
8340 status = task->tk_status;
8341 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
8342 dprintk("%s: status %d\n", __func__, status);
8343 rpc_put_task(task);
8344 return status;
8345 }
8346
8347 /**
8348 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
8349 * possible) as per RFC3530bis and RFC5661 Security Considerations sections
8350 */
8351 static int
8352 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8353 struct nfs_fsinfo *info,
8354 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8355 {
8356 struct nfs41_secinfo_no_name_args args = {
8357 .style = SECINFO_STYLE_CURRENT_FH,
8358 };
8359 struct nfs4_secinfo_res res = {
8360 .flavors = flavors,
8361 };
8362 struct rpc_message msg = {
8363 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8364 .rpc_argp = &args,
8365 .rpc_resp = &res,
8366 };
8367 struct rpc_clnt *clnt = server->client;
8368 struct rpc_cred *cred = NULL;
8369 int status;
8370
8371 if (use_integrity) {
8372 clnt = server->nfs_client->cl_rpcclient;
8373 cred = nfs4_get_clid_cred(server->nfs_client);
8374 msg.rpc_cred = cred;
8375 }
8376
8377 dprintk("--> %s\n", __func__);
8378 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8379 &res.seq_res, 0);
8380 dprintk("<-- %s status=%d\n", __func__, status);
8381
8382 if (cred)
8383 put_rpccred(cred);
8384
8385 return status;
8386 }
8387
8388 static int
8389 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8390 struct nfs_fsinfo *info, struct
nfs4_secinfo_flavors *flavors) 8391 { 8392 struct nfs4_exception exception = { }; 8393 int err; 8394 do { 8395 /* first try using integrity protection */ 8396 err = -NFS4ERR_WRONGSEC; 8397 8398 /* try to use integrity protection with machine cred */ 8399 if (_nfs4_is_integrity_protected(server->nfs_client)) 8400 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8401 flavors, true); 8402 8403 /* 8404 * if unable to use integrity protection, or SECINFO with 8405 * integrity protection returns NFS4ERR_WRONGSEC (which is 8406 * disallowed by spec, but exists in deployed servers) use 8407 * the current filesystem's rpc_client and the user cred. 8408 */ 8409 if (err == -NFS4ERR_WRONGSEC) 8410 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8411 flavors, false); 8412 8413 switch (err) { 8414 case 0: 8415 case -NFS4ERR_WRONGSEC: 8416 case -ENOTSUPP: 8417 goto out; 8418 default: 8419 err = nfs4_handle_exception(server, err, &exception); 8420 } 8421 } while (exception.retry); 8422 out: 8423 return err; 8424 } 8425 8426 static int 8427 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 8428 struct nfs_fsinfo *info) 8429 { 8430 int err; 8431 struct page *page; 8432 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 8433 struct nfs4_secinfo_flavors *flavors; 8434 struct nfs4_secinfo4 *secinfo; 8435 int i; 8436 8437 page = alloc_page(GFP_KERNEL); 8438 if (!page) { 8439 err = -ENOMEM; 8440 goto out; 8441 } 8442 8443 flavors = page_address(page); 8444 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 8445 8446 /* 8447 * Fall back on "guess and check" method if 8448 * the server doesn't support SECINFO_NO_NAME 8449 */ 8450 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 8451 err = nfs4_find_root_sec(server, fhandle, info); 8452 goto out_freepage; 8453 } 8454 if (err) 8455 goto out_freepage; 8456 8457 for (i = 0; i < flavors->num_flavors; i++) { 8458 secinfo = &flavors->flavors[i]; 8459 8460 switch (secinfo->flavor) { 8461 case RPC_AUTH_NULL: 8462 case RPC_AUTH_UNIX: 8463 case RPC_AUTH_GSS: 8464 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 8465 &secinfo->flavor_info); 8466 break; 8467 default: 8468 flavor = RPC_AUTH_MAXFLAVOR; 8469 break; 8470 } 8471 8472 if (!nfs_auth_info_match(&server->auth_info, flavor)) 8473 flavor = RPC_AUTH_MAXFLAVOR; 8474 8475 if (flavor != RPC_AUTH_MAXFLAVOR) { 8476 err = nfs4_lookup_root_sec(server, fhandle, 8477 info, flavor); 8478 if (!err) 8479 break; 8480 } 8481 } 8482 8483 if (flavor == RPC_AUTH_MAXFLAVOR) 8484 err = -EPERM; 8485 8486 out_freepage: 8487 put_page(page); 8488 if (err == -EACCES) 8489 return -EPERM; 8490 out: 8491 return err; 8492 } 8493 8494 static int _nfs41_test_stateid(struct nfs_server *server, 8495 nfs4_stateid *stateid, 8496 struct rpc_cred *cred) 8497 { 8498 int status; 8499 struct nfs41_test_stateid_args args = { 8500 .stateid = stateid, 8501 }; 8502 struct nfs41_test_stateid_res res; 8503 struct rpc_message msg = { 8504 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 8505 .rpc_argp = &args, 8506 .rpc_resp = &res, 8507 .rpc_cred = cred, 8508 }; 8509 struct rpc_clnt *rpc_client = server->client; 8510 8511 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8512 &rpc_client, &msg); 8513 8514 dprintk("NFS call test_stateid %p\n", stateid); 8515 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 8516 nfs4_set_sequence_privileged(&args.seq_args); 8517 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 8518 &args.seq_args, &res.seq_res); 8519 if (status != NFS_OK) { 
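		/*
		 * An RPC-level failure: hand the error straight back to the
		 * caller.  A successful reply instead carries the per-stateid
		 * result in res.status, which is returned (negated) below.
		 */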
8520 dprintk("NFS reply test_stateid: failed, %d\n", status); 8521 return status; 8522 } 8523 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 8524 return -res.status; 8525 } 8526 8527 /** 8528 * nfs41_test_stateid - perform a TEST_STATEID operation 8529 * 8530 * @server: server / transport on which to perform the operation 8531 * @stateid: state ID to test 8532 * @cred: credential 8533 * 8534 * Returns NFS_OK if the server recognizes that "stateid" is valid. 8535 * Otherwise a negative NFS4ERR value is returned if the operation 8536 * failed or the state ID is not currently valid. 8537 */ 8538 static int nfs41_test_stateid(struct nfs_server *server, 8539 nfs4_stateid *stateid, 8540 struct rpc_cred *cred) 8541 { 8542 struct nfs4_exception exception = { }; 8543 int err; 8544 do { 8545 err = _nfs41_test_stateid(server, stateid, cred); 8546 if (err != -NFS4ERR_DELAY) 8547 break; 8548 nfs4_handle_exception(server, err, &exception); 8549 } while (exception.retry); 8550 return err; 8551 } 8552 8553 struct nfs_free_stateid_data { 8554 struct nfs_server *server; 8555 struct nfs41_free_stateid_args args; 8556 struct nfs41_free_stateid_res res; 8557 }; 8558 8559 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 8560 { 8561 struct nfs_free_stateid_data *data = calldata; 8562 nfs41_setup_sequence(nfs4_get_session(data->server), 8563 &data->args.seq_args, 8564 &data->res.seq_res, 8565 task); 8566 } 8567 8568 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 8569 { 8570 struct nfs_free_stateid_data *data = calldata; 8571 8572 nfs41_sequence_done(task, &data->res.seq_res); 8573 8574 switch (task->tk_status) { 8575 case -NFS4ERR_DELAY: 8576 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 8577 rpc_restart_call_prepare(task); 8578 } 8579 } 8580 8581 static void nfs41_free_stateid_release(void *calldata) 8582 { 8583 kfree(calldata); 8584 } 8585 8586 static const struct rpc_call_ops nfs41_free_stateid_ops = { 8587 .rpc_call_prepare = nfs41_free_stateid_prepare, 8588 .rpc_call_done = nfs41_free_stateid_done, 8589 .rpc_release = nfs41_free_stateid_release, 8590 }; 8591 8592 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server, 8593 nfs4_stateid *stateid, 8594 struct rpc_cred *cred, 8595 bool privileged) 8596 { 8597 struct rpc_message msg = { 8598 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 8599 .rpc_cred = cred, 8600 }; 8601 struct rpc_task_setup task_setup = { 8602 .rpc_client = server->client, 8603 .rpc_message = &msg, 8604 .callback_ops = &nfs41_free_stateid_ops, 8605 .flags = RPC_TASK_ASYNC, 8606 }; 8607 struct nfs_free_stateid_data *data; 8608 8609 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8610 &task_setup.rpc_client, &msg); 8611 8612 dprintk("NFS call free_stateid %p\n", stateid); 8613 data = kmalloc(sizeof(*data), GFP_NOFS); 8614 if (!data) 8615 return ERR_PTR(-ENOMEM); 8616 data->server = server; 8617 nfs4_stateid_copy(&data->args.stateid, stateid); 8618 8619 task_setup.callback_data = data; 8620 8621 msg.rpc_argp = &data->args; 8622 msg.rpc_resp = &data->res; 8623 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 8624 if (privileged) 8625 nfs4_set_sequence_privileged(&data->args.seq_args); 8626 8627 return rpc_run_task(&task_setup); 8628 } 8629 8630 /** 8631 * nfs41_free_stateid - perform a FREE_STATEID operation 8632 * 8633 * @server: server / transport on which to perform the operation 8634 * @stateid: state ID to release 8635 * @cred: 
credential 8636 * 8637 * Returns NFS_OK if the server freed "stateid". Otherwise a 8638 * negative NFS4ERR value is returned. 8639 */ 8640 static int nfs41_free_stateid(struct nfs_server *server, 8641 nfs4_stateid *stateid, 8642 struct rpc_cred *cred) 8643 { 8644 struct rpc_task *task; 8645 int ret; 8646 8647 task = _nfs41_free_stateid(server, stateid, cred, true); 8648 if (IS_ERR(task)) 8649 return PTR_ERR(task); 8650 ret = rpc_wait_for_completion_task(task); 8651 if (!ret) 8652 ret = task->tk_status; 8653 rpc_put_task(task); 8654 return ret; 8655 } 8656 8657 static void 8658 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 8659 { 8660 struct rpc_task *task; 8661 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 8662 8663 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 8664 nfs4_free_lock_state(server, lsp); 8665 if (IS_ERR(task)) 8666 return; 8667 rpc_put_task(task); 8668 } 8669 8670 static bool nfs41_match_stateid(const nfs4_stateid *s1, 8671 const nfs4_stateid *s2) 8672 { 8673 if (s1->type != s2->type) 8674 return false; 8675 8676 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 8677 return false; 8678 8679 if (s1->seqid == s2->seqid) 8680 return true; 8681 if (s1->seqid == 0 || s2->seqid == 0) 8682 return true; 8683 8684 return false; 8685 } 8686 8687 #endif /* CONFIG_NFS_V4_1 */ 8688 8689 static bool nfs4_match_stateid(const nfs4_stateid *s1, 8690 const nfs4_stateid *s2) 8691 { 8692 return nfs4_stateid_match(s1, s2); 8693 } 8694 8695 8696 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 8697 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8698 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8699 .recover_open = nfs4_open_reclaim, 8700 .recover_lock = nfs4_lock_reclaim, 8701 .establish_clid = nfs4_init_clientid, 8702 .detect_trunking = nfs40_discover_server_trunking, 8703 }; 8704 8705 #if defined(CONFIG_NFS_V4_1) 8706 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 8707 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8708 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8709 .recover_open = nfs4_open_reclaim, 8710 .recover_lock = nfs4_lock_reclaim, 8711 .establish_clid = nfs41_init_clientid, 8712 .reclaim_complete = nfs41_proc_reclaim_complete, 8713 .detect_trunking = nfs41_discover_server_trunking, 8714 }; 8715 #endif /* CONFIG_NFS_V4_1 */ 8716 8717 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 8718 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8719 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8720 .recover_open = nfs40_open_expired, 8721 .recover_lock = nfs4_lock_expired, 8722 .establish_clid = nfs4_init_clientid, 8723 }; 8724 8725 #if defined(CONFIG_NFS_V4_1) 8726 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 8727 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8728 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8729 .recover_open = nfs41_open_expired, 8730 .recover_lock = nfs41_lock_expired, 8731 .establish_clid = nfs41_init_clientid, 8732 }; 8733 #endif /* CONFIG_NFS_V4_1 */ 8734 8735 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 8736 .sched_state_renewal = nfs4_proc_async_renew, 8737 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, 8738 .renew_lease = nfs4_proc_renew, 8739 }; 8740 8741 #if defined(CONFIG_NFS_V4_1) 8742 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 8743 .sched_state_renewal = nfs41_proc_async_sequence, 8744 .get_state_renewal_cred_locked = 
nfs4_get_machine_cred_locked, 8745 .renew_lease = nfs4_proc_sequence, 8746 }; 8747 #endif 8748 8749 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 8750 .get_locations = _nfs40_proc_get_locations, 8751 .fsid_present = _nfs40_proc_fsid_present, 8752 }; 8753 8754 #if defined(CONFIG_NFS_V4_1) 8755 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 8756 .get_locations = _nfs41_proc_get_locations, 8757 .fsid_present = _nfs41_proc_fsid_present, 8758 }; 8759 #endif /* CONFIG_NFS_V4_1 */ 8760 8761 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 8762 .minor_version = 0, 8763 .init_caps = NFS_CAP_READDIRPLUS 8764 | NFS_CAP_ATOMIC_OPEN 8765 | NFS_CAP_POSIX_LOCK, 8766 .init_client = nfs40_init_client, 8767 .shutdown_client = nfs40_shutdown_client, 8768 .match_stateid = nfs4_match_stateid, 8769 .find_root_sec = nfs4_find_root_sec, 8770 .free_lock_state = nfs4_release_lockowner, 8771 .alloc_seqid = nfs_alloc_seqid, 8772 .call_sync_ops = &nfs40_call_sync_ops, 8773 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 8774 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 8775 .state_renewal_ops = &nfs40_state_renewal_ops, 8776 .mig_recovery_ops = &nfs40_mig_recovery_ops, 8777 }; 8778 8779 #if defined(CONFIG_NFS_V4_1) 8780 static struct nfs_seqid * 8781 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 8782 { 8783 return NULL; 8784 } 8785 8786 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 8787 .minor_version = 1, 8788 .init_caps = NFS_CAP_READDIRPLUS 8789 | NFS_CAP_ATOMIC_OPEN 8790 | NFS_CAP_POSIX_LOCK 8791 | NFS_CAP_STATEID_NFSV41 8792 | NFS_CAP_ATOMIC_OPEN_V1, 8793 .init_client = nfs41_init_client, 8794 .shutdown_client = nfs41_shutdown_client, 8795 .match_stateid = nfs41_match_stateid, 8796 .find_root_sec = nfs41_find_root_sec, 8797 .free_lock_state = nfs41_free_lock_state, 8798 .alloc_seqid = nfs_alloc_no_seqid, 8799 .call_sync_ops = &nfs41_call_sync_ops, 8800 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8801 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8802 .state_renewal_ops = &nfs41_state_renewal_ops, 8803 .mig_recovery_ops = &nfs41_mig_recovery_ops, 8804 }; 8805 #endif 8806 8807 #if defined(CONFIG_NFS_V4_2) 8808 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 8809 .minor_version = 2, 8810 .init_caps = NFS_CAP_READDIRPLUS 8811 | NFS_CAP_ATOMIC_OPEN 8812 | NFS_CAP_POSIX_LOCK 8813 | NFS_CAP_STATEID_NFSV41 8814 | NFS_CAP_ATOMIC_OPEN_V1 8815 | NFS_CAP_ALLOCATE 8816 | NFS_CAP_COPY 8817 | NFS_CAP_DEALLOCATE 8818 | NFS_CAP_SEEK 8819 | NFS_CAP_LAYOUTSTATS 8820 | NFS_CAP_CLONE, 8821 .init_client = nfs41_init_client, 8822 .shutdown_client = nfs41_shutdown_client, 8823 .match_stateid = nfs41_match_stateid, 8824 .find_root_sec = nfs41_find_root_sec, 8825 .free_lock_state = nfs41_free_lock_state, 8826 .call_sync_ops = &nfs41_call_sync_ops, 8827 .alloc_seqid = nfs_alloc_no_seqid, 8828 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8829 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8830 .state_renewal_ops = &nfs41_state_renewal_ops, 8831 .mig_recovery_ops = &nfs41_mig_recovery_ops, 8832 }; 8833 #endif 8834 8835 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 8836 [0] = &nfs_v4_0_minor_ops, 8837 #if defined(CONFIG_NFS_V4_1) 8838 [1] = &nfs_v4_1_minor_ops, 8839 #endif 8840 #if defined(CONFIG_NFS_V4_2) 8841 [2] = &nfs_v4_2_minor_ops, 8842 #endif 8843 }; 8844 8845 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 8846 { 8847 ssize_t error, error2; 
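	/*
	 * List the names known to the registered xattr handlers first, then
	 * append those reported by nfs4_listxattr_nfs4_label(), returning
	 * the combined length.
	 */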
8848 8849 error = generic_listxattr(dentry, list, size); 8850 if (error < 0) 8851 return error; 8852 if (list) { 8853 list += error; 8854 size -= error; 8855 } 8856 8857 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size); 8858 if (error2 < 0) 8859 return error2; 8860 return error + error2; 8861 } 8862 8863 static const struct inode_operations nfs4_dir_inode_operations = { 8864 .create = nfs_create, 8865 .lookup = nfs_lookup, 8866 .atomic_open = nfs_atomic_open, 8867 .link = nfs_link, 8868 .unlink = nfs_unlink, 8869 .symlink = nfs_symlink, 8870 .mkdir = nfs_mkdir, 8871 .rmdir = nfs_rmdir, 8872 .mknod = nfs_mknod, 8873 .rename = nfs_rename, 8874 .permission = nfs_permission, 8875 .getattr = nfs_getattr, 8876 .setattr = nfs_setattr, 8877 .getxattr = generic_getxattr, 8878 .setxattr = generic_setxattr, 8879 .listxattr = nfs4_listxattr, 8880 .removexattr = generic_removexattr, 8881 }; 8882 8883 static const struct inode_operations nfs4_file_inode_operations = { 8884 .permission = nfs_permission, 8885 .getattr = nfs_getattr, 8886 .setattr = nfs_setattr, 8887 .getxattr = generic_getxattr, 8888 .setxattr = generic_setxattr, 8889 .listxattr = nfs4_listxattr, 8890 .removexattr = generic_removexattr, 8891 }; 8892 8893 const struct nfs_rpc_ops nfs_v4_clientops = { 8894 .version = 4, /* protocol version */ 8895 .dentry_ops = &nfs4_dentry_operations, 8896 .dir_inode_ops = &nfs4_dir_inode_operations, 8897 .file_inode_ops = &nfs4_file_inode_operations, 8898 .file_ops = &nfs4_file_operations, 8899 .getroot = nfs4_proc_get_root, 8900 .submount = nfs4_submount, 8901 .try_mount = nfs4_try_mount, 8902 .getattr = nfs4_proc_getattr, 8903 .setattr = nfs4_proc_setattr, 8904 .lookup = nfs4_proc_lookup, 8905 .access = nfs4_proc_access, 8906 .readlink = nfs4_proc_readlink, 8907 .create = nfs4_proc_create, 8908 .remove = nfs4_proc_remove, 8909 .unlink_setup = nfs4_proc_unlink_setup, 8910 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 8911 .unlink_done = nfs4_proc_unlink_done, 8912 .rename_setup = nfs4_proc_rename_setup, 8913 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 8914 .rename_done = nfs4_proc_rename_done, 8915 .link = nfs4_proc_link, 8916 .symlink = nfs4_proc_symlink, 8917 .mkdir = nfs4_proc_mkdir, 8918 .rmdir = nfs4_proc_remove, 8919 .readdir = nfs4_proc_readdir, 8920 .mknod = nfs4_proc_mknod, 8921 .statfs = nfs4_proc_statfs, 8922 .fsinfo = nfs4_proc_fsinfo, 8923 .pathconf = nfs4_proc_pathconf, 8924 .set_capabilities = nfs4_server_capabilities, 8925 .decode_dirent = nfs4_decode_dirent, 8926 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 8927 .read_setup = nfs4_proc_read_setup, 8928 .read_done = nfs4_read_done, 8929 .write_setup = nfs4_proc_write_setup, 8930 .write_done = nfs4_write_done, 8931 .commit_setup = nfs4_proc_commit_setup, 8932 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 8933 .commit_done = nfs4_commit_done, 8934 .lock = nfs4_proc_lock, 8935 .clear_acl_cache = nfs4_zap_acl_attr, 8936 .close_context = nfs4_close_context, 8937 .open_context = nfs4_atomic_open, 8938 .have_delegation = nfs4_have_delegation, 8939 .return_delegation = nfs4_inode_return_delegation, 8940 .alloc_client = nfs4_alloc_client, 8941 .init_client = nfs4_init_client, 8942 .free_client = nfs4_free_client, 8943 .create_server = nfs4_create_server, 8944 .clone_server = nfs_clone_server, 8945 }; 8946 8947 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 8948 .name = XATTR_NAME_NFSV4_ACL, 8949 .list = nfs4_xattr_list_nfs4_acl, 8950 .get = nfs4_xattr_get_nfs4_acl, 8951 .set = 
nfs4_xattr_set_nfs4_acl, 8952 }; 8953 8954 const struct xattr_handler *nfs4_xattr_handlers[] = { 8955 &nfs4_xattr_nfs4_acl_handler, 8956 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 8957 &nfs4_xattr_nfs4_label_handler, 8958 #endif 8959 NULL 8960 }; 8961 8962 /* 8963 * Local variables: 8964 * c-basic-offset: 8 8965 * End: 8966 */ 8967