1 /* 2 * fs/nfs/nfs4proc.c 3 * 4 * Client-side procedure declarations for NFSv4. 5 * 6 * Copyright (c) 2002 The Regents of the University of Michigan. 7 * All rights reserved. 8 * 9 * Kendrick Smith <kmsmith@umich.edu> 10 * Andy Adamson <andros@umich.edu> 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 #include <linux/mm.h> 39 #include <linux/delay.h> 40 #include <linux/errno.h> 41 #include <linux/string.h> 42 #include <linux/ratelimit.h> 43 #include <linux/printk.h> 44 #include <linux/slab.h> 45 #include <linux/sunrpc/clnt.h> 46 #include <linux/nfs.h> 47 #include <linux/nfs4.h> 48 #include <linux/nfs_fs.h> 49 #include <linux/nfs_page.h> 50 #include <linux/nfs_mount.h> 51 #include <linux/namei.h> 52 #include <linux/mount.h> 53 #include <linux/module.h> 54 #include <linux/xattr.h> 55 #include <linux/utsname.h> 56 #include <linux/freezer.h> 57 #include <linux/iversion.h> 58 59 #include "nfs4_fs.h" 60 #include "delegation.h" 61 #include "internal.h" 62 #include "iostat.h" 63 #include "callback.h" 64 #include "pnfs.h" 65 #include "netns.h" 66 #include "sysfs.h" 67 #include "nfs4idmap.h" 68 #include "nfs4session.h" 69 #include "fscache.h" 70 #include "nfs42.h" 71 72 #include "nfs4trace.h" 73 74 #define NFSDBG_FACILITY NFSDBG_PROC 75 76 #define NFS4_BITMASK_SZ 3 77 78 #define NFS4_POLL_RETRY_MIN (HZ/10) 79 #define NFS4_POLL_RETRY_MAX (15*HZ) 80 81 /* file attributes which can be mapped to nfs attributes */ 82 #define NFS4_VALID_ATTRS (ATTR_MODE \ 83 | ATTR_UID \ 84 | ATTR_GID \ 85 | ATTR_SIZE \ 86 | ATTR_ATIME \ 87 | ATTR_MTIME \ 88 | ATTR_CTIME \ 89 | ATTR_ATIME_SET \ 90 | ATTR_MTIME_SET) 91 92 struct nfs4_opendata; 93 static int _nfs4_recover_proc_open(struct nfs4_opendata *data); 94 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); 95 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); 96 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label, struct inode *inode); 97 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 98 struct nfs_fattr *fattr, struct iattr *sattr, 99 struct nfs_open_context *ctx, struct nfs4_label *ilabel, 100 struct nfs4_label *olabel); 101 #ifdef CONFIG_NFS_V4_1 102 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 103 const struct cred *cred, 104 struct nfs4_slot *slot, 105 bool is_privileged); 106 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *, 107 const struct cred *); 108 static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *, 109 const struct cred *, bool); 110 #endif 111 static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], 112 const __u32 *src, struct inode *inode, 113 struct nfs_server *server, 114 struct nfs4_label *label); 115 116 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 117 static inline struct nfs4_label * 118 nfs4_label_init_security(struct inode *dir, struct dentry *dentry, 119 struct iattr *sattr, struct nfs4_label *label) 120 { 121 int err; 122 123 if (label == NULL) 124 return NULL; 125 126 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0) 127 return NULL; 128 129 err = security_dentry_init_security(dentry, sattr->ia_mode, 130 &dentry->d_name, (void **)&label->label, &label->len); 131 if (err == 0) 132 return label; 133 134 return NULL; 135 } 136 static inline void 137 nfs4_label_release_security(struct nfs4_label *label) 138 { 139 if (label) 140 security_release_secctx(label->label, label->len); 141 } 142 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label) 143 { 144 if (label) 145 return server->attr_bitmask; 146 147 return server->attr_bitmask_nl; 148 } 149 #else 150 static inline struct nfs4_label * 151 nfs4_label_init_security(struct inode *dir, struct dentry *dentry, 152 struct iattr *sattr, struct nfs4_label *l) 153 { return NULL; } 154 static inline void 155 nfs4_label_release_security(struct nfs4_label *label) 156 { return; } 157 static inline u32 * 158 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label) 159 { return server->attr_bitmask; } 160 #endif 161 162 /* Prevent leaks of NFSv4 errors into userland */ 163 static int nfs4_map_errors(int err) 164 { 165 if (err >= -1000) 166 return err; 167 switch (err) { 168 case -NFS4ERR_RESOURCE: 169 case -NFS4ERR_LAYOUTTRYLATER: 170 case -NFS4ERR_RECALLCONFLICT: 171 return -EREMOTEIO; 172 case -NFS4ERR_WRONGSEC: 173 case -NFS4ERR_WRONG_CRED: 174 return -EPERM; 175 case -NFS4ERR_BADOWNER: 176 case -NFS4ERR_BADNAME: 177 return -EINVAL; 178 case -NFS4ERR_SHARE_DENIED: 179 return -EACCES; 180 case -NFS4ERR_MINOR_VERS_MISMATCH: 181 return -EPROTONOSUPPORT; 182 case -NFS4ERR_FILE_OPEN: 183 return -EBUSY; 184 case -NFS4ERR_NOT_SAME: 185 return -ENOTSYNC; 186 default: 187 dprintk("%s could not handle NFSv4 error %d\n", 188 __func__, -err); 189 break; 190 } 191 return -EIO; 192 } 193 194 /* 195 * This is our standard bitmap for GETATTR requests. 196 */ 197 const u32 nfs4_fattr_bitmap[3] = { 198 FATTR4_WORD0_TYPE 199 | FATTR4_WORD0_CHANGE 200 | FATTR4_WORD0_SIZE 201 | FATTR4_WORD0_FSID 202 | FATTR4_WORD0_FILEID, 203 FATTR4_WORD1_MODE 204 | FATTR4_WORD1_NUMLINKS 205 | FATTR4_WORD1_OWNER 206 | FATTR4_WORD1_OWNER_GROUP 207 | FATTR4_WORD1_RAWDEV 208 | FATTR4_WORD1_SPACE_USED 209 | FATTR4_WORD1_TIME_ACCESS 210 | FATTR4_WORD1_TIME_METADATA 211 | FATTR4_WORD1_TIME_MODIFY 212 | FATTR4_WORD1_MOUNTED_ON_FILEID, 213 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 214 FATTR4_WORD2_SECURITY_LABEL 215 #endif 216 }; 217 218 static const u32 nfs4_pnfs_open_bitmap[3] = { 219 FATTR4_WORD0_TYPE 220 | FATTR4_WORD0_CHANGE 221 | FATTR4_WORD0_SIZE 222 | FATTR4_WORD0_FSID 223 | FATTR4_WORD0_FILEID, 224 FATTR4_WORD1_MODE 225 | FATTR4_WORD1_NUMLINKS 226 | FATTR4_WORD1_OWNER 227 | FATTR4_WORD1_OWNER_GROUP 228 | FATTR4_WORD1_RAWDEV 229 | FATTR4_WORD1_SPACE_USED 230 | FATTR4_WORD1_TIME_ACCESS 231 | FATTR4_WORD1_TIME_METADATA 232 | FATTR4_WORD1_TIME_MODIFY, 233 FATTR4_WORD2_MDSTHRESHOLD 234 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 235 | FATTR4_WORD2_SECURITY_LABEL 236 #endif 237 }; 238 239 static const u32 nfs4_open_noattr_bitmap[3] = { 240 FATTR4_WORD0_TYPE 241 | FATTR4_WORD0_FILEID, 242 }; 243 244 const u32 nfs4_statfs_bitmap[3] = { 245 FATTR4_WORD0_FILES_AVAIL 246 | FATTR4_WORD0_FILES_FREE 247 | FATTR4_WORD0_FILES_TOTAL, 248 FATTR4_WORD1_SPACE_AVAIL 249 | FATTR4_WORD1_SPACE_FREE 250 | FATTR4_WORD1_SPACE_TOTAL 251 }; 252 253 const u32 nfs4_pathconf_bitmap[3] = { 254 FATTR4_WORD0_MAXLINK 255 | FATTR4_WORD0_MAXNAME, 256 0 257 }; 258 259 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE 260 | FATTR4_WORD0_MAXREAD 261 | FATTR4_WORD0_MAXWRITE 262 | FATTR4_WORD0_LEASE_TIME, 263 FATTR4_WORD1_TIME_DELTA 264 | FATTR4_WORD1_FS_LAYOUT_TYPES, 265 FATTR4_WORD2_LAYOUT_BLKSIZE 266 | FATTR4_WORD2_CLONE_BLKSIZE 267 | FATTR4_WORD2_CHANGE_ATTR_TYPE 268 | FATTR4_WORD2_XATTR_SUPPORT 269 }; 270 271 const u32 nfs4_fs_locations_bitmap[3] = { 272 FATTR4_WORD0_CHANGE 273 | FATTR4_WORD0_SIZE 274 | FATTR4_WORD0_FSID 275 | FATTR4_WORD0_FILEID 276 | FATTR4_WORD0_FS_LOCATIONS, 277 FATTR4_WORD1_OWNER 278 | FATTR4_WORD1_OWNER_GROUP 279 | FATTR4_WORD1_RAWDEV 280 | FATTR4_WORD1_SPACE_USED 281 | FATTR4_WORD1_TIME_ACCESS 282 | FATTR4_WORD1_TIME_METADATA 283 | FATTR4_WORD1_TIME_MODIFY 284 | FATTR4_WORD1_MOUNTED_ON_FILEID, 285 }; 286 287 static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src, 288 struct inode *inode, unsigned long flags) 289 { 290 unsigned long cache_validity; 291 292 memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst)); 293 if (!inode || !nfs4_have_delegation(inode, FMODE_READ)) 294 return; 295 296 cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags; 297 298 /* Remove the attributes over which we have full control */ 299 dst[1] &= ~FATTR4_WORD1_RAWDEV; 300 if (!(cache_validity & NFS_INO_INVALID_SIZE)) 301 dst[0] &= ~FATTR4_WORD0_SIZE; 302 303 if (!(cache_validity & NFS_INO_INVALID_CHANGE)) 304 dst[0] &= ~FATTR4_WORD0_CHANGE; 305 306 if (!(cache_validity & NFS_INO_INVALID_MODE)) 307 dst[1] &= ~FATTR4_WORD1_MODE; 308 if (!(cache_validity & NFS_INO_INVALID_OTHER)) 309 dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP); 310 } 311 312 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry, 313 struct nfs4_readdir_arg *readdir) 314 { 315 unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE; 316 __be32 *start, *p; 317 318 if (cookie > 2) { 319 readdir->cookie = cookie; 320 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier)); 321 return; 322 } 323 324 readdir->cookie = 0; 325 memset(&readdir->verifier, 0, sizeof(readdir->verifier)); 326 if (cookie == 2) 327 return; 328 329 /* 330 * NFSv4 servers do not return entries for '.' and '..' 331 * Therefore, we fake these entries here. We let '.' 332 * have cookie 0 and '..' have cookie 1. Note that 333 * when talking to the server, we always send cookie 0 334 * instead of 1 or 2. 335 */ 336 start = p = kmap_atomic(*readdir->pages); 337 338 if (cookie == 0) { 339 *p++ = xdr_one; /* next */ 340 *p++ = xdr_zero; /* cookie, first word */ 341 *p++ = xdr_one; /* cookie, second word */ 342 *p++ = xdr_one; /* entry len */ 343 memcpy(p, ".\0\0\0", 4); /* entry */ 344 p++; 345 *p++ = xdr_one; /* bitmap length */ 346 *p++ = htonl(attrs); /* bitmap */ 347 *p++ = htonl(12); /* attribute buffer length */ 348 *p++ = htonl(NF4DIR); 349 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry))); 350 } 351 352 *p++ = xdr_one; /* next */ 353 *p++ = xdr_zero; /* cookie, first word */ 354 *p++ = xdr_two; /* cookie, second word */ 355 *p++ = xdr_two; /* entry len */ 356 memcpy(p, "..\0\0", 4); /* entry */ 357 p++; 358 *p++ = xdr_one; /* bitmap length */ 359 *p++ = htonl(attrs); /* bitmap */ 360 *p++ = htonl(12); /* attribute buffer length */ 361 *p++ = htonl(NF4DIR); 362 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent))); 363 364 readdir->pgbase = (char *)p - (char *)start; 365 readdir->count -= readdir->pgbase; 366 kunmap_atomic(start); 367 } 368 369 static void nfs4_test_and_free_stateid(struct nfs_server *server, 370 nfs4_stateid *stateid, 371 const struct cred *cred) 372 { 373 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops; 374 375 ops->test_and_free_expired(server, stateid, cred); 376 } 377 378 static void __nfs4_free_revoked_stateid(struct nfs_server *server, 379 nfs4_stateid *stateid, 380 const struct cred *cred) 381 { 382 stateid->type = NFS4_REVOKED_STATEID_TYPE; 383 nfs4_test_and_free_stateid(server, stateid, cred); 384 } 385 386 static void nfs4_free_revoked_stateid(struct nfs_server *server, 387 const nfs4_stateid *stateid, 388 const struct cred *cred) 389 { 390 nfs4_stateid tmp; 391 392 nfs4_stateid_copy(&tmp, stateid); 393 __nfs4_free_revoked_stateid(server, &tmp, cred); 394 } 395 396 static long nfs4_update_delay(long *timeout) 397 { 398 long ret; 399 if (!timeout) 400 return NFS4_POLL_RETRY_MAX; 401 if (*timeout <= 0) 402 *timeout = NFS4_POLL_RETRY_MIN; 403 if (*timeout > NFS4_POLL_RETRY_MAX) 404 *timeout = NFS4_POLL_RETRY_MAX; 405 ret = *timeout; 406 *timeout <<= 1; 407 return ret; 408 } 409 410 static int nfs4_delay_killable(long *timeout) 411 { 412 might_sleep(); 413 414 freezable_schedule_timeout_killable_unsafe( 415 nfs4_update_delay(timeout)); 416 if (!__fatal_signal_pending(current)) 417 return 0; 418 return -EINTR; 419 } 420 421 static int nfs4_delay_interruptible(long *timeout) 422 { 423 might_sleep(); 424 425 freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout)); 426 if (!signal_pending(current)) 427 return 0; 428 return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS; 429 } 430 431 static int nfs4_delay(long *timeout, bool interruptible) 432 { 433 if (interruptible) 434 return nfs4_delay_interruptible(timeout); 435 return nfs4_delay_killable(timeout); 436 } 437 438 static const nfs4_stateid * 439 nfs4_recoverable_stateid(const nfs4_stateid *stateid) 440 { 441 if (!stateid) 442 return NULL; 443 switch (stateid->type) { 444 case NFS4_OPEN_STATEID_TYPE: 445 case NFS4_LOCK_STATEID_TYPE: 446 case NFS4_DELEGATION_STATEID_TYPE: 447 return stateid; 448 default: 449 break; 450 } 451 return NULL; 452 } 453 454 /* This is the error handling routine for processes that are allowed 455 * to sleep. 456 */ 457 static int nfs4_do_handle_exception(struct nfs_server *server, 458 int errorcode, struct nfs4_exception *exception) 459 { 460 struct nfs_client *clp = server->nfs_client; 461 struct nfs4_state *state = exception->state; 462 const nfs4_stateid *stateid; 463 struct inode *inode = exception->inode; 464 int ret = errorcode; 465 466 exception->delay = 0; 467 exception->recovering = 0; 468 exception->retry = 0; 469 470 stateid = nfs4_recoverable_stateid(exception->stateid); 471 if (stateid == NULL && state != NULL) 472 stateid = nfs4_recoverable_stateid(&state->stateid); 473 474 switch(errorcode) { 475 case 0: 476 return 0; 477 case -NFS4ERR_BADHANDLE: 478 case -ESTALE: 479 if (inode != NULL && S_ISREG(inode->i_mode)) 480 pnfs_destroy_layout(NFS_I(inode)); 481 break; 482 case -NFS4ERR_DELEG_REVOKED: 483 case -NFS4ERR_ADMIN_REVOKED: 484 case -NFS4ERR_EXPIRED: 485 case -NFS4ERR_BAD_STATEID: 486 case -NFS4ERR_PARTNER_NO_AUTH: 487 if (inode != NULL && stateid != NULL) { 488 nfs_inode_find_state_and_recover(inode, 489 stateid); 490 goto wait_on_recovery; 491 } 492 fallthrough; 493 case -NFS4ERR_OPENMODE: 494 if (inode) { 495 int err; 496 497 err = nfs_async_inode_return_delegation(inode, 498 stateid); 499 if (err == 0) 500 goto wait_on_recovery; 501 if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) { 502 exception->retry = 1; 503 break; 504 } 505 } 506 if (state == NULL) 507 break; 508 ret = nfs4_schedule_stateid_recovery(server, state); 509 if (ret < 0) 510 break; 511 goto wait_on_recovery; 512 case -NFS4ERR_STALE_STATEID: 513 case -NFS4ERR_STALE_CLIENTID: 514 nfs4_schedule_lease_recovery(clp); 515 goto wait_on_recovery; 516 case -NFS4ERR_MOVED: 517 ret = nfs4_schedule_migration_recovery(server); 518 if (ret < 0) 519 break; 520 goto wait_on_recovery; 521 case -NFS4ERR_LEASE_MOVED: 522 nfs4_schedule_lease_moved_recovery(clp); 523 goto wait_on_recovery; 524 #if defined(CONFIG_NFS_V4_1) 525 case -NFS4ERR_BADSESSION: 526 case -NFS4ERR_BADSLOT: 527 case -NFS4ERR_BAD_HIGH_SLOT: 528 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 529 case -NFS4ERR_DEADSESSION: 530 case -NFS4ERR_SEQ_FALSE_RETRY: 531 case -NFS4ERR_SEQ_MISORDERED: 532 /* Handled in nfs41_sequence_process() */ 533 goto wait_on_recovery; 534 #endif /* defined(CONFIG_NFS_V4_1) */ 535 case -NFS4ERR_FILE_OPEN: 536 if (exception->timeout > HZ) { 537 /* We have retried a decent amount, time to 538 * fail 539 */ 540 ret = -EBUSY; 541 break; 542 } 543 fallthrough; 544 case -NFS4ERR_DELAY: 545 nfs_inc_server_stats(server, NFSIOS_DELAY); 546 fallthrough; 547 case -NFS4ERR_GRACE: 548 case -NFS4ERR_LAYOUTTRYLATER: 549 case -NFS4ERR_RECALLCONFLICT: 550 exception->delay = 1; 551 return 0; 552 553 case -NFS4ERR_RETRY_UNCACHED_REP: 554 case -NFS4ERR_OLD_STATEID: 555 exception->retry = 1; 556 break; 557 case -NFS4ERR_BADOWNER: 558 /* The following works around a Linux server bug! */ 559 case -NFS4ERR_BADNAME: 560 if (server->caps & NFS_CAP_UIDGID_NOMAP) { 561 server->caps &= ~NFS_CAP_UIDGID_NOMAP; 562 exception->retry = 1; 563 printk(KERN_WARNING "NFS: v4 server %s " 564 "does not accept raw " 565 "uid/gids. " 566 "Reenabling the idmapper.\n", 567 server->nfs_client->cl_hostname); 568 } 569 } 570 /* We failed to handle the error */ 571 return nfs4_map_errors(ret); 572 wait_on_recovery: 573 exception->recovering = 1; 574 return 0; 575 } 576 577 /* This is the error handling routine for processes that are allowed 578 * to sleep. 579 */ 580 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) 581 { 582 struct nfs_client *clp = server->nfs_client; 583 int ret; 584 585 ret = nfs4_do_handle_exception(server, errorcode, exception); 586 if (exception->delay) { 587 ret = nfs4_delay(&exception->timeout, 588 exception->interruptible); 589 goto out_retry; 590 } 591 if (exception->recovering) { 592 if (exception->task_is_privileged) 593 return -EDEADLOCK; 594 ret = nfs4_wait_clnt_recover(clp); 595 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 596 return -EIO; 597 goto out_retry; 598 } 599 return ret; 600 out_retry: 601 if (ret == 0) 602 exception->retry = 1; 603 return ret; 604 } 605 606 static int 607 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server, 608 int errorcode, struct nfs4_exception *exception) 609 { 610 struct nfs_client *clp = server->nfs_client; 611 int ret; 612 613 ret = nfs4_do_handle_exception(server, errorcode, exception); 614 if (exception->delay) { 615 rpc_delay(task, nfs4_update_delay(&exception->timeout)); 616 goto out_retry; 617 } 618 if (exception->recovering) { 619 if (exception->task_is_privileged) 620 return -EDEADLOCK; 621 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 622 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 623 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 624 goto out_retry; 625 } 626 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 627 ret = -EIO; 628 return ret; 629 out_retry: 630 if (ret == 0) { 631 exception->retry = 1; 632 /* 633 * For NFS4ERR_MOVED, the client transport will need to 634 * be recomputed after migration recovery has completed. 635 */ 636 if (errorcode == -NFS4ERR_MOVED) 637 rpc_task_release_transport(task); 638 } 639 return ret; 640 } 641 642 int 643 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server, 644 struct nfs4_state *state, long *timeout) 645 { 646 struct nfs4_exception exception = { 647 .state = state, 648 }; 649 650 if (task->tk_status >= 0) 651 return 0; 652 if (timeout) 653 exception.timeout = *timeout; 654 task->tk_status = nfs4_async_handle_exception(task, server, 655 task->tk_status, 656 &exception); 657 if (exception.delay && timeout) 658 *timeout = exception.timeout; 659 if (exception.retry) 660 return -EAGAIN; 661 return 0; 662 } 663 664 /* 665 * Return 'true' if 'clp' is using an rpc_client that is integrity protected 666 * or 'false' otherwise. 667 */ 668 static bool _nfs4_is_integrity_protected(struct nfs_client *clp) 669 { 670 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor; 671 return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P); 672 } 673 674 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp) 675 { 676 spin_lock(&clp->cl_lock); 677 if (time_before(clp->cl_last_renewal,timestamp)) 678 clp->cl_last_renewal = timestamp; 679 spin_unlock(&clp->cl_lock); 680 } 681 682 static void renew_lease(const struct nfs_server *server, unsigned long timestamp) 683 { 684 struct nfs_client *clp = server->nfs_client; 685 686 if (!nfs4_has_session(clp)) 687 do_renew_lease(clp, timestamp); 688 } 689 690 struct nfs4_call_sync_data { 691 const struct nfs_server *seq_server; 692 struct nfs4_sequence_args *seq_args; 693 struct nfs4_sequence_res *seq_res; 694 }; 695 696 void nfs4_init_sequence(struct nfs4_sequence_args *args, 697 struct nfs4_sequence_res *res, int cache_reply, 698 int privileged) 699 { 700 args->sa_slot = NULL; 701 args->sa_cache_this = cache_reply; 702 args->sa_privileged = privileged; 703 704 res->sr_slot = NULL; 705 } 706 707 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res) 708 { 709 struct nfs4_slot *slot = res->sr_slot; 710 struct nfs4_slot_table *tbl; 711 712 tbl = slot->table; 713 spin_lock(&tbl->slot_tbl_lock); 714 if (!nfs41_wake_and_assign_slot(tbl, slot)) 715 nfs4_free_slot(tbl, slot); 716 spin_unlock(&tbl->slot_tbl_lock); 717 718 res->sr_slot = NULL; 719 } 720 721 static int nfs40_sequence_done(struct rpc_task *task, 722 struct nfs4_sequence_res *res) 723 { 724 if (res->sr_slot != NULL) 725 nfs40_sequence_free_slot(res); 726 return 1; 727 } 728 729 #if defined(CONFIG_NFS_V4_1) 730 731 static void nfs41_release_slot(struct nfs4_slot *slot) 732 { 733 struct nfs4_session *session; 734 struct nfs4_slot_table *tbl; 735 bool send_new_highest_used_slotid = false; 736 737 if (!slot) 738 return; 739 tbl = slot->table; 740 session = tbl->session; 741 742 /* Bump the slot sequence number */ 743 if (slot->seq_done) 744 slot->seq_nr++; 745 slot->seq_done = 0; 746 747 spin_lock(&tbl->slot_tbl_lock); 748 /* Be nice to the server: try to ensure that the last transmitted 749 * value for highest_user_slotid <= target_highest_slotid 750 */ 751 if (tbl->highest_used_slotid > tbl->target_highest_slotid) 752 send_new_highest_used_slotid = true; 753 754 if (nfs41_wake_and_assign_slot(tbl, slot)) { 755 send_new_highest_used_slotid = false; 756 goto out_unlock; 757 } 758 nfs4_free_slot(tbl, slot); 759 760 if (tbl->highest_used_slotid != NFS4_NO_SLOT) 761 send_new_highest_used_slotid = false; 762 out_unlock: 763 spin_unlock(&tbl->slot_tbl_lock); 764 if (send_new_highest_used_slotid) 765 nfs41_notify_server(session->clp); 766 if (waitqueue_active(&tbl->slot_waitq)) 767 wake_up_all(&tbl->slot_waitq); 768 } 769 770 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) 771 { 772 nfs41_release_slot(res->sr_slot); 773 res->sr_slot = NULL; 774 } 775 776 static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot, 777 u32 seqnr) 778 { 779 if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0) 780 slot->seq_nr_highest_sent = seqnr; 781 } 782 static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, 783 u32 seqnr) 784 { 785 slot->seq_nr_highest_sent = seqnr; 786 slot->seq_nr_last_acked = seqnr; 787 } 788 789 static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred, 790 struct nfs4_slot *slot) 791 { 792 struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true); 793 if (!IS_ERR(task)) 794 rpc_put_task_async(task); 795 } 796 797 static int nfs41_sequence_process(struct rpc_task *task, 798 struct nfs4_sequence_res *res) 799 { 800 struct nfs4_session *session; 801 struct nfs4_slot *slot = res->sr_slot; 802 struct nfs_client *clp; 803 int status; 804 int ret = 1; 805 806 if (slot == NULL) 807 goto out_noaction; 808 /* don't increment the sequence number if the task wasn't sent */ 809 if (!RPC_WAS_SENT(task) || slot->seq_done) 810 goto out; 811 812 session = slot->table->session; 813 clp = session->clp; 814 815 trace_nfs4_sequence_done(session, res); 816 817 status = res->sr_status; 818 if (task->tk_status == -NFS4ERR_DEADSESSION) 819 status = -NFS4ERR_DEADSESSION; 820 821 /* Check the SEQUENCE operation status */ 822 switch (status) { 823 case 0: 824 /* Mark this sequence number as having been acked */ 825 nfs4_slot_sequence_acked(slot, slot->seq_nr); 826 /* Update the slot's sequence and clientid lease timer */ 827 slot->seq_done = 1; 828 do_renew_lease(clp, res->sr_timestamp); 829 /* Check sequence flags */ 830 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags, 831 !!slot->privileged); 832 nfs41_update_target_slotid(slot->table, slot, res); 833 break; 834 case 1: 835 /* 836 * sr_status remains 1 if an RPC level error occurred. 837 * The server may or may not have processed the sequence 838 * operation.. 839 */ 840 nfs4_slot_sequence_record_sent(slot, slot->seq_nr); 841 slot->seq_done = 1; 842 goto out; 843 case -NFS4ERR_DELAY: 844 /* The server detected a resend of the RPC call and 845 * returned NFS4ERR_DELAY as per Section 2.10.6.2 846 * of RFC5661. 847 */ 848 dprintk("%s: slot=%u seq=%u: Operation in progress\n", 849 __func__, 850 slot->slot_nr, 851 slot->seq_nr); 852 nfs4_slot_sequence_acked(slot, slot->seq_nr); 853 goto out_retry; 854 case -NFS4ERR_RETRY_UNCACHED_REP: 855 case -NFS4ERR_SEQ_FALSE_RETRY: 856 /* 857 * The server thinks we tried to replay a request. 858 * Retry the call after bumping the sequence ID. 859 */ 860 nfs4_slot_sequence_acked(slot, slot->seq_nr); 861 goto retry_new_seq; 862 case -NFS4ERR_BADSLOT: 863 /* 864 * The slot id we used was probably retired. Try again 865 * using a different slot id. 866 */ 867 if (slot->slot_nr < slot->table->target_highest_slotid) 868 goto session_recover; 869 goto retry_nowait; 870 case -NFS4ERR_SEQ_MISORDERED: 871 nfs4_slot_sequence_record_sent(slot, slot->seq_nr); 872 /* 873 * Were one or more calls using this slot interrupted? 874 * If the server never received the request, then our 875 * transmitted slot sequence number may be too high. However, 876 * if the server did receive the request then it might 877 * accidentally give us a reply with a mismatched operation. 878 * We can sort this out by sending a lone sequence operation 879 * to the server on the same slot. 880 */ 881 if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) { 882 slot->seq_nr--; 883 if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) { 884 nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot); 885 res->sr_slot = NULL; 886 } 887 goto retry_nowait; 888 } 889 /* 890 * RFC5661: 891 * A retry might be sent while the original request is 892 * still in progress on the replier. The replier SHOULD 893 * deal with the issue by returning NFS4ERR_DELAY as the 894 * reply to SEQUENCE or CB_SEQUENCE operation, but 895 * implementations MAY return NFS4ERR_SEQ_MISORDERED. 896 * 897 * Restart the search after a delay. 898 */ 899 slot->seq_nr = slot->seq_nr_highest_sent; 900 goto out_retry; 901 case -NFS4ERR_BADSESSION: 902 case -NFS4ERR_DEADSESSION: 903 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 904 goto session_recover; 905 default: 906 /* Just update the slot sequence no. */ 907 slot->seq_done = 1; 908 } 909 out: 910 /* The session may be reset by one of the error handlers. */ 911 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status); 912 out_noaction: 913 return ret; 914 session_recover: 915 nfs4_schedule_session_recovery(session, status); 916 dprintk("%s ERROR: %d Reset session\n", __func__, status); 917 nfs41_sequence_free_slot(res); 918 goto out; 919 retry_new_seq: 920 ++slot->seq_nr; 921 retry_nowait: 922 if (rpc_restart_call_prepare(task)) { 923 nfs41_sequence_free_slot(res); 924 task->tk_status = 0; 925 ret = 0; 926 } 927 goto out; 928 out_retry: 929 if (!rpc_restart_call(task)) 930 goto out; 931 rpc_delay(task, NFS4_POLL_RETRY_MAX); 932 return 0; 933 } 934 935 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 936 { 937 if (!nfs41_sequence_process(task, res)) 938 return 0; 939 if (res->sr_slot != NULL) 940 nfs41_sequence_free_slot(res); 941 return 1; 942 943 } 944 EXPORT_SYMBOL_GPL(nfs41_sequence_done); 945 946 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) 947 { 948 if (res->sr_slot == NULL) 949 return 1; 950 if (res->sr_slot->table->session != NULL) 951 return nfs41_sequence_process(task, res); 952 return nfs40_sequence_done(task, res); 953 } 954 955 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res) 956 { 957 if (res->sr_slot != NULL) { 958 if (res->sr_slot->table->session != NULL) 959 nfs41_sequence_free_slot(res); 960 else 961 nfs40_sequence_free_slot(res); 962 } 963 } 964 965 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 966 { 967 if (res->sr_slot == NULL) 968 return 1; 969 if (!res->sr_slot->table->session) 970 return nfs40_sequence_done(task, res); 971 return nfs41_sequence_done(task, res); 972 } 973 EXPORT_SYMBOL_GPL(nfs4_sequence_done); 974 975 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) 976 { 977 struct nfs4_call_sync_data *data = calldata; 978 979 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); 980 981 nfs4_setup_sequence(data->seq_server->nfs_client, 982 data->seq_args, data->seq_res, task); 983 } 984 985 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) 986 { 987 struct nfs4_call_sync_data *data = calldata; 988 989 nfs41_sequence_done(task, data->seq_res); 990 } 991 992 static const struct rpc_call_ops nfs41_call_sync_ops = { 993 .rpc_call_prepare = nfs41_call_sync_prepare, 994 .rpc_call_done = nfs41_call_sync_done, 995 }; 996 997 #else /* !CONFIG_NFS_V4_1 */ 998 999 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res) 1000 { 1001 return nfs40_sequence_done(task, res); 1002 } 1003 1004 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res) 1005 { 1006 if (res->sr_slot != NULL) 1007 nfs40_sequence_free_slot(res); 1008 } 1009 1010 int nfs4_sequence_done(struct rpc_task *task, 1011 struct nfs4_sequence_res *res) 1012 { 1013 return nfs40_sequence_done(task, res); 1014 } 1015 EXPORT_SYMBOL_GPL(nfs4_sequence_done); 1016 1017 #endif /* !CONFIG_NFS_V4_1 */ 1018 1019 static void nfs41_sequence_res_init(struct nfs4_sequence_res *res) 1020 { 1021 res->sr_timestamp = jiffies; 1022 res->sr_status_flags = 0; 1023 res->sr_status = 1; 1024 } 1025 1026 static 1027 void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args, 1028 struct nfs4_sequence_res *res, 1029 struct nfs4_slot *slot) 1030 { 1031 if (!slot) 1032 return; 1033 slot->privileged = args->sa_privileged ? 1 : 0; 1034 args->sa_slot = slot; 1035 1036 res->sr_slot = slot; 1037 } 1038 1039 int nfs4_setup_sequence(struct nfs_client *client, 1040 struct nfs4_sequence_args *args, 1041 struct nfs4_sequence_res *res, 1042 struct rpc_task *task) 1043 { 1044 struct nfs4_session *session = nfs4_get_session(client); 1045 struct nfs4_slot_table *tbl = client->cl_slot_tbl; 1046 struct nfs4_slot *slot; 1047 1048 /* slot already allocated? */ 1049 if (res->sr_slot != NULL) 1050 goto out_start; 1051 1052 if (session) 1053 tbl = &session->fc_slot_table; 1054 1055 spin_lock(&tbl->slot_tbl_lock); 1056 /* The state manager will wait until the slot table is empty */ 1057 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) 1058 goto out_sleep; 1059 1060 slot = nfs4_alloc_slot(tbl); 1061 if (IS_ERR(slot)) { 1062 if (slot == ERR_PTR(-ENOMEM)) 1063 goto out_sleep_timeout; 1064 goto out_sleep; 1065 } 1066 spin_unlock(&tbl->slot_tbl_lock); 1067 1068 nfs4_sequence_attach_slot(args, res, slot); 1069 1070 trace_nfs4_setup_sequence(session, args); 1071 out_start: 1072 nfs41_sequence_res_init(res); 1073 rpc_call_start(task); 1074 return 0; 1075 out_sleep_timeout: 1076 /* Try again in 1/4 second */ 1077 if (args->sa_privileged) 1078 rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task, 1079 jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED); 1080 else 1081 rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task, 1082 NULL, jiffies + (HZ >> 2)); 1083 spin_unlock(&tbl->slot_tbl_lock); 1084 return -EAGAIN; 1085 out_sleep: 1086 if (args->sa_privileged) 1087 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, 1088 RPC_PRIORITY_PRIVILEGED); 1089 else 1090 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 1091 spin_unlock(&tbl->slot_tbl_lock); 1092 return -EAGAIN; 1093 } 1094 EXPORT_SYMBOL_GPL(nfs4_setup_sequence); 1095 1096 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata) 1097 { 1098 struct nfs4_call_sync_data *data = calldata; 1099 nfs4_setup_sequence(data->seq_server->nfs_client, 1100 data->seq_args, data->seq_res, task); 1101 } 1102 1103 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata) 1104 { 1105 struct nfs4_call_sync_data *data = calldata; 1106 nfs4_sequence_done(task, data->seq_res); 1107 } 1108 1109 static const struct rpc_call_ops nfs40_call_sync_ops = { 1110 .rpc_call_prepare = nfs40_call_sync_prepare, 1111 .rpc_call_done = nfs40_call_sync_done, 1112 }; 1113 1114 static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup) 1115 { 1116 int ret; 1117 struct rpc_task *task; 1118 1119 task = rpc_run_task(task_setup); 1120 if (IS_ERR(task)) 1121 return PTR_ERR(task); 1122 1123 ret = task->tk_status; 1124 rpc_put_task(task); 1125 return ret; 1126 } 1127 1128 static int nfs4_do_call_sync(struct rpc_clnt *clnt, 1129 struct nfs_server *server, 1130 struct rpc_message *msg, 1131 struct nfs4_sequence_args *args, 1132 struct nfs4_sequence_res *res, 1133 unsigned short task_flags) 1134 { 1135 struct nfs_client *clp = server->nfs_client; 1136 struct nfs4_call_sync_data data = { 1137 .seq_server = server, 1138 .seq_args = args, 1139 .seq_res = res, 1140 }; 1141 struct rpc_task_setup task_setup = { 1142 .rpc_client = clnt, 1143 .rpc_message = msg, 1144 .callback_ops = clp->cl_mvops->call_sync_ops, 1145 .callback_data = &data, 1146 .flags = task_flags, 1147 }; 1148 1149 return nfs4_call_sync_custom(&task_setup); 1150 } 1151 1152 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, 1153 struct nfs_server *server, 1154 struct rpc_message *msg, 1155 struct nfs4_sequence_args *args, 1156 struct nfs4_sequence_res *res) 1157 { 1158 unsigned short task_flags = 0; 1159 1160 if (server->nfs_client->cl_minorversion) 1161 task_flags = RPC_TASK_MOVEABLE; 1162 return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags); 1163 } 1164 1165 1166 int nfs4_call_sync(struct rpc_clnt *clnt, 1167 struct nfs_server *server, 1168 struct rpc_message *msg, 1169 struct nfs4_sequence_args *args, 1170 struct nfs4_sequence_res *res, 1171 int cache_reply) 1172 { 1173 nfs4_init_sequence(args, res, cache_reply, 0); 1174 return nfs4_call_sync_sequence(clnt, server, msg, args, res); 1175 } 1176 1177 static void 1178 nfs4_inc_nlink_locked(struct inode *inode) 1179 { 1180 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 1181 NFS_INO_INVALID_CTIME | 1182 NFS_INO_INVALID_NLINK); 1183 inc_nlink(inode); 1184 } 1185 1186 static void 1187 nfs4_inc_nlink(struct inode *inode) 1188 { 1189 spin_lock(&inode->i_lock); 1190 nfs4_inc_nlink_locked(inode); 1191 spin_unlock(&inode->i_lock); 1192 } 1193 1194 static void 1195 nfs4_dec_nlink_locked(struct inode *inode) 1196 { 1197 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 1198 NFS_INO_INVALID_CTIME | 1199 NFS_INO_INVALID_NLINK); 1200 drop_nlink(inode); 1201 } 1202 1203 static void 1204 nfs4_update_changeattr_locked(struct inode *inode, 1205 struct nfs4_change_info *cinfo, 1206 unsigned long timestamp, unsigned long cache_validity) 1207 { 1208 struct nfs_inode *nfsi = NFS_I(inode); 1209 u64 change_attr = inode_peek_iversion_raw(inode); 1210 1211 cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME; 1212 if (S_ISDIR(inode->i_mode)) 1213 cache_validity |= NFS_INO_INVALID_DATA; 1214 1215 switch (NFS_SERVER(inode)->change_attr_type) { 1216 case NFS4_CHANGE_TYPE_IS_UNDEFINED: 1217 if (cinfo->after == change_attr) 1218 goto out; 1219 break; 1220 default: 1221 if ((s64)(change_attr - cinfo->after) >= 0) 1222 goto out; 1223 } 1224 1225 inode_set_iversion_raw(inode, cinfo->after); 1226 if (!cinfo->atomic || cinfo->before != change_attr) { 1227 if (S_ISDIR(inode->i_mode)) 1228 nfs_force_lookup_revalidate(inode); 1229 1230 if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) 1231 cache_validity |= 1232 NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL | 1233 NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER | 1234 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK | 1235 NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR | 1236 NFS_INO_REVAL_PAGECACHE; 1237 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); 1238 } 1239 nfsi->attrtimeo_timestamp = jiffies; 1240 nfsi->read_cache_jiffies = timestamp; 1241 nfsi->attr_gencount = nfs_inc_attr_generation_counter(); 1242 nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE; 1243 out: 1244 nfs_set_cache_invalid(inode, cache_validity); 1245 } 1246 1247 void 1248 nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo, 1249 unsigned long timestamp, unsigned long cache_validity) 1250 { 1251 spin_lock(&dir->i_lock); 1252 nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity); 1253 spin_unlock(&dir->i_lock); 1254 } 1255 1256 struct nfs4_open_createattrs { 1257 struct nfs4_label *label; 1258 struct iattr *sattr; 1259 const __u32 verf[2]; 1260 }; 1261 1262 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server, 1263 int err, struct nfs4_exception *exception) 1264 { 1265 if (err != -EINVAL) 1266 return false; 1267 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) 1268 return false; 1269 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1; 1270 exception->retry = 1; 1271 return true; 1272 } 1273 1274 static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx) 1275 { 1276 return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC); 1277 } 1278 1279 static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx) 1280 { 1281 fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE); 1282 1283 return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret; 1284 } 1285 1286 static u32 1287 nfs4_map_atomic_open_share(struct nfs_server *server, 1288 fmode_t fmode, int openflags) 1289 { 1290 u32 res = 0; 1291 1292 switch (fmode & (FMODE_READ | FMODE_WRITE)) { 1293 case FMODE_READ: 1294 res = NFS4_SHARE_ACCESS_READ; 1295 break; 1296 case FMODE_WRITE: 1297 res = NFS4_SHARE_ACCESS_WRITE; 1298 break; 1299 case FMODE_READ|FMODE_WRITE: 1300 res = NFS4_SHARE_ACCESS_BOTH; 1301 } 1302 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) 1303 goto out; 1304 /* Want no delegation if we're using O_DIRECT */ 1305 if (openflags & O_DIRECT) 1306 res |= NFS4_SHARE_WANT_NO_DELEG; 1307 out: 1308 return res; 1309 } 1310 1311 static enum open_claim_type4 1312 nfs4_map_atomic_open_claim(struct nfs_server *server, 1313 enum open_claim_type4 claim) 1314 { 1315 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1) 1316 return claim; 1317 switch (claim) { 1318 default: 1319 return claim; 1320 case NFS4_OPEN_CLAIM_FH: 1321 return NFS4_OPEN_CLAIM_NULL; 1322 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1323 return NFS4_OPEN_CLAIM_DELEGATE_CUR; 1324 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1325 return NFS4_OPEN_CLAIM_DELEGATE_PREV; 1326 } 1327 } 1328 1329 static void nfs4_init_opendata_res(struct nfs4_opendata *p) 1330 { 1331 p->o_res.f_attr = &p->f_attr; 1332 p->o_res.f_label = p->f_label; 1333 p->o_res.seqid = p->o_arg.seqid; 1334 p->c_res.seqid = p->c_arg.seqid; 1335 p->o_res.server = p->o_arg.server; 1336 p->o_res.access_request = p->o_arg.access; 1337 nfs_fattr_init(&p->f_attr); 1338 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name); 1339 } 1340 1341 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, 1342 struct nfs4_state_owner *sp, fmode_t fmode, int flags, 1343 const struct nfs4_open_createattrs *c, 1344 enum open_claim_type4 claim, 1345 gfp_t gfp_mask) 1346 { 1347 struct dentry *parent = dget_parent(dentry); 1348 struct inode *dir = d_inode(parent); 1349 struct nfs_server *server = NFS_SERVER(dir); 1350 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 1351 struct nfs4_label *label = (c != NULL) ? c->label : NULL; 1352 struct nfs4_opendata *p; 1353 1354 p = kzalloc(sizeof(*p), gfp_mask); 1355 if (p == NULL) 1356 goto err; 1357 1358 p->f_label = nfs4_label_alloc(server, gfp_mask); 1359 if (IS_ERR(p->f_label)) 1360 goto err_free_p; 1361 1362 p->a_label = nfs4_label_alloc(server, gfp_mask); 1363 if (IS_ERR(p->a_label)) 1364 goto err_free_f; 1365 1366 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 1367 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); 1368 if (IS_ERR(p->o_arg.seqid)) 1369 goto err_free_label; 1370 nfs_sb_active(dentry->d_sb); 1371 p->dentry = dget(dentry); 1372 p->dir = parent; 1373 p->owner = sp; 1374 atomic_inc(&sp->so_count); 1375 p->o_arg.open_flags = flags; 1376 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); 1377 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim); 1378 p->o_arg.share_access = nfs4_map_atomic_open_share(server, 1379 fmode, flags); 1380 if (flags & O_CREAT) { 1381 p->o_arg.umask = current_umask(); 1382 p->o_arg.label = nfs4_label_copy(p->a_label, label); 1383 if (c->sattr != NULL && c->sattr->ia_valid != 0) { 1384 p->o_arg.u.attrs = &p->attrs; 1385 memcpy(&p->attrs, c->sattr, sizeof(p->attrs)); 1386 1387 memcpy(p->o_arg.u.verifier.data, c->verf, 1388 sizeof(p->o_arg.u.verifier.data)); 1389 } 1390 } 1391 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS 1392 * will return permission denied for all bits until close */ 1393 if (!(flags & O_EXCL)) { 1394 /* ask server to check for all possible rights as results 1395 * are cached */ 1396 switch (p->o_arg.claim) { 1397 default: 1398 break; 1399 case NFS4_OPEN_CLAIM_NULL: 1400 case NFS4_OPEN_CLAIM_FH: 1401 p->o_arg.access = NFS4_ACCESS_READ | 1402 NFS4_ACCESS_MODIFY | 1403 NFS4_ACCESS_EXTEND | 1404 NFS4_ACCESS_EXECUTE; 1405 #ifdef CONFIG_NFS_V4_2 1406 if (server->caps & NFS_CAP_XATTR) 1407 p->o_arg.access |= NFS4_ACCESS_XAREAD | 1408 NFS4_ACCESS_XAWRITE | 1409 NFS4_ACCESS_XALIST; 1410 #endif 1411 } 1412 } 1413 p->o_arg.clientid = server->nfs_client->cl_clientid; 1414 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time); 1415 p->o_arg.id.uniquifier = sp->so_seqid.owner_id; 1416 p->o_arg.name = &dentry->d_name; 1417 p->o_arg.server = server; 1418 p->o_arg.bitmask = nfs4_bitmask(server, label); 1419 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 1420 switch (p->o_arg.claim) { 1421 case NFS4_OPEN_CLAIM_NULL: 1422 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 1423 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 1424 p->o_arg.fh = NFS_FH(dir); 1425 break; 1426 case NFS4_OPEN_CLAIM_PREVIOUS: 1427 case NFS4_OPEN_CLAIM_FH: 1428 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1429 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1430 p->o_arg.fh = NFS_FH(d_inode(dentry)); 1431 } 1432 p->c_arg.fh = &p->o_res.fh; 1433 p->c_arg.stateid = &p->o_res.stateid; 1434 p->c_arg.seqid = p->o_arg.seqid; 1435 nfs4_init_opendata_res(p); 1436 kref_init(&p->kref); 1437 return p; 1438 1439 err_free_label: 1440 nfs4_label_free(p->a_label); 1441 err_free_f: 1442 nfs4_label_free(p->f_label); 1443 err_free_p: 1444 kfree(p); 1445 err: 1446 dput(parent); 1447 return NULL; 1448 } 1449 1450 static void nfs4_opendata_free(struct kref *kref) 1451 { 1452 struct nfs4_opendata *p = container_of(kref, 1453 struct nfs4_opendata, kref); 1454 struct super_block *sb = p->dentry->d_sb; 1455 1456 nfs4_lgopen_release(p->lgp); 1457 nfs_free_seqid(p->o_arg.seqid); 1458 nfs4_sequence_free_slot(&p->o_res.seq_res); 1459 if (p->state != NULL) 1460 nfs4_put_open_state(p->state); 1461 nfs4_put_state_owner(p->owner); 1462 1463 nfs4_label_free(p->a_label); 1464 nfs4_label_free(p->f_label); 1465 1466 dput(p->dir); 1467 dput(p->dentry); 1468 nfs_sb_deactive(sb); 1469 nfs_fattr_free_names(&p->f_attr); 1470 kfree(p->f_attr.mdsthreshold); 1471 kfree(p); 1472 } 1473 1474 static void nfs4_opendata_put(struct nfs4_opendata *p) 1475 { 1476 if (p != NULL) 1477 kref_put(&p->kref, nfs4_opendata_free); 1478 } 1479 1480 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state, 1481 fmode_t fmode) 1482 { 1483 switch(fmode & (FMODE_READ|FMODE_WRITE)) { 1484 case FMODE_READ|FMODE_WRITE: 1485 return state->n_rdwr != 0; 1486 case FMODE_WRITE: 1487 return state->n_wronly != 0; 1488 case FMODE_READ: 1489 return state->n_rdonly != 0; 1490 } 1491 WARN_ON_ONCE(1); 1492 return false; 1493 } 1494 1495 static int can_open_cached(struct nfs4_state *state, fmode_t mode, 1496 int open_mode, enum open_claim_type4 claim) 1497 { 1498 int ret = 0; 1499 1500 if (open_mode & (O_EXCL|O_TRUNC)) 1501 goto out; 1502 switch (claim) { 1503 case NFS4_OPEN_CLAIM_NULL: 1504 case NFS4_OPEN_CLAIM_FH: 1505 goto out; 1506 default: 1507 break; 1508 } 1509 switch (mode & (FMODE_READ|FMODE_WRITE)) { 1510 case FMODE_READ: 1511 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 1512 && state->n_rdonly != 0; 1513 break; 1514 case FMODE_WRITE: 1515 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 1516 && state->n_wronly != 0; 1517 break; 1518 case FMODE_READ|FMODE_WRITE: 1519 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 1520 && state->n_rdwr != 0; 1521 } 1522 out: 1523 return ret; 1524 } 1525 1526 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode, 1527 enum open_claim_type4 claim) 1528 { 1529 if (delegation == NULL) 1530 return 0; 1531 if ((delegation->type & fmode) != fmode) 1532 return 0; 1533 switch (claim) { 1534 case NFS4_OPEN_CLAIM_NULL: 1535 case NFS4_OPEN_CLAIM_FH: 1536 break; 1537 case NFS4_OPEN_CLAIM_PREVIOUS: 1538 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) 1539 break; 1540 fallthrough; 1541 default: 1542 return 0; 1543 } 1544 nfs_mark_delegation_referenced(delegation); 1545 return 1; 1546 } 1547 1548 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode) 1549 { 1550 switch (fmode) { 1551 case FMODE_WRITE: 1552 state->n_wronly++; 1553 break; 1554 case FMODE_READ: 1555 state->n_rdonly++; 1556 break; 1557 case FMODE_READ|FMODE_WRITE: 1558 state->n_rdwr++; 1559 } 1560 nfs4_state_set_mode_locked(state, state->state | fmode); 1561 } 1562 1563 #ifdef CONFIG_NFS_V4_1 1564 static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state) 1565 { 1566 if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags)) 1567 return true; 1568 if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags)) 1569 return true; 1570 if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags)) 1571 return true; 1572 return false; 1573 } 1574 #endif /* CONFIG_NFS_V4_1 */ 1575 1576 static void nfs_state_log_update_open_stateid(struct nfs4_state *state) 1577 { 1578 if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags)) 1579 wake_up_all(&state->waitq); 1580 } 1581 1582 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state) 1583 { 1584 struct nfs_client *clp = state->owner->so_server->nfs_client; 1585 bool need_recover = false; 1586 1587 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly) 1588 need_recover = true; 1589 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly) 1590 need_recover = true; 1591 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr) 1592 need_recover = true; 1593 if (need_recover) 1594 nfs4_state_mark_reclaim_nograce(clp, state); 1595 } 1596 1597 /* 1598 * Check for whether or not the caller may update the open stateid 1599 * to the value passed in by stateid. 1600 * 1601 * Note: This function relies heavily on the server implementing 1602 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2 1603 * correctly. 1604 * i.e. The stateid seqids have to be initialised to 1, and 1605 * are then incremented on every state transition. 1606 */ 1607 static bool nfs_stateid_is_sequential(struct nfs4_state *state, 1608 const nfs4_stateid *stateid) 1609 { 1610 if (test_bit(NFS_OPEN_STATE, &state->flags)) { 1611 /* The common case - we're updating to a new sequence number */ 1612 if (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1613 nfs4_stateid_is_next(&state->open_stateid, stateid)) { 1614 return true; 1615 } 1616 } else { 1617 /* This is the first OPEN in this generation */ 1618 if (stateid->seqid == cpu_to_be32(1)) 1619 return true; 1620 } 1621 return false; 1622 } 1623 1624 static void nfs_resync_open_stateid_locked(struct nfs4_state *state) 1625 { 1626 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr)) 1627 return; 1628 if (state->n_wronly) 1629 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1630 if (state->n_rdonly) 1631 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1632 if (state->n_rdwr) 1633 set_bit(NFS_O_RDWR_STATE, &state->flags); 1634 set_bit(NFS_OPEN_STATE, &state->flags); 1635 } 1636 1637 static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1638 nfs4_stateid *stateid, fmode_t fmode) 1639 { 1640 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1641 switch (fmode & (FMODE_READ|FMODE_WRITE)) { 1642 case FMODE_WRITE: 1643 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1644 break; 1645 case FMODE_READ: 1646 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1647 break; 1648 case 0: 1649 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1650 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1651 clear_bit(NFS_OPEN_STATE, &state->flags); 1652 } 1653 if (stateid == NULL) 1654 return; 1655 /* Handle OPEN+OPEN_DOWNGRADE races */ 1656 if (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1657 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { 1658 nfs_resync_open_stateid_locked(state); 1659 goto out; 1660 } 1661 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1662 nfs4_stateid_copy(&state->stateid, stateid); 1663 nfs4_stateid_copy(&state->open_stateid, stateid); 1664 trace_nfs4_open_stateid_update(state->inode, stateid, 0); 1665 out: 1666 nfs_state_log_update_open_stateid(state); 1667 } 1668 1669 static void nfs_clear_open_stateid(struct nfs4_state *state, 1670 nfs4_stateid *arg_stateid, 1671 nfs4_stateid *stateid, fmode_t fmode) 1672 { 1673 write_seqlock(&state->seqlock); 1674 /* Ignore, if the CLOSE argment doesn't match the current stateid */ 1675 if (nfs4_state_match_open_stateid_other(state, arg_stateid)) 1676 nfs_clear_open_stateid_locked(state, stateid, fmode); 1677 write_sequnlock(&state->seqlock); 1678 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1679 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1680 } 1681 1682 static void nfs_set_open_stateid_locked(struct nfs4_state *state, 1683 const nfs4_stateid *stateid, nfs4_stateid *freeme) 1684 __must_hold(&state->owner->so_lock) 1685 __must_hold(&state->seqlock) 1686 __must_hold(RCU) 1687 1688 { 1689 DEFINE_WAIT(wait); 1690 int status = 0; 1691 for (;;) { 1692 1693 if (nfs_stateid_is_sequential(state, stateid)) 1694 break; 1695 1696 if (status) 1697 break; 1698 /* Rely on seqids for serialisation with NFSv4.0 */ 1699 if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client)) 1700 break; 1701 1702 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 1703 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 1704 /* 1705 * Ensure we process the state changes in the same order 1706 * in which the server processed them by delaying the 1707 * update of the stateid until we are in sequence. 1708 */ 1709 write_sequnlock(&state->seqlock); 1710 spin_unlock(&state->owner->so_lock); 1711 rcu_read_unlock(); 1712 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0); 1713 1714 if (!fatal_signal_pending(current)) { 1715 if (schedule_timeout(5*HZ) == 0) 1716 status = -EAGAIN; 1717 else 1718 status = 0; 1719 } else 1720 status = -EINTR; 1721 finish_wait(&state->waitq, &wait); 1722 rcu_read_lock(); 1723 spin_lock(&state->owner->so_lock); 1724 write_seqlock(&state->seqlock); 1725 } 1726 1727 if (test_bit(NFS_OPEN_STATE, &state->flags) && 1728 !nfs4_stateid_match_other(stateid, &state->open_stateid)) { 1729 nfs4_stateid_copy(freeme, &state->open_stateid); 1730 nfs_test_and_clear_all_open_stateid(state); 1731 } 1732 1733 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1734 nfs4_stateid_copy(&state->stateid, stateid); 1735 nfs4_stateid_copy(&state->open_stateid, stateid); 1736 trace_nfs4_open_stateid_update(state->inode, stateid, status); 1737 nfs_state_log_update_open_stateid(state); 1738 } 1739 1740 static void nfs_state_set_open_stateid(struct nfs4_state *state, 1741 const nfs4_stateid *open_stateid, 1742 fmode_t fmode, 1743 nfs4_stateid *freeme) 1744 { 1745 /* 1746 * Protect the call to nfs4_state_set_mode_locked and 1747 * serialise the stateid update 1748 */ 1749 write_seqlock(&state->seqlock); 1750 nfs_set_open_stateid_locked(state, open_stateid, freeme); 1751 switch (fmode) { 1752 case FMODE_READ: 1753 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1754 break; 1755 case FMODE_WRITE: 1756 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1757 break; 1758 case FMODE_READ|FMODE_WRITE: 1759 set_bit(NFS_O_RDWR_STATE, &state->flags); 1760 } 1761 set_bit(NFS_OPEN_STATE, &state->flags); 1762 write_sequnlock(&state->seqlock); 1763 } 1764 1765 static void nfs_state_clear_open_state_flags(struct nfs4_state *state) 1766 { 1767 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1768 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1769 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1770 clear_bit(NFS_OPEN_STATE, &state->flags); 1771 } 1772 1773 static void nfs_state_set_delegation(struct nfs4_state *state, 1774 const nfs4_stateid *deleg_stateid, 1775 fmode_t fmode) 1776 { 1777 /* 1778 * Protect the call to nfs4_state_set_mode_locked and 1779 * serialise the stateid update 1780 */ 1781 write_seqlock(&state->seqlock); 1782 nfs4_stateid_copy(&state->stateid, deleg_stateid); 1783 set_bit(NFS_DELEGATED_STATE, &state->flags); 1784 write_sequnlock(&state->seqlock); 1785 } 1786 1787 static void nfs_state_clear_delegation(struct nfs4_state *state) 1788 { 1789 write_seqlock(&state->seqlock); 1790 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1791 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1792 write_sequnlock(&state->seqlock); 1793 } 1794 1795 int update_open_stateid(struct nfs4_state *state, 1796 const nfs4_stateid *open_stateid, 1797 const nfs4_stateid *delegation, 1798 fmode_t fmode) 1799 { 1800 struct nfs_server *server = NFS_SERVER(state->inode); 1801 struct nfs_client *clp = server->nfs_client; 1802 struct nfs_inode *nfsi = NFS_I(state->inode); 1803 struct nfs_delegation *deleg_cur; 1804 nfs4_stateid freeme = { }; 1805 int ret = 0; 1806 1807 fmode &= (FMODE_READ|FMODE_WRITE); 1808 1809 rcu_read_lock(); 1810 spin_lock(&state->owner->so_lock); 1811 if (open_stateid != NULL) { 1812 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme); 1813 ret = 1; 1814 } 1815 1816 deleg_cur = nfs4_get_valid_delegation(state->inode); 1817 if (deleg_cur == NULL) 1818 goto no_delegation; 1819 1820 spin_lock(&deleg_cur->lock); 1821 if (rcu_dereference(nfsi->delegation) != deleg_cur || 1822 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) || 1823 (deleg_cur->type & fmode) != fmode) 1824 goto no_delegation_unlock; 1825 1826 if (delegation == NULL) 1827 delegation = &deleg_cur->stateid; 1828 else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation)) 1829 goto no_delegation_unlock; 1830 1831 nfs_mark_delegation_referenced(deleg_cur); 1832 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode); 1833 ret = 1; 1834 no_delegation_unlock: 1835 spin_unlock(&deleg_cur->lock); 1836 no_delegation: 1837 if (ret) 1838 update_open_stateflags(state, fmode); 1839 spin_unlock(&state->owner->so_lock); 1840 rcu_read_unlock(); 1841 1842 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1843 nfs4_schedule_state_manager(clp); 1844 if (freeme.type != 0) 1845 nfs4_test_and_free_stateid(server, &freeme, 1846 state->owner->so_cred); 1847 1848 return ret; 1849 } 1850 1851 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp, 1852 const nfs4_stateid *stateid) 1853 { 1854 struct nfs4_state *state = lsp->ls_state; 1855 bool ret = false; 1856 1857 spin_lock(&state->state_lock); 1858 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid)) 1859 goto out_noupdate; 1860 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid)) 1861 goto out_noupdate; 1862 nfs4_stateid_copy(&lsp->ls_stateid, stateid); 1863 ret = true; 1864 out_noupdate: 1865 spin_unlock(&state->state_lock); 1866 return ret; 1867 } 1868 1869 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1870 { 1871 struct nfs_delegation *delegation; 1872 1873 fmode &= FMODE_READ|FMODE_WRITE; 1874 rcu_read_lock(); 1875 delegation = nfs4_get_valid_delegation(inode); 1876 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1877 rcu_read_unlock(); 1878 return; 1879 } 1880 rcu_read_unlock(); 1881 nfs4_inode_return_delegation(inode); 1882 } 1883 1884 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1885 { 1886 struct nfs4_state *state = opendata->state; 1887 struct nfs_delegation *delegation; 1888 int open_mode = opendata->o_arg.open_flags; 1889 fmode_t fmode = opendata->o_arg.fmode; 1890 enum open_claim_type4 claim = opendata->o_arg.claim; 1891 nfs4_stateid stateid; 1892 int ret = -EAGAIN; 1893 1894 for (;;) { 1895 spin_lock(&state->owner->so_lock); 1896 if (can_open_cached(state, fmode, open_mode, claim)) { 1897 update_open_stateflags(state, fmode); 1898 spin_unlock(&state->owner->so_lock); 1899 goto out_return_state; 1900 } 1901 spin_unlock(&state->owner->so_lock); 1902 rcu_read_lock(); 1903 delegation = nfs4_get_valid_delegation(state->inode); 1904 if (!can_open_delegated(delegation, fmode, claim)) { 1905 rcu_read_unlock(); 1906 break; 1907 } 1908 /* Save the delegation */ 1909 nfs4_stateid_copy(&stateid, &delegation->stateid); 1910 rcu_read_unlock(); 1911 nfs_release_seqid(opendata->o_arg.seqid); 1912 if (!opendata->is_recover) { 1913 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 1914 if (ret != 0) 1915 goto out; 1916 } 1917 ret = -EAGAIN; 1918 1919 /* Try to update the stateid using the delegation */ 1920 if (update_open_stateid(state, NULL, &stateid, fmode)) 1921 goto out_return_state; 1922 } 1923 out: 1924 return ERR_PTR(ret); 1925 out_return_state: 1926 refcount_inc(&state->count); 1927 return state; 1928 } 1929 1930 static void 1931 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state) 1932 { 1933 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client; 1934 struct nfs_delegation *delegation; 1935 int delegation_flags = 0; 1936 1937 rcu_read_lock(); 1938 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 1939 if (delegation) 1940 delegation_flags = delegation->flags; 1941 rcu_read_unlock(); 1942 switch (data->o_arg.claim) { 1943 default: 1944 break; 1945 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 1946 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1947 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 1948 "returning a delegation for " 1949 "OPEN(CLAIM_DELEGATE_CUR)\n", 1950 clp->cl_hostname); 1951 return; 1952 } 1953 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) 1954 nfs_inode_set_delegation(state->inode, 1955 data->owner->so_cred, 1956 data->o_res.delegation_type, 1957 &data->o_res.delegation, 1958 data->o_res.pagemod_limit); 1959 else 1960 nfs_inode_reclaim_delegation(state->inode, 1961 data->owner->so_cred, 1962 data->o_res.delegation_type, 1963 &data->o_res.delegation, 1964 data->o_res.pagemod_limit); 1965 1966 if (data->o_res.do_recall) 1967 nfs_async_inode_return_delegation(state->inode, 1968 &data->o_res.delegation); 1969 } 1970 1971 /* 1972 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 1973 * and update the nfs4_state. 1974 */ 1975 static struct nfs4_state * 1976 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 1977 { 1978 struct inode *inode = data->state->inode; 1979 struct nfs4_state *state = data->state; 1980 int ret; 1981 1982 if (!data->rpc_done) { 1983 if (data->rpc_status) 1984 return ERR_PTR(data->rpc_status); 1985 /* cached opens have already been processed */ 1986 goto update; 1987 } 1988 1989 ret = nfs_refresh_inode(inode, &data->f_attr); 1990 if (ret) 1991 return ERR_PTR(ret); 1992 1993 if (data->o_res.delegation_type != 0) 1994 nfs4_opendata_check_deleg(data, state); 1995 update: 1996 if (!update_open_stateid(state, &data->o_res.stateid, 1997 NULL, data->o_arg.fmode)) 1998 return ERR_PTR(-EAGAIN); 1999 refcount_inc(&state->count); 2000 2001 return state; 2002 } 2003 2004 static struct inode * 2005 nfs4_opendata_get_inode(struct nfs4_opendata *data) 2006 { 2007 struct inode *inode; 2008 2009 switch (data->o_arg.claim) { 2010 case NFS4_OPEN_CLAIM_NULL: 2011 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 2012 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 2013 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 2014 return ERR_PTR(-EAGAIN); 2015 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, 2016 &data->f_attr, data->f_label); 2017 break; 2018 default: 2019 inode = d_inode(data->dentry); 2020 ihold(inode); 2021 nfs_refresh_inode(inode, &data->f_attr); 2022 } 2023 return inode; 2024 } 2025 2026 static struct nfs4_state * 2027 nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data) 2028 { 2029 struct nfs4_state *state; 2030 struct inode *inode; 2031 2032 inode = nfs4_opendata_get_inode(data); 2033 if (IS_ERR(inode)) 2034 return ERR_CAST(inode); 2035 if (data->state != NULL && data->state->inode == inode) { 2036 state = data->state; 2037 refcount_inc(&state->count); 2038 } else 2039 state = nfs4_get_open_state(inode, data->owner); 2040 iput(inode); 2041 if (state == NULL) 2042 state = ERR_PTR(-ENOMEM); 2043 return state; 2044 } 2045 2046 static struct nfs4_state * 2047 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2048 { 2049 struct nfs4_state *state; 2050 2051 if (!data->rpc_done) { 2052 state = nfs4_try_open_cached(data); 2053 trace_nfs4_cached_open(data->state); 2054 goto out; 2055 } 2056 2057 state = nfs4_opendata_find_nfs4_state(data); 2058 if (IS_ERR(state)) 2059 goto out; 2060 2061 if (data->o_res.delegation_type != 0) 2062 nfs4_opendata_check_deleg(data, state); 2063 if (!update_open_stateid(state, &data->o_res.stateid, 2064 NULL, data->o_arg.fmode)) { 2065 nfs4_put_open_state(state); 2066 state = ERR_PTR(-EAGAIN); 2067 } 2068 out: 2069 nfs_release_seqid(data->o_arg.seqid); 2070 return state; 2071 } 2072 2073 static struct nfs4_state * 2074 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 2075 { 2076 struct nfs4_state *ret; 2077 2078 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 2079 ret =_nfs4_opendata_reclaim_to_nfs4_state(data); 2080 else 2081 ret = _nfs4_opendata_to_nfs4_state(data); 2082 nfs4_sequence_free_slot(&data->o_res.seq_res); 2083 return ret; 2084 } 2085 2086 static struct nfs_open_context * 2087 nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode) 2088 { 2089 struct nfs_inode *nfsi = NFS_I(state->inode); 2090 struct nfs_open_context *ctx; 2091 2092 rcu_read_lock(); 2093 list_for_each_entry_rcu(ctx, &nfsi->open_files, list) { 2094 if (ctx->state != state) 2095 continue; 2096 if ((ctx->mode & mode) != mode) 2097 continue; 2098 if (!get_nfs_open_context(ctx)) 2099 continue; 2100 rcu_read_unlock(); 2101 return ctx; 2102 } 2103 rcu_read_unlock(); 2104 return ERR_PTR(-ENOENT); 2105 } 2106 2107 static struct nfs_open_context * 2108 nfs4_state_find_open_context(struct nfs4_state *state) 2109 { 2110 struct nfs_open_context *ctx; 2111 2112 ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE); 2113 if (!IS_ERR(ctx)) 2114 return ctx; 2115 ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE); 2116 if (!IS_ERR(ctx)) 2117 return ctx; 2118 return nfs4_state_find_open_context_mode(state, FMODE_READ); 2119 } 2120 2121 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 2122 struct nfs4_state *state, enum open_claim_type4 claim) 2123 { 2124 struct nfs4_opendata *opendata; 2125 2126 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 2127 NULL, claim, GFP_NOFS); 2128 if (opendata == NULL) 2129 return ERR_PTR(-ENOMEM); 2130 opendata->state = state; 2131 refcount_inc(&state->count); 2132 return opendata; 2133 } 2134 2135 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, 2136 fmode_t fmode) 2137 { 2138 struct nfs4_state *newstate; 2139 int ret; 2140 2141 if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) 2142 return 0; 2143 opendata->o_arg.open_flags = 0; 2144 opendata->o_arg.fmode = fmode; 2145 opendata->o_arg.share_access = nfs4_map_atomic_open_share( 2146 NFS_SB(opendata->dentry->d_sb), 2147 fmode, 0); 2148 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 2149 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 2150 nfs4_init_opendata_res(opendata); 2151 ret = _nfs4_recover_proc_open(opendata); 2152 if (ret != 0) 2153 return ret; 2154 newstate = nfs4_opendata_to_nfs4_state(opendata); 2155 if (IS_ERR(newstate)) 2156 return PTR_ERR(newstate); 2157 if (newstate != opendata->state) 2158 ret = -ESTALE; 2159 nfs4_close_state(newstate, fmode); 2160 return ret; 2161 } 2162 2163 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 2164 { 2165 int ret; 2166 2167 /* memory barrier prior to reading state->n_* */ 2168 smp_rmb(); 2169 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2170 if (ret != 0) 2171 return ret; 2172 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2173 if (ret != 0) 2174 return ret; 2175 ret = nfs4_open_recover_helper(opendata, FMODE_READ); 2176 if (ret != 0) 2177 return ret; 2178 /* 2179 * We may have performed cached opens for all three recoveries. 2180 * Check if we need to update the current stateid. 2181 */ 2182 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 2183 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 2184 write_seqlock(&state->seqlock); 2185 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 2186 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2187 write_sequnlock(&state->seqlock); 2188 } 2189 return 0; 2190 } 2191 2192 /* 2193 * OPEN_RECLAIM: 2194 * reclaim state on the server after a reboot. 2195 */ 2196 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2197 { 2198 struct nfs_delegation *delegation; 2199 struct nfs4_opendata *opendata; 2200 fmode_t delegation_type = 0; 2201 int status; 2202 2203 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2204 NFS4_OPEN_CLAIM_PREVIOUS); 2205 if (IS_ERR(opendata)) 2206 return PTR_ERR(opendata); 2207 rcu_read_lock(); 2208 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2209 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) 2210 delegation_type = delegation->type; 2211 rcu_read_unlock(); 2212 opendata->o_arg.u.delegation_type = delegation_type; 2213 status = nfs4_open_recover(opendata, state); 2214 nfs4_opendata_put(opendata); 2215 return status; 2216 } 2217 2218 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 2219 { 2220 struct nfs_server *server = NFS_SERVER(state->inode); 2221 struct nfs4_exception exception = { }; 2222 int err; 2223 do { 2224 err = _nfs4_do_open_reclaim(ctx, state); 2225 trace_nfs4_open_reclaim(ctx, 0, err); 2226 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2227 continue; 2228 if (err != -NFS4ERR_DELAY) 2229 break; 2230 nfs4_handle_exception(server, err, &exception); 2231 } while (exception.retry); 2232 return err; 2233 } 2234 2235 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 2236 { 2237 struct nfs_open_context *ctx; 2238 int ret; 2239 2240 ctx = nfs4_state_find_open_context(state); 2241 if (IS_ERR(ctx)) 2242 return -EAGAIN; 2243 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2244 nfs_state_clear_open_state_flags(state); 2245 ret = nfs4_do_open_reclaim(ctx, state); 2246 put_nfs_open_context(ctx); 2247 return ret; 2248 } 2249 2250 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err) 2251 { 2252 switch (err) { 2253 default: 2254 printk(KERN_ERR "NFS: %s: unhandled error " 2255 "%d.\n", __func__, err); 2256 fallthrough; 2257 case 0: 2258 case -ENOENT: 2259 case -EAGAIN: 2260 case -ESTALE: 2261 case -ETIMEDOUT: 2262 break; 2263 case -NFS4ERR_BADSESSION: 2264 case -NFS4ERR_BADSLOT: 2265 case -NFS4ERR_BAD_HIGH_SLOT: 2266 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 2267 case -NFS4ERR_DEADSESSION: 2268 return -EAGAIN; 2269 case -NFS4ERR_STALE_CLIENTID: 2270 case -NFS4ERR_STALE_STATEID: 2271 /* Don't recall a delegation if it was lost */ 2272 nfs4_schedule_lease_recovery(server->nfs_client); 2273 return -EAGAIN; 2274 case -NFS4ERR_MOVED: 2275 nfs4_schedule_migration_recovery(server); 2276 return -EAGAIN; 2277 case -NFS4ERR_LEASE_MOVED: 2278 nfs4_schedule_lease_moved_recovery(server->nfs_client); 2279 return -EAGAIN; 2280 case -NFS4ERR_DELEG_REVOKED: 2281 case -NFS4ERR_ADMIN_REVOKED: 2282 case -NFS4ERR_EXPIRED: 2283 case -NFS4ERR_BAD_STATEID: 2284 case -NFS4ERR_OPENMODE: 2285 nfs_inode_find_state_and_recover(state->inode, 2286 stateid); 2287 nfs4_schedule_stateid_recovery(server, state); 2288 return -EAGAIN; 2289 case -NFS4ERR_DELAY: 2290 case -NFS4ERR_GRACE: 2291 ssleep(1); 2292 return -EAGAIN; 2293 case -ENOMEM: 2294 case -NFS4ERR_DENIED: 2295 if (fl) { 2296 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner; 2297 if (lsp) 2298 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2299 } 2300 return 0; 2301 } 2302 return err; 2303 } 2304 2305 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, 2306 struct nfs4_state *state, const nfs4_stateid *stateid) 2307 { 2308 struct nfs_server *server = NFS_SERVER(state->inode); 2309 struct nfs4_opendata *opendata; 2310 int err = 0; 2311 2312 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2313 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 2314 if (IS_ERR(opendata)) 2315 return PTR_ERR(opendata); 2316 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 2317 if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) { 2318 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 2319 if (err) 2320 goto out; 2321 } 2322 if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) { 2323 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 2324 if (err) 2325 goto out; 2326 } 2327 if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) { 2328 err = nfs4_open_recover_helper(opendata, FMODE_READ); 2329 if (err) 2330 goto out; 2331 } 2332 nfs_state_clear_delegation(state); 2333 out: 2334 nfs4_opendata_put(opendata); 2335 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); 2336 } 2337 2338 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 2339 { 2340 struct nfs4_opendata *data = calldata; 2341 2342 nfs4_setup_sequence(data->o_arg.server->nfs_client, 2343 &data->c_arg.seq_args, &data->c_res.seq_res, task); 2344 } 2345 2346 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 2347 { 2348 struct nfs4_opendata *data = calldata; 2349 2350 nfs40_sequence_done(task, &data->c_res.seq_res); 2351 2352 data->rpc_status = task->tk_status; 2353 if (data->rpc_status == 0) { 2354 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 2355 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2356 renew_lease(data->o_res.server, data->timestamp); 2357 data->rpc_done = true; 2358 } 2359 } 2360 2361 static void nfs4_open_confirm_release(void *calldata) 2362 { 2363 struct nfs4_opendata *data = calldata; 2364 struct nfs4_state *state = NULL; 2365 2366 /* If this request hasn't been cancelled, do nothing */ 2367 if (!data->cancelled) 2368 goto out_free; 2369 /* In case of error, no cleanup! */ 2370 if (!data->rpc_done) 2371 goto out_free; 2372 state = nfs4_opendata_to_nfs4_state(data); 2373 if (!IS_ERR(state)) 2374 nfs4_close_state(state, data->o_arg.fmode); 2375 out_free: 2376 nfs4_opendata_put(data); 2377 } 2378 2379 static const struct rpc_call_ops nfs4_open_confirm_ops = { 2380 .rpc_call_prepare = nfs4_open_confirm_prepare, 2381 .rpc_call_done = nfs4_open_confirm_done, 2382 .rpc_release = nfs4_open_confirm_release, 2383 }; 2384 2385 /* 2386 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 2387 */ 2388 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 2389 { 2390 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 2391 struct rpc_task *task; 2392 struct rpc_message msg = { 2393 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 2394 .rpc_argp = &data->c_arg, 2395 .rpc_resp = &data->c_res, 2396 .rpc_cred = data->owner->so_cred, 2397 }; 2398 struct rpc_task_setup task_setup_data = { 2399 .rpc_client = server->client, 2400 .rpc_message = &msg, 2401 .callback_ops = &nfs4_open_confirm_ops, 2402 .callback_data = data, 2403 .workqueue = nfsiod_workqueue, 2404 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2405 }; 2406 int status; 2407 2408 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1, 2409 data->is_recover); 2410 kref_get(&data->kref); 2411 data->rpc_done = false; 2412 data->rpc_status = 0; 2413 data->timestamp = jiffies; 2414 task = rpc_run_task(&task_setup_data); 2415 if (IS_ERR(task)) 2416 return PTR_ERR(task); 2417 status = rpc_wait_for_completion_task(task); 2418 if (status != 0) { 2419 data->cancelled = true; 2420 smp_wmb(); 2421 } else 2422 status = data->rpc_status; 2423 rpc_put_task(task); 2424 return status; 2425 } 2426 2427 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 2428 { 2429 struct nfs4_opendata *data = calldata; 2430 struct nfs4_state_owner *sp = data->owner; 2431 struct nfs_client *clp = sp->so_server->nfs_client; 2432 enum open_claim_type4 claim = data->o_arg.claim; 2433 2434 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 2435 goto out_wait; 2436 /* 2437 * Check if we still need to send an OPEN call, or if we can use 2438 * a delegation instead. 2439 */ 2440 if (data->state != NULL) { 2441 struct nfs_delegation *delegation; 2442 2443 if (can_open_cached(data->state, data->o_arg.fmode, 2444 data->o_arg.open_flags, claim)) 2445 goto out_no_action; 2446 rcu_read_lock(); 2447 delegation = nfs4_get_valid_delegation(data->state->inode); 2448 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 2449 goto unlock_no_action; 2450 rcu_read_unlock(); 2451 } 2452 /* Update client id. */ 2453 data->o_arg.clientid = clp->cl_clientid; 2454 switch (claim) { 2455 default: 2456 break; 2457 case NFS4_OPEN_CLAIM_PREVIOUS: 2458 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2459 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2460 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2461 fallthrough; 2462 case NFS4_OPEN_CLAIM_FH: 2463 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2464 } 2465 data->timestamp = jiffies; 2466 if (nfs4_setup_sequence(data->o_arg.server->nfs_client, 2467 &data->o_arg.seq_args, 2468 &data->o_res.seq_res, 2469 task) != 0) 2470 nfs_release_seqid(data->o_arg.seqid); 2471 2472 /* Set the create mode (note dependency on the session type) */ 2473 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2474 if (data->o_arg.open_flags & O_EXCL) { 2475 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2476 if (nfs4_has_persistent_session(clp)) 2477 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2478 else if (clp->cl_mvops->minor_version > 0) 2479 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2480 } 2481 return; 2482 unlock_no_action: 2483 trace_nfs4_cached_open(data->state); 2484 rcu_read_unlock(); 2485 out_no_action: 2486 task->tk_action = NULL; 2487 out_wait: 2488 nfs4_sequence_done(task, &data->o_res.seq_res); 2489 } 2490 2491 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2492 { 2493 struct nfs4_opendata *data = calldata; 2494 2495 data->rpc_status = task->tk_status; 2496 2497 if (!nfs4_sequence_process(task, &data->o_res.seq_res)) 2498 return; 2499 2500 if (task->tk_status == 0) { 2501 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2502 switch (data->o_res.f_attr->mode & S_IFMT) { 2503 case S_IFREG: 2504 break; 2505 case S_IFLNK: 2506 data->rpc_status = -ELOOP; 2507 break; 2508 case S_IFDIR: 2509 data->rpc_status = -EISDIR; 2510 break; 2511 default: 2512 data->rpc_status = -ENOTDIR; 2513 } 2514 } 2515 renew_lease(data->o_res.server, data->timestamp); 2516 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2517 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2518 } 2519 data->rpc_done = true; 2520 } 2521 2522 static void nfs4_open_release(void *calldata) 2523 { 2524 struct nfs4_opendata *data = calldata; 2525 struct nfs4_state *state = NULL; 2526 2527 /* If this request hasn't been cancelled, do nothing */ 2528 if (!data->cancelled) 2529 goto out_free; 2530 /* In case of error, no cleanup! */ 2531 if (data->rpc_status != 0 || !data->rpc_done) 2532 goto out_free; 2533 /* In case we need an open_confirm, no cleanup! */ 2534 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2535 goto out_free; 2536 state = nfs4_opendata_to_nfs4_state(data); 2537 if (!IS_ERR(state)) 2538 nfs4_close_state(state, data->o_arg.fmode); 2539 out_free: 2540 nfs4_opendata_put(data); 2541 } 2542 2543 static const struct rpc_call_ops nfs4_open_ops = { 2544 .rpc_call_prepare = nfs4_open_prepare, 2545 .rpc_call_done = nfs4_open_done, 2546 .rpc_release = nfs4_open_release, 2547 }; 2548 2549 static int nfs4_run_open_task(struct nfs4_opendata *data, 2550 struct nfs_open_context *ctx) 2551 { 2552 struct inode *dir = d_inode(data->dir); 2553 struct nfs_server *server = NFS_SERVER(dir); 2554 struct nfs_openargs *o_arg = &data->o_arg; 2555 struct nfs_openres *o_res = &data->o_res; 2556 struct rpc_task *task; 2557 struct rpc_message msg = { 2558 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2559 .rpc_argp = o_arg, 2560 .rpc_resp = o_res, 2561 .rpc_cred = data->owner->so_cred, 2562 }; 2563 struct rpc_task_setup task_setup_data = { 2564 .rpc_client = server->client, 2565 .rpc_message = &msg, 2566 .callback_ops = &nfs4_open_ops, 2567 .callback_data = data, 2568 .workqueue = nfsiod_workqueue, 2569 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 2570 }; 2571 int status; 2572 2573 if (server->nfs_client->cl_minorversion) 2574 task_setup_data.flags |= RPC_TASK_MOVEABLE; 2575 2576 kref_get(&data->kref); 2577 data->rpc_done = false; 2578 data->rpc_status = 0; 2579 data->cancelled = false; 2580 data->is_recover = false; 2581 if (!ctx) { 2582 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1); 2583 data->is_recover = true; 2584 task_setup_data.flags |= RPC_TASK_TIMEOUT; 2585 } else { 2586 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0); 2587 pnfs_lgopen_prepare(data, ctx); 2588 } 2589 task = rpc_run_task(&task_setup_data); 2590 if (IS_ERR(task)) 2591 return PTR_ERR(task); 2592 status = rpc_wait_for_completion_task(task); 2593 if (status != 0) { 2594 data->cancelled = true; 2595 smp_wmb(); 2596 } else 2597 status = data->rpc_status; 2598 rpc_put_task(task); 2599 2600 return status; 2601 } 2602 2603 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2604 { 2605 struct inode *dir = d_inode(data->dir); 2606 struct nfs_openres *o_res = &data->o_res; 2607 int status; 2608 2609 status = nfs4_run_open_task(data, NULL); 2610 if (status != 0 || !data->rpc_done) 2611 return status; 2612 2613 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2614 2615 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) 2616 status = _nfs4_proc_open_confirm(data); 2617 2618 return status; 2619 } 2620 2621 /* 2622 * Additional permission checks in order to distinguish between an 2623 * open for read, and an open for execute. This works around the 2624 * fact that NFSv4 OPEN treats read and execute permissions as being 2625 * the same. 2626 * Note that in the non-execute case, we want to turn off permission 2627 * checking if we just created a new file (POSIX open() semantics). 2628 */ 2629 static int nfs4_opendata_access(const struct cred *cred, 2630 struct nfs4_opendata *opendata, 2631 struct nfs4_state *state, fmode_t fmode, 2632 int openflags) 2633 { 2634 struct nfs_access_entry cache; 2635 u32 mask, flags; 2636 2637 /* access call failed or for some reason the server doesn't 2638 * support any access modes -- defer access call until later */ 2639 if (opendata->o_res.access_supported == 0) 2640 return 0; 2641 2642 mask = 0; 2643 /* 2644 * Use openflags to check for exec, because fmode won't 2645 * always have FMODE_EXEC set when file open for exec. 2646 */ 2647 if (openflags & __FMODE_EXEC) { 2648 /* ONLY check for exec rights */ 2649 if (S_ISDIR(state->inode->i_mode)) 2650 mask = NFS4_ACCESS_LOOKUP; 2651 else 2652 mask = NFS4_ACCESS_EXECUTE; 2653 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2654 mask = NFS4_ACCESS_READ; 2655 2656 cache.cred = cred; 2657 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2658 nfs_access_add_cache(state->inode, &cache); 2659 2660 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP; 2661 if ((mask & ~cache.mask & flags) == 0) 2662 return 0; 2663 2664 return -EACCES; 2665 } 2666 2667 /* 2668 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2669 */ 2670 static int _nfs4_proc_open(struct nfs4_opendata *data, 2671 struct nfs_open_context *ctx) 2672 { 2673 struct inode *dir = d_inode(data->dir); 2674 struct nfs_server *server = NFS_SERVER(dir); 2675 struct nfs_openargs *o_arg = &data->o_arg; 2676 struct nfs_openres *o_res = &data->o_res; 2677 int status; 2678 2679 status = nfs4_run_open_task(data, ctx); 2680 if (!data->rpc_done) 2681 return status; 2682 if (status != 0) { 2683 if (status == -NFS4ERR_BADNAME && 2684 !(o_arg->open_flags & O_CREAT)) 2685 return -ENOENT; 2686 return status; 2687 } 2688 2689 nfs_fattr_map_and_free_names(server, &data->f_attr); 2690 2691 if (o_arg->open_flags & O_CREAT) { 2692 if (o_arg->open_flags & O_EXCL) 2693 data->file_created = true; 2694 else if (o_res->cinfo.before != o_res->cinfo.after) 2695 data->file_created = true; 2696 if (data->file_created || 2697 inode_peek_iversion_raw(dir) != o_res->cinfo.after) 2698 nfs4_update_changeattr(dir, &o_res->cinfo, 2699 o_res->f_attr->time_start, 2700 NFS_INO_INVALID_DATA); 2701 } 2702 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2703 server->caps &= ~NFS_CAP_POSIX_LOCK; 2704 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2705 status = _nfs4_proc_open_confirm(data); 2706 if (status != 0) 2707 return status; 2708 } 2709 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) { 2710 nfs4_sequence_free_slot(&o_res->seq_res); 2711 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, 2712 o_res->f_label, NULL); 2713 } 2714 return 0; 2715 } 2716 2717 /* 2718 * OPEN_EXPIRED: 2719 * reclaim state on the server after a network partition. 2720 * Assumes caller holds the appropriate lock 2721 */ 2722 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2723 { 2724 struct nfs4_opendata *opendata; 2725 int ret; 2726 2727 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2728 NFS4_OPEN_CLAIM_FH); 2729 if (IS_ERR(opendata)) 2730 return PTR_ERR(opendata); 2731 ret = nfs4_open_recover(opendata, state); 2732 if (ret == -ESTALE) 2733 d_drop(ctx->dentry); 2734 nfs4_opendata_put(opendata); 2735 return ret; 2736 } 2737 2738 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2739 { 2740 struct nfs_server *server = NFS_SERVER(state->inode); 2741 struct nfs4_exception exception = { }; 2742 int err; 2743 2744 do { 2745 err = _nfs4_open_expired(ctx, state); 2746 trace_nfs4_open_expired(ctx, 0, err); 2747 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2748 continue; 2749 switch (err) { 2750 default: 2751 goto out; 2752 case -NFS4ERR_GRACE: 2753 case -NFS4ERR_DELAY: 2754 nfs4_handle_exception(server, err, &exception); 2755 err = 0; 2756 } 2757 } while (exception.retry); 2758 out: 2759 return err; 2760 } 2761 2762 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2763 { 2764 struct nfs_open_context *ctx; 2765 int ret; 2766 2767 ctx = nfs4_state_find_open_context(state); 2768 if (IS_ERR(ctx)) 2769 return -EAGAIN; 2770 ret = nfs4_do_open_expired(ctx, state); 2771 put_nfs_open_context(ctx); 2772 return ret; 2773 } 2774 2775 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state, 2776 const nfs4_stateid *stateid) 2777 { 2778 nfs_remove_bad_delegation(state->inode, stateid); 2779 nfs_state_clear_delegation(state); 2780 } 2781 2782 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2783 { 2784 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2785 nfs_finish_clear_delegation_stateid(state, NULL); 2786 } 2787 2788 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2789 { 2790 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2791 nfs40_clear_delegation_stateid(state); 2792 nfs_state_clear_open_state_flags(state); 2793 return nfs4_open_expired(sp, state); 2794 } 2795 2796 static int nfs40_test_and_free_expired_stateid(struct nfs_server *server, 2797 nfs4_stateid *stateid, 2798 const struct cred *cred) 2799 { 2800 return -NFS4ERR_BAD_STATEID; 2801 } 2802 2803 #if defined(CONFIG_NFS_V4_1) 2804 static int nfs41_test_and_free_expired_stateid(struct nfs_server *server, 2805 nfs4_stateid *stateid, 2806 const struct cred *cred) 2807 { 2808 int status; 2809 2810 switch (stateid->type) { 2811 default: 2812 break; 2813 case NFS4_INVALID_STATEID_TYPE: 2814 case NFS4_SPECIAL_STATEID_TYPE: 2815 return -NFS4ERR_BAD_STATEID; 2816 case NFS4_REVOKED_STATEID_TYPE: 2817 goto out_free; 2818 } 2819 2820 status = nfs41_test_stateid(server, stateid, cred); 2821 switch (status) { 2822 case -NFS4ERR_EXPIRED: 2823 case -NFS4ERR_ADMIN_REVOKED: 2824 case -NFS4ERR_DELEG_REVOKED: 2825 break; 2826 default: 2827 return status; 2828 } 2829 out_free: 2830 /* Ack the revoked state to the server */ 2831 nfs41_free_stateid(server, stateid, cred, true); 2832 return -NFS4ERR_EXPIRED; 2833 } 2834 2835 static int nfs41_check_delegation_stateid(struct nfs4_state *state) 2836 { 2837 struct nfs_server *server = NFS_SERVER(state->inode); 2838 nfs4_stateid stateid; 2839 struct nfs_delegation *delegation; 2840 const struct cred *cred = NULL; 2841 int status, ret = NFS_OK; 2842 2843 /* Get the delegation credential for use by test/free_stateid */ 2844 rcu_read_lock(); 2845 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2846 if (delegation == NULL) { 2847 rcu_read_unlock(); 2848 nfs_state_clear_delegation(state); 2849 return NFS_OK; 2850 } 2851 2852 spin_lock(&delegation->lock); 2853 nfs4_stateid_copy(&stateid, &delegation->stateid); 2854 2855 if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, 2856 &delegation->flags)) { 2857 spin_unlock(&delegation->lock); 2858 rcu_read_unlock(); 2859 return NFS_OK; 2860 } 2861 2862 if (delegation->cred) 2863 cred = get_cred(delegation->cred); 2864 spin_unlock(&delegation->lock); 2865 rcu_read_unlock(); 2866 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred); 2867 trace_nfs4_test_delegation_stateid(state, NULL, status); 2868 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) 2869 nfs_finish_clear_delegation_stateid(state, &stateid); 2870 else 2871 ret = status; 2872 2873 put_cred(cred); 2874 return ret; 2875 } 2876 2877 static void nfs41_delegation_recover_stateid(struct nfs4_state *state) 2878 { 2879 nfs4_stateid tmp; 2880 2881 if (test_bit(NFS_DELEGATED_STATE, &state->flags) && 2882 nfs4_copy_delegation_stateid(state->inode, state->state, 2883 &tmp, NULL) && 2884 nfs4_stateid_match_other(&state->stateid, &tmp)) 2885 nfs_state_set_delegation(state, &tmp, state->state); 2886 else 2887 nfs_state_clear_delegation(state); 2888 } 2889 2890 /** 2891 * nfs41_check_expired_locks - possibly free a lock stateid 2892 * 2893 * @state: NFSv4 state for an inode 2894 * 2895 * Returns NFS_OK if recovery for this stateid is now finished. 2896 * Otherwise a negative NFS4ERR value is returned. 2897 */ 2898 static int nfs41_check_expired_locks(struct nfs4_state *state) 2899 { 2900 int status, ret = NFS_OK; 2901 struct nfs4_lock_state *lsp, *prev = NULL; 2902 struct nfs_server *server = NFS_SERVER(state->inode); 2903 2904 if (!test_bit(LK_STATE_IN_USE, &state->flags)) 2905 goto out; 2906 2907 spin_lock(&state->state_lock); 2908 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 2909 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 2910 const struct cred *cred = lsp->ls_state->owner->so_cred; 2911 2912 refcount_inc(&lsp->ls_count); 2913 spin_unlock(&state->state_lock); 2914 2915 nfs4_put_lock_state(prev); 2916 prev = lsp; 2917 2918 status = nfs41_test_and_free_expired_stateid(server, 2919 &lsp->ls_stateid, 2920 cred); 2921 trace_nfs4_test_lock_stateid(state, lsp, status); 2922 if (status == -NFS4ERR_EXPIRED || 2923 status == -NFS4ERR_BAD_STATEID) { 2924 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 2925 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE; 2926 if (!recover_lost_locks) 2927 set_bit(NFS_LOCK_LOST, &lsp->ls_flags); 2928 } else if (status != NFS_OK) { 2929 ret = status; 2930 nfs4_put_lock_state(prev); 2931 goto out; 2932 } 2933 spin_lock(&state->state_lock); 2934 } 2935 } 2936 spin_unlock(&state->state_lock); 2937 nfs4_put_lock_state(prev); 2938 out: 2939 return ret; 2940 } 2941 2942 /** 2943 * nfs41_check_open_stateid - possibly free an open stateid 2944 * 2945 * @state: NFSv4 state for an inode 2946 * 2947 * Returns NFS_OK if recovery for this stateid is now finished. 2948 * Otherwise a negative NFS4ERR value is returned. 2949 */ 2950 static int nfs41_check_open_stateid(struct nfs4_state *state) 2951 { 2952 struct nfs_server *server = NFS_SERVER(state->inode); 2953 nfs4_stateid *stateid = &state->open_stateid; 2954 const struct cred *cred = state->owner->so_cred; 2955 int status; 2956 2957 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) 2958 return -NFS4ERR_BAD_STATEID; 2959 status = nfs41_test_and_free_expired_stateid(server, stateid, cred); 2960 trace_nfs4_test_open_stateid(state, NULL, status); 2961 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { 2962 nfs_state_clear_open_state_flags(state); 2963 stateid->type = NFS4_INVALID_STATEID_TYPE; 2964 return status; 2965 } 2966 if (nfs_open_stateid_recover_openmode(state)) 2967 return -NFS4ERR_OPENMODE; 2968 return NFS_OK; 2969 } 2970 2971 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2972 { 2973 int status; 2974 2975 status = nfs41_check_delegation_stateid(state); 2976 if (status != NFS_OK) 2977 return status; 2978 nfs41_delegation_recover_stateid(state); 2979 2980 status = nfs41_check_expired_locks(state); 2981 if (status != NFS_OK) 2982 return status; 2983 status = nfs41_check_open_stateid(state); 2984 if (status != NFS_OK) 2985 status = nfs4_open_expired(sp, state); 2986 return status; 2987 } 2988 #endif 2989 2990 /* 2991 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 2992 * fields corresponding to attributes that were used to store the verifier. 2993 * Make sure we clobber those fields in the later setattr call 2994 */ 2995 static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 2996 struct iattr *sattr, struct nfs4_label **label) 2997 { 2998 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask; 2999 __u32 attrset[3]; 3000 unsigned ret; 3001 unsigned i; 3002 3003 for (i = 0; i < ARRAY_SIZE(attrset); i++) { 3004 attrset[i] = opendata->o_res.attrset[i]; 3005 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1) 3006 attrset[i] &= ~bitmask[i]; 3007 } 3008 3009 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ? 3010 sattr->ia_valid : 0; 3011 3012 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) { 3013 if (sattr->ia_valid & ATTR_ATIME_SET) 3014 ret |= ATTR_ATIME_SET; 3015 else 3016 ret |= ATTR_ATIME; 3017 } 3018 3019 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) { 3020 if (sattr->ia_valid & ATTR_MTIME_SET) 3021 ret |= ATTR_MTIME_SET; 3022 else 3023 ret |= ATTR_MTIME; 3024 } 3025 3026 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL)) 3027 *label = NULL; 3028 return ret; 3029 } 3030 3031 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 3032 int flags, struct nfs_open_context *ctx) 3033 { 3034 struct nfs4_state_owner *sp = opendata->owner; 3035 struct nfs_server *server = sp->so_server; 3036 struct dentry *dentry; 3037 struct nfs4_state *state; 3038 fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx); 3039 struct inode *dir = d_inode(opendata->dir); 3040 unsigned long dir_verifier; 3041 unsigned int seq; 3042 int ret; 3043 3044 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 3045 dir_verifier = nfs_save_change_attribute(dir); 3046 3047 ret = _nfs4_proc_open(opendata, ctx); 3048 if (ret != 0) 3049 goto out; 3050 3051 state = _nfs4_opendata_to_nfs4_state(opendata); 3052 ret = PTR_ERR(state); 3053 if (IS_ERR(state)) 3054 goto out; 3055 ctx->state = state; 3056 if (server->caps & NFS_CAP_POSIX_LOCK) 3057 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 3058 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) 3059 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); 3060 3061 dentry = opendata->dentry; 3062 if (d_really_is_negative(dentry)) { 3063 struct dentry *alias; 3064 d_drop(dentry); 3065 alias = d_exact_alias(dentry, state->inode); 3066 if (!alias) 3067 alias = d_splice_alias(igrab(state->inode), dentry); 3068 /* d_splice_alias() can't fail here - it's a non-directory */ 3069 if (alias) { 3070 dput(ctx->dentry); 3071 ctx->dentry = dentry = alias; 3072 } 3073 } 3074 3075 switch(opendata->o_arg.claim) { 3076 default: 3077 break; 3078 case NFS4_OPEN_CLAIM_NULL: 3079 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 3080 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 3081 if (!opendata->rpc_done) 3082 break; 3083 if (opendata->o_res.delegation_type != 0) 3084 dir_verifier = nfs_save_change_attribute(dir); 3085 nfs_set_verifier(dentry, dir_verifier); 3086 } 3087 3088 /* Parse layoutget results before we check for access */ 3089 pnfs_parse_lgopen(state->inode, opendata->lgp, ctx); 3090 3091 ret = nfs4_opendata_access(sp->so_cred, opendata, state, 3092 acc_mode, flags); 3093 if (ret != 0) 3094 goto out; 3095 3096 if (d_inode(dentry) == state->inode) { 3097 nfs_inode_attach_open_context(ctx); 3098 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 3099 nfs4_schedule_stateid_recovery(server, state); 3100 } 3101 3102 out: 3103 if (!opendata->cancelled) 3104 nfs4_sequence_free_slot(&opendata->o_res.seq_res); 3105 return ret; 3106 } 3107 3108 /* 3109 * Returns a referenced nfs4_state 3110 */ 3111 static int _nfs4_do_open(struct inode *dir, 3112 struct nfs_open_context *ctx, 3113 int flags, 3114 const struct nfs4_open_createattrs *c, 3115 int *opened) 3116 { 3117 struct nfs4_state_owner *sp; 3118 struct nfs4_state *state = NULL; 3119 struct nfs_server *server = NFS_SERVER(dir); 3120 struct nfs4_opendata *opendata; 3121 struct dentry *dentry = ctx->dentry; 3122 const struct cred *cred = ctx->cred; 3123 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 3124 fmode_t fmode = _nfs4_ctx_to_openmode(ctx); 3125 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 3126 struct iattr *sattr = c->sattr; 3127 struct nfs4_label *label = c->label; 3128 struct nfs4_label *olabel = NULL; 3129 int status; 3130 3131 /* Protect against reboot recovery conflicts */ 3132 status = -ENOMEM; 3133 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 3134 if (sp == NULL) { 3135 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 3136 goto out_err; 3137 } 3138 status = nfs4_client_recover_expired_lease(server->nfs_client); 3139 if (status != 0) 3140 goto err_put_state_owner; 3141 if (d_really_is_positive(dentry)) 3142 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 3143 status = -ENOMEM; 3144 if (d_really_is_positive(dentry)) 3145 claim = NFS4_OPEN_CLAIM_FH; 3146 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, 3147 c, claim, GFP_KERNEL); 3148 if (opendata == NULL) 3149 goto err_put_state_owner; 3150 3151 if (label) { 3152 olabel = nfs4_label_alloc(server, GFP_KERNEL); 3153 if (IS_ERR(olabel)) { 3154 status = PTR_ERR(olabel); 3155 goto err_opendata_put; 3156 } 3157 } 3158 3159 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 3160 if (!opendata->f_attr.mdsthreshold) { 3161 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 3162 if (!opendata->f_attr.mdsthreshold) 3163 goto err_free_label; 3164 } 3165 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 3166 } 3167 if (d_really_is_positive(dentry)) 3168 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 3169 3170 status = _nfs4_open_and_get_state(opendata, flags, ctx); 3171 if (status != 0) 3172 goto err_free_label; 3173 state = ctx->state; 3174 3175 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 3176 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 3177 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label); 3178 /* 3179 * send create attributes which was not set by open 3180 * with an extra setattr. 3181 */ 3182 if (attrs || label) { 3183 unsigned ia_old = sattr->ia_valid; 3184 3185 sattr->ia_valid = attrs; 3186 nfs_fattr_init(opendata->o_res.f_attr); 3187 status = nfs4_do_setattr(state->inode, cred, 3188 opendata->o_res.f_attr, sattr, 3189 ctx, label, olabel); 3190 if (status == 0) { 3191 nfs_setattr_update_inode(state->inode, sattr, 3192 opendata->o_res.f_attr); 3193 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 3194 } 3195 sattr->ia_valid = ia_old; 3196 } 3197 } 3198 if (opened && opendata->file_created) 3199 *opened = 1; 3200 3201 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 3202 *ctx_th = opendata->f_attr.mdsthreshold; 3203 opendata->f_attr.mdsthreshold = NULL; 3204 } 3205 3206 nfs4_label_free(olabel); 3207 3208 nfs4_opendata_put(opendata); 3209 nfs4_put_state_owner(sp); 3210 return 0; 3211 err_free_label: 3212 nfs4_label_free(olabel); 3213 err_opendata_put: 3214 nfs4_opendata_put(opendata); 3215 err_put_state_owner: 3216 nfs4_put_state_owner(sp); 3217 out_err: 3218 return status; 3219 } 3220 3221 3222 static struct nfs4_state *nfs4_do_open(struct inode *dir, 3223 struct nfs_open_context *ctx, 3224 int flags, 3225 struct iattr *sattr, 3226 struct nfs4_label *label, 3227 int *opened) 3228 { 3229 struct nfs_server *server = NFS_SERVER(dir); 3230 struct nfs4_exception exception = { 3231 .interruptible = true, 3232 }; 3233 struct nfs4_state *res; 3234 struct nfs4_open_createattrs c = { 3235 .label = label, 3236 .sattr = sattr, 3237 .verf = { 3238 [0] = (__u32)jiffies, 3239 [1] = (__u32)current->pid, 3240 }, 3241 }; 3242 int status; 3243 3244 do { 3245 status = _nfs4_do_open(dir, ctx, flags, &c, opened); 3246 res = ctx->state; 3247 trace_nfs4_open_file(ctx, flags, status); 3248 if (status == 0) 3249 break; 3250 /* NOTE: BAD_SEQID means the server and client disagree about the 3251 * book-keeping w.r.t. state-changing operations 3252 * (OPEN/CLOSE/LOCK/LOCKU...) 3253 * It is actually a sign of a bug on the client or on the server. 3254 * 3255 * If we receive a BAD_SEQID error in the particular case of 3256 * doing an OPEN, we assume that nfs_increment_open_seqid() will 3257 * have unhashed the old state_owner for us, and that we can 3258 * therefore safely retry using a new one. We should still warn 3259 * the user though... 3260 */ 3261 if (status == -NFS4ERR_BAD_SEQID) { 3262 pr_warn_ratelimited("NFS: v4 server %s " 3263 " returned a bad sequence-id error!\n", 3264 NFS_SERVER(dir)->nfs_client->cl_hostname); 3265 exception.retry = 1; 3266 continue; 3267 } 3268 /* 3269 * BAD_STATEID on OPEN means that the server cancelled our 3270 * state before it received the OPEN_CONFIRM. 3271 * Recover by retrying the request as per the discussion 3272 * on Page 181 of RFC3530. 3273 */ 3274 if (status == -NFS4ERR_BAD_STATEID) { 3275 exception.retry = 1; 3276 continue; 3277 } 3278 if (status == -NFS4ERR_EXPIRED) { 3279 nfs4_schedule_lease_recovery(server->nfs_client); 3280 exception.retry = 1; 3281 continue; 3282 } 3283 if (status == -EAGAIN) { 3284 /* We must have found a delegation */ 3285 exception.retry = 1; 3286 continue; 3287 } 3288 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 3289 continue; 3290 res = ERR_PTR(nfs4_handle_exception(server, 3291 status, &exception)); 3292 } while (exception.retry); 3293 return res; 3294 } 3295 3296 static int _nfs4_do_setattr(struct inode *inode, 3297 struct nfs_setattrargs *arg, 3298 struct nfs_setattrres *res, 3299 const struct cred *cred, 3300 struct nfs_open_context *ctx) 3301 { 3302 struct nfs_server *server = NFS_SERVER(inode); 3303 struct rpc_message msg = { 3304 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 3305 .rpc_argp = arg, 3306 .rpc_resp = res, 3307 .rpc_cred = cred, 3308 }; 3309 const struct cred *delegation_cred = NULL; 3310 unsigned long timestamp = jiffies; 3311 bool truncate; 3312 int status; 3313 3314 nfs_fattr_init(res->fattr); 3315 3316 /* Servers should only apply open mode checks for file size changes */ 3317 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false; 3318 if (!truncate) { 3319 nfs4_inode_make_writeable(inode); 3320 goto zero_stateid; 3321 } 3322 3323 if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { 3324 /* Use that stateid */ 3325 } else if (ctx != NULL && ctx->state) { 3326 struct nfs_lock_context *l_ctx; 3327 if (!nfs4_valid_open_stateid(ctx->state)) 3328 return -EBADF; 3329 l_ctx = nfs_get_lock_context(ctx); 3330 if (IS_ERR(l_ctx)) 3331 return PTR_ERR(l_ctx); 3332 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx, 3333 &arg->stateid, &delegation_cred); 3334 nfs_put_lock_context(l_ctx); 3335 if (status == -EIO) 3336 return -EBADF; 3337 else if (status == -EAGAIN) 3338 goto zero_stateid; 3339 } else { 3340 zero_stateid: 3341 nfs4_stateid_copy(&arg->stateid, &zero_stateid); 3342 } 3343 if (delegation_cred) 3344 msg.rpc_cred = delegation_cred; 3345 3346 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1); 3347 3348 put_cred(delegation_cred); 3349 if (status == 0 && ctx != NULL) 3350 renew_lease(server, timestamp); 3351 trace_nfs4_setattr(inode, &arg->stateid, status); 3352 return status; 3353 } 3354 3355 static int nfs4_do_setattr(struct inode *inode, const struct cred *cred, 3356 struct nfs_fattr *fattr, struct iattr *sattr, 3357 struct nfs_open_context *ctx, struct nfs4_label *ilabel, 3358 struct nfs4_label *olabel) 3359 { 3360 struct nfs_server *server = NFS_SERVER(inode); 3361 __u32 bitmask[NFS4_BITMASK_SZ]; 3362 struct nfs4_state *state = ctx ? ctx->state : NULL; 3363 struct nfs_setattrargs arg = { 3364 .fh = NFS_FH(inode), 3365 .iap = sattr, 3366 .server = server, 3367 .bitmask = bitmask, 3368 .label = ilabel, 3369 }; 3370 struct nfs_setattrres res = { 3371 .fattr = fattr, 3372 .label = olabel, 3373 .server = server, 3374 }; 3375 struct nfs4_exception exception = { 3376 .state = state, 3377 .inode = inode, 3378 .stateid = &arg.stateid, 3379 }; 3380 unsigned long adjust_flags = NFS_INO_INVALID_CHANGE; 3381 int err; 3382 3383 if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID)) 3384 adjust_flags |= NFS_INO_INVALID_MODE; 3385 if (sattr->ia_valid & (ATTR_UID | ATTR_GID)) 3386 adjust_flags |= NFS_INO_INVALID_OTHER; 3387 3388 do { 3389 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, olabel), 3390 inode, adjust_flags); 3391 3392 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx); 3393 switch (err) { 3394 case -NFS4ERR_OPENMODE: 3395 if (!(sattr->ia_valid & ATTR_SIZE)) { 3396 pr_warn_once("NFSv4: server %s is incorrectly " 3397 "applying open mode checks to " 3398 "a SETATTR that is not " 3399 "changing file size.\n", 3400 server->nfs_client->cl_hostname); 3401 } 3402 if (state && !(state->state & FMODE_WRITE)) { 3403 err = -EBADF; 3404 if (sattr->ia_valid & ATTR_OPEN) 3405 err = -EACCES; 3406 goto out; 3407 } 3408 } 3409 err = nfs4_handle_exception(server, err, &exception); 3410 } while (exception.retry); 3411 out: 3412 return err; 3413 } 3414 3415 static bool 3416 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 3417 { 3418 if (inode == NULL || !nfs_have_layout(inode)) 3419 return false; 3420 3421 return pnfs_wait_on_layoutreturn(inode, task); 3422 } 3423 3424 /* 3425 * Update the seqid of an open stateid 3426 */ 3427 static void nfs4_sync_open_stateid(nfs4_stateid *dst, 3428 struct nfs4_state *state) 3429 { 3430 __be32 seqid_open; 3431 u32 dst_seqid; 3432 int seq; 3433 3434 for (;;) { 3435 if (!nfs4_valid_open_stateid(state)) 3436 break; 3437 seq = read_seqbegin(&state->seqlock); 3438 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3439 nfs4_stateid_copy(dst, &state->open_stateid); 3440 if (read_seqretry(&state->seqlock, seq)) 3441 continue; 3442 break; 3443 } 3444 seqid_open = state->open_stateid.seqid; 3445 if (read_seqretry(&state->seqlock, seq)) 3446 continue; 3447 3448 dst_seqid = be32_to_cpu(dst->seqid); 3449 if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0) 3450 dst->seqid = seqid_open; 3451 break; 3452 } 3453 } 3454 3455 /* 3456 * Update the seqid of an open stateid after receiving 3457 * NFS4ERR_OLD_STATEID 3458 */ 3459 static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, 3460 struct nfs4_state *state) 3461 { 3462 __be32 seqid_open; 3463 u32 dst_seqid; 3464 bool ret; 3465 int seq, status = -EAGAIN; 3466 DEFINE_WAIT(wait); 3467 3468 for (;;) { 3469 ret = false; 3470 if (!nfs4_valid_open_stateid(state)) 3471 break; 3472 seq = read_seqbegin(&state->seqlock); 3473 if (!nfs4_state_match_open_stateid_other(state, dst)) { 3474 if (read_seqretry(&state->seqlock, seq)) 3475 continue; 3476 break; 3477 } 3478 3479 write_seqlock(&state->seqlock); 3480 seqid_open = state->open_stateid.seqid; 3481 3482 dst_seqid = be32_to_cpu(dst->seqid); 3483 3484 /* Did another OPEN bump the state's seqid? try again: */ 3485 if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) { 3486 dst->seqid = seqid_open; 3487 write_sequnlock(&state->seqlock); 3488 ret = true; 3489 break; 3490 } 3491 3492 /* server says we're behind but we haven't seen the update yet */ 3493 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags); 3494 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE); 3495 write_sequnlock(&state->seqlock); 3496 trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); 3497 3498 if (fatal_signal_pending(current)) 3499 status = -EINTR; 3500 else 3501 if (schedule_timeout(5*HZ) != 0) 3502 status = 0; 3503 3504 finish_wait(&state->waitq, &wait); 3505 3506 if (!status) 3507 continue; 3508 if (status == -EINTR) 3509 break; 3510 3511 /* we slept the whole 5 seconds, we must have lost a seqid */ 3512 dst->seqid = cpu_to_be32(dst_seqid + 1); 3513 ret = true; 3514 break; 3515 } 3516 3517 return ret; 3518 } 3519 3520 struct nfs4_closedata { 3521 struct inode *inode; 3522 struct nfs4_state *state; 3523 struct nfs_closeargs arg; 3524 struct nfs_closeres res; 3525 struct { 3526 struct nfs4_layoutreturn_args arg; 3527 struct nfs4_layoutreturn_res res; 3528 struct nfs4_xdr_opaque_data ld_private; 3529 u32 roc_barrier; 3530 bool roc; 3531 } lr; 3532 struct nfs_fattr fattr; 3533 unsigned long timestamp; 3534 }; 3535 3536 static void nfs4_free_closedata(void *data) 3537 { 3538 struct nfs4_closedata *calldata = data; 3539 struct nfs4_state_owner *sp = calldata->state->owner; 3540 struct super_block *sb = calldata->state->inode->i_sb; 3541 3542 if (calldata->lr.roc) 3543 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res, 3544 calldata->res.lr_ret); 3545 nfs4_put_open_state(calldata->state); 3546 nfs_free_seqid(calldata->arg.seqid); 3547 nfs4_put_state_owner(sp); 3548 nfs_sb_deactive(sb); 3549 kfree(calldata); 3550 } 3551 3552 static void nfs4_close_done(struct rpc_task *task, void *data) 3553 { 3554 struct nfs4_closedata *calldata = data; 3555 struct nfs4_state *state = calldata->state; 3556 struct nfs_server *server = NFS_SERVER(calldata->inode); 3557 nfs4_stateid *res_stateid = NULL; 3558 struct nfs4_exception exception = { 3559 .state = state, 3560 .inode = calldata->inode, 3561 .stateid = &calldata->arg.stateid, 3562 }; 3563 3564 dprintk("%s: begin!\n", __func__); 3565 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 3566 return; 3567 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 3568 3569 /* Handle Layoutreturn errors */ 3570 if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res, 3571 &calldata->res.lr_ret) == -EAGAIN) 3572 goto out_restart; 3573 3574 /* hmm. we are done with the inode, and in the process of freeing 3575 * the state_owner. we keep this around to process errors 3576 */ 3577 switch (task->tk_status) { 3578 case 0: 3579 res_stateid = &calldata->res.stateid; 3580 renew_lease(server, calldata->timestamp); 3581 break; 3582 case -NFS4ERR_ACCESS: 3583 if (calldata->arg.bitmask != NULL) { 3584 calldata->arg.bitmask = NULL; 3585 calldata->res.fattr = NULL; 3586 goto out_restart; 3587 3588 } 3589 break; 3590 case -NFS4ERR_OLD_STATEID: 3591 /* Did we race with OPEN? */ 3592 if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid, 3593 state)) 3594 goto out_restart; 3595 goto out_release; 3596 case -NFS4ERR_ADMIN_REVOKED: 3597 case -NFS4ERR_STALE_STATEID: 3598 case -NFS4ERR_EXPIRED: 3599 nfs4_free_revoked_stateid(server, 3600 &calldata->arg.stateid, 3601 task->tk_msg.rpc_cred); 3602 fallthrough; 3603 case -NFS4ERR_BAD_STATEID: 3604 if (calldata->arg.fmode == 0) 3605 break; 3606 fallthrough; 3607 default: 3608 task->tk_status = nfs4_async_handle_exception(task, 3609 server, task->tk_status, &exception); 3610 if (exception.retry) 3611 goto out_restart; 3612 } 3613 nfs_clear_open_stateid(state, &calldata->arg.stateid, 3614 res_stateid, calldata->arg.fmode); 3615 out_release: 3616 task->tk_status = 0; 3617 nfs_release_seqid(calldata->arg.seqid); 3618 nfs_refresh_inode(calldata->inode, &calldata->fattr); 3619 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 3620 return; 3621 out_restart: 3622 task->tk_status = 0; 3623 rpc_restart_call_prepare(task); 3624 goto out_release; 3625 } 3626 3627 static void nfs4_close_prepare(struct rpc_task *task, void *data) 3628 { 3629 struct nfs4_closedata *calldata = data; 3630 struct nfs4_state *state = calldata->state; 3631 struct inode *inode = calldata->inode; 3632 struct nfs_server *server = NFS_SERVER(inode); 3633 struct pnfs_layout_hdr *lo; 3634 bool is_rdonly, is_wronly, is_rdwr; 3635 int call_close = 0; 3636 3637 dprintk("%s: begin!\n", __func__); 3638 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 3639 goto out_wait; 3640 3641 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 3642 spin_lock(&state->owner->so_lock); 3643 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 3644 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 3645 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 3646 /* Calculate the change in open mode */ 3647 calldata->arg.fmode = 0; 3648 if (state->n_rdwr == 0) { 3649 if (state->n_rdonly == 0) 3650 call_close |= is_rdonly; 3651 else if (is_rdonly) 3652 calldata->arg.fmode |= FMODE_READ; 3653 if (state->n_wronly == 0) 3654 call_close |= is_wronly; 3655 else if (is_wronly) 3656 calldata->arg.fmode |= FMODE_WRITE; 3657 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) 3658 call_close |= is_rdwr; 3659 } else if (is_rdwr) 3660 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 3661 3662 nfs4_sync_open_stateid(&calldata->arg.stateid, state); 3663 if (!nfs4_valid_open_stateid(state)) 3664 call_close = 0; 3665 spin_unlock(&state->owner->so_lock); 3666 3667 if (!call_close) { 3668 /* Note: exit _without_ calling nfs4_close_done */ 3669 goto out_no_action; 3670 } 3671 3672 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) { 3673 nfs_release_seqid(calldata->arg.seqid); 3674 goto out_wait; 3675 } 3676 3677 lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL; 3678 if (lo && !pnfs_layout_is_valid(lo)) { 3679 calldata->arg.lr_args = NULL; 3680 calldata->res.lr_res = NULL; 3681 } 3682 3683 if (calldata->arg.fmode == 0) 3684 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 3685 3686 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) { 3687 /* Close-to-open cache consistency revalidation */ 3688 if (!nfs4_have_delegation(inode, FMODE_READ)) { 3689 nfs4_bitmask_set(calldata->arg.bitmask_store, 3690 server->cache_consistency_bitmask, 3691 inode, server, NULL); 3692 calldata->arg.bitmask = calldata->arg.bitmask_store; 3693 } else 3694 calldata->arg.bitmask = NULL; 3695 } 3696 3697 calldata->arg.share_access = 3698 nfs4_map_atomic_open_share(NFS_SERVER(inode), 3699 calldata->arg.fmode, 0); 3700 3701 if (calldata->res.fattr == NULL) 3702 calldata->arg.bitmask = NULL; 3703 else if (calldata->arg.bitmask == NULL) 3704 calldata->res.fattr = NULL; 3705 calldata->timestamp = jiffies; 3706 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client, 3707 &calldata->arg.seq_args, 3708 &calldata->res.seq_res, 3709 task) != 0) 3710 nfs_release_seqid(calldata->arg.seqid); 3711 dprintk("%s: done!\n", __func__); 3712 return; 3713 out_no_action: 3714 task->tk_action = NULL; 3715 out_wait: 3716 nfs4_sequence_done(task, &calldata->res.seq_res); 3717 } 3718 3719 static const struct rpc_call_ops nfs4_close_ops = { 3720 .rpc_call_prepare = nfs4_close_prepare, 3721 .rpc_call_done = nfs4_close_done, 3722 .rpc_release = nfs4_free_closedata, 3723 }; 3724 3725 /* 3726 * It is possible for data to be read/written from a mem-mapped file 3727 * after the sys_close call (which hits the vfs layer as a flush). 3728 * This means that we can't safely call nfsv4 close on a file until 3729 * the inode is cleared. This in turn means that we are not good 3730 * NFSv4 citizens - we do not indicate to the server to update the file's 3731 * share state even when we are done with one of the three share 3732 * stateid's in the inode. 3733 * 3734 * NOTE: Caller must be holding the sp->so_owner semaphore! 3735 */ 3736 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 3737 { 3738 struct nfs_server *server = NFS_SERVER(state->inode); 3739 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 3740 struct nfs4_closedata *calldata; 3741 struct nfs4_state_owner *sp = state->owner; 3742 struct rpc_task *task; 3743 struct rpc_message msg = { 3744 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 3745 .rpc_cred = state->owner->so_cred, 3746 }; 3747 struct rpc_task_setup task_setup_data = { 3748 .rpc_client = server->client, 3749 .rpc_message = &msg, 3750 .callback_ops = &nfs4_close_ops, 3751 .workqueue = nfsiod_workqueue, 3752 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 3753 }; 3754 int status = -ENOMEM; 3755 3756 if (server->nfs_client->cl_minorversion) 3757 task_setup_data.flags |= RPC_TASK_MOVEABLE; 3758 3759 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 3760 &task_setup_data.rpc_client, &msg); 3761 3762 calldata = kzalloc(sizeof(*calldata), gfp_mask); 3763 if (calldata == NULL) 3764 goto out; 3765 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0); 3766 calldata->inode = state->inode; 3767 calldata->state = state; 3768 calldata->arg.fh = NFS_FH(state->inode); 3769 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state)) 3770 goto out_free_calldata; 3771 /* Serialization for the sequence id */ 3772 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 3773 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 3774 if (IS_ERR(calldata->arg.seqid)) 3775 goto out_free_calldata; 3776 nfs_fattr_init(&calldata->fattr); 3777 calldata->arg.fmode = 0; 3778 calldata->lr.arg.ld_private = &calldata->lr.ld_private; 3779 calldata->res.fattr = &calldata->fattr; 3780 calldata->res.seqid = calldata->arg.seqid; 3781 calldata->res.server = server; 3782 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 3783 calldata->lr.roc = pnfs_roc(state->inode, 3784 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred); 3785 if (calldata->lr.roc) { 3786 calldata->arg.lr_args = &calldata->lr.arg; 3787 calldata->res.lr_res = &calldata->lr.res; 3788 } 3789 nfs_sb_active(calldata->inode->i_sb); 3790 3791 msg.rpc_argp = &calldata->arg; 3792 msg.rpc_resp = &calldata->res; 3793 task_setup_data.callback_data = calldata; 3794 task = rpc_run_task(&task_setup_data); 3795 if (IS_ERR(task)) 3796 return PTR_ERR(task); 3797 status = 0; 3798 if (wait) 3799 status = rpc_wait_for_completion_task(task); 3800 rpc_put_task(task); 3801 return status; 3802 out_free_calldata: 3803 kfree(calldata); 3804 out: 3805 nfs4_put_open_state(state); 3806 nfs4_put_state_owner(sp); 3807 return status; 3808 } 3809 3810 static struct inode * 3811 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3812 int open_flags, struct iattr *attr, int *opened) 3813 { 3814 struct nfs4_state *state; 3815 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; 3816 3817 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3818 3819 /* Protect against concurrent sillydeletes */ 3820 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3821 3822 nfs4_label_release_security(label); 3823 3824 if (IS_ERR(state)) 3825 return ERR_CAST(state); 3826 return state->inode; 3827 } 3828 3829 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3830 { 3831 if (ctx->state == NULL) 3832 return; 3833 if (is_sync) 3834 nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3835 else 3836 nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx)); 3837 } 3838 3839 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3840 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3841 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL) 3842 3843 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3844 { 3845 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion; 3846 struct nfs4_server_caps_arg args = { 3847 .fhandle = fhandle, 3848 .bitmask = bitmask, 3849 }; 3850 struct nfs4_server_caps_res res = {}; 3851 struct rpc_message msg = { 3852 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3853 .rpc_argp = &args, 3854 .rpc_resp = &res, 3855 }; 3856 int status; 3857 int i; 3858 3859 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3860 FATTR4_WORD0_FH_EXPIRE_TYPE | 3861 FATTR4_WORD0_LINK_SUPPORT | 3862 FATTR4_WORD0_SYMLINK_SUPPORT | 3863 FATTR4_WORD0_ACLSUPPORT; 3864 if (minorversion) 3865 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3866 3867 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3868 if (status == 0) { 3869 /* Sanity check the server answers */ 3870 switch (minorversion) { 3871 case 0: 3872 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 3873 res.attr_bitmask[2] = 0; 3874 break; 3875 case 1: 3876 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 3877 break; 3878 case 2: 3879 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 3880 } 3881 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 3882 server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS | 3883 NFS_CAP_SYMLINKS| NFS_CAP_SECURITY_LABEL); 3884 server->fattr_valid = NFS_ATTR_FATTR_V4; 3885 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 3886 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3887 server->caps |= NFS_CAP_ACLS; 3888 if (res.has_links != 0) 3889 server->caps |= NFS_CAP_HARDLINKS; 3890 if (res.has_symlinks != 0) 3891 server->caps |= NFS_CAP_SYMLINKS; 3892 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 3893 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 3894 server->caps |= NFS_CAP_SECURITY_LABEL; 3895 #endif 3896 if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID)) 3897 server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID; 3898 if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE)) 3899 server->fattr_valid &= ~NFS_ATTR_FATTR_MODE; 3900 if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)) 3901 server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK; 3902 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER)) 3903 server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER | 3904 NFS_ATTR_FATTR_OWNER_NAME); 3905 if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)) 3906 server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP | 3907 NFS_ATTR_FATTR_GROUP_NAME); 3908 if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)) 3909 server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED; 3910 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)) 3911 server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME; 3912 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)) 3913 server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME; 3914 if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)) 3915 server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME; 3916 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 3917 sizeof(server->attr_bitmask)); 3918 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 3919 3920 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 3921 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 3922 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 3923 server->cache_consistency_bitmask[2] = 0; 3924 3925 /* Avoid a regression due to buggy server */ 3926 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++) 3927 res.exclcreat_bitmask[i] &= res.attr_bitmask[i]; 3928 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 3929 sizeof(server->exclcreat_bitmask)); 3930 3931 server->acl_bitmask = res.acl_bitmask; 3932 server->fh_expire_type = res.fh_expire_type; 3933 } 3934 3935 return status; 3936 } 3937 3938 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3939 { 3940 struct nfs4_exception exception = { 3941 .interruptible = true, 3942 }; 3943 int err; 3944 do { 3945 err = nfs4_handle_exception(server, 3946 _nfs4_server_capabilities(server, fhandle), 3947 &exception); 3948 } while (exception.retry); 3949 return err; 3950 } 3951 3952 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 3953 struct nfs_fsinfo *info) 3954 { 3955 u32 bitmask[3]; 3956 struct nfs4_lookup_root_arg args = { 3957 .bitmask = bitmask, 3958 }; 3959 struct nfs4_lookup_res res = { 3960 .server = server, 3961 .fattr = info->fattr, 3962 .fh = fhandle, 3963 }; 3964 struct rpc_message msg = { 3965 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 3966 .rpc_argp = &args, 3967 .rpc_resp = &res, 3968 }; 3969 3970 bitmask[0] = nfs4_fattr_bitmap[0]; 3971 bitmask[1] = nfs4_fattr_bitmap[1]; 3972 /* 3973 * Process the label in the upcoming getfattr 3974 */ 3975 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 3976 3977 nfs_fattr_init(info->fattr); 3978 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3979 } 3980 3981 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 3982 struct nfs_fsinfo *info) 3983 { 3984 struct nfs4_exception exception = { 3985 .interruptible = true, 3986 }; 3987 int err; 3988 do { 3989 err = _nfs4_lookup_root(server, fhandle, info); 3990 trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 3991 switch (err) { 3992 case 0: 3993 case -NFS4ERR_WRONGSEC: 3994 goto out; 3995 default: 3996 err = nfs4_handle_exception(server, err, &exception); 3997 } 3998 } while (exception.retry); 3999 out: 4000 return err; 4001 } 4002 4003 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4004 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 4005 { 4006 struct rpc_auth_create_args auth_args = { 4007 .pseudoflavor = flavor, 4008 }; 4009 struct rpc_auth *auth; 4010 4011 auth = rpcauth_create(&auth_args, server->client); 4012 if (IS_ERR(auth)) 4013 return -EACCES; 4014 return nfs4_lookup_root(server, fhandle, info); 4015 } 4016 4017 /* 4018 * Retry pseudoroot lookup with various security flavors. We do this when: 4019 * 4020 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 4021 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 4022 * 4023 * Returns zero on success, or a negative NFS4ERR value, or a 4024 * negative errno value. 4025 */ 4026 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 4027 struct nfs_fsinfo *info) 4028 { 4029 /* Per 3530bis 15.33.5 */ 4030 static const rpc_authflavor_t flav_array[] = { 4031 RPC_AUTH_GSS_KRB5P, 4032 RPC_AUTH_GSS_KRB5I, 4033 RPC_AUTH_GSS_KRB5, 4034 RPC_AUTH_UNIX, /* courtesy */ 4035 RPC_AUTH_NULL, 4036 }; 4037 int status = -EPERM; 4038 size_t i; 4039 4040 if (server->auth_info.flavor_len > 0) { 4041 /* try each flavor specified by user */ 4042 for (i = 0; i < server->auth_info.flavor_len; i++) { 4043 status = nfs4_lookup_root_sec(server, fhandle, info, 4044 server->auth_info.flavors[i]); 4045 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4046 continue; 4047 break; 4048 } 4049 } else { 4050 /* no flavors specified by user, try default list */ 4051 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 4052 status = nfs4_lookup_root_sec(server, fhandle, info, 4053 flav_array[i]); 4054 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 4055 continue; 4056 break; 4057 } 4058 } 4059 4060 /* 4061 * -EACCES could mean that the user doesn't have correct permissions 4062 * to access the mount. It could also mean that we tried to mount 4063 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 4064 * existing mount programs don't handle -EACCES very well so it should 4065 * be mapped to -EPERM instead. 4066 */ 4067 if (status == -EACCES) 4068 status = -EPERM; 4069 return status; 4070 } 4071 4072 /** 4073 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 4074 * @server: initialized nfs_server handle 4075 * @fhandle: we fill in the pseudo-fs root file handle 4076 * @info: we fill in an FSINFO struct 4077 * @auth_probe: probe the auth flavours 4078 * 4079 * Returns zero on success, or a negative errno. 4080 */ 4081 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 4082 struct nfs_fsinfo *info, 4083 bool auth_probe) 4084 { 4085 int status = 0; 4086 4087 if (!auth_probe) 4088 status = nfs4_lookup_root(server, fhandle, info); 4089 4090 if (auth_probe || status == NFS4ERR_WRONGSEC) 4091 status = server->nfs_client->cl_mvops->find_root_sec(server, 4092 fhandle, info); 4093 4094 if (status == 0) 4095 status = nfs4_server_capabilities(server, fhandle); 4096 if (status == 0) 4097 status = nfs4_do_fsinfo(server, fhandle, info); 4098 4099 return nfs4_map_errors(status); 4100 } 4101 4102 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 4103 struct nfs_fsinfo *info) 4104 { 4105 int error; 4106 struct nfs_fattr *fattr = info->fattr; 4107 struct nfs4_label *label = fattr->label; 4108 4109 error = nfs4_server_capabilities(server, mntfh); 4110 if (error < 0) { 4111 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 4112 return error; 4113 } 4114 4115 error = nfs4_proc_getattr(server, mntfh, fattr, label, NULL); 4116 if (error < 0) { 4117 dprintk("nfs4_get_root: getattr error = %d\n", -error); 4118 goto out; 4119 } 4120 4121 if (fattr->valid & NFS_ATTR_FATTR_FSID && 4122 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 4123 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 4124 4125 out: 4126 return error; 4127 } 4128 4129 /* 4130 * Get locations and (maybe) other attributes of a referral. 4131 * Note that we'll actually follow the referral later when 4132 * we detect fsid mismatch in inode revalidation 4133 */ 4134 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 4135 const struct qstr *name, struct nfs_fattr *fattr, 4136 struct nfs_fh *fhandle) 4137 { 4138 int status = -ENOMEM; 4139 struct page *page = NULL; 4140 struct nfs4_fs_locations *locations = NULL; 4141 4142 page = alloc_page(GFP_KERNEL); 4143 if (page == NULL) 4144 goto out; 4145 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 4146 if (locations == NULL) 4147 goto out; 4148 4149 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 4150 if (status != 0) 4151 goto out; 4152 4153 /* 4154 * If the fsid didn't change, this is a migration event, not a 4155 * referral. Cause us to drop into the exception handler, which 4156 * will kick off migration recovery. 4157 */ 4158 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 4159 dprintk("%s: server did not return a different fsid for" 4160 " a referral at %s\n", __func__, name->name); 4161 status = -NFS4ERR_MOVED; 4162 goto out; 4163 } 4164 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 4165 nfs_fixup_referral_attributes(&locations->fattr); 4166 4167 /* replace the lookup nfs_fattr with the locations nfs_fattr */ 4168 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 4169 memset(fhandle, 0, sizeof(struct nfs_fh)); 4170 out: 4171 if (page) 4172 __free_page(page); 4173 kfree(locations); 4174 return status; 4175 } 4176 4177 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4178 struct nfs_fattr *fattr, struct nfs4_label *label, 4179 struct inode *inode) 4180 { 4181 __u32 bitmask[NFS4_BITMASK_SZ]; 4182 struct nfs4_getattr_arg args = { 4183 .fh = fhandle, 4184 .bitmask = bitmask, 4185 }; 4186 struct nfs4_getattr_res res = { 4187 .fattr = fattr, 4188 .label = label, 4189 .server = server, 4190 }; 4191 struct rpc_message msg = { 4192 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4193 .rpc_argp = &args, 4194 .rpc_resp = &res, 4195 }; 4196 unsigned short task_flags = 0; 4197 4198 if (nfs4_has_session(server->nfs_client)) 4199 task_flags = RPC_TASK_MOVEABLE; 4200 4201 /* Is this is an attribute revalidation, subject to softreval? */ 4202 if (inode && (server->flags & NFS_MOUNT_SOFTREVAL)) 4203 task_flags |= RPC_TASK_TIMEOUT; 4204 4205 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, label), inode, 0); 4206 nfs_fattr_init(fattr); 4207 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4208 return nfs4_do_call_sync(server->client, server, &msg, 4209 &args.seq_args, &res.seq_res, task_flags); 4210 } 4211 4212 int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 4213 struct nfs_fattr *fattr, struct nfs4_label *label, 4214 struct inode *inode) 4215 { 4216 struct nfs4_exception exception = { 4217 .interruptible = true, 4218 }; 4219 int err; 4220 do { 4221 err = _nfs4_proc_getattr(server, fhandle, fattr, label, inode); 4222 trace_nfs4_getattr(server, fhandle, fattr, err); 4223 err = nfs4_handle_exception(server, err, 4224 &exception); 4225 } while (exception.retry); 4226 return err; 4227 } 4228 4229 /* 4230 * The file is not closed if it is opened due to the a request to change 4231 * the size of the file. The open call will not be needed once the 4232 * VFS layer lookup-intents are implemented. 4233 * 4234 * Close is called when the inode is destroyed. 4235 * If we haven't opened the file for O_WRONLY, we 4236 * need to in the size_change case to obtain a stateid. 4237 * 4238 * Got race? 4239 * Because OPEN is always done by name in nfsv4, it is 4240 * possible that we opened a different file by the same 4241 * name. We can recognize this race condition, but we 4242 * can't do anything about it besides returning an error. 4243 * 4244 * This will be fixed with VFS changes (lookup-intent). 4245 */ 4246 static int 4247 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 4248 struct iattr *sattr) 4249 { 4250 struct inode *inode = d_inode(dentry); 4251 const struct cred *cred = NULL; 4252 struct nfs_open_context *ctx = NULL; 4253 struct nfs4_label *label = NULL; 4254 int status; 4255 4256 if (pnfs_ld_layoutret_on_setattr(inode) && 4257 sattr->ia_valid & ATTR_SIZE && 4258 sattr->ia_size < i_size_read(inode)) 4259 pnfs_commit_and_return_layout(inode); 4260 4261 nfs_fattr_init(fattr); 4262 4263 /* Deal with open(O_TRUNC) */ 4264 if (sattr->ia_valid & ATTR_OPEN) 4265 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 4266 4267 /* Optimization: if the end result is no change, don't RPC */ 4268 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 4269 return 0; 4270 4271 /* Search for an existing open(O_WRITE) file */ 4272 if (sattr->ia_valid & ATTR_FILE) { 4273 4274 ctx = nfs_file_open_context(sattr->ia_file); 4275 if (ctx) 4276 cred = ctx->cred; 4277 } 4278 4279 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 4280 if (IS_ERR(label)) 4281 return PTR_ERR(label); 4282 4283 /* Return any delegations if we're going to change ACLs */ 4284 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) 4285 nfs4_inode_make_writeable(inode); 4286 4287 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL, label); 4288 if (status == 0) { 4289 nfs_setattr_update_inode(inode, sattr, fattr); 4290 nfs_setsecurity(inode, fattr, label); 4291 } 4292 nfs4_label_free(label); 4293 return status; 4294 } 4295 4296 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 4297 struct dentry *dentry, struct nfs_fh *fhandle, 4298 struct nfs_fattr *fattr, struct nfs4_label *label) 4299 { 4300 struct nfs_server *server = NFS_SERVER(dir); 4301 int status; 4302 struct nfs4_lookup_arg args = { 4303 .bitmask = server->attr_bitmask, 4304 .dir_fh = NFS_FH(dir), 4305 .name = &dentry->d_name, 4306 }; 4307 struct nfs4_lookup_res res = { 4308 .server = server, 4309 .fattr = fattr, 4310 .label = label, 4311 .fh = fhandle, 4312 }; 4313 struct rpc_message msg = { 4314 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 4315 .rpc_argp = &args, 4316 .rpc_resp = &res, 4317 }; 4318 unsigned short task_flags = 0; 4319 4320 if (server->nfs_client->cl_minorversion) 4321 task_flags = RPC_TASK_MOVEABLE; 4322 4323 /* Is this is an attribute revalidation, subject to softreval? */ 4324 if (nfs_lookup_is_soft_revalidate(dentry)) 4325 task_flags |= RPC_TASK_TIMEOUT; 4326 4327 args.bitmask = nfs4_bitmask(server, label); 4328 4329 nfs_fattr_init(fattr); 4330 4331 dprintk("NFS call lookup %pd2\n", dentry); 4332 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 4333 status = nfs4_do_call_sync(clnt, server, &msg, 4334 &args.seq_args, &res.seq_res, task_flags); 4335 dprintk("NFS reply lookup: %d\n", status); 4336 return status; 4337 } 4338 4339 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 4340 { 4341 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 4342 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 4343 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 4344 fattr->nlink = 2; 4345 } 4346 4347 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 4348 struct dentry *dentry, struct nfs_fh *fhandle, 4349 struct nfs_fattr *fattr, struct nfs4_label *label) 4350 { 4351 struct nfs4_exception exception = { 4352 .interruptible = true, 4353 }; 4354 struct rpc_clnt *client = *clnt; 4355 const struct qstr *name = &dentry->d_name; 4356 int err; 4357 do { 4358 err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr, label); 4359 trace_nfs4_lookup(dir, name, err); 4360 switch (err) { 4361 case -NFS4ERR_BADNAME: 4362 err = -ENOENT; 4363 goto out; 4364 case -NFS4ERR_MOVED: 4365 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 4366 if (err == -NFS4ERR_MOVED) 4367 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4368 goto out; 4369 case -NFS4ERR_WRONGSEC: 4370 err = -EPERM; 4371 if (client != *clnt) 4372 goto out; 4373 client = nfs4_negotiate_security(client, dir, name); 4374 if (IS_ERR(client)) 4375 return PTR_ERR(client); 4376 4377 exception.retry = 1; 4378 break; 4379 default: 4380 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 4381 } 4382 } while (exception.retry); 4383 4384 out: 4385 if (err == 0) 4386 *clnt = client; 4387 else if (client != *clnt) 4388 rpc_shutdown_client(client); 4389 4390 return err; 4391 } 4392 4393 static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, 4394 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 4395 struct nfs4_label *label) 4396 { 4397 int status; 4398 struct rpc_clnt *client = NFS_CLIENT(dir); 4399 4400 status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, label); 4401 if (client != NFS_CLIENT(dir)) { 4402 rpc_shutdown_client(client); 4403 nfs_fixup_secinfo_attributes(fattr); 4404 } 4405 return status; 4406 } 4407 4408 struct rpc_clnt * 4409 nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry, 4410 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 4411 { 4412 struct rpc_clnt *client = NFS_CLIENT(dir); 4413 int status; 4414 4415 status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr, NULL); 4416 if (status < 0) 4417 return ERR_PTR(status); 4418 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client; 4419 } 4420 4421 static int _nfs4_proc_lookupp(struct inode *inode, 4422 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 4423 struct nfs4_label *label) 4424 { 4425 struct rpc_clnt *clnt = NFS_CLIENT(inode); 4426 struct nfs_server *server = NFS_SERVER(inode); 4427 int status; 4428 struct nfs4_lookupp_arg args = { 4429 .bitmask = server->attr_bitmask, 4430 .fh = NFS_FH(inode), 4431 }; 4432 struct nfs4_lookupp_res res = { 4433 .server = server, 4434 .fattr = fattr, 4435 .label = label, 4436 .fh = fhandle, 4437 }; 4438 struct rpc_message msg = { 4439 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP], 4440 .rpc_argp = &args, 4441 .rpc_resp = &res, 4442 }; 4443 unsigned short task_flags = 0; 4444 4445 if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL) 4446 task_flags |= RPC_TASK_TIMEOUT; 4447 4448 args.bitmask = nfs4_bitmask(server, label); 4449 4450 nfs_fattr_init(fattr); 4451 4452 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino); 4453 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 4454 &res.seq_res, task_flags); 4455 dprintk("NFS reply lookupp: %d\n", status); 4456 return status; 4457 } 4458 4459 static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle, 4460 struct nfs_fattr *fattr, struct nfs4_label *label) 4461 { 4462 struct nfs4_exception exception = { 4463 .interruptible = true, 4464 }; 4465 int err; 4466 do { 4467 err = _nfs4_proc_lookupp(inode, fhandle, fattr, label); 4468 trace_nfs4_lookupp(inode, err); 4469 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4470 &exception); 4471 } while (exception.retry); 4472 return err; 4473 } 4474 4475 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 4476 { 4477 struct nfs_server *server = NFS_SERVER(inode); 4478 struct nfs4_accessargs args = { 4479 .fh = NFS_FH(inode), 4480 .access = entry->mask, 4481 }; 4482 struct nfs4_accessres res = { 4483 .server = server, 4484 }; 4485 struct rpc_message msg = { 4486 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 4487 .rpc_argp = &args, 4488 .rpc_resp = &res, 4489 .rpc_cred = entry->cred, 4490 }; 4491 int status = 0; 4492 4493 if (!nfs4_have_delegation(inode, FMODE_READ)) { 4494 res.fattr = nfs_alloc_fattr(); 4495 if (res.fattr == NULL) 4496 return -ENOMEM; 4497 args.bitmask = server->cache_consistency_bitmask; 4498 } 4499 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4500 if (!status) { 4501 nfs_access_set_mask(entry, res.access); 4502 if (res.fattr) 4503 nfs_refresh_inode(inode, res.fattr); 4504 } 4505 nfs_free_fattr(res.fattr); 4506 return status; 4507 } 4508 4509 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 4510 { 4511 struct nfs4_exception exception = { 4512 .interruptible = true, 4513 }; 4514 int err; 4515 do { 4516 err = _nfs4_proc_access(inode, entry); 4517 trace_nfs4_access(inode, err); 4518 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4519 &exception); 4520 } while (exception.retry); 4521 return err; 4522 } 4523 4524 /* 4525 * TODO: For the time being, we don't try to get any attributes 4526 * along with any of the zero-copy operations READ, READDIR, 4527 * READLINK, WRITE. 4528 * 4529 * In the case of the first three, we want to put the GETATTR 4530 * after the read-type operation -- this is because it is hard 4531 * to predict the length of a GETATTR response in v4, and thus 4532 * align the READ data correctly. This means that the GETATTR 4533 * may end up partially falling into the page cache, and we should 4534 * shift it into the 'tail' of the xdr_buf before processing. 4535 * To do this efficiently, we need to know the total length 4536 * of data received, which doesn't seem to be available outside 4537 * of the RPC layer. 4538 * 4539 * In the case of WRITE, we also want to put the GETATTR after 4540 * the operation -- in this case because we want to make sure 4541 * we get the post-operation mtime and size. 4542 * 4543 * Both of these changes to the XDR layer would in fact be quite 4544 * minor, but I decided to leave them for a subsequent patch. 4545 */ 4546 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 4547 unsigned int pgbase, unsigned int pglen) 4548 { 4549 struct nfs4_readlink args = { 4550 .fh = NFS_FH(inode), 4551 .pgbase = pgbase, 4552 .pglen = pglen, 4553 .pages = &page, 4554 }; 4555 struct nfs4_readlink_res res; 4556 struct rpc_message msg = { 4557 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 4558 .rpc_argp = &args, 4559 .rpc_resp = &res, 4560 }; 4561 4562 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 4563 } 4564 4565 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 4566 unsigned int pgbase, unsigned int pglen) 4567 { 4568 struct nfs4_exception exception = { 4569 .interruptible = true, 4570 }; 4571 int err; 4572 do { 4573 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 4574 trace_nfs4_readlink(inode, err); 4575 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4576 &exception); 4577 } while (exception.retry); 4578 return err; 4579 } 4580 4581 /* 4582 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 4583 */ 4584 static int 4585 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 4586 int flags) 4587 { 4588 struct nfs_server *server = NFS_SERVER(dir); 4589 struct nfs4_label l, *ilabel = NULL; 4590 struct nfs_open_context *ctx; 4591 struct nfs4_state *state; 4592 int status = 0; 4593 4594 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL); 4595 if (IS_ERR(ctx)) 4596 return PTR_ERR(ctx); 4597 4598 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 4599 4600 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 4601 sattr->ia_mode &= ~current_umask(); 4602 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 4603 if (IS_ERR(state)) { 4604 status = PTR_ERR(state); 4605 goto out; 4606 } 4607 out: 4608 nfs4_label_release_security(ilabel); 4609 put_nfs_open_context(ctx); 4610 return status; 4611 } 4612 4613 static int 4614 _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype) 4615 { 4616 struct nfs_server *server = NFS_SERVER(dir); 4617 struct nfs_removeargs args = { 4618 .fh = NFS_FH(dir), 4619 .name = *name, 4620 }; 4621 struct nfs_removeres res = { 4622 .server = server, 4623 }; 4624 struct rpc_message msg = { 4625 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 4626 .rpc_argp = &args, 4627 .rpc_resp = &res, 4628 }; 4629 unsigned long timestamp = jiffies; 4630 int status; 4631 4632 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 4633 if (status == 0) { 4634 spin_lock(&dir->i_lock); 4635 /* Removing a directory decrements nlink in the parent */ 4636 if (ftype == NF4DIR && dir->i_nlink > 2) 4637 nfs4_dec_nlink_locked(dir); 4638 nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp, 4639 NFS_INO_INVALID_DATA); 4640 spin_unlock(&dir->i_lock); 4641 } 4642 return status; 4643 } 4644 4645 static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry) 4646 { 4647 struct nfs4_exception exception = { 4648 .interruptible = true, 4649 }; 4650 struct inode *inode = d_inode(dentry); 4651 int err; 4652 4653 if (inode) { 4654 if (inode->i_nlink == 1) 4655 nfs4_inode_return_delegation(inode); 4656 else 4657 nfs4_inode_make_writeable(inode); 4658 } 4659 do { 4660 err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG); 4661 trace_nfs4_remove(dir, &dentry->d_name, err); 4662 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4663 &exception); 4664 } while (exception.retry); 4665 return err; 4666 } 4667 4668 static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name) 4669 { 4670 struct nfs4_exception exception = { 4671 .interruptible = true, 4672 }; 4673 int err; 4674 4675 do { 4676 err = _nfs4_proc_remove(dir, name, NF4DIR); 4677 trace_nfs4_remove(dir, name, err); 4678 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4679 &exception); 4680 } while (exception.retry); 4681 return err; 4682 } 4683 4684 static void nfs4_proc_unlink_setup(struct rpc_message *msg, 4685 struct dentry *dentry, 4686 struct inode *inode) 4687 { 4688 struct nfs_removeargs *args = msg->rpc_argp; 4689 struct nfs_removeres *res = msg->rpc_resp; 4690 4691 res->server = NFS_SB(dentry->d_sb); 4692 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 4693 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0); 4694 4695 nfs_fattr_init(res->dir_attr); 4696 4697 if (inode) 4698 nfs4_inode_return_delegation(inode); 4699 } 4700 4701 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 4702 { 4703 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client, 4704 &data->args.seq_args, 4705 &data->res.seq_res, 4706 task); 4707 } 4708 4709 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 4710 { 4711 struct nfs_unlinkdata *data = task->tk_calldata; 4712 struct nfs_removeres *res = &data->res; 4713 4714 if (!nfs4_sequence_done(task, &res->seq_res)) 4715 return 0; 4716 if (nfs4_async_handle_error(task, res->server, NULL, 4717 &data->timeout) == -EAGAIN) 4718 return 0; 4719 if (task->tk_status == 0) 4720 nfs4_update_changeattr(dir, &res->cinfo, 4721 res->dir_attr->time_start, 4722 NFS_INO_INVALID_DATA); 4723 return 1; 4724 } 4725 4726 static void nfs4_proc_rename_setup(struct rpc_message *msg, 4727 struct dentry *old_dentry, 4728 struct dentry *new_dentry) 4729 { 4730 struct nfs_renameargs *arg = msg->rpc_argp; 4731 struct nfs_renameres *res = msg->rpc_resp; 4732 struct inode *old_inode = d_inode(old_dentry); 4733 struct inode *new_inode = d_inode(new_dentry); 4734 4735 if (old_inode) 4736 nfs4_inode_make_writeable(old_inode); 4737 if (new_inode) 4738 nfs4_inode_return_delegation(new_inode); 4739 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 4740 res->server = NFS_SB(old_dentry->d_sb); 4741 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0); 4742 } 4743 4744 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 4745 { 4746 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client, 4747 &data->args.seq_args, 4748 &data->res.seq_res, 4749 task); 4750 } 4751 4752 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 4753 struct inode *new_dir) 4754 { 4755 struct nfs_renamedata *data = task->tk_calldata; 4756 struct nfs_renameres *res = &data->res; 4757 4758 if (!nfs4_sequence_done(task, &res->seq_res)) 4759 return 0; 4760 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 4761 return 0; 4762 4763 if (task->tk_status == 0) { 4764 if (new_dir != old_dir) { 4765 /* Note: If we moved a directory, nlink will change */ 4766 nfs4_update_changeattr(old_dir, &res->old_cinfo, 4767 res->old_fattr->time_start, 4768 NFS_INO_INVALID_NLINK | 4769 NFS_INO_INVALID_DATA); 4770 nfs4_update_changeattr(new_dir, &res->new_cinfo, 4771 res->new_fattr->time_start, 4772 NFS_INO_INVALID_NLINK | 4773 NFS_INO_INVALID_DATA); 4774 } else 4775 nfs4_update_changeattr(old_dir, &res->old_cinfo, 4776 res->old_fattr->time_start, 4777 NFS_INO_INVALID_DATA); 4778 } 4779 return 1; 4780 } 4781 4782 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 4783 { 4784 struct nfs_server *server = NFS_SERVER(inode); 4785 __u32 bitmask[NFS4_BITMASK_SZ]; 4786 struct nfs4_link_arg arg = { 4787 .fh = NFS_FH(inode), 4788 .dir_fh = NFS_FH(dir), 4789 .name = name, 4790 .bitmask = bitmask, 4791 }; 4792 struct nfs4_link_res res = { 4793 .server = server, 4794 .label = NULL, 4795 }; 4796 struct rpc_message msg = { 4797 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 4798 .rpc_argp = &arg, 4799 .rpc_resp = &res, 4800 }; 4801 int status = -ENOMEM; 4802 4803 res.fattr = nfs_alloc_fattr(); 4804 if (res.fattr == NULL) 4805 goto out; 4806 4807 res.label = nfs4_label_alloc(server, GFP_KERNEL); 4808 if (IS_ERR(res.label)) { 4809 status = PTR_ERR(res.label); 4810 goto out; 4811 } 4812 4813 nfs4_inode_make_writeable(inode); 4814 nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.label), inode, 4815 NFS_INO_INVALID_CHANGE); 4816 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4817 if (!status) { 4818 nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start, 4819 NFS_INO_INVALID_DATA); 4820 nfs4_inc_nlink(inode); 4821 status = nfs_post_op_update_inode(inode, res.fattr); 4822 if (!status) 4823 nfs_setsecurity(inode, res.fattr, res.label); 4824 } 4825 4826 4827 nfs4_label_free(res.label); 4828 4829 out: 4830 nfs_free_fattr(res.fattr); 4831 return status; 4832 } 4833 4834 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 4835 { 4836 struct nfs4_exception exception = { 4837 .interruptible = true, 4838 }; 4839 int err; 4840 do { 4841 err = nfs4_handle_exception(NFS_SERVER(inode), 4842 _nfs4_proc_link(inode, dir, name), 4843 &exception); 4844 } while (exception.retry); 4845 return err; 4846 } 4847 4848 struct nfs4_createdata { 4849 struct rpc_message msg; 4850 struct nfs4_create_arg arg; 4851 struct nfs4_create_res res; 4852 struct nfs_fh fh; 4853 struct nfs_fattr fattr; 4854 struct nfs4_label *label; 4855 }; 4856 4857 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 4858 const struct qstr *name, struct iattr *sattr, u32 ftype) 4859 { 4860 struct nfs4_createdata *data; 4861 4862 data = kzalloc(sizeof(*data), GFP_KERNEL); 4863 if (data != NULL) { 4864 struct nfs_server *server = NFS_SERVER(dir); 4865 4866 data->label = nfs4_label_alloc(server, GFP_KERNEL); 4867 if (IS_ERR(data->label)) 4868 goto out_free; 4869 4870 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 4871 data->msg.rpc_argp = &data->arg; 4872 data->msg.rpc_resp = &data->res; 4873 data->arg.dir_fh = NFS_FH(dir); 4874 data->arg.server = server; 4875 data->arg.name = name; 4876 data->arg.attrs = sattr; 4877 data->arg.ftype = ftype; 4878 data->arg.bitmask = nfs4_bitmask(server, data->label); 4879 data->arg.umask = current_umask(); 4880 data->res.server = server; 4881 data->res.fh = &data->fh; 4882 data->res.fattr = &data->fattr; 4883 data->res.label = data->label; 4884 nfs_fattr_init(data->res.fattr); 4885 } 4886 return data; 4887 out_free: 4888 kfree(data); 4889 return NULL; 4890 } 4891 4892 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 4893 { 4894 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 4895 &data->arg.seq_args, &data->res.seq_res, 1); 4896 if (status == 0) { 4897 spin_lock(&dir->i_lock); 4898 /* Creating a directory bumps nlink in the parent */ 4899 if (data->arg.ftype == NF4DIR) 4900 nfs4_inc_nlink_locked(dir); 4901 nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, 4902 data->res.fattr->time_start, 4903 NFS_INO_INVALID_DATA); 4904 spin_unlock(&dir->i_lock); 4905 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); 4906 } 4907 return status; 4908 } 4909 4910 static void nfs4_free_createdata(struct nfs4_createdata *data) 4911 { 4912 nfs4_label_free(data->label); 4913 kfree(data); 4914 } 4915 4916 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 4917 struct page *page, unsigned int len, struct iattr *sattr, 4918 struct nfs4_label *label) 4919 { 4920 struct nfs4_createdata *data; 4921 int status = -ENAMETOOLONG; 4922 4923 if (len > NFS4_MAXPATHLEN) 4924 goto out; 4925 4926 status = -ENOMEM; 4927 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 4928 if (data == NULL) 4929 goto out; 4930 4931 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 4932 data->arg.u.symlink.pages = &page; 4933 data->arg.u.symlink.len = len; 4934 data->arg.label = label; 4935 4936 status = nfs4_do_create(dir, dentry, data); 4937 4938 nfs4_free_createdata(data); 4939 out: 4940 return status; 4941 } 4942 4943 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 4944 struct page *page, unsigned int len, struct iattr *sattr) 4945 { 4946 struct nfs4_exception exception = { 4947 .interruptible = true, 4948 }; 4949 struct nfs4_label l, *label = NULL; 4950 int err; 4951 4952 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4953 4954 do { 4955 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label); 4956 trace_nfs4_symlink(dir, &dentry->d_name, err); 4957 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4958 &exception); 4959 } while (exception.retry); 4960 4961 nfs4_label_release_security(label); 4962 return err; 4963 } 4964 4965 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 4966 struct iattr *sattr, struct nfs4_label *label) 4967 { 4968 struct nfs4_createdata *data; 4969 int status = -ENOMEM; 4970 4971 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 4972 if (data == NULL) 4973 goto out; 4974 4975 data->arg.label = label; 4976 status = nfs4_do_create(dir, dentry, data); 4977 4978 nfs4_free_createdata(data); 4979 out: 4980 return status; 4981 } 4982 4983 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 4984 struct iattr *sattr) 4985 { 4986 struct nfs_server *server = NFS_SERVER(dir); 4987 struct nfs4_exception exception = { 4988 .interruptible = true, 4989 }; 4990 struct nfs4_label l, *label = NULL; 4991 int err; 4992 4993 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4994 4995 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 4996 sattr->ia_mode &= ~current_umask(); 4997 do { 4998 err = _nfs4_proc_mkdir(dir, dentry, sattr, label); 4999 trace_nfs4_mkdir(dir, &dentry->d_name, err); 5000 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5001 &exception); 5002 } while (exception.retry); 5003 nfs4_label_release_security(label); 5004 5005 return err; 5006 } 5007 5008 static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg, 5009 struct nfs_readdir_res *nr_res) 5010 { 5011 struct inode *dir = d_inode(nr_arg->dentry); 5012 struct nfs_server *server = NFS_SERVER(dir); 5013 struct nfs4_readdir_arg args = { 5014 .fh = NFS_FH(dir), 5015 .pages = nr_arg->pages, 5016 .pgbase = 0, 5017 .count = nr_arg->page_len, 5018 .plus = nr_arg->plus, 5019 }; 5020 struct nfs4_readdir_res res; 5021 struct rpc_message msg = { 5022 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 5023 .rpc_argp = &args, 5024 .rpc_resp = &res, 5025 .rpc_cred = nr_arg->cred, 5026 }; 5027 int status; 5028 5029 dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__, 5030 nr_arg->dentry, (unsigned long long)nr_arg->cookie); 5031 if (!(server->caps & NFS_CAP_SECURITY_LABEL)) 5032 args.bitmask = server->attr_bitmask_nl; 5033 else 5034 args.bitmask = server->attr_bitmask; 5035 5036 nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args); 5037 res.pgbase = args.pgbase; 5038 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 5039 &res.seq_res, 0); 5040 if (status >= 0) { 5041 memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE); 5042 status += args.pgbase; 5043 } 5044 5045 nfs_invalidate_atime(dir); 5046 5047 dprintk("%s: returns %d\n", __func__, status); 5048 return status; 5049 } 5050 5051 static int nfs4_proc_readdir(struct nfs_readdir_arg *arg, 5052 struct nfs_readdir_res *res) 5053 { 5054 struct nfs4_exception exception = { 5055 .interruptible = true, 5056 }; 5057 int err; 5058 do { 5059 err = _nfs4_proc_readdir(arg, res); 5060 trace_nfs4_readdir(d_inode(arg->dentry), err); 5061 err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)), 5062 err, &exception); 5063 } while (exception.retry); 5064 return err; 5065 } 5066 5067 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5068 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 5069 { 5070 struct nfs4_createdata *data; 5071 int mode = sattr->ia_mode; 5072 int status = -ENOMEM; 5073 5074 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 5075 if (data == NULL) 5076 goto out; 5077 5078 if (S_ISFIFO(mode)) 5079 data->arg.ftype = NF4FIFO; 5080 else if (S_ISBLK(mode)) { 5081 data->arg.ftype = NF4BLK; 5082 data->arg.u.device.specdata1 = MAJOR(rdev); 5083 data->arg.u.device.specdata2 = MINOR(rdev); 5084 } 5085 else if (S_ISCHR(mode)) { 5086 data->arg.ftype = NF4CHR; 5087 data->arg.u.device.specdata1 = MAJOR(rdev); 5088 data->arg.u.device.specdata2 = MINOR(rdev); 5089 } else if (!S_ISSOCK(mode)) { 5090 status = -EINVAL; 5091 goto out_free; 5092 } 5093 5094 data->arg.label = label; 5095 status = nfs4_do_create(dir, dentry, data); 5096 out_free: 5097 nfs4_free_createdata(data); 5098 out: 5099 return status; 5100 } 5101 5102 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 5103 struct iattr *sattr, dev_t rdev) 5104 { 5105 struct nfs_server *server = NFS_SERVER(dir); 5106 struct nfs4_exception exception = { 5107 .interruptible = true, 5108 }; 5109 struct nfs4_label l, *label = NULL; 5110 int err; 5111 5112 label = nfs4_label_init_security(dir, dentry, sattr, &l); 5113 5114 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) 5115 sattr->ia_mode &= ~current_umask(); 5116 do { 5117 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 5118 trace_nfs4_mknod(dir, &dentry->d_name, err); 5119 err = nfs4_handle_exception(NFS_SERVER(dir), err, 5120 &exception); 5121 } while (exception.retry); 5122 5123 nfs4_label_release_security(label); 5124 5125 return err; 5126 } 5127 5128 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 5129 struct nfs_fsstat *fsstat) 5130 { 5131 struct nfs4_statfs_arg args = { 5132 .fh = fhandle, 5133 .bitmask = server->attr_bitmask, 5134 }; 5135 struct nfs4_statfs_res res = { 5136 .fsstat = fsstat, 5137 }; 5138 struct rpc_message msg = { 5139 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 5140 .rpc_argp = &args, 5141 .rpc_resp = &res, 5142 }; 5143 5144 nfs_fattr_init(fsstat->fattr); 5145 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5146 } 5147 5148 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 5149 { 5150 struct nfs4_exception exception = { 5151 .interruptible = true, 5152 }; 5153 int err; 5154 do { 5155 err = nfs4_handle_exception(server, 5156 _nfs4_proc_statfs(server, fhandle, fsstat), 5157 &exception); 5158 } while (exception.retry); 5159 return err; 5160 } 5161 5162 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 5163 struct nfs_fsinfo *fsinfo) 5164 { 5165 struct nfs4_fsinfo_arg args = { 5166 .fh = fhandle, 5167 .bitmask = server->attr_bitmask, 5168 }; 5169 struct nfs4_fsinfo_res res = { 5170 .fsinfo = fsinfo, 5171 }; 5172 struct rpc_message msg = { 5173 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 5174 .rpc_argp = &args, 5175 .rpc_resp = &res, 5176 }; 5177 5178 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5179 } 5180 5181 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5182 { 5183 struct nfs4_exception exception = { 5184 .interruptible = true, 5185 }; 5186 int err; 5187 5188 do { 5189 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 5190 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 5191 if (err == 0) { 5192 nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ); 5193 break; 5194 } 5195 err = nfs4_handle_exception(server, err, &exception); 5196 } while (exception.retry); 5197 return err; 5198 } 5199 5200 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 5201 { 5202 int error; 5203 5204 nfs_fattr_init(fsinfo->fattr); 5205 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 5206 if (error == 0) { 5207 /* block layout checks this! */ 5208 server->pnfs_blksize = fsinfo->blksize; 5209 set_pnfs_layoutdriver(server, fhandle, fsinfo); 5210 } 5211 5212 return error; 5213 } 5214 5215 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5216 struct nfs_pathconf *pathconf) 5217 { 5218 struct nfs4_pathconf_arg args = { 5219 .fh = fhandle, 5220 .bitmask = server->attr_bitmask, 5221 }; 5222 struct nfs4_pathconf_res res = { 5223 .pathconf = pathconf, 5224 }; 5225 struct rpc_message msg = { 5226 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 5227 .rpc_argp = &args, 5228 .rpc_resp = &res, 5229 }; 5230 5231 /* None of the pathconf attributes are mandatory to implement */ 5232 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 5233 memset(pathconf, 0, sizeof(*pathconf)); 5234 return 0; 5235 } 5236 5237 nfs_fattr_init(pathconf->fattr); 5238 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 5239 } 5240 5241 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 5242 struct nfs_pathconf *pathconf) 5243 { 5244 struct nfs4_exception exception = { 5245 .interruptible = true, 5246 }; 5247 int err; 5248 5249 do { 5250 err = nfs4_handle_exception(server, 5251 _nfs4_proc_pathconf(server, fhandle, pathconf), 5252 &exception); 5253 } while (exception.retry); 5254 return err; 5255 } 5256 5257 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 5258 const struct nfs_open_context *ctx, 5259 const struct nfs_lock_context *l_ctx, 5260 fmode_t fmode) 5261 { 5262 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL); 5263 } 5264 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 5265 5266 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 5267 const struct nfs_open_context *ctx, 5268 const struct nfs_lock_context *l_ctx, 5269 fmode_t fmode) 5270 { 5271 nfs4_stateid _current_stateid; 5272 5273 /* If the current stateid represents a lost lock, then exit */ 5274 if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO) 5275 return true; 5276 return nfs4_stateid_match(stateid, &_current_stateid); 5277 } 5278 5279 static bool nfs4_error_stateid_expired(int err) 5280 { 5281 switch (err) { 5282 case -NFS4ERR_DELEG_REVOKED: 5283 case -NFS4ERR_ADMIN_REVOKED: 5284 case -NFS4ERR_BAD_STATEID: 5285 case -NFS4ERR_STALE_STATEID: 5286 case -NFS4ERR_OLD_STATEID: 5287 case -NFS4ERR_OPENMODE: 5288 case -NFS4ERR_EXPIRED: 5289 return true; 5290 } 5291 return false; 5292 } 5293 5294 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 5295 { 5296 struct nfs_server *server = NFS_SERVER(hdr->inode); 5297 5298 trace_nfs4_read(hdr, task->tk_status); 5299 if (task->tk_status < 0) { 5300 struct nfs4_exception exception = { 5301 .inode = hdr->inode, 5302 .state = hdr->args.context->state, 5303 .stateid = &hdr->args.stateid, 5304 }; 5305 task->tk_status = nfs4_async_handle_exception(task, 5306 server, task->tk_status, &exception); 5307 if (exception.retry) { 5308 rpc_restart_call_prepare(task); 5309 return -EAGAIN; 5310 } 5311 } 5312 5313 if (task->tk_status > 0) 5314 renew_lease(server, hdr->timestamp); 5315 return 0; 5316 } 5317 5318 static bool nfs4_read_stateid_changed(struct rpc_task *task, 5319 struct nfs_pgio_args *args) 5320 { 5321 5322 if (!nfs4_error_stateid_expired(task->tk_status) || 5323 nfs4_stateid_is_current(&args->stateid, 5324 args->context, 5325 args->lock_context, 5326 FMODE_READ)) 5327 return false; 5328 rpc_restart_call_prepare(task); 5329 return true; 5330 } 5331 5332 static bool nfs4_read_plus_not_supported(struct rpc_task *task, 5333 struct nfs_pgio_header *hdr) 5334 { 5335 struct nfs_server *server = NFS_SERVER(hdr->inode); 5336 struct rpc_message *msg = &task->tk_msg; 5337 5338 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] && 5339 server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) { 5340 server->caps &= ~NFS_CAP_READ_PLUS; 5341 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5342 rpc_restart_call_prepare(task); 5343 return true; 5344 } 5345 return false; 5346 } 5347 5348 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5349 { 5350 dprintk("--> %s\n", __func__); 5351 5352 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5353 return -EAGAIN; 5354 if (nfs4_read_stateid_changed(task, &hdr->args)) 5355 return -EAGAIN; 5356 if (nfs4_read_plus_not_supported(task, hdr)) 5357 return -EAGAIN; 5358 if (task->tk_status > 0) 5359 nfs_invalidate_atime(hdr->inode); 5360 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 5361 nfs4_read_done_cb(task, hdr); 5362 } 5363 5364 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS 5365 static void nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5366 struct rpc_message *msg) 5367 { 5368 /* Note: We don't use READ_PLUS with pNFS yet */ 5369 if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) 5370 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS]; 5371 } 5372 #else 5373 static void nfs42_read_plus_support(struct nfs_pgio_header *hdr, 5374 struct rpc_message *msg) 5375 { 5376 } 5377 #endif /* CONFIG_NFS_V4_2 */ 5378 5379 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 5380 struct rpc_message *msg) 5381 { 5382 hdr->timestamp = jiffies; 5383 if (!hdr->pgio_done_cb) 5384 hdr->pgio_done_cb = nfs4_read_done_cb; 5385 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 5386 nfs42_read_plus_support(hdr, msg); 5387 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5388 } 5389 5390 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 5391 struct nfs_pgio_header *hdr) 5392 { 5393 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client, 5394 &hdr->args.seq_args, 5395 &hdr->res.seq_res, 5396 task)) 5397 return 0; 5398 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 5399 hdr->args.lock_context, 5400 hdr->rw_mode) == -EIO) 5401 return -EIO; 5402 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 5403 return -EIO; 5404 return 0; 5405 } 5406 5407 static int nfs4_write_done_cb(struct rpc_task *task, 5408 struct nfs_pgio_header *hdr) 5409 { 5410 struct inode *inode = hdr->inode; 5411 5412 trace_nfs4_write(hdr, task->tk_status); 5413 if (task->tk_status < 0) { 5414 struct nfs4_exception exception = { 5415 .inode = hdr->inode, 5416 .state = hdr->args.context->state, 5417 .stateid = &hdr->args.stateid, 5418 }; 5419 task->tk_status = nfs4_async_handle_exception(task, 5420 NFS_SERVER(inode), task->tk_status, 5421 &exception); 5422 if (exception.retry) { 5423 rpc_restart_call_prepare(task); 5424 return -EAGAIN; 5425 } 5426 } 5427 if (task->tk_status >= 0) { 5428 renew_lease(NFS_SERVER(inode), hdr->timestamp); 5429 nfs_writeback_update_inode(hdr); 5430 } 5431 return 0; 5432 } 5433 5434 static bool nfs4_write_stateid_changed(struct rpc_task *task, 5435 struct nfs_pgio_args *args) 5436 { 5437 5438 if (!nfs4_error_stateid_expired(task->tk_status) || 5439 nfs4_stateid_is_current(&args->stateid, 5440 args->context, 5441 args->lock_context, 5442 FMODE_WRITE)) 5443 return false; 5444 rpc_restart_call_prepare(task); 5445 return true; 5446 } 5447 5448 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 5449 { 5450 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 5451 return -EAGAIN; 5452 if (nfs4_write_stateid_changed(task, &hdr->args)) 5453 return -EAGAIN; 5454 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 5455 nfs4_write_done_cb(task, hdr); 5456 } 5457 5458 static 5459 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 5460 { 5461 /* Don't request attributes for pNFS or O_DIRECT writes */ 5462 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 5463 return false; 5464 /* Otherwise, request attributes if and only if we don't hold 5465 * a delegation 5466 */ 5467 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 5468 } 5469 5470 static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src, 5471 struct inode *inode, struct nfs_server *server, 5472 struct nfs4_label *label) 5473 { 5474 unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity); 5475 unsigned int i; 5476 5477 memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ); 5478 5479 if (cache_validity & NFS_INO_INVALID_CHANGE) 5480 bitmask[0] |= FATTR4_WORD0_CHANGE; 5481 if (cache_validity & NFS_INO_INVALID_ATIME) 5482 bitmask[1] |= FATTR4_WORD1_TIME_ACCESS; 5483 if (cache_validity & NFS_INO_INVALID_MODE) 5484 bitmask[1] |= FATTR4_WORD1_MODE; 5485 if (cache_validity & NFS_INO_INVALID_OTHER) 5486 bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP; 5487 if (cache_validity & NFS_INO_INVALID_NLINK) 5488 bitmask[1] |= FATTR4_WORD1_NUMLINKS; 5489 if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL) 5490 bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL; 5491 if (cache_validity & NFS_INO_INVALID_CTIME) 5492 bitmask[1] |= FATTR4_WORD1_TIME_METADATA; 5493 if (cache_validity & NFS_INO_INVALID_MTIME) 5494 bitmask[1] |= FATTR4_WORD1_TIME_MODIFY; 5495 if (cache_validity & NFS_INO_INVALID_BLOCKS) 5496 bitmask[1] |= FATTR4_WORD1_SPACE_USED; 5497 5498 if (cache_validity & NFS_INO_INVALID_SIZE) 5499 bitmask[0] |= FATTR4_WORD0_SIZE; 5500 5501 for (i = 0; i < NFS4_BITMASK_SZ; i++) 5502 bitmask[i] &= server->attr_bitmask[i]; 5503 } 5504 5505 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 5506 struct rpc_message *msg, 5507 struct rpc_clnt **clnt) 5508 { 5509 struct nfs_server *server = NFS_SERVER(hdr->inode); 5510 5511 if (!nfs4_write_need_cache_consistency_data(hdr)) { 5512 hdr->args.bitmask = NULL; 5513 hdr->res.fattr = NULL; 5514 } else { 5515 nfs4_bitmask_set(hdr->args.bitmask_store, 5516 server->cache_consistency_bitmask, 5517 hdr->inode, server, NULL); 5518 hdr->args.bitmask = hdr->args.bitmask_store; 5519 } 5520 5521 if (!hdr->pgio_done_cb) 5522 hdr->pgio_done_cb = nfs4_write_done_cb; 5523 hdr->res.server = server; 5524 hdr->timestamp = jiffies; 5525 5526 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 5527 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); 5528 nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr); 5529 } 5530 5531 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 5532 { 5533 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client, 5534 &data->args.seq_args, 5535 &data->res.seq_res, 5536 task); 5537 } 5538 5539 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 5540 { 5541 struct inode *inode = data->inode; 5542 5543 trace_nfs4_commit(data, task->tk_status); 5544 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 5545 NULL, NULL) == -EAGAIN) { 5546 rpc_restart_call_prepare(task); 5547 return -EAGAIN; 5548 } 5549 return 0; 5550 } 5551 5552 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 5553 { 5554 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5555 return -EAGAIN; 5556 return data->commit_done_cb(task, data); 5557 } 5558 5559 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg, 5560 struct rpc_clnt **clnt) 5561 { 5562 struct nfs_server *server = NFS_SERVER(data->inode); 5563 5564 if (data->commit_done_cb == NULL) 5565 data->commit_done_cb = nfs4_commit_done_cb; 5566 data->res.server = server; 5567 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 5568 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 5569 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg); 5570 } 5571 5572 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args, 5573 struct nfs_commitres *res) 5574 { 5575 struct inode *dst_inode = file_inode(dst); 5576 struct nfs_server *server = NFS_SERVER(dst_inode); 5577 struct rpc_message msg = { 5578 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT], 5579 .rpc_argp = args, 5580 .rpc_resp = res, 5581 }; 5582 5583 args->fh = NFS_FH(dst_inode); 5584 return nfs4_call_sync(server->client, server, &msg, 5585 &args->seq_args, &res->seq_res, 1); 5586 } 5587 5588 int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res) 5589 { 5590 struct nfs_commitargs args = { 5591 .offset = offset, 5592 .count = count, 5593 }; 5594 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); 5595 struct nfs4_exception exception = { }; 5596 int status; 5597 5598 do { 5599 status = _nfs4_proc_commit(dst, &args, res); 5600 status = nfs4_handle_exception(dst_server, status, &exception); 5601 } while (exception.retry); 5602 5603 return status; 5604 } 5605 5606 struct nfs4_renewdata { 5607 struct nfs_client *client; 5608 unsigned long timestamp; 5609 }; 5610 5611 /* 5612 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 5613 * standalone procedure for queueing an asynchronous RENEW. 5614 */ 5615 static void nfs4_renew_release(void *calldata) 5616 { 5617 struct nfs4_renewdata *data = calldata; 5618 struct nfs_client *clp = data->client; 5619 5620 if (refcount_read(&clp->cl_count) > 1) 5621 nfs4_schedule_state_renewal(clp); 5622 nfs_put_client(clp); 5623 kfree(data); 5624 } 5625 5626 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 5627 { 5628 struct nfs4_renewdata *data = calldata; 5629 struct nfs_client *clp = data->client; 5630 unsigned long timestamp = data->timestamp; 5631 5632 trace_nfs4_renew_async(clp, task->tk_status); 5633 switch (task->tk_status) { 5634 case 0: 5635 break; 5636 case -NFS4ERR_LEASE_MOVED: 5637 nfs4_schedule_lease_moved_recovery(clp); 5638 break; 5639 default: 5640 /* Unless we're shutting down, schedule state recovery! */ 5641 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 5642 return; 5643 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 5644 nfs4_schedule_lease_recovery(clp); 5645 return; 5646 } 5647 nfs4_schedule_path_down_recovery(clp); 5648 } 5649 do_renew_lease(clp, timestamp); 5650 } 5651 5652 static const struct rpc_call_ops nfs4_renew_ops = { 5653 .rpc_call_done = nfs4_renew_done, 5654 .rpc_release = nfs4_renew_release, 5655 }; 5656 5657 static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 5658 { 5659 struct rpc_message msg = { 5660 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5661 .rpc_argp = clp, 5662 .rpc_cred = cred, 5663 }; 5664 struct nfs4_renewdata *data; 5665 5666 if (renew_flags == 0) 5667 return 0; 5668 if (!refcount_inc_not_zero(&clp->cl_count)) 5669 return -EIO; 5670 data = kmalloc(sizeof(*data), GFP_NOFS); 5671 if (data == NULL) { 5672 nfs_put_client(clp); 5673 return -ENOMEM; 5674 } 5675 data->client = clp; 5676 data->timestamp = jiffies; 5677 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 5678 &nfs4_renew_ops, data); 5679 } 5680 5681 static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred) 5682 { 5683 struct rpc_message msg = { 5684 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 5685 .rpc_argp = clp, 5686 .rpc_cred = cred, 5687 }; 5688 unsigned long now = jiffies; 5689 int status; 5690 5691 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5692 if (status < 0) 5693 return status; 5694 do_renew_lease(clp, now); 5695 return 0; 5696 } 5697 5698 static inline int nfs4_server_supports_acls(struct nfs_server *server) 5699 { 5700 return server->caps & NFS_CAP_ACLS; 5701 } 5702 5703 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 5704 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 5705 * the stack. 5706 */ 5707 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 5708 5709 int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen, 5710 struct page **pages) 5711 { 5712 struct page *newpage, **spages; 5713 int rc = 0; 5714 size_t len; 5715 spages = pages; 5716 5717 do { 5718 len = min_t(size_t, PAGE_SIZE, buflen); 5719 newpage = alloc_page(GFP_KERNEL); 5720 5721 if (newpage == NULL) 5722 goto unwind; 5723 memcpy(page_address(newpage), buf, len); 5724 buf += len; 5725 buflen -= len; 5726 *pages++ = newpage; 5727 rc++; 5728 } while (buflen != 0); 5729 5730 return rc; 5731 5732 unwind: 5733 for(; rc > 0; rc--) 5734 __free_page(spages[rc-1]); 5735 return -ENOMEM; 5736 } 5737 5738 struct nfs4_cached_acl { 5739 int cached; 5740 size_t len; 5741 char data[]; 5742 }; 5743 5744 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 5745 { 5746 struct nfs_inode *nfsi = NFS_I(inode); 5747 5748 spin_lock(&inode->i_lock); 5749 kfree(nfsi->nfs4_acl); 5750 nfsi->nfs4_acl = acl; 5751 spin_unlock(&inode->i_lock); 5752 } 5753 5754 static void nfs4_zap_acl_attr(struct inode *inode) 5755 { 5756 nfs4_set_cached_acl(inode, NULL); 5757 } 5758 5759 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 5760 { 5761 struct nfs_inode *nfsi = NFS_I(inode); 5762 struct nfs4_cached_acl *acl; 5763 int ret = -ENOENT; 5764 5765 spin_lock(&inode->i_lock); 5766 acl = nfsi->nfs4_acl; 5767 if (acl == NULL) 5768 goto out; 5769 if (buf == NULL) /* user is just asking for length */ 5770 goto out_len; 5771 if (acl->cached == 0) 5772 goto out; 5773 ret = -ERANGE; /* see getxattr(2) man page */ 5774 if (acl->len > buflen) 5775 goto out; 5776 memcpy(buf, acl->data, acl->len); 5777 out_len: 5778 ret = acl->len; 5779 out: 5780 spin_unlock(&inode->i_lock); 5781 return ret; 5782 } 5783 5784 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 5785 { 5786 struct nfs4_cached_acl *acl; 5787 size_t buflen = sizeof(*acl) + acl_len; 5788 5789 if (buflen <= PAGE_SIZE) { 5790 acl = kmalloc(buflen, GFP_KERNEL); 5791 if (acl == NULL) 5792 goto out; 5793 acl->cached = 1; 5794 _copy_from_pages(acl->data, pages, pgbase, acl_len); 5795 } else { 5796 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 5797 if (acl == NULL) 5798 goto out; 5799 acl->cached = 0; 5800 } 5801 acl->len = acl_len; 5802 out: 5803 nfs4_set_cached_acl(inode, acl); 5804 } 5805 5806 /* 5807 * The getxattr API returns the required buffer length when called with a 5808 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 5809 * the required buf. On a NULL buf, we send a page of data to the server 5810 * guessing that the ACL request can be serviced by a page. If so, we cache 5811 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 5812 * the cache. If not so, we throw away the page, and cache the required 5813 * length. The next getxattr call will then produce another round trip to 5814 * the server, this time with the input buf of the required size. 5815 */ 5816 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 5817 { 5818 struct page **pages; 5819 struct nfs_getaclargs args = { 5820 .fh = NFS_FH(inode), 5821 .acl_len = buflen, 5822 }; 5823 struct nfs_getaclres res = { 5824 .acl_len = buflen, 5825 }; 5826 struct rpc_message msg = { 5827 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 5828 .rpc_argp = &args, 5829 .rpc_resp = &res, 5830 }; 5831 unsigned int npages; 5832 int ret = -ENOMEM, i; 5833 struct nfs_server *server = NFS_SERVER(inode); 5834 5835 if (buflen == 0) 5836 buflen = server->rsize; 5837 5838 npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1; 5839 pages = kmalloc_array(npages, sizeof(struct page *), GFP_NOFS); 5840 if (!pages) 5841 return -ENOMEM; 5842 5843 args.acl_pages = pages; 5844 5845 for (i = 0; i < npages; i++) { 5846 pages[i] = alloc_page(GFP_KERNEL); 5847 if (!pages[i]) 5848 goto out_free; 5849 } 5850 5851 /* for decoding across pages */ 5852 res.acl_scratch = alloc_page(GFP_KERNEL); 5853 if (!res.acl_scratch) 5854 goto out_free; 5855 5856 args.acl_len = npages * PAGE_SIZE; 5857 5858 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 5859 __func__, buf, buflen, npages, args.acl_len); 5860 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 5861 &msg, &args.seq_args, &res.seq_res, 0); 5862 if (ret) 5863 goto out_free; 5864 5865 /* Handle the case where the passed-in buffer is too short */ 5866 if (res.acl_flags & NFS4_ACL_TRUNC) { 5867 /* Did the user only issue a request for the acl length? */ 5868 if (buf == NULL) 5869 goto out_ok; 5870 ret = -ERANGE; 5871 goto out_free; 5872 } 5873 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 5874 if (buf) { 5875 if (res.acl_len > buflen) { 5876 ret = -ERANGE; 5877 goto out_free; 5878 } 5879 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 5880 } 5881 out_ok: 5882 ret = res.acl_len; 5883 out_free: 5884 for (i = 0; i < npages; i++) 5885 if (pages[i]) 5886 __free_page(pages[i]); 5887 if (res.acl_scratch) 5888 __free_page(res.acl_scratch); 5889 kfree(pages); 5890 return ret; 5891 } 5892 5893 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 5894 { 5895 struct nfs4_exception exception = { 5896 .interruptible = true, 5897 }; 5898 ssize_t ret; 5899 do { 5900 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 5901 trace_nfs4_get_acl(inode, ret); 5902 if (ret >= 0) 5903 break; 5904 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 5905 } while (exception.retry); 5906 return ret; 5907 } 5908 5909 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 5910 { 5911 struct nfs_server *server = NFS_SERVER(inode); 5912 int ret; 5913 5914 if (!nfs4_server_supports_acls(server)) 5915 return -EOPNOTSUPP; 5916 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 5917 if (ret < 0) 5918 return ret; 5919 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 5920 nfs_zap_acl_cache(inode); 5921 ret = nfs4_read_cached_acl(inode, buf, buflen); 5922 if (ret != -ENOENT) 5923 /* -ENOENT is returned if there is no ACL or if there is an ACL 5924 * but no cached acl data, just the acl length */ 5925 return ret; 5926 return nfs4_get_acl_uncached(inode, buf, buflen); 5927 } 5928 5929 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 5930 { 5931 struct nfs_server *server = NFS_SERVER(inode); 5932 struct page *pages[NFS4ACL_MAXPAGES]; 5933 struct nfs_setaclargs arg = { 5934 .fh = NFS_FH(inode), 5935 .acl_pages = pages, 5936 .acl_len = buflen, 5937 }; 5938 struct nfs_setaclres res; 5939 struct rpc_message msg = { 5940 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 5941 .rpc_argp = &arg, 5942 .rpc_resp = &res, 5943 }; 5944 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 5945 int ret, i; 5946 5947 /* You can't remove system.nfs4_acl: */ 5948 if (buflen == 0) 5949 return -EINVAL; 5950 if (!nfs4_server_supports_acls(server)) 5951 return -EOPNOTSUPP; 5952 if (npages > ARRAY_SIZE(pages)) 5953 return -ERANGE; 5954 i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages); 5955 if (i < 0) 5956 return i; 5957 nfs4_inode_make_writeable(inode); 5958 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5959 5960 /* 5961 * Free each page after tx, so the only ref left is 5962 * held by the network stack 5963 */ 5964 for (; i > 0; i--) 5965 put_page(pages[i-1]); 5966 5967 /* 5968 * Acl update can result in inode attribute update. 5969 * so mark the attribute cache invalid. 5970 */ 5971 spin_lock(&inode->i_lock); 5972 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE | 5973 NFS_INO_INVALID_CTIME | 5974 NFS_INO_REVAL_FORCED); 5975 spin_unlock(&inode->i_lock); 5976 nfs_access_zap_cache(inode); 5977 nfs_zap_acl_cache(inode); 5978 return ret; 5979 } 5980 5981 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 5982 { 5983 struct nfs4_exception exception = { }; 5984 int err; 5985 do { 5986 err = __nfs4_proc_set_acl(inode, buf, buflen); 5987 trace_nfs4_set_acl(inode, err); 5988 if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) { 5989 /* 5990 * no need to retry since the kernel 5991 * isn't involved in encoding the ACEs. 5992 */ 5993 err = -EINVAL; 5994 break; 5995 } 5996 err = nfs4_handle_exception(NFS_SERVER(inode), err, 5997 &exception); 5998 } while (exception.retry); 5999 return err; 6000 } 6001 6002 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6003 static int _nfs4_get_security_label(struct inode *inode, void *buf, 6004 size_t buflen) 6005 { 6006 struct nfs_server *server = NFS_SERVER(inode); 6007 struct nfs_fattr fattr; 6008 struct nfs4_label label = {0, 0, buflen, buf}; 6009 6010 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6011 struct nfs4_getattr_arg arg = { 6012 .fh = NFS_FH(inode), 6013 .bitmask = bitmask, 6014 }; 6015 struct nfs4_getattr_res res = { 6016 .fattr = &fattr, 6017 .label = &label, 6018 .server = server, 6019 }; 6020 struct rpc_message msg = { 6021 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 6022 .rpc_argp = &arg, 6023 .rpc_resp = &res, 6024 }; 6025 int ret; 6026 6027 nfs_fattr_init(&fattr); 6028 6029 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 6030 if (ret) 6031 return ret; 6032 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 6033 return -ENOENT; 6034 return label.len; 6035 } 6036 6037 static int nfs4_get_security_label(struct inode *inode, void *buf, 6038 size_t buflen) 6039 { 6040 struct nfs4_exception exception = { 6041 .interruptible = true, 6042 }; 6043 int err; 6044 6045 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6046 return -EOPNOTSUPP; 6047 6048 do { 6049 err = _nfs4_get_security_label(inode, buf, buflen); 6050 trace_nfs4_get_security_label(inode, err); 6051 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6052 &exception); 6053 } while (exception.retry); 6054 return err; 6055 } 6056 6057 static int _nfs4_do_set_security_label(struct inode *inode, 6058 struct nfs4_label *ilabel, 6059 struct nfs_fattr *fattr, 6060 struct nfs4_label *olabel) 6061 { 6062 6063 struct iattr sattr = {0}; 6064 struct nfs_server *server = NFS_SERVER(inode); 6065 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 6066 struct nfs_setattrargs arg = { 6067 .fh = NFS_FH(inode), 6068 .iap = &sattr, 6069 .server = server, 6070 .bitmask = bitmask, 6071 .label = ilabel, 6072 }; 6073 struct nfs_setattrres res = { 6074 .fattr = fattr, 6075 .label = olabel, 6076 .server = server, 6077 }; 6078 struct rpc_message msg = { 6079 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 6080 .rpc_argp = &arg, 6081 .rpc_resp = &res, 6082 }; 6083 int status; 6084 6085 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 6086 6087 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6088 if (status) 6089 dprintk("%s failed: %d\n", __func__, status); 6090 6091 return status; 6092 } 6093 6094 static int nfs4_do_set_security_label(struct inode *inode, 6095 struct nfs4_label *ilabel, 6096 struct nfs_fattr *fattr, 6097 struct nfs4_label *olabel) 6098 { 6099 struct nfs4_exception exception = { }; 6100 int err; 6101 6102 do { 6103 err = _nfs4_do_set_security_label(inode, ilabel, 6104 fattr, olabel); 6105 trace_nfs4_set_security_label(inode, err); 6106 err = nfs4_handle_exception(NFS_SERVER(inode), err, 6107 &exception); 6108 } while (exception.retry); 6109 return err; 6110 } 6111 6112 static int 6113 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 6114 { 6115 struct nfs4_label ilabel, *olabel = NULL; 6116 struct nfs_fattr fattr; 6117 int status; 6118 6119 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 6120 return -EOPNOTSUPP; 6121 6122 nfs_fattr_init(&fattr); 6123 6124 ilabel.pi = 0; 6125 ilabel.lfs = 0; 6126 ilabel.label = (char *)buf; 6127 ilabel.len = buflen; 6128 6129 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 6130 if (IS_ERR(olabel)) { 6131 status = -PTR_ERR(olabel); 6132 goto out; 6133 } 6134 6135 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel); 6136 if (status == 0) 6137 nfs_setsecurity(inode, &fattr, olabel); 6138 6139 nfs4_label_free(olabel); 6140 out: 6141 return status; 6142 } 6143 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 6144 6145 6146 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 6147 nfs4_verifier *bootverf) 6148 { 6149 __be32 verf[2]; 6150 6151 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 6152 /* An impossible timestamp guarantees this value 6153 * will never match a generated boot time. */ 6154 verf[0] = cpu_to_be32(U32_MAX); 6155 verf[1] = cpu_to_be32(U32_MAX); 6156 } else { 6157 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6158 u64 ns = ktime_to_ns(nn->boot_time); 6159 6160 verf[0] = cpu_to_be32(ns >> 32); 6161 verf[1] = cpu_to_be32(ns); 6162 } 6163 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 6164 } 6165 6166 static size_t 6167 nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen) 6168 { 6169 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 6170 struct nfs_netns_client *nn_clp = nn->nfs_client; 6171 const char *id; 6172 6173 buf[0] = '\0'; 6174 6175 if (nn_clp) { 6176 rcu_read_lock(); 6177 id = rcu_dereference(nn_clp->identifier); 6178 if (id) 6179 strscpy(buf, id, buflen); 6180 rcu_read_unlock(); 6181 } 6182 6183 if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0') 6184 strscpy(buf, nfs4_client_id_uniquifier, buflen); 6185 6186 return strlen(buf); 6187 } 6188 6189 static int 6190 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 6191 { 6192 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6193 size_t buflen; 6194 size_t len; 6195 char *str; 6196 6197 if (clp->cl_owner_id != NULL) 6198 return 0; 6199 6200 rcu_read_lock(); 6201 len = 14 + 6202 strlen(clp->cl_rpcclient->cl_nodename) + 6203 1 + 6204 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 6205 1; 6206 rcu_read_unlock(); 6207 6208 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6209 if (buflen) 6210 len += buflen + 1; 6211 6212 if (len > NFS4_OPAQUE_LIMIT + 1) 6213 return -EINVAL; 6214 6215 /* 6216 * Since this string is allocated at mount time, and held until the 6217 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6218 * about a memory-reclaim deadlock. 6219 */ 6220 str = kmalloc(len, GFP_KERNEL); 6221 if (!str) 6222 return -ENOMEM; 6223 6224 rcu_read_lock(); 6225 if (buflen) 6226 scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s", 6227 clp->cl_rpcclient->cl_nodename, buf, 6228 rpc_peeraddr2str(clp->cl_rpcclient, 6229 RPC_DISPLAY_ADDR)); 6230 else 6231 scnprintf(str, len, "Linux NFSv4.0 %s/%s", 6232 clp->cl_rpcclient->cl_nodename, 6233 rpc_peeraddr2str(clp->cl_rpcclient, 6234 RPC_DISPLAY_ADDR)); 6235 rcu_read_unlock(); 6236 6237 clp->cl_owner_id = str; 6238 return 0; 6239 } 6240 6241 static int 6242 nfs4_init_uniform_client_string(struct nfs_client *clp) 6243 { 6244 char buf[NFS4_CLIENT_ID_UNIQ_LEN]; 6245 size_t buflen; 6246 size_t len; 6247 char *str; 6248 6249 if (clp->cl_owner_id != NULL) 6250 return 0; 6251 6252 len = 10 + 10 + 1 + 10 + 1 + 6253 strlen(clp->cl_rpcclient->cl_nodename) + 1; 6254 6255 buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf)); 6256 if (buflen) 6257 len += buflen + 1; 6258 6259 if (len > NFS4_OPAQUE_LIMIT + 1) 6260 return -EINVAL; 6261 6262 /* 6263 * Since this string is allocated at mount time, and held until the 6264 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 6265 * about a memory-reclaim deadlock. 6266 */ 6267 str = kmalloc(len, GFP_KERNEL); 6268 if (!str) 6269 return -ENOMEM; 6270 6271 if (buflen) 6272 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 6273 clp->rpc_ops->version, clp->cl_minorversion, 6274 buf, clp->cl_rpcclient->cl_nodename); 6275 else 6276 scnprintf(str, len, "Linux NFSv%u.%u %s", 6277 clp->rpc_ops->version, clp->cl_minorversion, 6278 clp->cl_rpcclient->cl_nodename); 6279 clp->cl_owner_id = str; 6280 return 0; 6281 } 6282 6283 /* 6284 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 6285 * services. Advertise one based on the address family of the 6286 * clientaddr. 6287 */ 6288 static unsigned int 6289 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 6290 { 6291 if (strchr(clp->cl_ipaddr, ':') != NULL) 6292 return scnprintf(buf, len, "tcp6"); 6293 else 6294 return scnprintf(buf, len, "tcp"); 6295 } 6296 6297 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 6298 { 6299 struct nfs4_setclientid *sc = calldata; 6300 6301 if (task->tk_status == 0) 6302 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 6303 } 6304 6305 static const struct rpc_call_ops nfs4_setclientid_ops = { 6306 .rpc_call_done = nfs4_setclientid_done, 6307 }; 6308 6309 /** 6310 * nfs4_proc_setclientid - Negotiate client ID 6311 * @clp: state data structure 6312 * @program: RPC program for NFSv4 callback service 6313 * @port: IP port number for NFS4 callback service 6314 * @cred: credential to use for this call 6315 * @res: where to place the result 6316 * 6317 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6318 */ 6319 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 6320 unsigned short port, const struct cred *cred, 6321 struct nfs4_setclientid_res *res) 6322 { 6323 nfs4_verifier sc_verifier; 6324 struct nfs4_setclientid setclientid = { 6325 .sc_verifier = &sc_verifier, 6326 .sc_prog = program, 6327 .sc_clnt = clp, 6328 }; 6329 struct rpc_message msg = { 6330 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 6331 .rpc_argp = &setclientid, 6332 .rpc_resp = res, 6333 .rpc_cred = cred, 6334 }; 6335 struct rpc_task_setup task_setup_data = { 6336 .rpc_client = clp->cl_rpcclient, 6337 .rpc_message = &msg, 6338 .callback_ops = &nfs4_setclientid_ops, 6339 .callback_data = &setclientid, 6340 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 6341 }; 6342 unsigned long now = jiffies; 6343 int status; 6344 6345 /* nfs_client_id4 */ 6346 nfs4_init_boot_verifier(clp, &sc_verifier); 6347 6348 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 6349 status = nfs4_init_uniform_client_string(clp); 6350 else 6351 status = nfs4_init_nonuniform_client_string(clp); 6352 6353 if (status) 6354 goto out; 6355 6356 /* cb_client4 */ 6357 setclientid.sc_netid_len = 6358 nfs4_init_callback_netid(clp, 6359 setclientid.sc_netid, 6360 sizeof(setclientid.sc_netid)); 6361 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 6362 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 6363 clp->cl_ipaddr, port >> 8, port & 255); 6364 6365 dprintk("NFS call setclientid auth=%s, '%s'\n", 6366 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6367 clp->cl_owner_id); 6368 6369 status = nfs4_call_sync_custom(&task_setup_data); 6370 if (setclientid.sc_cred) { 6371 kfree(clp->cl_acceptor); 6372 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 6373 put_rpccred(setclientid.sc_cred); 6374 } 6375 6376 if (status == 0) 6377 do_renew_lease(clp, now); 6378 out: 6379 trace_nfs4_setclientid(clp, status); 6380 dprintk("NFS reply setclientid: %d\n", status); 6381 return status; 6382 } 6383 6384 /** 6385 * nfs4_proc_setclientid_confirm - Confirm client ID 6386 * @clp: state data structure 6387 * @arg: result of a previous SETCLIENTID 6388 * @cred: credential to use for this call 6389 * 6390 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6391 */ 6392 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 6393 struct nfs4_setclientid_res *arg, 6394 const struct cred *cred) 6395 { 6396 struct rpc_message msg = { 6397 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 6398 .rpc_argp = arg, 6399 .rpc_cred = cred, 6400 }; 6401 int status; 6402 6403 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 6404 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6405 clp->cl_clientid); 6406 status = rpc_call_sync(clp->cl_rpcclient, &msg, 6407 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 6408 trace_nfs4_setclientid_confirm(clp, status); 6409 dprintk("NFS reply setclientid_confirm: %d\n", status); 6410 return status; 6411 } 6412 6413 struct nfs4_delegreturndata { 6414 struct nfs4_delegreturnargs args; 6415 struct nfs4_delegreturnres res; 6416 struct nfs_fh fh; 6417 nfs4_stateid stateid; 6418 unsigned long timestamp; 6419 struct { 6420 struct nfs4_layoutreturn_args arg; 6421 struct nfs4_layoutreturn_res res; 6422 struct nfs4_xdr_opaque_data ld_private; 6423 u32 roc_barrier; 6424 bool roc; 6425 } lr; 6426 struct nfs_fattr fattr; 6427 int rpc_status; 6428 struct inode *inode; 6429 }; 6430 6431 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 6432 { 6433 struct nfs4_delegreturndata *data = calldata; 6434 struct nfs4_exception exception = { 6435 .inode = data->inode, 6436 .stateid = &data->stateid, 6437 .task_is_privileged = data->args.seq_args.sa_privileged, 6438 }; 6439 6440 if (!nfs4_sequence_done(task, &data->res.seq_res)) 6441 return; 6442 6443 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 6444 6445 /* Handle Layoutreturn errors */ 6446 if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res, 6447 &data->res.lr_ret) == -EAGAIN) 6448 goto out_restart; 6449 6450 switch (task->tk_status) { 6451 case 0: 6452 renew_lease(data->res.server, data->timestamp); 6453 break; 6454 case -NFS4ERR_ADMIN_REVOKED: 6455 case -NFS4ERR_DELEG_REVOKED: 6456 case -NFS4ERR_EXPIRED: 6457 nfs4_free_revoked_stateid(data->res.server, 6458 data->args.stateid, 6459 task->tk_msg.rpc_cred); 6460 fallthrough; 6461 case -NFS4ERR_BAD_STATEID: 6462 case -NFS4ERR_STALE_STATEID: 6463 case -ETIMEDOUT: 6464 task->tk_status = 0; 6465 break; 6466 case -NFS4ERR_OLD_STATEID: 6467 if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode)) 6468 nfs4_stateid_seqid_inc(&data->stateid); 6469 if (data->args.bitmask) { 6470 data->args.bitmask = NULL; 6471 data->res.fattr = NULL; 6472 } 6473 goto out_restart; 6474 case -NFS4ERR_ACCESS: 6475 if (data->args.bitmask) { 6476 data->args.bitmask = NULL; 6477 data->res.fattr = NULL; 6478 goto out_restart; 6479 } 6480 fallthrough; 6481 default: 6482 task->tk_status = nfs4_async_handle_exception(task, 6483 data->res.server, task->tk_status, 6484 &exception); 6485 if (exception.retry) 6486 goto out_restart; 6487 } 6488 nfs_delegation_mark_returned(data->inode, data->args.stateid); 6489 data->rpc_status = task->tk_status; 6490 return; 6491 out_restart: 6492 task->tk_status = 0; 6493 rpc_restart_call_prepare(task); 6494 } 6495 6496 static void nfs4_delegreturn_release(void *calldata) 6497 { 6498 struct nfs4_delegreturndata *data = calldata; 6499 struct inode *inode = data->inode; 6500 6501 if (data->lr.roc) 6502 pnfs_roc_release(&data->lr.arg, &data->lr.res, 6503 data->res.lr_ret); 6504 if (inode) { 6505 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 6506 nfs_iput_and_deactive(inode); 6507 } 6508 kfree(calldata); 6509 } 6510 6511 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 6512 { 6513 struct nfs4_delegreturndata *d_data; 6514 struct pnfs_layout_hdr *lo; 6515 6516 d_data = (struct nfs4_delegreturndata *)data; 6517 6518 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { 6519 nfs4_sequence_done(task, &d_data->res.seq_res); 6520 return; 6521 } 6522 6523 lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; 6524 if (lo && !pnfs_layout_is_valid(lo)) { 6525 d_data->args.lr_args = NULL; 6526 d_data->res.lr_res = NULL; 6527 } 6528 6529 nfs4_setup_sequence(d_data->res.server->nfs_client, 6530 &d_data->args.seq_args, 6531 &d_data->res.seq_res, 6532 task); 6533 } 6534 6535 static const struct rpc_call_ops nfs4_delegreturn_ops = { 6536 .rpc_call_prepare = nfs4_delegreturn_prepare, 6537 .rpc_call_done = nfs4_delegreturn_done, 6538 .rpc_release = nfs4_delegreturn_release, 6539 }; 6540 6541 static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync) 6542 { 6543 struct nfs4_delegreturndata *data; 6544 struct nfs_server *server = NFS_SERVER(inode); 6545 struct rpc_task *task; 6546 struct rpc_message msg = { 6547 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 6548 .rpc_cred = cred, 6549 }; 6550 struct rpc_task_setup task_setup_data = { 6551 .rpc_client = server->client, 6552 .rpc_message = &msg, 6553 .callback_ops = &nfs4_delegreturn_ops, 6554 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE, 6555 }; 6556 int status = 0; 6557 6558 data = kzalloc(sizeof(*data), GFP_NOFS); 6559 if (data == NULL) 6560 return -ENOMEM; 6561 6562 nfs4_state_protect(server->nfs_client, 6563 NFS_SP4_MACH_CRED_CLEANUP, 6564 &task_setup_data.rpc_client, &msg); 6565 6566 data->args.fhandle = &data->fh; 6567 data->args.stateid = &data->stateid; 6568 nfs4_bitmask_set(data->args.bitmask_store, 6569 server->cache_consistency_bitmask, inode, server, 6570 NULL); 6571 data->args.bitmask = data->args.bitmask_store; 6572 nfs_copy_fh(&data->fh, NFS_FH(inode)); 6573 nfs4_stateid_copy(&data->stateid, stateid); 6574 data->res.fattr = &data->fattr; 6575 data->res.server = server; 6576 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT; 6577 data->lr.arg.ld_private = &data->lr.ld_private; 6578 nfs_fattr_init(data->res.fattr); 6579 data->timestamp = jiffies; 6580 data->rpc_status = 0; 6581 data->inode = nfs_igrab_and_active(inode); 6582 if (data->inode || issync) { 6583 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, 6584 cred); 6585 if (data->lr.roc) { 6586 data->args.lr_args = &data->lr.arg; 6587 data->res.lr_res = &data->lr.res; 6588 } 6589 } 6590 6591 if (!data->inode) 6592 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6593 1); 6594 else 6595 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 6596 0); 6597 task_setup_data.callback_data = data; 6598 msg.rpc_argp = &data->args; 6599 msg.rpc_resp = &data->res; 6600 task = rpc_run_task(&task_setup_data); 6601 if (IS_ERR(task)) 6602 return PTR_ERR(task); 6603 if (!issync) 6604 goto out; 6605 status = rpc_wait_for_completion_task(task); 6606 if (status != 0) 6607 goto out; 6608 status = data->rpc_status; 6609 out: 6610 rpc_put_task(task); 6611 return status; 6612 } 6613 6614 int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync) 6615 { 6616 struct nfs_server *server = NFS_SERVER(inode); 6617 struct nfs4_exception exception = { }; 6618 int err; 6619 do { 6620 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 6621 trace_nfs4_delegreturn(inode, stateid, err); 6622 switch (err) { 6623 case -NFS4ERR_STALE_STATEID: 6624 case -NFS4ERR_EXPIRED: 6625 case 0: 6626 return 0; 6627 } 6628 err = nfs4_handle_exception(server, err, &exception); 6629 } while (exception.retry); 6630 return err; 6631 } 6632 6633 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6634 { 6635 struct inode *inode = state->inode; 6636 struct nfs_server *server = NFS_SERVER(inode); 6637 struct nfs_client *clp = server->nfs_client; 6638 struct nfs_lockt_args arg = { 6639 .fh = NFS_FH(inode), 6640 .fl = request, 6641 }; 6642 struct nfs_lockt_res res = { 6643 .denied = request, 6644 }; 6645 struct rpc_message msg = { 6646 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 6647 .rpc_argp = &arg, 6648 .rpc_resp = &res, 6649 .rpc_cred = state->owner->so_cred, 6650 }; 6651 struct nfs4_lock_state *lsp; 6652 int status; 6653 6654 arg.lock_owner.clientid = clp->cl_clientid; 6655 status = nfs4_set_lock_state(state, request); 6656 if (status != 0) 6657 goto out; 6658 lsp = request->fl_u.nfs4_fl.owner; 6659 arg.lock_owner.id = lsp->ls_seqid.owner_id; 6660 arg.lock_owner.s_dev = server->s_dev; 6661 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 6662 switch (status) { 6663 case 0: 6664 request->fl_type = F_UNLCK; 6665 break; 6666 case -NFS4ERR_DENIED: 6667 status = 0; 6668 } 6669 request->fl_ops->fl_release_private(request); 6670 request->fl_ops = NULL; 6671 out: 6672 return status; 6673 } 6674 6675 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6676 { 6677 struct nfs4_exception exception = { 6678 .interruptible = true, 6679 }; 6680 int err; 6681 6682 do { 6683 err = _nfs4_proc_getlk(state, cmd, request); 6684 trace_nfs4_get_lock(request, state, cmd, err); 6685 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 6686 &exception); 6687 } while (exception.retry); 6688 return err; 6689 } 6690 6691 /* 6692 * Update the seqid of a lock stateid after receiving 6693 * NFS4ERR_OLD_STATEID 6694 */ 6695 static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst, 6696 struct nfs4_lock_state *lsp) 6697 { 6698 struct nfs4_state *state = lsp->ls_state; 6699 bool ret = false; 6700 6701 spin_lock(&state->state_lock); 6702 if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid)) 6703 goto out; 6704 if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst)) 6705 nfs4_stateid_seqid_inc(dst); 6706 else 6707 dst->seqid = lsp->ls_stateid.seqid; 6708 ret = true; 6709 out: 6710 spin_unlock(&state->state_lock); 6711 return ret; 6712 } 6713 6714 static bool nfs4_sync_lock_stateid(nfs4_stateid *dst, 6715 struct nfs4_lock_state *lsp) 6716 { 6717 struct nfs4_state *state = lsp->ls_state; 6718 bool ret; 6719 6720 spin_lock(&state->state_lock); 6721 ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid); 6722 nfs4_stateid_copy(dst, &lsp->ls_stateid); 6723 spin_unlock(&state->state_lock); 6724 return ret; 6725 } 6726 6727 struct nfs4_unlockdata { 6728 struct nfs_locku_args arg; 6729 struct nfs_locku_res res; 6730 struct nfs4_lock_state *lsp; 6731 struct nfs_open_context *ctx; 6732 struct nfs_lock_context *l_ctx; 6733 struct file_lock fl; 6734 struct nfs_server *server; 6735 unsigned long timestamp; 6736 }; 6737 6738 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 6739 struct nfs_open_context *ctx, 6740 struct nfs4_lock_state *lsp, 6741 struct nfs_seqid *seqid) 6742 { 6743 struct nfs4_unlockdata *p; 6744 struct nfs4_state *state = lsp->ls_state; 6745 struct inode *inode = state->inode; 6746 6747 p = kzalloc(sizeof(*p), GFP_NOFS); 6748 if (p == NULL) 6749 return NULL; 6750 p->arg.fh = NFS_FH(inode); 6751 p->arg.fl = &p->fl; 6752 p->arg.seqid = seqid; 6753 p->res.seqid = seqid; 6754 p->lsp = lsp; 6755 /* Ensure we don't close file until we're done freeing locks! */ 6756 p->ctx = get_nfs_open_context(ctx); 6757 p->l_ctx = nfs_get_lock_context(ctx); 6758 locks_init_lock(&p->fl); 6759 locks_copy_lock(&p->fl, fl); 6760 p->server = NFS_SERVER(inode); 6761 spin_lock(&state->state_lock); 6762 nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid); 6763 spin_unlock(&state->state_lock); 6764 return p; 6765 } 6766 6767 static void nfs4_locku_release_calldata(void *data) 6768 { 6769 struct nfs4_unlockdata *calldata = data; 6770 nfs_free_seqid(calldata->arg.seqid); 6771 nfs4_put_lock_state(calldata->lsp); 6772 nfs_put_lock_context(calldata->l_ctx); 6773 put_nfs_open_context(calldata->ctx); 6774 kfree(calldata); 6775 } 6776 6777 static void nfs4_locku_done(struct rpc_task *task, void *data) 6778 { 6779 struct nfs4_unlockdata *calldata = data; 6780 struct nfs4_exception exception = { 6781 .inode = calldata->lsp->ls_state->inode, 6782 .stateid = &calldata->arg.stateid, 6783 }; 6784 6785 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 6786 return; 6787 switch (task->tk_status) { 6788 case 0: 6789 renew_lease(calldata->server, calldata->timestamp); 6790 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); 6791 if (nfs4_update_lock_stateid(calldata->lsp, 6792 &calldata->res.stateid)) 6793 break; 6794 fallthrough; 6795 case -NFS4ERR_ADMIN_REVOKED: 6796 case -NFS4ERR_EXPIRED: 6797 nfs4_free_revoked_stateid(calldata->server, 6798 &calldata->arg.stateid, 6799 task->tk_msg.rpc_cred); 6800 fallthrough; 6801 case -NFS4ERR_BAD_STATEID: 6802 case -NFS4ERR_STALE_STATEID: 6803 if (nfs4_sync_lock_stateid(&calldata->arg.stateid, 6804 calldata->lsp)) 6805 rpc_restart_call_prepare(task); 6806 break; 6807 case -NFS4ERR_OLD_STATEID: 6808 if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid, 6809 calldata->lsp)) 6810 rpc_restart_call_prepare(task); 6811 break; 6812 default: 6813 task->tk_status = nfs4_async_handle_exception(task, 6814 calldata->server, task->tk_status, 6815 &exception); 6816 if (exception.retry) 6817 rpc_restart_call_prepare(task); 6818 } 6819 nfs_release_seqid(calldata->arg.seqid); 6820 } 6821 6822 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 6823 { 6824 struct nfs4_unlockdata *calldata = data; 6825 6826 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) && 6827 nfs_async_iocounter_wait(task, calldata->l_ctx)) 6828 return; 6829 6830 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 6831 goto out_wait; 6832 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 6833 /* Note: exit _without_ running nfs4_locku_done */ 6834 goto out_no_action; 6835 } 6836 calldata->timestamp = jiffies; 6837 if (nfs4_setup_sequence(calldata->server->nfs_client, 6838 &calldata->arg.seq_args, 6839 &calldata->res.seq_res, 6840 task) != 0) 6841 nfs_release_seqid(calldata->arg.seqid); 6842 return; 6843 out_no_action: 6844 task->tk_action = NULL; 6845 out_wait: 6846 nfs4_sequence_done(task, &calldata->res.seq_res); 6847 } 6848 6849 static const struct rpc_call_ops nfs4_locku_ops = { 6850 .rpc_call_prepare = nfs4_locku_prepare, 6851 .rpc_call_done = nfs4_locku_done, 6852 .rpc_release = nfs4_locku_release_calldata, 6853 }; 6854 6855 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 6856 struct nfs_open_context *ctx, 6857 struct nfs4_lock_state *lsp, 6858 struct nfs_seqid *seqid) 6859 { 6860 struct nfs4_unlockdata *data; 6861 struct rpc_message msg = { 6862 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 6863 .rpc_cred = ctx->cred, 6864 }; 6865 struct rpc_task_setup task_setup_data = { 6866 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 6867 .rpc_message = &msg, 6868 .callback_ops = &nfs4_locku_ops, 6869 .workqueue = nfsiod_workqueue, 6870 .flags = RPC_TASK_ASYNC, 6871 }; 6872 struct nfs_client *client = 6873 NFS_SERVER(lsp->ls_state->inode)->nfs_client; 6874 6875 if (client->cl_minorversion) 6876 task_setup_data.flags |= RPC_TASK_MOVEABLE; 6877 6878 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 6879 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 6880 6881 /* Ensure this is an unlock - when canceling a lock, the 6882 * canceled lock is passed in, and it won't be an unlock. 6883 */ 6884 fl->fl_type = F_UNLCK; 6885 if (fl->fl_flags & FL_CLOSE) 6886 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags); 6887 6888 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 6889 if (data == NULL) { 6890 nfs_free_seqid(seqid); 6891 return ERR_PTR(-ENOMEM); 6892 } 6893 6894 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0); 6895 msg.rpc_argp = &data->arg; 6896 msg.rpc_resp = &data->res; 6897 task_setup_data.callback_data = data; 6898 return rpc_run_task(&task_setup_data); 6899 } 6900 6901 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 6902 { 6903 struct inode *inode = state->inode; 6904 struct nfs4_state_owner *sp = state->owner; 6905 struct nfs_inode *nfsi = NFS_I(inode); 6906 struct nfs_seqid *seqid; 6907 struct nfs4_lock_state *lsp; 6908 struct rpc_task *task; 6909 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 6910 int status = 0; 6911 unsigned char fl_flags = request->fl_flags; 6912 6913 status = nfs4_set_lock_state(state, request); 6914 /* Unlock _before_ we do the RPC call */ 6915 request->fl_flags |= FL_EXISTS; 6916 /* Exclude nfs_delegation_claim_locks() */ 6917 mutex_lock(&sp->so_delegreturn_mutex); 6918 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 6919 down_read(&nfsi->rwsem); 6920 if (locks_lock_inode_wait(inode, request) == -ENOENT) { 6921 up_read(&nfsi->rwsem); 6922 mutex_unlock(&sp->so_delegreturn_mutex); 6923 goto out; 6924 } 6925 up_read(&nfsi->rwsem); 6926 mutex_unlock(&sp->so_delegreturn_mutex); 6927 if (status != 0) 6928 goto out; 6929 /* Is this a delegated lock? */ 6930 lsp = request->fl_u.nfs4_fl.owner; 6931 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 6932 goto out; 6933 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 6934 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 6935 status = -ENOMEM; 6936 if (IS_ERR(seqid)) 6937 goto out; 6938 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 6939 status = PTR_ERR(task); 6940 if (IS_ERR(task)) 6941 goto out; 6942 status = rpc_wait_for_completion_task(task); 6943 rpc_put_task(task); 6944 out: 6945 request->fl_flags = fl_flags; 6946 trace_nfs4_unlock(request, state, F_SETLK, status); 6947 return status; 6948 } 6949 6950 struct nfs4_lockdata { 6951 struct nfs_lock_args arg; 6952 struct nfs_lock_res res; 6953 struct nfs4_lock_state *lsp; 6954 struct nfs_open_context *ctx; 6955 struct file_lock fl; 6956 unsigned long timestamp; 6957 int rpc_status; 6958 int cancelled; 6959 struct nfs_server *server; 6960 }; 6961 6962 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 6963 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 6964 gfp_t gfp_mask) 6965 { 6966 struct nfs4_lockdata *p; 6967 struct inode *inode = lsp->ls_state->inode; 6968 struct nfs_server *server = NFS_SERVER(inode); 6969 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 6970 6971 p = kzalloc(sizeof(*p), gfp_mask); 6972 if (p == NULL) 6973 return NULL; 6974 6975 p->arg.fh = NFS_FH(inode); 6976 p->arg.fl = &p->fl; 6977 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 6978 if (IS_ERR(p->arg.open_seqid)) 6979 goto out_free; 6980 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 6981 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 6982 if (IS_ERR(p->arg.lock_seqid)) 6983 goto out_free_seqid; 6984 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 6985 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 6986 p->arg.lock_owner.s_dev = server->s_dev; 6987 p->res.lock_seqid = p->arg.lock_seqid; 6988 p->lsp = lsp; 6989 p->server = server; 6990 p->ctx = get_nfs_open_context(ctx); 6991 locks_init_lock(&p->fl); 6992 locks_copy_lock(&p->fl, fl); 6993 return p; 6994 out_free_seqid: 6995 nfs_free_seqid(p->arg.open_seqid); 6996 out_free: 6997 kfree(p); 6998 return NULL; 6999 } 7000 7001 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 7002 { 7003 struct nfs4_lockdata *data = calldata; 7004 struct nfs4_state *state = data->lsp->ls_state; 7005 7006 dprintk("%s: begin!\n", __func__); 7007 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 7008 goto out_wait; 7009 /* Do we need to do an open_to_lock_owner? */ 7010 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 7011 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 7012 goto out_release_lock_seqid; 7013 } 7014 nfs4_stateid_copy(&data->arg.open_stateid, 7015 &state->open_stateid); 7016 data->arg.new_lock_owner = 1; 7017 data->res.open_seqid = data->arg.open_seqid; 7018 } else { 7019 data->arg.new_lock_owner = 0; 7020 nfs4_stateid_copy(&data->arg.lock_stateid, 7021 &data->lsp->ls_stateid); 7022 } 7023 if (!nfs4_valid_open_stateid(state)) { 7024 data->rpc_status = -EBADF; 7025 task->tk_action = NULL; 7026 goto out_release_open_seqid; 7027 } 7028 data->timestamp = jiffies; 7029 if (nfs4_setup_sequence(data->server->nfs_client, 7030 &data->arg.seq_args, 7031 &data->res.seq_res, 7032 task) == 0) 7033 return; 7034 out_release_open_seqid: 7035 nfs_release_seqid(data->arg.open_seqid); 7036 out_release_lock_seqid: 7037 nfs_release_seqid(data->arg.lock_seqid); 7038 out_wait: 7039 nfs4_sequence_done(task, &data->res.seq_res); 7040 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 7041 } 7042 7043 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 7044 { 7045 struct nfs4_lockdata *data = calldata; 7046 struct nfs4_lock_state *lsp = data->lsp; 7047 7048 dprintk("%s: begin!\n", __func__); 7049 7050 if (!nfs4_sequence_done(task, &data->res.seq_res)) 7051 return; 7052 7053 data->rpc_status = task->tk_status; 7054 switch (task->tk_status) { 7055 case 0: 7056 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 7057 data->timestamp); 7058 if (data->arg.new_lock && !data->cancelled) { 7059 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 7060 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) 7061 goto out_restart; 7062 } 7063 if (data->arg.new_lock_owner != 0) { 7064 nfs_confirm_seqid(&lsp->ls_seqid, 0); 7065 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 7066 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 7067 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 7068 goto out_restart; 7069 break; 7070 case -NFS4ERR_BAD_STATEID: 7071 case -NFS4ERR_OLD_STATEID: 7072 case -NFS4ERR_STALE_STATEID: 7073 case -NFS4ERR_EXPIRED: 7074 if (data->arg.new_lock_owner != 0) { 7075 if (!nfs4_stateid_match(&data->arg.open_stateid, 7076 &lsp->ls_state->open_stateid)) 7077 goto out_restart; 7078 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 7079 &lsp->ls_stateid)) 7080 goto out_restart; 7081 } 7082 out_done: 7083 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 7084 return; 7085 out_restart: 7086 if (!data->cancelled) 7087 rpc_restart_call_prepare(task); 7088 goto out_done; 7089 } 7090 7091 static void nfs4_lock_release(void *calldata) 7092 { 7093 struct nfs4_lockdata *data = calldata; 7094 7095 dprintk("%s: begin!\n", __func__); 7096 nfs_free_seqid(data->arg.open_seqid); 7097 if (data->cancelled && data->rpc_status == 0) { 7098 struct rpc_task *task; 7099 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 7100 data->arg.lock_seqid); 7101 if (!IS_ERR(task)) 7102 rpc_put_task_async(task); 7103 dprintk("%s: cancelling lock!\n", __func__); 7104 } else 7105 nfs_free_seqid(data->arg.lock_seqid); 7106 nfs4_put_lock_state(data->lsp); 7107 put_nfs_open_context(data->ctx); 7108 kfree(data); 7109 dprintk("%s: done!\n", __func__); 7110 } 7111 7112 static const struct rpc_call_ops nfs4_lock_ops = { 7113 .rpc_call_prepare = nfs4_lock_prepare, 7114 .rpc_call_done = nfs4_lock_done, 7115 .rpc_release = nfs4_lock_release, 7116 }; 7117 7118 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 7119 { 7120 switch (error) { 7121 case -NFS4ERR_ADMIN_REVOKED: 7122 case -NFS4ERR_EXPIRED: 7123 case -NFS4ERR_BAD_STATEID: 7124 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7125 if (new_lock_owner != 0 || 7126 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 7127 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 7128 break; 7129 case -NFS4ERR_STALE_STATEID: 7130 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 7131 nfs4_schedule_lease_recovery(server->nfs_client); 7132 } 7133 } 7134 7135 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 7136 { 7137 struct nfs4_lockdata *data; 7138 struct rpc_task *task; 7139 struct rpc_message msg = { 7140 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 7141 .rpc_cred = state->owner->so_cred, 7142 }; 7143 struct rpc_task_setup task_setup_data = { 7144 .rpc_client = NFS_CLIENT(state->inode), 7145 .rpc_message = &msg, 7146 .callback_ops = &nfs4_lock_ops, 7147 .workqueue = nfsiod_workqueue, 7148 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF, 7149 }; 7150 int ret; 7151 struct nfs_client *client = NFS_SERVER(state->inode)->nfs_client; 7152 7153 if (client->cl_minorversion) 7154 task_setup_data.flags |= RPC_TASK_MOVEABLE; 7155 7156 dprintk("%s: begin!\n", __func__); 7157 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 7158 fl->fl_u.nfs4_fl.owner, 7159 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 7160 if (data == NULL) 7161 return -ENOMEM; 7162 if (IS_SETLKW(cmd)) 7163 data->arg.block = 1; 7164 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 7165 recovery_type > NFS_LOCK_NEW); 7166 msg.rpc_argp = &data->arg; 7167 msg.rpc_resp = &data->res; 7168 task_setup_data.callback_data = data; 7169 if (recovery_type > NFS_LOCK_NEW) { 7170 if (recovery_type == NFS_LOCK_RECLAIM) 7171 data->arg.reclaim = NFS_LOCK_RECLAIM; 7172 } else 7173 data->arg.new_lock = 1; 7174 task = rpc_run_task(&task_setup_data); 7175 if (IS_ERR(task)) 7176 return PTR_ERR(task); 7177 ret = rpc_wait_for_completion_task(task); 7178 if (ret == 0) { 7179 ret = data->rpc_status; 7180 if (ret) 7181 nfs4_handle_setlk_error(data->server, data->lsp, 7182 data->arg.new_lock_owner, ret); 7183 } else 7184 data->cancelled = true; 7185 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 7186 rpc_put_task(task); 7187 dprintk("%s: done, ret = %d!\n", __func__, ret); 7188 return ret; 7189 } 7190 7191 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 7192 { 7193 struct nfs_server *server = NFS_SERVER(state->inode); 7194 struct nfs4_exception exception = { 7195 .inode = state->inode, 7196 }; 7197 int err; 7198 7199 do { 7200 /* Cache the lock if possible... */ 7201 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7202 return 0; 7203 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 7204 if (err != -NFS4ERR_DELAY) 7205 break; 7206 nfs4_handle_exception(server, err, &exception); 7207 } while (exception.retry); 7208 return err; 7209 } 7210 7211 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 7212 { 7213 struct nfs_server *server = NFS_SERVER(state->inode); 7214 struct nfs4_exception exception = { 7215 .inode = state->inode, 7216 }; 7217 int err; 7218 7219 err = nfs4_set_lock_state(state, request); 7220 if (err != 0) 7221 return err; 7222 if (!recover_lost_locks) { 7223 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 7224 return 0; 7225 } 7226 do { 7227 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 7228 return 0; 7229 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 7230 switch (err) { 7231 default: 7232 goto out; 7233 case -NFS4ERR_GRACE: 7234 case -NFS4ERR_DELAY: 7235 nfs4_handle_exception(server, err, &exception); 7236 err = 0; 7237 } 7238 } while (exception.retry); 7239 out: 7240 return err; 7241 } 7242 7243 #if defined(CONFIG_NFS_V4_1) 7244 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 7245 { 7246 struct nfs4_lock_state *lsp; 7247 int status; 7248 7249 status = nfs4_set_lock_state(state, request); 7250 if (status != 0) 7251 return status; 7252 lsp = request->fl_u.nfs4_fl.owner; 7253 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) || 7254 test_bit(NFS_LOCK_LOST, &lsp->ls_flags)) 7255 return 0; 7256 return nfs4_lock_expired(state, request); 7257 } 7258 #endif 7259 7260 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7261 { 7262 struct nfs_inode *nfsi = NFS_I(state->inode); 7263 struct nfs4_state_owner *sp = state->owner; 7264 unsigned char fl_flags = request->fl_flags; 7265 int status; 7266 7267 request->fl_flags |= FL_ACCESS; 7268 status = locks_lock_inode_wait(state->inode, request); 7269 if (status < 0) 7270 goto out; 7271 mutex_lock(&sp->so_delegreturn_mutex); 7272 down_read(&nfsi->rwsem); 7273 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 7274 /* Yes: cache locks! */ 7275 /* ...but avoid races with delegation recall... */ 7276 request->fl_flags = fl_flags & ~FL_SLEEP; 7277 status = locks_lock_inode_wait(state->inode, request); 7278 up_read(&nfsi->rwsem); 7279 mutex_unlock(&sp->so_delegreturn_mutex); 7280 goto out; 7281 } 7282 up_read(&nfsi->rwsem); 7283 mutex_unlock(&sp->so_delegreturn_mutex); 7284 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 7285 out: 7286 request->fl_flags = fl_flags; 7287 return status; 7288 } 7289 7290 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7291 { 7292 struct nfs4_exception exception = { 7293 .state = state, 7294 .inode = state->inode, 7295 .interruptible = true, 7296 }; 7297 int err; 7298 7299 do { 7300 err = _nfs4_proc_setlk(state, cmd, request); 7301 if (err == -NFS4ERR_DENIED) 7302 err = -EAGAIN; 7303 err = nfs4_handle_exception(NFS_SERVER(state->inode), 7304 err, &exception); 7305 } while (exception.retry); 7306 return err; 7307 } 7308 7309 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 7310 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 7311 7312 static int 7313 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd, 7314 struct file_lock *request) 7315 { 7316 int status = -ERESTARTSYS; 7317 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 7318 7319 while(!signalled()) { 7320 status = nfs4_proc_setlk(state, cmd, request); 7321 if ((status != -EAGAIN) || IS_SETLK(cmd)) 7322 break; 7323 freezable_schedule_timeout_interruptible(timeout); 7324 timeout *= 2; 7325 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 7326 status = -ERESTARTSYS; 7327 } 7328 return status; 7329 } 7330 7331 #ifdef CONFIG_NFS_V4_1 7332 struct nfs4_lock_waiter { 7333 struct inode *inode; 7334 struct nfs_lowner owner; 7335 wait_queue_entry_t wait; 7336 }; 7337 7338 static int 7339 nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) 7340 { 7341 struct nfs4_lock_waiter *waiter = 7342 container_of(wait, struct nfs4_lock_waiter, wait); 7343 7344 /* NULL key means to wake up everyone */ 7345 if (key) { 7346 struct cb_notify_lock_args *cbnl = key; 7347 struct nfs_lowner *lowner = &cbnl->cbnl_owner, 7348 *wowner = &waiter->owner; 7349 7350 /* Only wake if the callback was for the same owner. */ 7351 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev) 7352 return 0; 7353 7354 /* Make sure it's for the right inode */ 7355 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 7356 return 0; 7357 } 7358 7359 return woken_wake_function(wait, mode, flags, key); 7360 } 7361 7362 static int 7363 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7364 { 7365 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 7366 struct nfs_server *server = NFS_SERVER(state->inode); 7367 struct nfs_client *clp = server->nfs_client; 7368 wait_queue_head_t *q = &clp->cl_lock_waitq; 7369 struct nfs4_lock_waiter waiter = { 7370 .inode = state->inode, 7371 .owner = { .clientid = clp->cl_clientid, 7372 .id = lsp->ls_seqid.owner_id, 7373 .s_dev = server->s_dev }, 7374 }; 7375 int status; 7376 7377 /* Don't bother with waitqueue if we don't expect a callback */ 7378 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) 7379 return nfs4_retry_setlk_simple(state, cmd, request); 7380 7381 init_wait(&waiter.wait); 7382 waiter.wait.func = nfs4_wake_lock_waiter; 7383 add_wait_queue(q, &waiter.wait); 7384 7385 do { 7386 status = nfs4_proc_setlk(state, cmd, request); 7387 if (status != -EAGAIN || IS_SETLK(cmd)) 7388 break; 7389 7390 status = -ERESTARTSYS; 7391 freezer_do_not_count(); 7392 wait_woken(&waiter.wait, TASK_INTERRUPTIBLE, 7393 NFS4_LOCK_MAXTIMEOUT); 7394 freezer_count(); 7395 } while (!signalled()); 7396 7397 remove_wait_queue(q, &waiter.wait); 7398 7399 return status; 7400 } 7401 #else /* !CONFIG_NFS_V4_1 */ 7402 static inline int 7403 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 7404 { 7405 return nfs4_retry_setlk_simple(state, cmd, request); 7406 } 7407 #endif 7408 7409 static int 7410 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 7411 { 7412 struct nfs_open_context *ctx; 7413 struct nfs4_state *state; 7414 int status; 7415 7416 /* verify open state */ 7417 ctx = nfs_file_open_context(filp); 7418 state = ctx->state; 7419 7420 if (IS_GETLK(cmd)) { 7421 if (state != NULL) 7422 return nfs4_proc_getlk(state, F_GETLK, request); 7423 return 0; 7424 } 7425 7426 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 7427 return -EINVAL; 7428 7429 if (request->fl_type == F_UNLCK) { 7430 if (state != NULL) 7431 return nfs4_proc_unlck(state, cmd, request); 7432 return 0; 7433 } 7434 7435 if (state == NULL) 7436 return -ENOLCK; 7437 7438 if ((request->fl_flags & FL_POSIX) && 7439 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 7440 return -ENOLCK; 7441 7442 /* 7443 * Don't rely on the VFS having checked the file open mode, 7444 * since it won't do this for flock() locks. 7445 */ 7446 switch (request->fl_type) { 7447 case F_RDLCK: 7448 if (!(filp->f_mode & FMODE_READ)) 7449 return -EBADF; 7450 break; 7451 case F_WRLCK: 7452 if (!(filp->f_mode & FMODE_WRITE)) 7453 return -EBADF; 7454 } 7455 7456 status = nfs4_set_lock_state(state, request); 7457 if (status != 0) 7458 return status; 7459 7460 return nfs4_retry_setlk(state, cmd, request); 7461 } 7462 7463 static int nfs4_delete_lease(struct file *file, void **priv) 7464 { 7465 return generic_setlease(file, F_UNLCK, NULL, priv); 7466 } 7467 7468 static int nfs4_add_lease(struct file *file, long arg, struct file_lock **lease, 7469 void **priv) 7470 { 7471 struct inode *inode = file_inode(file); 7472 fmode_t type = arg == F_RDLCK ? FMODE_READ : FMODE_WRITE; 7473 int ret; 7474 7475 /* No delegation, no lease */ 7476 if (!nfs4_have_delegation(inode, type)) 7477 return -EAGAIN; 7478 ret = generic_setlease(file, arg, lease, priv); 7479 if (ret || nfs4_have_delegation(inode, type)) 7480 return ret; 7481 /* We raced with a delegation return */ 7482 nfs4_delete_lease(file, priv); 7483 return -EAGAIN; 7484 } 7485 7486 int nfs4_proc_setlease(struct file *file, long arg, struct file_lock **lease, 7487 void **priv) 7488 { 7489 switch (arg) { 7490 case F_RDLCK: 7491 case F_WRLCK: 7492 return nfs4_add_lease(file, arg, lease, priv); 7493 case F_UNLCK: 7494 return nfs4_delete_lease(file, priv); 7495 default: 7496 return -EINVAL; 7497 } 7498 } 7499 7500 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 7501 { 7502 struct nfs_server *server = NFS_SERVER(state->inode); 7503 int err; 7504 7505 err = nfs4_set_lock_state(state, fl); 7506 if (err != 0) 7507 return err; 7508 do { 7509 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 7510 if (err != -NFS4ERR_DELAY) 7511 break; 7512 ssleep(1); 7513 } while (err == -NFS4ERR_DELAY); 7514 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); 7515 } 7516 7517 struct nfs_release_lockowner_data { 7518 struct nfs4_lock_state *lsp; 7519 struct nfs_server *server; 7520 struct nfs_release_lockowner_args args; 7521 struct nfs_release_lockowner_res res; 7522 unsigned long timestamp; 7523 }; 7524 7525 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 7526 { 7527 struct nfs_release_lockowner_data *data = calldata; 7528 struct nfs_server *server = data->server; 7529 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args, 7530 &data->res.seq_res, task); 7531 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 7532 data->timestamp = jiffies; 7533 } 7534 7535 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 7536 { 7537 struct nfs_release_lockowner_data *data = calldata; 7538 struct nfs_server *server = data->server; 7539 7540 nfs40_sequence_done(task, &data->res.seq_res); 7541 7542 switch (task->tk_status) { 7543 case 0: 7544 renew_lease(server, data->timestamp); 7545 break; 7546 case -NFS4ERR_STALE_CLIENTID: 7547 case -NFS4ERR_EXPIRED: 7548 nfs4_schedule_lease_recovery(server->nfs_client); 7549 break; 7550 case -NFS4ERR_LEASE_MOVED: 7551 case -NFS4ERR_DELAY: 7552 if (nfs4_async_handle_error(task, server, 7553 NULL, NULL) == -EAGAIN) 7554 rpc_restart_call_prepare(task); 7555 } 7556 } 7557 7558 static void nfs4_release_lockowner_release(void *calldata) 7559 { 7560 struct nfs_release_lockowner_data *data = calldata; 7561 nfs4_free_lock_state(data->server, data->lsp); 7562 kfree(calldata); 7563 } 7564 7565 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 7566 .rpc_call_prepare = nfs4_release_lockowner_prepare, 7567 .rpc_call_done = nfs4_release_lockowner_done, 7568 .rpc_release = nfs4_release_lockowner_release, 7569 }; 7570 7571 static void 7572 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 7573 { 7574 struct nfs_release_lockowner_data *data; 7575 struct rpc_message msg = { 7576 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 7577 }; 7578 7579 if (server->nfs_client->cl_mvops->minor_version != 0) 7580 return; 7581 7582 data = kmalloc(sizeof(*data), GFP_NOFS); 7583 if (!data) 7584 return; 7585 data->lsp = lsp; 7586 data->server = server; 7587 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 7588 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 7589 data->args.lock_owner.s_dev = server->s_dev; 7590 7591 msg.rpc_argp = &data->args; 7592 msg.rpc_resp = &data->res; 7593 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0); 7594 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 7595 } 7596 7597 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 7598 7599 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 7600 struct user_namespace *mnt_userns, 7601 struct dentry *unused, struct inode *inode, 7602 const char *key, const void *buf, 7603 size_t buflen, int flags) 7604 { 7605 return nfs4_proc_set_acl(inode, buf, buflen); 7606 } 7607 7608 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 7609 struct dentry *unused, struct inode *inode, 7610 const char *key, void *buf, size_t buflen) 7611 { 7612 return nfs4_proc_get_acl(inode, buf, buflen); 7613 } 7614 7615 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 7616 { 7617 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))); 7618 } 7619 7620 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 7621 7622 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 7623 struct user_namespace *mnt_userns, 7624 struct dentry *unused, struct inode *inode, 7625 const char *key, const void *buf, 7626 size_t buflen, int flags) 7627 { 7628 if (security_ismaclabel(key)) 7629 return nfs4_set_security_label(inode, buf, buflen); 7630 7631 return -EOPNOTSUPP; 7632 } 7633 7634 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 7635 struct dentry *unused, struct inode *inode, 7636 const char *key, void *buf, size_t buflen) 7637 { 7638 if (security_ismaclabel(key)) 7639 return nfs4_get_security_label(inode, buf, buflen); 7640 return -EOPNOTSUPP; 7641 } 7642 7643 static ssize_t 7644 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 7645 { 7646 int len = 0; 7647 7648 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 7649 len = security_inode_listsecurity(inode, list, list_len); 7650 if (len >= 0 && list_len && len > list_len) 7651 return -ERANGE; 7652 } 7653 return len; 7654 } 7655 7656 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 7657 .prefix = XATTR_SECURITY_PREFIX, 7658 .get = nfs4_xattr_get_nfs4_label, 7659 .set = nfs4_xattr_set_nfs4_label, 7660 }; 7661 7662 #else 7663 7664 static ssize_t 7665 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 7666 { 7667 return 0; 7668 } 7669 7670 #endif 7671 7672 #ifdef CONFIG_NFS_V4_2 7673 static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, 7674 struct user_namespace *mnt_userns, 7675 struct dentry *unused, struct inode *inode, 7676 const char *key, const void *buf, 7677 size_t buflen, int flags) 7678 { 7679 struct nfs_access_entry cache; 7680 int ret; 7681 7682 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 7683 return -EOPNOTSUPP; 7684 7685 /* 7686 * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA* 7687 * flags right now. Handling of xattr operations use the normal 7688 * file read/write permissions. 7689 * 7690 * Just in case the server has other ideas (which RFC 8276 allows), 7691 * do a cached access check for the XA* flags to possibly avoid 7692 * doing an RPC and getting EACCES back. 7693 */ 7694 if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { 7695 if (!(cache.mask & NFS_ACCESS_XAWRITE)) 7696 return -EACCES; 7697 } 7698 7699 if (buf == NULL) { 7700 ret = nfs42_proc_removexattr(inode, key); 7701 if (!ret) 7702 nfs4_xattr_cache_remove(inode, key); 7703 } else { 7704 ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); 7705 if (!ret) 7706 nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); 7707 } 7708 7709 return ret; 7710 } 7711 7712 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, 7713 struct dentry *unused, struct inode *inode, 7714 const char *key, void *buf, size_t buflen) 7715 { 7716 struct nfs_access_entry cache; 7717 ssize_t ret; 7718 7719 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 7720 return -EOPNOTSUPP; 7721 7722 if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { 7723 if (!(cache.mask & NFS_ACCESS_XAREAD)) 7724 return -EACCES; 7725 } 7726 7727 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 7728 if (ret) 7729 return ret; 7730 7731 ret = nfs4_xattr_cache_get(inode, key, buf, buflen); 7732 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 7733 return ret; 7734 7735 ret = nfs42_proc_getxattr(inode, key, buf, buflen); 7736 7737 return ret; 7738 } 7739 7740 static ssize_t 7741 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 7742 { 7743 u64 cookie; 7744 bool eof; 7745 ssize_t ret, size; 7746 char *buf; 7747 size_t buflen; 7748 struct nfs_access_entry cache; 7749 7750 if (!nfs_server_capable(inode, NFS_CAP_XATTR)) 7751 return 0; 7752 7753 if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { 7754 if (!(cache.mask & NFS_ACCESS_XALIST)) 7755 return 0; 7756 } 7757 7758 ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE); 7759 if (ret) 7760 return ret; 7761 7762 ret = nfs4_xattr_cache_list(inode, list, list_len); 7763 if (ret >= 0 || (ret < 0 && ret != -ENOENT)) 7764 return ret; 7765 7766 cookie = 0; 7767 eof = false; 7768 buflen = list_len ? list_len : XATTR_LIST_MAX; 7769 buf = list_len ? list : NULL; 7770 size = 0; 7771 7772 while (!eof) { 7773 ret = nfs42_proc_listxattrs(inode, buf, buflen, 7774 &cookie, &eof); 7775 if (ret < 0) 7776 return ret; 7777 7778 if (list_len) { 7779 buf += ret; 7780 buflen -= ret; 7781 } 7782 size += ret; 7783 } 7784 7785 if (list_len) 7786 nfs4_xattr_cache_set_list(inode, list, size); 7787 7788 return size; 7789 } 7790 7791 #else 7792 7793 static ssize_t 7794 nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) 7795 { 7796 return 0; 7797 } 7798 #endif /* CONFIG_NFS_V4_2 */ 7799 7800 /* 7801 * nfs_fhget will use either the mounted_on_fileid or the fileid 7802 */ 7803 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 7804 { 7805 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 7806 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 7807 (fattr->valid & NFS_ATTR_FATTR_FSID) && 7808 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 7809 return; 7810 7811 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 7812 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 7813 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 7814 fattr->nlink = 2; 7815 } 7816 7817 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 7818 const struct qstr *name, 7819 struct nfs4_fs_locations *fs_locations, 7820 struct page *page) 7821 { 7822 struct nfs_server *server = NFS_SERVER(dir); 7823 u32 bitmask[3]; 7824 struct nfs4_fs_locations_arg args = { 7825 .dir_fh = NFS_FH(dir), 7826 .name = name, 7827 .page = page, 7828 .bitmask = bitmask, 7829 }; 7830 struct nfs4_fs_locations_res res = { 7831 .fs_locations = fs_locations, 7832 }; 7833 struct rpc_message msg = { 7834 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 7835 .rpc_argp = &args, 7836 .rpc_resp = &res, 7837 }; 7838 int status; 7839 7840 dprintk("%s: start\n", __func__); 7841 7842 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS; 7843 bitmask[1] = nfs4_fattr_bitmap[1]; 7844 7845 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 7846 * is not supported */ 7847 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 7848 bitmask[0] &= ~FATTR4_WORD0_FILEID; 7849 else 7850 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 7851 7852 nfs_fattr_init(&fs_locations->fattr); 7853 fs_locations->server = server; 7854 fs_locations->nlocations = 0; 7855 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 7856 dprintk("%s: returned status = %d\n", __func__, status); 7857 return status; 7858 } 7859 7860 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 7861 const struct qstr *name, 7862 struct nfs4_fs_locations *fs_locations, 7863 struct page *page) 7864 { 7865 struct nfs4_exception exception = { 7866 .interruptible = true, 7867 }; 7868 int err; 7869 do { 7870 err = _nfs4_proc_fs_locations(client, dir, name, 7871 fs_locations, page); 7872 trace_nfs4_get_fs_locations(dir, name, err); 7873 err = nfs4_handle_exception(NFS_SERVER(dir), err, 7874 &exception); 7875 } while (exception.retry); 7876 return err; 7877 } 7878 7879 /* 7880 * This operation also signals the server that this client is 7881 * performing migration recovery. The server can stop returning 7882 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 7883 * appended to this compound to identify the client ID which is 7884 * performing recovery. 7885 */ 7886 static int _nfs40_proc_get_locations(struct inode *inode, 7887 struct nfs4_fs_locations *locations, 7888 struct page *page, const struct cred *cred) 7889 { 7890 struct nfs_server *server = NFS_SERVER(inode); 7891 struct rpc_clnt *clnt = server->client; 7892 u32 bitmask[2] = { 7893 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 7894 }; 7895 struct nfs4_fs_locations_arg args = { 7896 .clientid = server->nfs_client->cl_clientid, 7897 .fh = NFS_FH(inode), 7898 .page = page, 7899 .bitmask = bitmask, 7900 .migration = 1, /* skip LOOKUP */ 7901 .renew = 1, /* append RENEW */ 7902 }; 7903 struct nfs4_fs_locations_res res = { 7904 .fs_locations = locations, 7905 .migration = 1, 7906 .renew = 1, 7907 }; 7908 struct rpc_message msg = { 7909 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 7910 .rpc_argp = &args, 7911 .rpc_resp = &res, 7912 .rpc_cred = cred, 7913 }; 7914 unsigned long now = jiffies; 7915 int status; 7916 7917 nfs_fattr_init(&locations->fattr); 7918 locations->server = server; 7919 locations->nlocations = 0; 7920 7921 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 7922 status = nfs4_call_sync_sequence(clnt, server, &msg, 7923 &args.seq_args, &res.seq_res); 7924 if (status) 7925 return status; 7926 7927 renew_lease(server, now); 7928 return 0; 7929 } 7930 7931 #ifdef CONFIG_NFS_V4_1 7932 7933 /* 7934 * This operation also signals the server that this client is 7935 * performing migration recovery. The server can stop asserting 7936 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 7937 * performing this operation is identified in the SEQUENCE 7938 * operation in this compound. 7939 * 7940 * When the client supports GETATTR(fs_locations_info), it can 7941 * be plumbed in here. 7942 */ 7943 static int _nfs41_proc_get_locations(struct inode *inode, 7944 struct nfs4_fs_locations *locations, 7945 struct page *page, const struct cred *cred) 7946 { 7947 struct nfs_server *server = NFS_SERVER(inode); 7948 struct rpc_clnt *clnt = server->client; 7949 u32 bitmask[2] = { 7950 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 7951 }; 7952 struct nfs4_fs_locations_arg args = { 7953 .fh = NFS_FH(inode), 7954 .page = page, 7955 .bitmask = bitmask, 7956 .migration = 1, /* skip LOOKUP */ 7957 }; 7958 struct nfs4_fs_locations_res res = { 7959 .fs_locations = locations, 7960 .migration = 1, 7961 }; 7962 struct rpc_message msg = { 7963 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 7964 .rpc_argp = &args, 7965 .rpc_resp = &res, 7966 .rpc_cred = cred, 7967 }; 7968 int status; 7969 7970 nfs_fattr_init(&locations->fattr); 7971 locations->server = server; 7972 locations->nlocations = 0; 7973 7974 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 7975 status = nfs4_call_sync_sequence(clnt, server, &msg, 7976 &args.seq_args, &res.seq_res); 7977 if (status == NFS4_OK && 7978 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 7979 status = -NFS4ERR_LEASE_MOVED; 7980 return status; 7981 } 7982 7983 #endif /* CONFIG_NFS_V4_1 */ 7984 7985 /** 7986 * nfs4_proc_get_locations - discover locations for a migrated FSID 7987 * @inode: inode on FSID that is migrating 7988 * @locations: result of query 7989 * @page: buffer 7990 * @cred: credential to use for this operation 7991 * 7992 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 7993 * operation failed, or a negative errno if a local error occurred. 7994 * 7995 * On success, "locations" is filled in, but if the server has 7996 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 7997 * asserted. 7998 * 7999 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 8000 * from this client that require migration recovery. 8001 */ 8002 int nfs4_proc_get_locations(struct inode *inode, 8003 struct nfs4_fs_locations *locations, 8004 struct page *page, const struct cred *cred) 8005 { 8006 struct nfs_server *server = NFS_SERVER(inode); 8007 struct nfs_client *clp = server->nfs_client; 8008 const struct nfs4_mig_recovery_ops *ops = 8009 clp->cl_mvops->mig_recovery_ops; 8010 struct nfs4_exception exception = { 8011 .interruptible = true, 8012 }; 8013 int status; 8014 8015 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8016 (unsigned long long)server->fsid.major, 8017 (unsigned long long)server->fsid.minor, 8018 clp->cl_hostname); 8019 nfs_display_fhandle(NFS_FH(inode), __func__); 8020 8021 do { 8022 status = ops->get_locations(inode, locations, page, cred); 8023 if (status != -NFS4ERR_DELAY) 8024 break; 8025 nfs4_handle_exception(server, status, &exception); 8026 } while (exception.retry); 8027 return status; 8028 } 8029 8030 /* 8031 * This operation also signals the server that this client is 8032 * performing "lease moved" recovery. The server can stop 8033 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 8034 * is appended to this compound to identify the client ID which is 8035 * performing recovery. 8036 */ 8037 static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred) 8038 { 8039 struct nfs_server *server = NFS_SERVER(inode); 8040 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 8041 struct rpc_clnt *clnt = server->client; 8042 struct nfs4_fsid_present_arg args = { 8043 .fh = NFS_FH(inode), 8044 .clientid = clp->cl_clientid, 8045 .renew = 1, /* append RENEW */ 8046 }; 8047 struct nfs4_fsid_present_res res = { 8048 .renew = 1, 8049 }; 8050 struct rpc_message msg = { 8051 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8052 .rpc_argp = &args, 8053 .rpc_resp = &res, 8054 .rpc_cred = cred, 8055 }; 8056 unsigned long now = jiffies; 8057 int status; 8058 8059 res.fh = nfs_alloc_fhandle(); 8060 if (res.fh == NULL) 8061 return -ENOMEM; 8062 8063 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8064 status = nfs4_call_sync_sequence(clnt, server, &msg, 8065 &args.seq_args, &res.seq_res); 8066 nfs_free_fhandle(res.fh); 8067 if (status) 8068 return status; 8069 8070 do_renew_lease(clp, now); 8071 return 0; 8072 } 8073 8074 #ifdef CONFIG_NFS_V4_1 8075 8076 /* 8077 * This operation also signals the server that this client is 8078 * performing "lease moved" recovery. The server can stop asserting 8079 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 8080 * this operation is identified in the SEQUENCE operation in this 8081 * compound. 8082 */ 8083 static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred) 8084 { 8085 struct nfs_server *server = NFS_SERVER(inode); 8086 struct rpc_clnt *clnt = server->client; 8087 struct nfs4_fsid_present_arg args = { 8088 .fh = NFS_FH(inode), 8089 }; 8090 struct nfs4_fsid_present_res res = { 8091 }; 8092 struct rpc_message msg = { 8093 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 8094 .rpc_argp = &args, 8095 .rpc_resp = &res, 8096 .rpc_cred = cred, 8097 }; 8098 int status; 8099 8100 res.fh = nfs_alloc_fhandle(); 8101 if (res.fh == NULL) 8102 return -ENOMEM; 8103 8104 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 8105 status = nfs4_call_sync_sequence(clnt, server, &msg, 8106 &args.seq_args, &res.seq_res); 8107 nfs_free_fhandle(res.fh); 8108 if (status == NFS4_OK && 8109 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 8110 status = -NFS4ERR_LEASE_MOVED; 8111 return status; 8112 } 8113 8114 #endif /* CONFIG_NFS_V4_1 */ 8115 8116 /** 8117 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 8118 * @inode: inode on FSID to check 8119 * @cred: credential to use for this operation 8120 * 8121 * Server indicates whether the FSID is present, moved, or not 8122 * recognized. This operation is necessary to clear a LEASE_MOVED 8123 * condition for this client ID. 8124 * 8125 * Returns NFS4_OK if the FSID is present on this server, 8126 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 8127 * NFS4ERR code if some error occurred on the server, or a 8128 * negative errno if a local failure occurred. 8129 */ 8130 int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred) 8131 { 8132 struct nfs_server *server = NFS_SERVER(inode); 8133 struct nfs_client *clp = server->nfs_client; 8134 const struct nfs4_mig_recovery_ops *ops = 8135 clp->cl_mvops->mig_recovery_ops; 8136 struct nfs4_exception exception = { 8137 .interruptible = true, 8138 }; 8139 int status; 8140 8141 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 8142 (unsigned long long)server->fsid.major, 8143 (unsigned long long)server->fsid.minor, 8144 clp->cl_hostname); 8145 nfs_display_fhandle(NFS_FH(inode), __func__); 8146 8147 do { 8148 status = ops->fsid_present(inode, cred); 8149 if (status != -NFS4ERR_DELAY) 8150 break; 8151 nfs4_handle_exception(server, status, &exception); 8152 } while (exception.retry); 8153 return status; 8154 } 8155 8156 /* 8157 * If 'use_integrity' is true and the state managment nfs_client 8158 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 8159 * and the machine credential as per RFC3530bis and RFC5661 Security 8160 * Considerations sections. Otherwise, just use the user cred with the 8161 * filesystem's rpc_client. 8162 */ 8163 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8164 { 8165 int status; 8166 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 8167 struct nfs_client *clp = NFS_SERVER(dir)->nfs_client; 8168 struct nfs4_secinfo_arg args = { 8169 .dir_fh = NFS_FH(dir), 8170 .name = name, 8171 }; 8172 struct nfs4_secinfo_res res = { 8173 .flavors = flavors, 8174 }; 8175 struct rpc_message msg = { 8176 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 8177 .rpc_argp = &args, 8178 .rpc_resp = &res, 8179 }; 8180 struct nfs4_call_sync_data data = { 8181 .seq_server = NFS_SERVER(dir), 8182 .seq_args = &args.seq_args, 8183 .seq_res = &res.seq_res, 8184 }; 8185 struct rpc_task_setup task_setup = { 8186 .rpc_client = clnt, 8187 .rpc_message = &msg, 8188 .callback_ops = clp->cl_mvops->call_sync_ops, 8189 .callback_data = &data, 8190 .flags = RPC_TASK_NO_ROUND_ROBIN, 8191 }; 8192 const struct cred *cred = NULL; 8193 8194 if (use_integrity) { 8195 clnt = clp->cl_rpcclient; 8196 task_setup.rpc_client = clnt; 8197 8198 cred = nfs4_get_clid_cred(clp); 8199 msg.rpc_cred = cred; 8200 } 8201 8202 dprintk("NFS call secinfo %s\n", name->name); 8203 8204 nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 8205 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 8206 status = nfs4_call_sync_custom(&task_setup); 8207 8208 dprintk("NFS reply secinfo: %d\n", status); 8209 8210 put_cred(cred); 8211 return status; 8212 } 8213 8214 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 8215 struct nfs4_secinfo_flavors *flavors) 8216 { 8217 struct nfs4_exception exception = { 8218 .interruptible = true, 8219 }; 8220 int err; 8221 do { 8222 err = -NFS4ERR_WRONGSEC; 8223 8224 /* try to use integrity protection with machine cred */ 8225 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 8226 err = _nfs4_proc_secinfo(dir, name, flavors, true); 8227 8228 /* 8229 * if unable to use integrity protection, or SECINFO with 8230 * integrity protection returns NFS4ERR_WRONGSEC (which is 8231 * disallowed by spec, but exists in deployed servers) use 8232 * the current filesystem's rpc_client and the user cred. 8233 */ 8234 if (err == -NFS4ERR_WRONGSEC) 8235 err = _nfs4_proc_secinfo(dir, name, flavors, false); 8236 8237 trace_nfs4_secinfo(dir, name, err); 8238 err = nfs4_handle_exception(NFS_SERVER(dir), err, 8239 &exception); 8240 } while (exception.retry); 8241 return err; 8242 } 8243 8244 #ifdef CONFIG_NFS_V4_1 8245 /* 8246 * Check the exchange flags returned by the server for invalid flags, having 8247 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 8248 * DS flags set. 8249 */ 8250 static int nfs4_check_cl_exchange_flags(u32 flags, u32 version) 8251 { 8252 if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R)) 8253 goto out_inval; 8254 else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R)) 8255 goto out_inval; 8256 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 8257 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 8258 goto out_inval; 8259 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 8260 goto out_inval; 8261 return NFS_OK; 8262 out_inval: 8263 return -NFS4ERR_INVAL; 8264 } 8265 8266 static bool 8267 nfs41_same_server_scope(struct nfs41_server_scope *a, 8268 struct nfs41_server_scope *b) 8269 { 8270 if (a->server_scope_sz != b->server_scope_sz) 8271 return false; 8272 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0; 8273 } 8274 8275 static void 8276 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 8277 { 8278 struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; 8279 struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp; 8280 struct nfs_client *clp = args->client; 8281 8282 switch (task->tk_status) { 8283 case -NFS4ERR_BADSESSION: 8284 case -NFS4ERR_DEADSESSION: 8285 nfs4_schedule_session_recovery(clp->cl_session, 8286 task->tk_status); 8287 } 8288 if (args->dir == NFS4_CDFC4_FORE_OR_BOTH && 8289 res->dir != NFS4_CDFS4_BOTH) { 8290 rpc_task_close_connection(task); 8291 if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES) 8292 rpc_restart_call(task); 8293 } 8294 } 8295 8296 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 8297 .rpc_call_done = nfs4_bind_one_conn_to_session_done, 8298 }; 8299 8300 /* 8301 * nfs4_proc_bind_one_conn_to_session() 8302 * 8303 * The 4.1 client currently uses the same TCP connection for the 8304 * fore and backchannel. 8305 */ 8306 static 8307 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 8308 struct rpc_xprt *xprt, 8309 struct nfs_client *clp, 8310 const struct cred *cred) 8311 { 8312 int status; 8313 struct nfs41_bind_conn_to_session_args args = { 8314 .client = clp, 8315 .dir = NFS4_CDFC4_FORE_OR_BOTH, 8316 .retries = 0, 8317 }; 8318 struct nfs41_bind_conn_to_session_res res; 8319 struct rpc_message msg = { 8320 .rpc_proc = 8321 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 8322 .rpc_argp = &args, 8323 .rpc_resp = &res, 8324 .rpc_cred = cred, 8325 }; 8326 struct rpc_task_setup task_setup_data = { 8327 .rpc_client = clnt, 8328 .rpc_xprt = xprt, 8329 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 8330 .rpc_message = &msg, 8331 .flags = RPC_TASK_TIMEOUT, 8332 }; 8333 struct rpc_task *task; 8334 8335 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 8336 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 8337 args.dir = NFS4_CDFC4_FORE; 8338 8339 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 8340 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 8341 args.dir = NFS4_CDFC4_FORE; 8342 8343 task = rpc_run_task(&task_setup_data); 8344 if (!IS_ERR(task)) { 8345 status = task->tk_status; 8346 rpc_put_task(task); 8347 } else 8348 status = PTR_ERR(task); 8349 trace_nfs4_bind_conn_to_session(clp, status); 8350 if (status == 0) { 8351 if (memcmp(res.sessionid.data, 8352 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 8353 dprintk("NFS: %s: Session ID mismatch\n", __func__); 8354 return -EIO; 8355 } 8356 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 8357 dprintk("NFS: %s: Unexpected direction from server\n", 8358 __func__); 8359 return -EIO; 8360 } 8361 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 8362 dprintk("NFS: %s: Server returned RDMA mode = true\n", 8363 __func__); 8364 return -EIO; 8365 } 8366 } 8367 8368 return status; 8369 } 8370 8371 struct rpc_bind_conn_calldata { 8372 struct nfs_client *clp; 8373 const struct cred *cred; 8374 }; 8375 8376 static int 8377 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 8378 struct rpc_xprt *xprt, 8379 void *calldata) 8380 { 8381 struct rpc_bind_conn_calldata *p = calldata; 8382 8383 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred); 8384 } 8385 8386 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred) 8387 { 8388 struct rpc_bind_conn_calldata data = { 8389 .clp = clp, 8390 .cred = cred, 8391 }; 8392 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 8393 nfs4_proc_bind_conn_to_session_callback, &data); 8394 } 8395 8396 /* 8397 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 8398 * and operations we'd like to see to enable certain features in the allow map 8399 */ 8400 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 8401 .how = SP4_MACH_CRED, 8402 .enforce.u.words = { 8403 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8404 1 << (OP_EXCHANGE_ID - 32) | 8405 1 << (OP_CREATE_SESSION - 32) | 8406 1 << (OP_DESTROY_SESSION - 32) | 8407 1 << (OP_DESTROY_CLIENTID - 32) 8408 }, 8409 .allow.u.words = { 8410 [0] = 1 << (OP_CLOSE) | 8411 1 << (OP_OPEN_DOWNGRADE) | 8412 1 << (OP_LOCKU) | 8413 1 << (OP_DELEGRETURN) | 8414 1 << (OP_COMMIT), 8415 [1] = 1 << (OP_SECINFO - 32) | 8416 1 << (OP_SECINFO_NO_NAME - 32) | 8417 1 << (OP_LAYOUTRETURN - 32) | 8418 1 << (OP_TEST_STATEID - 32) | 8419 1 << (OP_FREE_STATEID - 32) | 8420 1 << (OP_WRITE - 32) 8421 } 8422 }; 8423 8424 /* 8425 * Select the state protection mode for client `clp' given the server results 8426 * from exchange_id in `sp'. 8427 * 8428 * Returns 0 on success, negative errno otherwise. 8429 */ 8430 static int nfs4_sp4_select_mode(struct nfs_client *clp, 8431 struct nfs41_state_protection *sp) 8432 { 8433 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 8434 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 8435 1 << (OP_EXCHANGE_ID - 32) | 8436 1 << (OP_CREATE_SESSION - 32) | 8437 1 << (OP_DESTROY_SESSION - 32) | 8438 1 << (OP_DESTROY_CLIENTID - 32) 8439 }; 8440 unsigned long flags = 0; 8441 unsigned int i; 8442 int ret = 0; 8443 8444 if (sp->how == SP4_MACH_CRED) { 8445 /* Print state protect result */ 8446 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 8447 for (i = 0; i <= LAST_NFS4_OP; i++) { 8448 if (test_bit(i, sp->enforce.u.longs)) 8449 dfprintk(MOUNT, " enforce op %d\n", i); 8450 if (test_bit(i, sp->allow.u.longs)) 8451 dfprintk(MOUNT, " allow op %d\n", i); 8452 } 8453 8454 /* make sure nothing is on enforce list that isn't supported */ 8455 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 8456 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 8457 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8458 ret = -EINVAL; 8459 goto out; 8460 } 8461 } 8462 8463 /* 8464 * Minimal mode - state operations are allowed to use machine 8465 * credential. Note this already happens by default, so the 8466 * client doesn't have to do anything more than the negotiation. 8467 * 8468 * NOTE: we don't care if EXCHANGE_ID is in the list - 8469 * we're already using the machine cred for exchange_id 8470 * and will never use a different cred. 8471 */ 8472 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 8473 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 8474 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 8475 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 8476 dfprintk(MOUNT, "sp4_mach_cred:\n"); 8477 dfprintk(MOUNT, " minimal mode enabled\n"); 8478 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags); 8479 } else { 8480 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 8481 ret = -EINVAL; 8482 goto out; 8483 } 8484 8485 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 8486 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 8487 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 8488 test_bit(OP_LOCKU, sp->allow.u.longs)) { 8489 dfprintk(MOUNT, " cleanup mode enabled\n"); 8490 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags); 8491 } 8492 8493 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 8494 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 8495 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags); 8496 } 8497 8498 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 8499 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 8500 dfprintk(MOUNT, " secinfo mode enabled\n"); 8501 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags); 8502 } 8503 8504 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 8505 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 8506 dfprintk(MOUNT, " stateid mode enabled\n"); 8507 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags); 8508 } 8509 8510 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 8511 dfprintk(MOUNT, " write mode enabled\n"); 8512 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags); 8513 } 8514 8515 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 8516 dfprintk(MOUNT, " commit mode enabled\n"); 8517 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags); 8518 } 8519 } 8520 out: 8521 clp->cl_sp4_flags = flags; 8522 return ret; 8523 } 8524 8525 struct nfs41_exchange_id_data { 8526 struct nfs41_exchange_id_res res; 8527 struct nfs41_exchange_id_args args; 8528 }; 8529 8530 static void nfs4_exchange_id_release(void *data) 8531 { 8532 struct nfs41_exchange_id_data *cdata = 8533 (struct nfs41_exchange_id_data *)data; 8534 8535 nfs_put_client(cdata->args.client); 8536 kfree(cdata->res.impl_id); 8537 kfree(cdata->res.server_scope); 8538 kfree(cdata->res.server_owner); 8539 kfree(cdata); 8540 } 8541 8542 static const struct rpc_call_ops nfs4_exchange_id_call_ops = { 8543 .rpc_release = nfs4_exchange_id_release, 8544 }; 8545 8546 /* 8547 * _nfs4_proc_exchange_id() 8548 * 8549 * Wrapper for EXCHANGE_ID operation. 8550 */ 8551 static struct rpc_task * 8552 nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred, 8553 u32 sp4_how, struct rpc_xprt *xprt) 8554 { 8555 struct rpc_message msg = { 8556 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 8557 .rpc_cred = cred, 8558 }; 8559 struct rpc_task_setup task_setup_data = { 8560 .rpc_client = clp->cl_rpcclient, 8561 .callback_ops = &nfs4_exchange_id_call_ops, 8562 .rpc_message = &msg, 8563 .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN, 8564 }; 8565 struct nfs41_exchange_id_data *calldata; 8566 int status; 8567 8568 if (!refcount_inc_not_zero(&clp->cl_count)) 8569 return ERR_PTR(-EIO); 8570 8571 status = -ENOMEM; 8572 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 8573 if (!calldata) 8574 goto out; 8575 8576 nfs4_init_boot_verifier(clp, &calldata->args.verifier); 8577 8578 status = nfs4_init_uniform_client_string(clp); 8579 if (status) 8580 goto out_calldata; 8581 8582 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 8583 GFP_NOFS); 8584 status = -ENOMEM; 8585 if (unlikely(calldata->res.server_owner == NULL)) 8586 goto out_calldata; 8587 8588 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 8589 GFP_NOFS); 8590 if (unlikely(calldata->res.server_scope == NULL)) 8591 goto out_server_owner; 8592 8593 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 8594 if (unlikely(calldata->res.impl_id == NULL)) 8595 goto out_server_scope; 8596 8597 switch (sp4_how) { 8598 case SP4_NONE: 8599 calldata->args.state_protect.how = SP4_NONE; 8600 break; 8601 8602 case SP4_MACH_CRED: 8603 calldata->args.state_protect = nfs4_sp4_mach_cred_request; 8604 break; 8605 8606 default: 8607 /* unsupported! */ 8608 WARN_ON_ONCE(1); 8609 status = -EINVAL; 8610 goto out_impl_id; 8611 } 8612 if (xprt) { 8613 task_setup_data.rpc_xprt = xprt; 8614 task_setup_data.flags |= RPC_TASK_SOFTCONN; 8615 memcpy(calldata->args.verifier.data, clp->cl_confirm.data, 8616 sizeof(calldata->args.verifier.data)); 8617 } 8618 calldata->args.client = clp; 8619 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 8620 EXCHGID4_FLAG_BIND_PRINC_STATEID; 8621 #ifdef CONFIG_NFS_V4_1_MIGRATION 8622 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR; 8623 #endif 8624 msg.rpc_argp = &calldata->args; 8625 msg.rpc_resp = &calldata->res; 8626 task_setup_data.callback_data = calldata; 8627 8628 return rpc_run_task(&task_setup_data); 8629 8630 out_impl_id: 8631 kfree(calldata->res.impl_id); 8632 out_server_scope: 8633 kfree(calldata->res.server_scope); 8634 out_server_owner: 8635 kfree(calldata->res.server_owner); 8636 out_calldata: 8637 kfree(calldata); 8638 out: 8639 nfs_put_client(clp); 8640 return ERR_PTR(status); 8641 } 8642 8643 /* 8644 * _nfs4_proc_exchange_id() 8645 * 8646 * Wrapper for EXCHANGE_ID operation. 8647 */ 8648 static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred, 8649 u32 sp4_how) 8650 { 8651 struct rpc_task *task; 8652 struct nfs41_exchange_id_args *argp; 8653 struct nfs41_exchange_id_res *resp; 8654 unsigned long now = jiffies; 8655 int status; 8656 8657 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL); 8658 if (IS_ERR(task)) 8659 return PTR_ERR(task); 8660 8661 argp = task->tk_msg.rpc_argp; 8662 resp = task->tk_msg.rpc_resp; 8663 status = task->tk_status; 8664 if (status != 0) 8665 goto out; 8666 8667 status = nfs4_check_cl_exchange_flags(resp->flags, 8668 clp->cl_mvops->minor_version); 8669 if (status != 0) 8670 goto out; 8671 8672 status = nfs4_sp4_select_mode(clp, &resp->state_protect); 8673 if (status != 0) 8674 goto out; 8675 8676 do_renew_lease(clp, now); 8677 8678 clp->cl_clientid = resp->clientid; 8679 clp->cl_exchange_flags = resp->flags; 8680 clp->cl_seqid = resp->seqid; 8681 /* Client ID is not confirmed */ 8682 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R)) 8683 clear_bit(NFS4_SESSION_ESTABLISHED, 8684 &clp->cl_session->session_state); 8685 8686 if (clp->cl_serverscope != NULL && 8687 !nfs41_same_server_scope(clp->cl_serverscope, 8688 resp->server_scope)) { 8689 dprintk("%s: server_scope mismatch detected\n", 8690 __func__); 8691 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 8692 } 8693 8694 swap(clp->cl_serverowner, resp->server_owner); 8695 swap(clp->cl_serverscope, resp->server_scope); 8696 swap(clp->cl_implid, resp->impl_id); 8697 8698 /* Save the EXCHANGE_ID verifier session trunk tests */ 8699 memcpy(clp->cl_confirm.data, argp->verifier.data, 8700 sizeof(clp->cl_confirm.data)); 8701 out: 8702 trace_nfs4_exchange_id(clp, status); 8703 rpc_put_task(task); 8704 return status; 8705 } 8706 8707 /* 8708 * nfs4_proc_exchange_id() 8709 * 8710 * Returns zero, a negative errno, or a negative NFS4ERR status code. 8711 * 8712 * Since the clientid has expired, all compounds using sessions 8713 * associated with the stale clientid will be returning 8714 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 8715 * be in some phase of session reset. 8716 * 8717 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 8718 */ 8719 int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred) 8720 { 8721 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 8722 int status; 8723 8724 /* try SP4_MACH_CRED if krb5i/p */ 8725 if (authflavor == RPC_AUTH_GSS_KRB5I || 8726 authflavor == RPC_AUTH_GSS_KRB5P) { 8727 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 8728 if (!status) 8729 return 0; 8730 } 8731 8732 /* try SP4_NONE */ 8733 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 8734 } 8735 8736 /** 8737 * nfs4_test_session_trunk 8738 * 8739 * This is an add_xprt_test() test function called from 8740 * rpc_clnt_setup_test_and_add_xprt. 8741 * 8742 * The rpc_xprt_switch is referrenced by rpc_clnt_setup_test_and_add_xprt 8743 * and is dereferrenced in nfs4_exchange_id_release 8744 * 8745 * Upon success, add the new transport to the rpc_clnt 8746 * 8747 * @clnt: struct rpc_clnt to get new transport 8748 * @xprt: the rpc_xprt to test 8749 * @data: call data for _nfs4_proc_exchange_id. 8750 */ 8751 void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt, 8752 void *data) 8753 { 8754 struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data; 8755 struct rpc_task *task; 8756 int status; 8757 8758 u32 sp4_how; 8759 8760 dprintk("--> %s try %s\n", __func__, 8761 xprt->address_strings[RPC_DISPLAY_ADDR]); 8762 8763 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED); 8764 8765 /* Test connection for session trunking. Async exchange_id call */ 8766 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt); 8767 if (IS_ERR(task)) 8768 return; 8769 8770 status = task->tk_status; 8771 if (status == 0) 8772 status = nfs4_detect_session_trunking(adata->clp, 8773 task->tk_msg.rpc_resp, xprt); 8774 8775 if (status == 0) 8776 rpc_clnt_xprt_switch_add_xprt(clnt, xprt); 8777 8778 rpc_put_task(task); 8779 } 8780 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk); 8781 8782 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 8783 const struct cred *cred) 8784 { 8785 struct rpc_message msg = { 8786 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 8787 .rpc_argp = clp, 8788 .rpc_cred = cred, 8789 }; 8790 int status; 8791 8792 status = rpc_call_sync(clp->cl_rpcclient, &msg, 8793 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 8794 trace_nfs4_destroy_clientid(clp, status); 8795 if (status) 8796 dprintk("NFS: Got error %d from the server %s on " 8797 "DESTROY_CLIENTID.", status, clp->cl_hostname); 8798 return status; 8799 } 8800 8801 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 8802 const struct cred *cred) 8803 { 8804 unsigned int loop; 8805 int ret; 8806 8807 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 8808 ret = _nfs4_proc_destroy_clientid(clp, cred); 8809 switch (ret) { 8810 case -NFS4ERR_DELAY: 8811 case -NFS4ERR_CLIENTID_BUSY: 8812 ssleep(1); 8813 break; 8814 default: 8815 return ret; 8816 } 8817 } 8818 return 0; 8819 } 8820 8821 int nfs4_destroy_clientid(struct nfs_client *clp) 8822 { 8823 const struct cred *cred; 8824 int ret = 0; 8825 8826 if (clp->cl_mvops->minor_version < 1) 8827 goto out; 8828 if (clp->cl_exchange_flags == 0) 8829 goto out; 8830 if (clp->cl_preserve_clid) 8831 goto out; 8832 cred = nfs4_get_clid_cred(clp); 8833 ret = nfs4_proc_destroy_clientid(clp, cred); 8834 put_cred(cred); 8835 switch (ret) { 8836 case 0: 8837 case -NFS4ERR_STALE_CLIENTID: 8838 clp->cl_exchange_flags = 0; 8839 } 8840 out: 8841 return ret; 8842 } 8843 8844 #endif /* CONFIG_NFS_V4_1 */ 8845 8846 struct nfs4_get_lease_time_data { 8847 struct nfs4_get_lease_time_args *args; 8848 struct nfs4_get_lease_time_res *res; 8849 struct nfs_client *clp; 8850 }; 8851 8852 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 8853 void *calldata) 8854 { 8855 struct nfs4_get_lease_time_data *data = 8856 (struct nfs4_get_lease_time_data *)calldata; 8857 8858 dprintk("--> %s\n", __func__); 8859 /* just setup sequence, do not trigger session recovery 8860 since we're invoked within one */ 8861 nfs4_setup_sequence(data->clp, 8862 &data->args->la_seq_args, 8863 &data->res->lr_seq_res, 8864 task); 8865 dprintk("<-- %s\n", __func__); 8866 } 8867 8868 /* 8869 * Called from nfs4_state_manager thread for session setup, so don't recover 8870 * from sequence operation or clientid errors. 8871 */ 8872 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 8873 { 8874 struct nfs4_get_lease_time_data *data = 8875 (struct nfs4_get_lease_time_data *)calldata; 8876 8877 dprintk("--> %s\n", __func__); 8878 if (!nfs4_sequence_done(task, &data->res->lr_seq_res)) 8879 return; 8880 switch (task->tk_status) { 8881 case -NFS4ERR_DELAY: 8882 case -NFS4ERR_GRACE: 8883 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 8884 rpc_delay(task, NFS4_POLL_RETRY_MIN); 8885 task->tk_status = 0; 8886 fallthrough; 8887 case -NFS4ERR_RETRY_UNCACHED_REP: 8888 rpc_restart_call_prepare(task); 8889 return; 8890 } 8891 dprintk("<-- %s\n", __func__); 8892 } 8893 8894 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 8895 .rpc_call_prepare = nfs4_get_lease_time_prepare, 8896 .rpc_call_done = nfs4_get_lease_time_done, 8897 }; 8898 8899 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 8900 { 8901 struct nfs4_get_lease_time_args args; 8902 struct nfs4_get_lease_time_res res = { 8903 .lr_fsinfo = fsinfo, 8904 }; 8905 struct nfs4_get_lease_time_data data = { 8906 .args = &args, 8907 .res = &res, 8908 .clp = clp, 8909 }; 8910 struct rpc_message msg = { 8911 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 8912 .rpc_argp = &args, 8913 .rpc_resp = &res, 8914 }; 8915 struct rpc_task_setup task_setup = { 8916 .rpc_client = clp->cl_rpcclient, 8917 .rpc_message = &msg, 8918 .callback_ops = &nfs4_get_lease_time_ops, 8919 .callback_data = &data, 8920 .flags = RPC_TASK_TIMEOUT, 8921 }; 8922 8923 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1); 8924 return nfs4_call_sync_custom(&task_setup); 8925 } 8926 8927 #ifdef CONFIG_NFS_V4_1 8928 8929 /* 8930 * Initialize the values to be used by the client in CREATE_SESSION 8931 * If nfs4_init_session set the fore channel request and response sizes, 8932 * use them. 8933 * 8934 * Set the back channel max_resp_sz_cached to zero to force the client to 8935 * always set csa_cachethis to FALSE because the current implementation 8936 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 8937 */ 8938 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args, 8939 struct rpc_clnt *clnt) 8940 { 8941 unsigned int max_rqst_sz, max_resp_sz; 8942 unsigned int max_bc_payload = rpc_max_bc_payload(clnt); 8943 unsigned int max_bc_slots = rpc_num_bc_slots(clnt); 8944 8945 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 8946 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 8947 8948 /* Fore channel attributes */ 8949 args->fc_attrs.max_rqst_sz = max_rqst_sz; 8950 args->fc_attrs.max_resp_sz = max_resp_sz; 8951 args->fc_attrs.max_ops = NFS4_MAX_OPS; 8952 args->fc_attrs.max_reqs = max_session_slots; 8953 8954 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 8955 "max_ops=%u max_reqs=%u\n", 8956 __func__, 8957 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 8958 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 8959 8960 /* Back channel attributes */ 8961 args->bc_attrs.max_rqst_sz = max_bc_payload; 8962 args->bc_attrs.max_resp_sz = max_bc_payload; 8963 args->bc_attrs.max_resp_sz_cached = 0; 8964 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 8965 args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1); 8966 if (args->bc_attrs.max_reqs > max_bc_slots) 8967 args->bc_attrs.max_reqs = max_bc_slots; 8968 8969 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 8970 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 8971 __func__, 8972 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 8973 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 8974 args->bc_attrs.max_reqs); 8975 } 8976 8977 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 8978 struct nfs41_create_session_res *res) 8979 { 8980 struct nfs4_channel_attrs *sent = &args->fc_attrs; 8981 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 8982 8983 if (rcvd->max_resp_sz > sent->max_resp_sz) 8984 return -EINVAL; 8985 /* 8986 * Our requested max_ops is the minimum we need; we're not 8987 * prepared to break up compounds into smaller pieces than that. 8988 * So, no point even trying to continue if the server won't 8989 * cooperate: 8990 */ 8991 if (rcvd->max_ops < sent->max_ops) 8992 return -EINVAL; 8993 if (rcvd->max_reqs == 0) 8994 return -EINVAL; 8995 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 8996 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 8997 return 0; 8998 } 8999 9000 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 9001 struct nfs41_create_session_res *res) 9002 { 9003 struct nfs4_channel_attrs *sent = &args->bc_attrs; 9004 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 9005 9006 if (!(res->flags & SESSION4_BACK_CHAN)) 9007 goto out; 9008 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 9009 return -EINVAL; 9010 if (rcvd->max_resp_sz < sent->max_resp_sz) 9011 return -EINVAL; 9012 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 9013 return -EINVAL; 9014 if (rcvd->max_ops > sent->max_ops) 9015 return -EINVAL; 9016 if (rcvd->max_reqs > sent->max_reqs) 9017 return -EINVAL; 9018 out: 9019 return 0; 9020 } 9021 9022 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 9023 struct nfs41_create_session_res *res) 9024 { 9025 int ret; 9026 9027 ret = nfs4_verify_fore_channel_attrs(args, res); 9028 if (ret) 9029 return ret; 9030 return nfs4_verify_back_channel_attrs(args, res); 9031 } 9032 9033 static void nfs4_update_session(struct nfs4_session *session, 9034 struct nfs41_create_session_res *res) 9035 { 9036 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 9037 /* Mark client id and session as being confirmed */ 9038 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 9039 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 9040 session->flags = res->flags; 9041 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 9042 if (res->flags & SESSION4_BACK_CHAN) 9043 memcpy(&session->bc_attrs, &res->bc_attrs, 9044 sizeof(session->bc_attrs)); 9045 } 9046 9047 static int _nfs4_proc_create_session(struct nfs_client *clp, 9048 const struct cred *cred) 9049 { 9050 struct nfs4_session *session = clp->cl_session; 9051 struct nfs41_create_session_args args = { 9052 .client = clp, 9053 .clientid = clp->cl_clientid, 9054 .seqid = clp->cl_seqid, 9055 .cb_program = NFS4_CALLBACK, 9056 }; 9057 struct nfs41_create_session_res res; 9058 9059 struct rpc_message msg = { 9060 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 9061 .rpc_argp = &args, 9062 .rpc_resp = &res, 9063 .rpc_cred = cred, 9064 }; 9065 int status; 9066 9067 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 9068 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 9069 9070 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9071 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9072 trace_nfs4_create_session(clp, status); 9073 9074 switch (status) { 9075 case -NFS4ERR_STALE_CLIENTID: 9076 case -NFS4ERR_DELAY: 9077 case -ETIMEDOUT: 9078 case -EACCES: 9079 case -EAGAIN: 9080 goto out; 9081 } 9082 9083 clp->cl_seqid++; 9084 if (!status) { 9085 /* Verify the session's negotiated channel_attrs values */ 9086 status = nfs4_verify_channel_attrs(&args, &res); 9087 /* Increment the clientid slot sequence id */ 9088 if (status) 9089 goto out; 9090 nfs4_update_session(session, &res); 9091 } 9092 out: 9093 return status; 9094 } 9095 9096 /* 9097 * Issues a CREATE_SESSION operation to the server. 9098 * It is the responsibility of the caller to verify the session is 9099 * expired before calling this routine. 9100 */ 9101 int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred) 9102 { 9103 int status; 9104 unsigned *ptr; 9105 struct nfs4_session *session = clp->cl_session; 9106 9107 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 9108 9109 status = _nfs4_proc_create_session(clp, cred); 9110 if (status) 9111 goto out; 9112 9113 /* Init or reset the session slot tables */ 9114 status = nfs4_setup_session_slot_tables(session); 9115 dprintk("slot table setup returned %d\n", status); 9116 if (status) 9117 goto out; 9118 9119 ptr = (unsigned *)&session->sess_id.data[0]; 9120 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 9121 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 9122 out: 9123 dprintk("<-- %s\n", __func__); 9124 return status; 9125 } 9126 9127 /* 9128 * Issue the over-the-wire RPC DESTROY_SESSION. 9129 * The caller must serialize access to this routine. 9130 */ 9131 int nfs4_proc_destroy_session(struct nfs4_session *session, 9132 const struct cred *cred) 9133 { 9134 struct rpc_message msg = { 9135 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 9136 .rpc_argp = session, 9137 .rpc_cred = cred, 9138 }; 9139 int status = 0; 9140 9141 dprintk("--> nfs4_proc_destroy_session\n"); 9142 9143 /* session is still being setup */ 9144 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 9145 return 0; 9146 9147 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 9148 RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN); 9149 trace_nfs4_destroy_session(session->clp, status); 9150 9151 if (status) 9152 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 9153 "Session has been destroyed regardless...\n", status); 9154 9155 dprintk("<-- nfs4_proc_destroy_session\n"); 9156 return status; 9157 } 9158 9159 /* 9160 * Renew the cl_session lease. 9161 */ 9162 struct nfs4_sequence_data { 9163 struct nfs_client *clp; 9164 struct nfs4_sequence_args args; 9165 struct nfs4_sequence_res res; 9166 }; 9167 9168 static void nfs41_sequence_release(void *data) 9169 { 9170 struct nfs4_sequence_data *calldata = data; 9171 struct nfs_client *clp = calldata->clp; 9172 9173 if (refcount_read(&clp->cl_count) > 1) 9174 nfs4_schedule_state_renewal(clp); 9175 nfs_put_client(clp); 9176 kfree(calldata); 9177 } 9178 9179 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9180 { 9181 switch(task->tk_status) { 9182 case -NFS4ERR_DELAY: 9183 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9184 return -EAGAIN; 9185 default: 9186 nfs4_schedule_lease_recovery(clp); 9187 } 9188 return 0; 9189 } 9190 9191 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 9192 { 9193 struct nfs4_sequence_data *calldata = data; 9194 struct nfs_client *clp = calldata->clp; 9195 9196 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 9197 return; 9198 9199 trace_nfs4_sequence(clp, task->tk_status); 9200 if (task->tk_status < 0) { 9201 dprintk("%s ERROR %d\n", __func__, task->tk_status); 9202 if (refcount_read(&clp->cl_count) == 1) 9203 goto out; 9204 9205 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 9206 rpc_restart_call_prepare(task); 9207 return; 9208 } 9209 } 9210 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 9211 out: 9212 dprintk("<-- %s\n", __func__); 9213 } 9214 9215 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 9216 { 9217 struct nfs4_sequence_data *calldata = data; 9218 struct nfs_client *clp = calldata->clp; 9219 struct nfs4_sequence_args *args; 9220 struct nfs4_sequence_res *res; 9221 9222 args = task->tk_msg.rpc_argp; 9223 res = task->tk_msg.rpc_resp; 9224 9225 nfs4_setup_sequence(clp, args, res, task); 9226 } 9227 9228 static const struct rpc_call_ops nfs41_sequence_ops = { 9229 .rpc_call_done = nfs41_sequence_call_done, 9230 .rpc_call_prepare = nfs41_sequence_prepare, 9231 .rpc_release = nfs41_sequence_release, 9232 }; 9233 9234 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 9235 const struct cred *cred, 9236 struct nfs4_slot *slot, 9237 bool is_privileged) 9238 { 9239 struct nfs4_sequence_data *calldata; 9240 struct rpc_message msg = { 9241 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 9242 .rpc_cred = cred, 9243 }; 9244 struct rpc_task_setup task_setup_data = { 9245 .rpc_client = clp->cl_rpcclient, 9246 .rpc_message = &msg, 9247 .callback_ops = &nfs41_sequence_ops, 9248 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE, 9249 }; 9250 struct rpc_task *ret; 9251 9252 ret = ERR_PTR(-EIO); 9253 if (!refcount_inc_not_zero(&clp->cl_count)) 9254 goto out_err; 9255 9256 ret = ERR_PTR(-ENOMEM); 9257 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9258 if (calldata == NULL) 9259 goto out_put_clp; 9260 nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged); 9261 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot); 9262 msg.rpc_argp = &calldata->args; 9263 msg.rpc_resp = &calldata->res; 9264 calldata->clp = clp; 9265 task_setup_data.callback_data = calldata; 9266 9267 ret = rpc_run_task(&task_setup_data); 9268 if (IS_ERR(ret)) 9269 goto out_err; 9270 return ret; 9271 out_put_clp: 9272 nfs_put_client(clp); 9273 out_err: 9274 nfs41_release_slot(slot); 9275 return ret; 9276 } 9277 9278 static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags) 9279 { 9280 struct rpc_task *task; 9281 int ret = 0; 9282 9283 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 9284 return -EAGAIN; 9285 task = _nfs41_proc_sequence(clp, cred, NULL, false); 9286 if (IS_ERR(task)) 9287 ret = PTR_ERR(task); 9288 else 9289 rpc_put_task_async(task); 9290 dprintk("<-- %s status=%d\n", __func__, ret); 9291 return ret; 9292 } 9293 9294 static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred) 9295 { 9296 struct rpc_task *task; 9297 int ret; 9298 9299 task = _nfs41_proc_sequence(clp, cred, NULL, true); 9300 if (IS_ERR(task)) { 9301 ret = PTR_ERR(task); 9302 goto out; 9303 } 9304 ret = rpc_wait_for_completion_task(task); 9305 if (!ret) 9306 ret = task->tk_status; 9307 rpc_put_task(task); 9308 out: 9309 dprintk("<-- %s status=%d\n", __func__, ret); 9310 return ret; 9311 } 9312 9313 struct nfs4_reclaim_complete_data { 9314 struct nfs_client *clp; 9315 struct nfs41_reclaim_complete_args arg; 9316 struct nfs41_reclaim_complete_res res; 9317 }; 9318 9319 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 9320 { 9321 struct nfs4_reclaim_complete_data *calldata = data; 9322 9323 nfs4_setup_sequence(calldata->clp, 9324 &calldata->arg.seq_args, 9325 &calldata->res.seq_res, 9326 task); 9327 } 9328 9329 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 9330 { 9331 switch(task->tk_status) { 9332 case 0: 9333 wake_up_all(&clp->cl_lock_waitq); 9334 fallthrough; 9335 case -NFS4ERR_COMPLETE_ALREADY: 9336 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 9337 break; 9338 case -NFS4ERR_DELAY: 9339 rpc_delay(task, NFS4_POLL_RETRY_MAX); 9340 fallthrough; 9341 case -NFS4ERR_RETRY_UNCACHED_REP: 9342 return -EAGAIN; 9343 case -NFS4ERR_BADSESSION: 9344 case -NFS4ERR_DEADSESSION: 9345 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 9346 break; 9347 default: 9348 nfs4_schedule_lease_recovery(clp); 9349 } 9350 return 0; 9351 } 9352 9353 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 9354 { 9355 struct nfs4_reclaim_complete_data *calldata = data; 9356 struct nfs_client *clp = calldata->clp; 9357 struct nfs4_sequence_res *res = &calldata->res.seq_res; 9358 9359 dprintk("--> %s\n", __func__); 9360 if (!nfs41_sequence_done(task, res)) 9361 return; 9362 9363 trace_nfs4_reclaim_complete(clp, task->tk_status); 9364 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 9365 rpc_restart_call_prepare(task); 9366 return; 9367 } 9368 dprintk("<-- %s\n", __func__); 9369 } 9370 9371 static void nfs4_free_reclaim_complete_data(void *data) 9372 { 9373 struct nfs4_reclaim_complete_data *calldata = data; 9374 9375 kfree(calldata); 9376 } 9377 9378 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 9379 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 9380 .rpc_call_done = nfs4_reclaim_complete_done, 9381 .rpc_release = nfs4_free_reclaim_complete_data, 9382 }; 9383 9384 /* 9385 * Issue a global reclaim complete. 9386 */ 9387 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 9388 const struct cred *cred) 9389 { 9390 struct nfs4_reclaim_complete_data *calldata; 9391 struct rpc_message msg = { 9392 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 9393 .rpc_cred = cred, 9394 }; 9395 struct rpc_task_setup task_setup_data = { 9396 .rpc_client = clp->cl_rpcclient, 9397 .rpc_message = &msg, 9398 .callback_ops = &nfs4_reclaim_complete_call_ops, 9399 .flags = RPC_TASK_NO_ROUND_ROBIN, 9400 }; 9401 int status = -ENOMEM; 9402 9403 dprintk("--> %s\n", __func__); 9404 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 9405 if (calldata == NULL) 9406 goto out; 9407 calldata->clp = clp; 9408 calldata->arg.one_fs = 0; 9409 9410 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1); 9411 msg.rpc_argp = &calldata->arg; 9412 msg.rpc_resp = &calldata->res; 9413 task_setup_data.callback_data = calldata; 9414 status = nfs4_call_sync_custom(&task_setup_data); 9415 out: 9416 dprintk("<-- %s status=%d\n", __func__, status); 9417 return status; 9418 } 9419 9420 static void 9421 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 9422 { 9423 struct nfs4_layoutget *lgp = calldata; 9424 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 9425 9426 dprintk("--> %s\n", __func__); 9427 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args, 9428 &lgp->res.seq_res, task); 9429 dprintk("<-- %s\n", __func__); 9430 } 9431 9432 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 9433 { 9434 struct nfs4_layoutget *lgp = calldata; 9435 9436 dprintk("--> %s\n", __func__); 9437 nfs41_sequence_process(task, &lgp->res.seq_res); 9438 dprintk("<-- %s\n", __func__); 9439 } 9440 9441 static int 9442 nfs4_layoutget_handle_exception(struct rpc_task *task, 9443 struct nfs4_layoutget *lgp, struct nfs4_exception *exception) 9444 { 9445 struct inode *inode = lgp->args.inode; 9446 struct nfs_server *server = NFS_SERVER(inode); 9447 struct pnfs_layout_hdr *lo = lgp->lo; 9448 int nfs4err = task->tk_status; 9449 int err, status = 0; 9450 LIST_HEAD(head); 9451 9452 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 9453 9454 nfs4_sequence_free_slot(&lgp->res.seq_res); 9455 9456 switch (nfs4err) { 9457 case 0: 9458 goto out; 9459 9460 /* 9461 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs 9462 * on the file. set tk_status to -ENODATA to tell upper layer to 9463 * retry go inband. 9464 */ 9465 case -NFS4ERR_LAYOUTUNAVAILABLE: 9466 status = -ENODATA; 9467 goto out; 9468 /* 9469 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 9470 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 9471 */ 9472 case -NFS4ERR_BADLAYOUT: 9473 status = -EOVERFLOW; 9474 goto out; 9475 /* 9476 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 9477 * (or clients) writing to the same RAID stripe except when 9478 * the minlength argument is 0 (see RFC5661 section 18.43.3). 9479 * 9480 * Treat it like we would RECALLCONFLICT -- we retry for a little 9481 * while, and then eventually give up. 9482 */ 9483 case -NFS4ERR_LAYOUTTRYLATER: 9484 if (lgp->args.minlength == 0) { 9485 status = -EOVERFLOW; 9486 goto out; 9487 } 9488 status = -EBUSY; 9489 break; 9490 case -NFS4ERR_RECALLCONFLICT: 9491 status = -ERECALLCONFLICT; 9492 break; 9493 case -NFS4ERR_DELEG_REVOKED: 9494 case -NFS4ERR_ADMIN_REVOKED: 9495 case -NFS4ERR_EXPIRED: 9496 case -NFS4ERR_BAD_STATEID: 9497 exception->timeout = 0; 9498 spin_lock(&inode->i_lock); 9499 /* If the open stateid was bad, then recover it. */ 9500 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 9501 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) { 9502 spin_unlock(&inode->i_lock); 9503 exception->state = lgp->args.ctx->state; 9504 exception->stateid = &lgp->args.stateid; 9505 break; 9506 } 9507 9508 /* 9509 * Mark the bad layout state as invalid, then retry 9510 */ 9511 pnfs_mark_layout_stateid_invalid(lo, &head); 9512 spin_unlock(&inode->i_lock); 9513 nfs_commit_inode(inode, 0); 9514 pnfs_free_lseg_list(&head); 9515 status = -EAGAIN; 9516 goto out; 9517 } 9518 9519 err = nfs4_handle_exception(server, nfs4err, exception); 9520 if (!status) { 9521 if (exception->retry) 9522 status = -EAGAIN; 9523 else 9524 status = err; 9525 } 9526 out: 9527 dprintk("<-- %s\n", __func__); 9528 return status; 9529 } 9530 9531 size_t max_response_pages(struct nfs_server *server) 9532 { 9533 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 9534 return nfs_page_array_len(0, max_resp_sz); 9535 } 9536 9537 static void nfs4_layoutget_release(void *calldata) 9538 { 9539 struct nfs4_layoutget *lgp = calldata; 9540 9541 dprintk("--> %s\n", __func__); 9542 nfs4_sequence_free_slot(&lgp->res.seq_res); 9543 pnfs_layoutget_free(lgp); 9544 dprintk("<-- %s\n", __func__); 9545 } 9546 9547 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 9548 .rpc_call_prepare = nfs4_layoutget_prepare, 9549 .rpc_call_done = nfs4_layoutget_done, 9550 .rpc_release = nfs4_layoutget_release, 9551 }; 9552 9553 struct pnfs_layout_segment * 9554 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout) 9555 { 9556 struct inode *inode = lgp->args.inode; 9557 struct nfs_server *server = NFS_SERVER(inode); 9558 struct rpc_task *task; 9559 struct rpc_message msg = { 9560 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 9561 .rpc_argp = &lgp->args, 9562 .rpc_resp = &lgp->res, 9563 .rpc_cred = lgp->cred, 9564 }; 9565 struct rpc_task_setup task_setup_data = { 9566 .rpc_client = server->client, 9567 .rpc_message = &msg, 9568 .callback_ops = &nfs4_layoutget_call_ops, 9569 .callback_data = lgp, 9570 .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF | 9571 RPC_TASK_MOVEABLE, 9572 }; 9573 struct pnfs_layout_segment *lseg = NULL; 9574 struct nfs4_exception exception = { 9575 .inode = inode, 9576 .timeout = *timeout, 9577 }; 9578 int status = 0; 9579 9580 dprintk("--> %s\n", __func__); 9581 9582 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0); 9583 9584 task = rpc_run_task(&task_setup_data); 9585 9586 status = rpc_wait_for_completion_task(task); 9587 if (status != 0) 9588 goto out; 9589 9590 if (task->tk_status < 0) { 9591 status = nfs4_layoutget_handle_exception(task, lgp, &exception); 9592 *timeout = exception.timeout; 9593 } else if (lgp->res.layoutp->len == 0) { 9594 status = -EAGAIN; 9595 *timeout = nfs4_update_delay(&exception.timeout); 9596 } else 9597 lseg = pnfs_layout_process(lgp); 9598 out: 9599 trace_nfs4_layoutget(lgp->args.ctx, 9600 &lgp->args.range, 9601 &lgp->res.range, 9602 &lgp->res.stateid, 9603 status); 9604 9605 rpc_put_task(task); 9606 dprintk("<-- %s status=%d\n", __func__, status); 9607 if (status) 9608 return ERR_PTR(status); 9609 return lseg; 9610 } 9611 9612 static void 9613 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 9614 { 9615 struct nfs4_layoutreturn *lrp = calldata; 9616 9617 dprintk("--> %s\n", __func__); 9618 nfs4_setup_sequence(lrp->clp, 9619 &lrp->args.seq_args, 9620 &lrp->res.seq_res, 9621 task); 9622 if (!pnfs_layout_is_valid(lrp->args.layout)) 9623 rpc_exit(task, 0); 9624 } 9625 9626 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 9627 { 9628 struct nfs4_layoutreturn *lrp = calldata; 9629 struct nfs_server *server; 9630 9631 dprintk("--> %s\n", __func__); 9632 9633 if (!nfs41_sequence_process(task, &lrp->res.seq_res)) 9634 return; 9635 9636 /* 9637 * Was there an RPC level error? Assume the call succeeded, 9638 * and that we need to release the layout 9639 */ 9640 if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) { 9641 lrp->res.lrs_present = 0; 9642 return; 9643 } 9644 9645 server = NFS_SERVER(lrp->args.inode); 9646 switch (task->tk_status) { 9647 case -NFS4ERR_OLD_STATEID: 9648 if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid, 9649 &lrp->args.range, 9650 lrp->args.inode)) 9651 goto out_restart; 9652 fallthrough; 9653 default: 9654 task->tk_status = 0; 9655 fallthrough; 9656 case 0: 9657 break; 9658 case -NFS4ERR_DELAY: 9659 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) 9660 break; 9661 goto out_restart; 9662 } 9663 dprintk("<-- %s\n", __func__); 9664 return; 9665 out_restart: 9666 task->tk_status = 0; 9667 nfs4_sequence_free_slot(&lrp->res.seq_res); 9668 rpc_restart_call_prepare(task); 9669 } 9670 9671 static void nfs4_layoutreturn_release(void *calldata) 9672 { 9673 struct nfs4_layoutreturn *lrp = calldata; 9674 struct pnfs_layout_hdr *lo = lrp->args.layout; 9675 9676 dprintk("--> %s\n", __func__); 9677 pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range, 9678 lrp->res.lrs_present ? &lrp->res.stateid : NULL); 9679 nfs4_sequence_free_slot(&lrp->res.seq_res); 9680 if (lrp->ld_private.ops && lrp->ld_private.ops->free) 9681 lrp->ld_private.ops->free(&lrp->ld_private); 9682 pnfs_put_layout_hdr(lrp->args.layout); 9683 nfs_iput_and_deactive(lrp->inode); 9684 put_cred(lrp->cred); 9685 kfree(calldata); 9686 dprintk("<-- %s\n", __func__); 9687 } 9688 9689 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 9690 .rpc_call_prepare = nfs4_layoutreturn_prepare, 9691 .rpc_call_done = nfs4_layoutreturn_done, 9692 .rpc_release = nfs4_layoutreturn_release, 9693 }; 9694 9695 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync) 9696 { 9697 struct rpc_task *task; 9698 struct rpc_message msg = { 9699 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 9700 .rpc_argp = &lrp->args, 9701 .rpc_resp = &lrp->res, 9702 .rpc_cred = lrp->cred, 9703 }; 9704 struct rpc_task_setup task_setup_data = { 9705 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 9706 .rpc_message = &msg, 9707 .callback_ops = &nfs4_layoutreturn_call_ops, 9708 .callback_data = lrp, 9709 .flags = RPC_TASK_MOVEABLE, 9710 }; 9711 int status = 0; 9712 9713 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 9714 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 9715 &task_setup_data.rpc_client, &msg); 9716 9717 dprintk("--> %s\n", __func__); 9718 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 9719 if (!sync) { 9720 if (!lrp->inode) { 9721 nfs4_layoutreturn_release(lrp); 9722 return -EAGAIN; 9723 } 9724 task_setup_data.flags |= RPC_TASK_ASYNC; 9725 } 9726 if (!lrp->inode) 9727 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 9728 1); 9729 else 9730 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1, 9731 0); 9732 task = rpc_run_task(&task_setup_data); 9733 if (IS_ERR(task)) 9734 return PTR_ERR(task); 9735 if (sync) 9736 status = task->tk_status; 9737 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 9738 dprintk("<-- %s status=%d\n", __func__, status); 9739 rpc_put_task(task); 9740 return status; 9741 } 9742 9743 static int 9744 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 9745 struct pnfs_device *pdev, 9746 const struct cred *cred) 9747 { 9748 struct nfs4_getdeviceinfo_args args = { 9749 .pdev = pdev, 9750 .notify_types = NOTIFY_DEVICEID4_CHANGE | 9751 NOTIFY_DEVICEID4_DELETE, 9752 }; 9753 struct nfs4_getdeviceinfo_res res = { 9754 .pdev = pdev, 9755 }; 9756 struct rpc_message msg = { 9757 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 9758 .rpc_argp = &args, 9759 .rpc_resp = &res, 9760 .rpc_cred = cred, 9761 }; 9762 int status; 9763 9764 dprintk("--> %s\n", __func__); 9765 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 9766 if (res.notification & ~args.notify_types) 9767 dprintk("%s: unsupported notification\n", __func__); 9768 if (res.notification != args.notify_types) 9769 pdev->nocache = 1; 9770 9771 trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status); 9772 9773 dprintk("<-- %s status=%d\n", __func__, status); 9774 9775 return status; 9776 } 9777 9778 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 9779 struct pnfs_device *pdev, 9780 const struct cred *cred) 9781 { 9782 struct nfs4_exception exception = { }; 9783 int err; 9784 9785 do { 9786 err = nfs4_handle_exception(server, 9787 _nfs4_proc_getdeviceinfo(server, pdev, cred), 9788 &exception); 9789 } while (exception.retry); 9790 return err; 9791 } 9792 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 9793 9794 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 9795 { 9796 struct nfs4_layoutcommit_data *data = calldata; 9797 struct nfs_server *server = NFS_SERVER(data->args.inode); 9798 9799 nfs4_setup_sequence(server->nfs_client, 9800 &data->args.seq_args, 9801 &data->res.seq_res, 9802 task); 9803 } 9804 9805 static void 9806 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 9807 { 9808 struct nfs4_layoutcommit_data *data = calldata; 9809 struct nfs_server *server = NFS_SERVER(data->args.inode); 9810 9811 if (!nfs41_sequence_done(task, &data->res.seq_res)) 9812 return; 9813 9814 switch (task->tk_status) { /* Just ignore these failures */ 9815 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 9816 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 9817 case -NFS4ERR_BADLAYOUT: /* no layout */ 9818 case -NFS4ERR_GRACE: /* loca_recalim always false */ 9819 task->tk_status = 0; 9820 break; 9821 case 0: 9822 break; 9823 default: 9824 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 9825 rpc_restart_call_prepare(task); 9826 return; 9827 } 9828 } 9829 } 9830 9831 static void nfs4_layoutcommit_release(void *calldata) 9832 { 9833 struct nfs4_layoutcommit_data *data = calldata; 9834 9835 pnfs_cleanup_layoutcommit(data); 9836 nfs_post_op_update_inode_force_wcc(data->args.inode, 9837 data->res.fattr); 9838 put_cred(data->cred); 9839 nfs_iput_and_deactive(data->inode); 9840 kfree(data); 9841 } 9842 9843 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 9844 .rpc_call_prepare = nfs4_layoutcommit_prepare, 9845 .rpc_call_done = nfs4_layoutcommit_done, 9846 .rpc_release = nfs4_layoutcommit_release, 9847 }; 9848 9849 int 9850 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 9851 { 9852 struct rpc_message msg = { 9853 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 9854 .rpc_argp = &data->args, 9855 .rpc_resp = &data->res, 9856 .rpc_cred = data->cred, 9857 }; 9858 struct rpc_task_setup task_setup_data = { 9859 .task = &data->task, 9860 .rpc_client = NFS_CLIENT(data->args.inode), 9861 .rpc_message = &msg, 9862 .callback_ops = &nfs4_layoutcommit_ops, 9863 .callback_data = data, 9864 .flags = RPC_TASK_MOVEABLE, 9865 }; 9866 struct rpc_task *task; 9867 int status = 0; 9868 9869 dprintk("NFS: initiating layoutcommit call. sync %d " 9870 "lbw: %llu inode %lu\n", sync, 9871 data->args.lastbytewritten, 9872 data->args.inode->i_ino); 9873 9874 if (!sync) { 9875 data->inode = nfs_igrab_and_active(data->args.inode); 9876 if (data->inode == NULL) { 9877 nfs4_layoutcommit_release(data); 9878 return -EAGAIN; 9879 } 9880 task_setup_data.flags = RPC_TASK_ASYNC; 9881 } 9882 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0); 9883 task = rpc_run_task(&task_setup_data); 9884 if (IS_ERR(task)) 9885 return PTR_ERR(task); 9886 if (sync) 9887 status = task->tk_status; 9888 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 9889 dprintk("%s: status %d\n", __func__, status); 9890 rpc_put_task(task); 9891 return status; 9892 } 9893 9894 /* 9895 * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if 9896 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 9897 */ 9898 static int 9899 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 9900 struct nfs_fsinfo *info, 9901 struct nfs4_secinfo_flavors *flavors, bool use_integrity) 9902 { 9903 struct nfs41_secinfo_no_name_args args = { 9904 .style = SECINFO_STYLE_CURRENT_FH, 9905 }; 9906 struct nfs4_secinfo_res res = { 9907 .flavors = flavors, 9908 }; 9909 struct rpc_message msg = { 9910 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 9911 .rpc_argp = &args, 9912 .rpc_resp = &res, 9913 }; 9914 struct nfs4_call_sync_data data = { 9915 .seq_server = server, 9916 .seq_args = &args.seq_args, 9917 .seq_res = &res.seq_res, 9918 }; 9919 struct rpc_task_setup task_setup = { 9920 .rpc_client = server->client, 9921 .rpc_message = &msg, 9922 .callback_ops = server->nfs_client->cl_mvops->call_sync_ops, 9923 .callback_data = &data, 9924 .flags = RPC_TASK_NO_ROUND_ROBIN, 9925 }; 9926 const struct cred *cred = NULL; 9927 int status; 9928 9929 if (use_integrity) { 9930 task_setup.rpc_client = server->nfs_client->cl_rpcclient; 9931 9932 cred = nfs4_get_clid_cred(server->nfs_client); 9933 msg.rpc_cred = cred; 9934 } 9935 9936 dprintk("--> %s\n", __func__); 9937 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); 9938 status = nfs4_call_sync_custom(&task_setup); 9939 dprintk("<-- %s status=%d\n", __func__, status); 9940 9941 put_cred(cred); 9942 9943 return status; 9944 } 9945 9946 static int 9947 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 9948 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 9949 { 9950 struct nfs4_exception exception = { 9951 .interruptible = true, 9952 }; 9953 int err; 9954 do { 9955 /* first try using integrity protection */ 9956 err = -NFS4ERR_WRONGSEC; 9957 9958 /* try to use integrity protection with machine cred */ 9959 if (_nfs4_is_integrity_protected(server->nfs_client)) 9960 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 9961 flavors, true); 9962 9963 /* 9964 * if unable to use integrity protection, or SECINFO with 9965 * integrity protection returns NFS4ERR_WRONGSEC (which is 9966 * disallowed by spec, but exists in deployed servers) use 9967 * the current filesystem's rpc_client and the user cred. 9968 */ 9969 if (err == -NFS4ERR_WRONGSEC) 9970 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 9971 flavors, false); 9972 9973 switch (err) { 9974 case 0: 9975 case -NFS4ERR_WRONGSEC: 9976 case -ENOTSUPP: 9977 goto out; 9978 default: 9979 err = nfs4_handle_exception(server, err, &exception); 9980 } 9981 } while (exception.retry); 9982 out: 9983 return err; 9984 } 9985 9986 static int 9987 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 9988 struct nfs_fsinfo *info) 9989 { 9990 int err; 9991 struct page *page; 9992 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 9993 struct nfs4_secinfo_flavors *flavors; 9994 struct nfs4_secinfo4 *secinfo; 9995 int i; 9996 9997 page = alloc_page(GFP_KERNEL); 9998 if (!page) { 9999 err = -ENOMEM; 10000 goto out; 10001 } 10002 10003 flavors = page_address(page); 10004 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 10005 10006 /* 10007 * Fall back on "guess and check" method if 10008 * the server doesn't support SECINFO_NO_NAME 10009 */ 10010 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 10011 err = nfs4_find_root_sec(server, fhandle, info); 10012 goto out_freepage; 10013 } 10014 if (err) 10015 goto out_freepage; 10016 10017 for (i = 0; i < flavors->num_flavors; i++) { 10018 secinfo = &flavors->flavors[i]; 10019 10020 switch (secinfo->flavor) { 10021 case RPC_AUTH_NULL: 10022 case RPC_AUTH_UNIX: 10023 case RPC_AUTH_GSS: 10024 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 10025 &secinfo->flavor_info); 10026 break; 10027 default: 10028 flavor = RPC_AUTH_MAXFLAVOR; 10029 break; 10030 } 10031 10032 if (!nfs_auth_info_match(&server->auth_info, flavor)) 10033 flavor = RPC_AUTH_MAXFLAVOR; 10034 10035 if (flavor != RPC_AUTH_MAXFLAVOR) { 10036 err = nfs4_lookup_root_sec(server, fhandle, 10037 info, flavor); 10038 if (!err) 10039 break; 10040 } 10041 } 10042 10043 if (flavor == RPC_AUTH_MAXFLAVOR) 10044 err = -EPERM; 10045 10046 out_freepage: 10047 put_page(page); 10048 if (err == -EACCES) 10049 return -EPERM; 10050 out: 10051 return err; 10052 } 10053 10054 static int _nfs41_test_stateid(struct nfs_server *server, 10055 nfs4_stateid *stateid, 10056 const struct cred *cred) 10057 { 10058 int status; 10059 struct nfs41_test_stateid_args args = { 10060 .stateid = stateid, 10061 }; 10062 struct nfs41_test_stateid_res res; 10063 struct rpc_message msg = { 10064 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 10065 .rpc_argp = &args, 10066 .rpc_resp = &res, 10067 .rpc_cred = cred, 10068 }; 10069 struct rpc_clnt *rpc_client = server->client; 10070 10071 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10072 &rpc_client, &msg); 10073 10074 dprintk("NFS call test_stateid %p\n", stateid); 10075 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1); 10076 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 10077 &args.seq_args, &res.seq_res); 10078 if (status != NFS_OK) { 10079 dprintk("NFS reply test_stateid: failed, %d\n", status); 10080 return status; 10081 } 10082 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 10083 return -res.status; 10084 } 10085 10086 static void nfs4_handle_delay_or_session_error(struct nfs_server *server, 10087 int err, struct nfs4_exception *exception) 10088 { 10089 exception->retry = 0; 10090 switch(err) { 10091 case -NFS4ERR_DELAY: 10092 case -NFS4ERR_RETRY_UNCACHED_REP: 10093 nfs4_handle_exception(server, err, exception); 10094 break; 10095 case -NFS4ERR_BADSESSION: 10096 case -NFS4ERR_BADSLOT: 10097 case -NFS4ERR_BAD_HIGH_SLOT: 10098 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 10099 case -NFS4ERR_DEADSESSION: 10100 nfs4_do_handle_exception(server, err, exception); 10101 } 10102 } 10103 10104 /** 10105 * nfs41_test_stateid - perform a TEST_STATEID operation 10106 * 10107 * @server: server / transport on which to perform the operation 10108 * @stateid: state ID to test 10109 * @cred: credential 10110 * 10111 * Returns NFS_OK if the server recognizes that "stateid" is valid. 10112 * Otherwise a negative NFS4ERR value is returned if the operation 10113 * failed or the state ID is not currently valid. 10114 */ 10115 static int nfs41_test_stateid(struct nfs_server *server, 10116 nfs4_stateid *stateid, 10117 const struct cred *cred) 10118 { 10119 struct nfs4_exception exception = { 10120 .interruptible = true, 10121 }; 10122 int err; 10123 do { 10124 err = _nfs41_test_stateid(server, stateid, cred); 10125 nfs4_handle_delay_or_session_error(server, err, &exception); 10126 } while (exception.retry); 10127 return err; 10128 } 10129 10130 struct nfs_free_stateid_data { 10131 struct nfs_server *server; 10132 struct nfs41_free_stateid_args args; 10133 struct nfs41_free_stateid_res res; 10134 }; 10135 10136 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 10137 { 10138 struct nfs_free_stateid_data *data = calldata; 10139 nfs4_setup_sequence(data->server->nfs_client, 10140 &data->args.seq_args, 10141 &data->res.seq_res, 10142 task); 10143 } 10144 10145 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 10146 { 10147 struct nfs_free_stateid_data *data = calldata; 10148 10149 nfs41_sequence_done(task, &data->res.seq_res); 10150 10151 switch (task->tk_status) { 10152 case -NFS4ERR_DELAY: 10153 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 10154 rpc_restart_call_prepare(task); 10155 } 10156 } 10157 10158 static void nfs41_free_stateid_release(void *calldata) 10159 { 10160 kfree(calldata); 10161 } 10162 10163 static const struct rpc_call_ops nfs41_free_stateid_ops = { 10164 .rpc_call_prepare = nfs41_free_stateid_prepare, 10165 .rpc_call_done = nfs41_free_stateid_done, 10166 .rpc_release = nfs41_free_stateid_release, 10167 }; 10168 10169 /** 10170 * nfs41_free_stateid - perform a FREE_STATEID operation 10171 * 10172 * @server: server / transport on which to perform the operation 10173 * @stateid: state ID to release 10174 * @cred: credential 10175 * @privileged: set to true if this call needs to be privileged 10176 * 10177 * Note: this function is always asynchronous. 10178 */ 10179 static int nfs41_free_stateid(struct nfs_server *server, 10180 const nfs4_stateid *stateid, 10181 const struct cred *cred, 10182 bool privileged) 10183 { 10184 struct rpc_message msg = { 10185 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 10186 .rpc_cred = cred, 10187 }; 10188 struct rpc_task_setup task_setup = { 10189 .rpc_client = server->client, 10190 .rpc_message = &msg, 10191 .callback_ops = &nfs41_free_stateid_ops, 10192 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, 10193 }; 10194 struct nfs_free_stateid_data *data; 10195 struct rpc_task *task; 10196 10197 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 10198 &task_setup.rpc_client, &msg); 10199 10200 dprintk("NFS call free_stateid %p\n", stateid); 10201 data = kmalloc(sizeof(*data), GFP_NOFS); 10202 if (!data) 10203 return -ENOMEM; 10204 data->server = server; 10205 nfs4_stateid_copy(&data->args.stateid, stateid); 10206 10207 task_setup.callback_data = data; 10208 10209 msg.rpc_argp = &data->args; 10210 msg.rpc_resp = &data->res; 10211 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged); 10212 task = rpc_run_task(&task_setup); 10213 if (IS_ERR(task)) 10214 return PTR_ERR(task); 10215 rpc_put_task(task); 10216 return 0; 10217 } 10218 10219 static void 10220 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 10221 { 10222 const struct cred *cred = lsp->ls_state->owner->so_cred; 10223 10224 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 10225 nfs4_free_lock_state(server, lsp); 10226 } 10227 10228 static bool nfs41_match_stateid(const nfs4_stateid *s1, 10229 const nfs4_stateid *s2) 10230 { 10231 if (s1->type != s2->type) 10232 return false; 10233 10234 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 10235 return false; 10236 10237 if (s1->seqid == s2->seqid) 10238 return true; 10239 10240 return s1->seqid == 0 || s2->seqid == 0; 10241 } 10242 10243 #endif /* CONFIG_NFS_V4_1 */ 10244 10245 static bool nfs4_match_stateid(const nfs4_stateid *s1, 10246 const nfs4_stateid *s2) 10247 { 10248 return nfs4_stateid_match(s1, s2); 10249 } 10250 10251 10252 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 10253 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10254 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10255 .recover_open = nfs4_open_reclaim, 10256 .recover_lock = nfs4_lock_reclaim, 10257 .establish_clid = nfs4_init_clientid, 10258 .detect_trunking = nfs40_discover_server_trunking, 10259 }; 10260 10261 #if defined(CONFIG_NFS_V4_1) 10262 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 10263 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 10264 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 10265 .recover_open = nfs4_open_reclaim, 10266 .recover_lock = nfs4_lock_reclaim, 10267 .establish_clid = nfs41_init_clientid, 10268 .reclaim_complete = nfs41_proc_reclaim_complete, 10269 .detect_trunking = nfs41_discover_server_trunking, 10270 }; 10271 #endif /* CONFIG_NFS_V4_1 */ 10272 10273 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 10274 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10275 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10276 .recover_open = nfs40_open_expired, 10277 .recover_lock = nfs4_lock_expired, 10278 .establish_clid = nfs4_init_clientid, 10279 }; 10280 10281 #if defined(CONFIG_NFS_V4_1) 10282 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 10283 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 10284 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 10285 .recover_open = nfs41_open_expired, 10286 .recover_lock = nfs41_lock_expired, 10287 .establish_clid = nfs41_init_clientid, 10288 }; 10289 #endif /* CONFIG_NFS_V4_1 */ 10290 10291 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 10292 .sched_state_renewal = nfs4_proc_async_renew, 10293 .get_state_renewal_cred = nfs4_get_renew_cred, 10294 .renew_lease = nfs4_proc_renew, 10295 }; 10296 10297 #if defined(CONFIG_NFS_V4_1) 10298 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 10299 .sched_state_renewal = nfs41_proc_async_sequence, 10300 .get_state_renewal_cred = nfs4_get_machine_cred, 10301 .renew_lease = nfs4_proc_sequence, 10302 }; 10303 #endif 10304 10305 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 10306 .get_locations = _nfs40_proc_get_locations, 10307 .fsid_present = _nfs40_proc_fsid_present, 10308 }; 10309 10310 #if defined(CONFIG_NFS_V4_1) 10311 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 10312 .get_locations = _nfs41_proc_get_locations, 10313 .fsid_present = _nfs41_proc_fsid_present, 10314 }; 10315 #endif /* CONFIG_NFS_V4_1 */ 10316 10317 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 10318 .minor_version = 0, 10319 .init_caps = NFS_CAP_READDIRPLUS 10320 | NFS_CAP_ATOMIC_OPEN 10321 | NFS_CAP_POSIX_LOCK, 10322 .init_client = nfs40_init_client, 10323 .shutdown_client = nfs40_shutdown_client, 10324 .match_stateid = nfs4_match_stateid, 10325 .find_root_sec = nfs4_find_root_sec, 10326 .free_lock_state = nfs4_release_lockowner, 10327 .test_and_free_expired = nfs40_test_and_free_expired_stateid, 10328 .alloc_seqid = nfs_alloc_seqid, 10329 .call_sync_ops = &nfs40_call_sync_ops, 10330 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 10331 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 10332 .state_renewal_ops = &nfs40_state_renewal_ops, 10333 .mig_recovery_ops = &nfs40_mig_recovery_ops, 10334 }; 10335 10336 #if defined(CONFIG_NFS_V4_1) 10337 static struct nfs_seqid * 10338 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 10339 { 10340 return NULL; 10341 } 10342 10343 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 10344 .minor_version = 1, 10345 .init_caps = NFS_CAP_READDIRPLUS 10346 | NFS_CAP_ATOMIC_OPEN 10347 | NFS_CAP_POSIX_LOCK 10348 | NFS_CAP_STATEID_NFSV41 10349 | NFS_CAP_ATOMIC_OPEN_V1 10350 | NFS_CAP_LGOPEN, 10351 .init_client = nfs41_init_client, 10352 .shutdown_client = nfs41_shutdown_client, 10353 .match_stateid = nfs41_match_stateid, 10354 .find_root_sec = nfs41_find_root_sec, 10355 .free_lock_state = nfs41_free_lock_state, 10356 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10357 .alloc_seqid = nfs_alloc_no_seqid, 10358 .session_trunk = nfs4_test_session_trunk, 10359 .call_sync_ops = &nfs41_call_sync_ops, 10360 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10361 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10362 .state_renewal_ops = &nfs41_state_renewal_ops, 10363 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10364 }; 10365 #endif 10366 10367 #if defined(CONFIG_NFS_V4_2) 10368 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 10369 .minor_version = 2, 10370 .init_caps = NFS_CAP_READDIRPLUS 10371 | NFS_CAP_ATOMIC_OPEN 10372 | NFS_CAP_POSIX_LOCK 10373 | NFS_CAP_STATEID_NFSV41 10374 | NFS_CAP_ATOMIC_OPEN_V1 10375 | NFS_CAP_LGOPEN 10376 | NFS_CAP_ALLOCATE 10377 | NFS_CAP_COPY 10378 | NFS_CAP_OFFLOAD_CANCEL 10379 | NFS_CAP_COPY_NOTIFY 10380 | NFS_CAP_DEALLOCATE 10381 | NFS_CAP_SEEK 10382 | NFS_CAP_LAYOUTSTATS 10383 | NFS_CAP_CLONE 10384 | NFS_CAP_LAYOUTERROR 10385 | NFS_CAP_READ_PLUS, 10386 .init_client = nfs41_init_client, 10387 .shutdown_client = nfs41_shutdown_client, 10388 .match_stateid = nfs41_match_stateid, 10389 .find_root_sec = nfs41_find_root_sec, 10390 .free_lock_state = nfs41_free_lock_state, 10391 .call_sync_ops = &nfs41_call_sync_ops, 10392 .test_and_free_expired = nfs41_test_and_free_expired_stateid, 10393 .alloc_seqid = nfs_alloc_no_seqid, 10394 .session_trunk = nfs4_test_session_trunk, 10395 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 10396 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 10397 .state_renewal_ops = &nfs41_state_renewal_ops, 10398 .mig_recovery_ops = &nfs41_mig_recovery_ops, 10399 }; 10400 #endif 10401 10402 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 10403 [0] = &nfs_v4_0_minor_ops, 10404 #if defined(CONFIG_NFS_V4_1) 10405 [1] = &nfs_v4_1_minor_ops, 10406 #endif 10407 #if defined(CONFIG_NFS_V4_2) 10408 [2] = &nfs_v4_2_minor_ops, 10409 #endif 10410 }; 10411 10412 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 10413 { 10414 ssize_t error, error2, error3; 10415 10416 error = generic_listxattr(dentry, list, size); 10417 if (error < 0) 10418 return error; 10419 if (list) { 10420 list += error; 10421 size -= error; 10422 } 10423 10424 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size); 10425 if (error2 < 0) 10426 return error2; 10427 10428 if (list) { 10429 list += error2; 10430 size -= error2; 10431 } 10432 10433 error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size); 10434 if (error3 < 0) 10435 return error3; 10436 10437 return error + error2 + error3; 10438 } 10439 10440 static const struct inode_operations nfs4_dir_inode_operations = { 10441 .create = nfs_create, 10442 .lookup = nfs_lookup, 10443 .atomic_open = nfs_atomic_open, 10444 .link = nfs_link, 10445 .unlink = nfs_unlink, 10446 .symlink = nfs_symlink, 10447 .mkdir = nfs_mkdir, 10448 .rmdir = nfs_rmdir, 10449 .mknod = nfs_mknod, 10450 .rename = nfs_rename, 10451 .permission = nfs_permission, 10452 .getattr = nfs_getattr, 10453 .setattr = nfs_setattr, 10454 .listxattr = nfs4_listxattr, 10455 }; 10456 10457 static const struct inode_operations nfs4_file_inode_operations = { 10458 .permission = nfs_permission, 10459 .getattr = nfs_getattr, 10460 .setattr = nfs_setattr, 10461 .listxattr = nfs4_listxattr, 10462 }; 10463 10464 const struct nfs_rpc_ops nfs_v4_clientops = { 10465 .version = 4, /* protocol version */ 10466 .dentry_ops = &nfs4_dentry_operations, 10467 .dir_inode_ops = &nfs4_dir_inode_operations, 10468 .file_inode_ops = &nfs4_file_inode_operations, 10469 .file_ops = &nfs4_file_operations, 10470 .getroot = nfs4_proc_get_root, 10471 .submount = nfs4_submount, 10472 .try_get_tree = nfs4_try_get_tree, 10473 .getattr = nfs4_proc_getattr, 10474 .setattr = nfs4_proc_setattr, 10475 .lookup = nfs4_proc_lookup, 10476 .lookupp = nfs4_proc_lookupp, 10477 .access = nfs4_proc_access, 10478 .readlink = nfs4_proc_readlink, 10479 .create = nfs4_proc_create, 10480 .remove = nfs4_proc_remove, 10481 .unlink_setup = nfs4_proc_unlink_setup, 10482 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 10483 .unlink_done = nfs4_proc_unlink_done, 10484 .rename_setup = nfs4_proc_rename_setup, 10485 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 10486 .rename_done = nfs4_proc_rename_done, 10487 .link = nfs4_proc_link, 10488 .symlink = nfs4_proc_symlink, 10489 .mkdir = nfs4_proc_mkdir, 10490 .rmdir = nfs4_proc_rmdir, 10491 .readdir = nfs4_proc_readdir, 10492 .mknod = nfs4_proc_mknod, 10493 .statfs = nfs4_proc_statfs, 10494 .fsinfo = nfs4_proc_fsinfo, 10495 .pathconf = nfs4_proc_pathconf, 10496 .set_capabilities = nfs4_server_capabilities, 10497 .decode_dirent = nfs4_decode_dirent, 10498 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 10499 .read_setup = nfs4_proc_read_setup, 10500 .read_done = nfs4_read_done, 10501 .write_setup = nfs4_proc_write_setup, 10502 .write_done = nfs4_write_done, 10503 .commit_setup = nfs4_proc_commit_setup, 10504 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 10505 .commit_done = nfs4_commit_done, 10506 .lock = nfs4_proc_lock, 10507 .clear_acl_cache = nfs4_zap_acl_attr, 10508 .close_context = nfs4_close_context, 10509 .open_context = nfs4_atomic_open, 10510 .have_delegation = nfs4_have_delegation, 10511 .alloc_client = nfs4_alloc_client, 10512 .init_client = nfs4_init_client, 10513 .free_client = nfs4_free_client, 10514 .create_server = nfs4_create_server, 10515 .clone_server = nfs_clone_server, 10516 }; 10517 10518 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 10519 .name = XATTR_NAME_NFSV4_ACL, 10520 .list = nfs4_xattr_list_nfs4_acl, 10521 .get = nfs4_xattr_get_nfs4_acl, 10522 .set = nfs4_xattr_set_nfs4_acl, 10523 }; 10524 10525 #ifdef CONFIG_NFS_V4_2 10526 static const struct xattr_handler nfs4_xattr_nfs4_user_handler = { 10527 .prefix = XATTR_USER_PREFIX, 10528 .get = nfs4_xattr_get_nfs4_user, 10529 .set = nfs4_xattr_set_nfs4_user, 10530 }; 10531 #endif 10532 10533 const struct xattr_handler *nfs4_xattr_handlers[] = { 10534 &nfs4_xattr_nfs4_acl_handler, 10535 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 10536 &nfs4_xattr_nfs4_label_handler, 10537 #endif 10538 #ifdef CONFIG_NFS_V4_2 10539 &nfs4_xattr_nfs4_user_handler, 10540 #endif 10541 NULL 10542 }; 10543