1 /* 2 * fs/nfs/nfs4proc.c 3 * 4 * Client-side procedure declarations for NFSv4. 5 * 6 * Copyright (c) 2002 The Regents of the University of Michigan. 7 * All rights reserved. 8 * 9 * Kendrick Smith <kmsmith@umich.edu> 10 * Andy Adamson <andros@umich.edu> 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 #include <linux/mm.h> 39 #include <linux/delay.h> 40 #include <linux/errno.h> 41 #include <linux/file.h> 42 #include <linux/string.h> 43 #include <linux/ratelimit.h> 44 #include <linux/printk.h> 45 #include <linux/slab.h> 46 #include <linux/sunrpc/clnt.h> 47 #include <linux/nfs.h> 48 #include <linux/nfs4.h> 49 #include <linux/nfs_fs.h> 50 #include <linux/nfs_page.h> 51 #include <linux/nfs_mount.h> 52 #include <linux/namei.h> 53 #include <linux/mount.h> 54 #include <linux/module.h> 55 #include <linux/xattr.h> 56 #include <linux/utsname.h> 57 #include <linux/freezer.h> 58 59 #include "nfs4_fs.h" 60 #include "delegation.h" 61 #include "internal.h" 62 #include "iostat.h" 63 #include "callback.h" 64 #include "pnfs.h" 65 #include "netns.h" 66 #include "nfs4idmap.h" 67 #include "nfs4session.h" 68 #include "fscache.h" 69 70 #include "nfs4trace.h" 71 72 #define NFSDBG_FACILITY NFSDBG_PROC 73 74 #define NFS4_POLL_RETRY_MIN (HZ/10) 75 #define NFS4_POLL_RETRY_MAX (15*HZ) 76 77 /* file attributes which can be mapped to nfs attributes */ 78 #define NFS4_VALID_ATTRS (ATTR_MODE \ 79 | ATTR_UID \ 80 | ATTR_GID \ 81 | ATTR_SIZE \ 82 | ATTR_ATIME \ 83 | ATTR_MTIME \ 84 | ATTR_CTIME \ 85 | ATTR_ATIME_SET \ 86 | ATTR_MTIME_SET) 87 88 struct nfs4_opendata; 89 static int _nfs4_proc_open(struct nfs4_opendata *data); 90 static int _nfs4_recover_proc_open(struct nfs4_opendata *data); 91 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); 92 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); 93 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label); 94 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label 
*label); 95 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 96 struct nfs_fattr *fattr, struct iattr *sattr, 97 struct nfs4_state *state, struct nfs4_label *ilabel, 98 struct nfs4_label *olabel); 99 #ifdef CONFIG_NFS_V4_1 100 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *, 101 struct rpc_cred *); 102 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *, 103 struct rpc_cred *); 104 #endif 105 106 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 107 static inline struct nfs4_label * 108 nfs4_label_init_security(struct inode *dir, struct dentry *dentry, 109 struct iattr *sattr, struct nfs4_label *label) 110 { 111 int err; 112 113 if (label == NULL) 114 return NULL; 115 116 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0) 117 return NULL; 118 119 err = security_dentry_init_security(dentry, sattr->ia_mode, 120 &dentry->d_name, (void **)&label->label, &label->len); 121 if (err == 0) 122 return label; 123 124 return NULL; 125 } 126 static inline void 127 nfs4_label_release_security(struct nfs4_label *label) 128 { 129 if (label) 130 security_release_secctx(label->label, label->len); 131 } 132 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label) 133 { 134 if (label) 135 return server->attr_bitmask; 136 137 return server->attr_bitmask_nl; 138 } 139 #else 140 static inline struct nfs4_label * 141 nfs4_label_init_security(struct inode *dir, struct dentry *dentry, 142 struct iattr *sattr, struct nfs4_label *l) 143 { return NULL; } 144 static inline void 145 nfs4_label_release_security(struct nfs4_label *label) 146 { return; } 147 static inline u32 * 148 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label) 149 { return server->attr_bitmask; } 150 #endif 151 152 /* Prevent leaks of NFSv4 errors into userland */ 153 static int nfs4_map_errors(int err) 154 { 155 if (err >= -1000) 156 return err; 157 switch (err) { 158 case -NFS4ERR_RESOURCE: 159 case -NFS4ERR_LAYOUTTRYLATER: 160 case -NFS4ERR_RECALLCONFLICT: 161 return -EREMOTEIO; 162 case -NFS4ERR_WRONGSEC: 163 case -NFS4ERR_WRONG_CRED: 164 return -EPERM; 165 case -NFS4ERR_BADOWNER: 166 case -NFS4ERR_BADNAME: 167 return -EINVAL; 168 case -NFS4ERR_SHARE_DENIED: 169 return -EACCES; 170 case -NFS4ERR_MINOR_VERS_MISMATCH: 171 return -EPROTONOSUPPORT; 172 case -NFS4ERR_FILE_OPEN: 173 return -EBUSY; 174 default: 175 dprintk("%s could not handle NFSv4 error %d\n", 176 __func__, -err); 177 break; 178 } 179 return -EIO; 180 } 181 182 /* 183 * This is our standard bitmap for GETATTR requests. 
184 */ 185 const u32 nfs4_fattr_bitmap[3] = { 186 FATTR4_WORD0_TYPE 187 | FATTR4_WORD0_CHANGE 188 | FATTR4_WORD0_SIZE 189 | FATTR4_WORD0_FSID 190 | FATTR4_WORD0_FILEID, 191 FATTR4_WORD1_MODE 192 | FATTR4_WORD1_NUMLINKS 193 | FATTR4_WORD1_OWNER 194 | FATTR4_WORD1_OWNER_GROUP 195 | FATTR4_WORD1_RAWDEV 196 | FATTR4_WORD1_SPACE_USED 197 | FATTR4_WORD1_TIME_ACCESS 198 | FATTR4_WORD1_TIME_METADATA 199 | FATTR4_WORD1_TIME_MODIFY 200 | FATTR4_WORD1_MOUNTED_ON_FILEID, 201 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 202 FATTR4_WORD2_SECURITY_LABEL 203 #endif 204 }; 205 206 static const u32 nfs4_pnfs_open_bitmap[3] = { 207 FATTR4_WORD0_TYPE 208 | FATTR4_WORD0_CHANGE 209 | FATTR4_WORD0_SIZE 210 | FATTR4_WORD0_FSID 211 | FATTR4_WORD0_FILEID, 212 FATTR4_WORD1_MODE 213 | FATTR4_WORD1_NUMLINKS 214 | FATTR4_WORD1_OWNER 215 | FATTR4_WORD1_OWNER_GROUP 216 | FATTR4_WORD1_RAWDEV 217 | FATTR4_WORD1_SPACE_USED 218 | FATTR4_WORD1_TIME_ACCESS 219 | FATTR4_WORD1_TIME_METADATA 220 | FATTR4_WORD1_TIME_MODIFY, 221 FATTR4_WORD2_MDSTHRESHOLD 222 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 223 | FATTR4_WORD2_SECURITY_LABEL 224 #endif 225 }; 226 227 static const u32 nfs4_open_noattr_bitmap[3] = { 228 FATTR4_WORD0_TYPE 229 | FATTR4_WORD0_CHANGE 230 | FATTR4_WORD0_FILEID, 231 }; 232 233 const u32 nfs4_statfs_bitmap[3] = { 234 FATTR4_WORD0_FILES_AVAIL 235 | FATTR4_WORD0_FILES_FREE 236 | FATTR4_WORD0_FILES_TOTAL, 237 FATTR4_WORD1_SPACE_AVAIL 238 | FATTR4_WORD1_SPACE_FREE 239 | FATTR4_WORD1_SPACE_TOTAL 240 }; 241 242 const u32 nfs4_pathconf_bitmap[3] = { 243 FATTR4_WORD0_MAXLINK 244 | FATTR4_WORD0_MAXNAME, 245 0 246 }; 247 248 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE 249 | FATTR4_WORD0_MAXREAD 250 | FATTR4_WORD0_MAXWRITE 251 | FATTR4_WORD0_LEASE_TIME, 252 FATTR4_WORD1_TIME_DELTA 253 | FATTR4_WORD1_FS_LAYOUT_TYPES, 254 FATTR4_WORD2_LAYOUT_BLKSIZE 255 | FATTR4_WORD2_CLONE_BLKSIZE 256 }; 257 258 const u32 nfs4_fs_locations_bitmap[3] = { 259 FATTR4_WORD0_TYPE 260 | FATTR4_WORD0_CHANGE 261 | FATTR4_WORD0_SIZE 262 | FATTR4_WORD0_FSID 263 | FATTR4_WORD0_FILEID 264 | FATTR4_WORD0_FS_LOCATIONS, 265 FATTR4_WORD1_MODE 266 | FATTR4_WORD1_NUMLINKS 267 | FATTR4_WORD1_OWNER 268 | FATTR4_WORD1_OWNER_GROUP 269 | FATTR4_WORD1_RAWDEV 270 | FATTR4_WORD1_SPACE_USED 271 | FATTR4_WORD1_TIME_ACCESS 272 | FATTR4_WORD1_TIME_METADATA 273 | FATTR4_WORD1_TIME_MODIFY 274 | FATTR4_WORD1_MOUNTED_ON_FILEID, 275 }; 276 277 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry, 278 struct nfs4_readdir_arg *readdir) 279 { 280 __be32 *start, *p; 281 282 if (cookie > 2) { 283 readdir->cookie = cookie; 284 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier)); 285 return; 286 } 287 288 readdir->cookie = 0; 289 memset(&readdir->verifier, 0, sizeof(readdir->verifier)); 290 if (cookie == 2) 291 return; 292 293 /* 294 * NFSv4 servers do not return entries for '.' and '..' 295 * Therefore, we fake these entries here. We let '.' 296 * have cookie 0 and '..' have cookie 1. Note that 297 * when talking to the server, we always send cookie 0 298 * instead of 1 or 2. 
299 */ 300 start = p = kmap_atomic(*readdir->pages); 301 302 if (cookie == 0) { 303 *p++ = xdr_one; /* next */ 304 *p++ = xdr_zero; /* cookie, first word */ 305 *p++ = xdr_one; /* cookie, second word */ 306 *p++ = xdr_one; /* entry len */ 307 memcpy(p, ".\0\0\0", 4); /* entry */ 308 p++; 309 *p++ = xdr_one; /* bitmap length */ 310 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */ 311 *p++ = htonl(8); /* attribute buffer length */ 312 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry))); 313 } 314 315 *p++ = xdr_one; /* next */ 316 *p++ = xdr_zero; /* cookie, first word */ 317 *p++ = xdr_two; /* cookie, second word */ 318 *p++ = xdr_two; /* entry len */ 319 memcpy(p, "..\0\0", 4); /* entry */ 320 p++; 321 *p++ = xdr_one; /* bitmap length */ 322 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */ 323 *p++ = htonl(8); /* attribute buffer length */ 324 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent))); 325 326 readdir->pgbase = (char *)p - (char *)start; 327 readdir->count -= readdir->pgbase; 328 kunmap_atomic(start); 329 } 330 331 static long nfs4_update_delay(long *timeout) 332 { 333 long ret; 334 if (!timeout) 335 return NFS4_POLL_RETRY_MAX; 336 if (*timeout <= 0) 337 *timeout = NFS4_POLL_RETRY_MIN; 338 if (*timeout > NFS4_POLL_RETRY_MAX) 339 *timeout = NFS4_POLL_RETRY_MAX; 340 ret = *timeout; 341 *timeout <<= 1; 342 return ret; 343 } 344 345 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) 346 { 347 int res = 0; 348 349 might_sleep(); 350 351 freezable_schedule_timeout_killable_unsafe( 352 nfs4_update_delay(timeout)); 353 if (fatal_signal_pending(current)) 354 res = -ERESTARTSYS; 355 return res; 356 } 357 358 /* This is the error handling routine for processes that are allowed 359 * to sleep. 360 */ 361 static int nfs4_do_handle_exception(struct nfs_server *server, 362 int errorcode, struct nfs4_exception *exception) 363 { 364 struct nfs_client *clp = server->nfs_client; 365 struct nfs4_state *state = exception->state; 366 struct inode *inode = exception->inode; 367 int ret = errorcode; 368 369 exception->delay = 0; 370 exception->recovering = 0; 371 exception->retry = 0; 372 switch(errorcode) { 373 case 0: 374 return 0; 375 case -NFS4ERR_OPENMODE: 376 case -NFS4ERR_DELEG_REVOKED: 377 case -NFS4ERR_ADMIN_REVOKED: 378 case -NFS4ERR_BAD_STATEID: 379 if (inode && nfs_async_inode_return_delegation(inode, 380 NULL) == 0) 381 goto wait_on_recovery; 382 if (state == NULL) 383 break; 384 ret = nfs4_schedule_stateid_recovery(server, state); 385 if (ret < 0) 386 break; 387 goto wait_on_recovery; 388 case -NFS4ERR_EXPIRED: 389 if (state != NULL) { 390 ret = nfs4_schedule_stateid_recovery(server, state); 391 if (ret < 0) 392 break; 393 } 394 case -NFS4ERR_STALE_STATEID: 395 case -NFS4ERR_STALE_CLIENTID: 396 nfs4_schedule_lease_recovery(clp); 397 goto wait_on_recovery; 398 case -NFS4ERR_MOVED: 399 ret = nfs4_schedule_migration_recovery(server); 400 if (ret < 0) 401 break; 402 goto wait_on_recovery; 403 case -NFS4ERR_LEASE_MOVED: 404 nfs4_schedule_lease_moved_recovery(clp); 405 goto wait_on_recovery; 406 #if defined(CONFIG_NFS_V4_1) 407 case -NFS4ERR_BADSESSION: 408 case -NFS4ERR_BADSLOT: 409 case -NFS4ERR_BAD_HIGH_SLOT: 410 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 411 case -NFS4ERR_DEADSESSION: 412 case -NFS4ERR_SEQ_FALSE_RETRY: 413 case -NFS4ERR_SEQ_MISORDERED: 414 dprintk("%s ERROR: %d Reset session\n", __func__, 415 errorcode); 416 nfs4_schedule_session_recovery(clp->cl_session, errorcode); 417 goto wait_on_recovery; 418 #endif /* defined(CONFIG_NFS_V4_1) */ 419 case 
-NFS4ERR_FILE_OPEN: 420 if (exception->timeout > HZ) { 421 /* We have retried a decent amount, time to 422 * fail 423 */ 424 ret = -EBUSY; 425 break; 426 } 427 case -NFS4ERR_DELAY: 428 nfs_inc_server_stats(server, NFSIOS_DELAY); 429 case -NFS4ERR_GRACE: 430 exception->delay = 1; 431 return 0; 432 433 case -NFS4ERR_RETRY_UNCACHED_REP: 434 case -NFS4ERR_OLD_STATEID: 435 exception->retry = 1; 436 break; 437 case -NFS4ERR_BADOWNER: 438 /* The following works around a Linux server bug! */ 439 case -NFS4ERR_BADNAME: 440 if (server->caps & NFS_CAP_UIDGID_NOMAP) { 441 server->caps &= ~NFS_CAP_UIDGID_NOMAP; 442 exception->retry = 1; 443 printk(KERN_WARNING "NFS: v4 server %s " 444 "does not accept raw " 445 "uid/gids. " 446 "Reenabling the idmapper.\n", 447 server->nfs_client->cl_hostname); 448 } 449 } 450 /* We failed to handle the error */ 451 return nfs4_map_errors(ret); 452 wait_on_recovery: 453 exception->recovering = 1; 454 return 0; 455 } 456 457 /* This is the error handling routine for processes that are allowed 458 * to sleep. 459 */ 460 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) 461 { 462 struct nfs_client *clp = server->nfs_client; 463 int ret; 464 465 ret = nfs4_do_handle_exception(server, errorcode, exception); 466 if (exception->delay) { 467 ret = nfs4_delay(server->client, &exception->timeout); 468 goto out_retry; 469 } 470 if (exception->recovering) { 471 ret = nfs4_wait_clnt_recover(clp); 472 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 473 return -EIO; 474 goto out_retry; 475 } 476 return ret; 477 out_retry: 478 if (ret == 0) 479 exception->retry = 1; 480 return ret; 481 } 482 483 static int 484 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server, 485 int errorcode, struct nfs4_exception *exception) 486 { 487 struct nfs_client *clp = server->nfs_client; 488 int ret; 489 490 ret = nfs4_do_handle_exception(server, errorcode, exception); 491 if (exception->delay) { 492 rpc_delay(task, nfs4_update_delay(&exception->timeout)); 493 goto out_retry; 494 } 495 if (exception->recovering) { 496 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 497 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 498 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 499 goto out_retry; 500 } 501 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 502 ret = -EIO; 503 return ret; 504 out_retry: 505 if (ret == 0) 506 exception->retry = 1; 507 return ret; 508 } 509 510 static int 511 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server, 512 struct nfs4_state *state, long *timeout) 513 { 514 struct nfs4_exception exception = { 515 .state = state, 516 }; 517 518 if (task->tk_status >= 0) 519 return 0; 520 if (timeout) 521 exception.timeout = *timeout; 522 task->tk_status = nfs4_async_handle_exception(task, server, 523 task->tk_status, 524 &exception); 525 if (exception.delay && timeout) 526 *timeout = exception.timeout; 527 if (exception.retry) 528 return -EAGAIN; 529 return 0; 530 } 531 532 /* 533 * Return 'true' if 'clp' is using an rpc_client that is integrity protected 534 * or 'false' otherwise. 
535 */ 536 static bool _nfs4_is_integrity_protected(struct nfs_client *clp) 537 { 538 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor; 539 540 if (flavor == RPC_AUTH_GSS_KRB5I || 541 flavor == RPC_AUTH_GSS_KRB5P) 542 return true; 543 544 return false; 545 } 546 547 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp) 548 { 549 spin_lock(&clp->cl_lock); 550 if (time_before(clp->cl_last_renewal,timestamp)) 551 clp->cl_last_renewal = timestamp; 552 spin_unlock(&clp->cl_lock); 553 } 554 555 static void renew_lease(const struct nfs_server *server, unsigned long timestamp) 556 { 557 struct nfs_client *clp = server->nfs_client; 558 559 if (!nfs4_has_session(clp)) 560 do_renew_lease(clp, timestamp); 561 } 562 563 struct nfs4_call_sync_data { 564 const struct nfs_server *seq_server; 565 struct nfs4_sequence_args *seq_args; 566 struct nfs4_sequence_res *seq_res; 567 }; 568 569 void nfs4_init_sequence(struct nfs4_sequence_args *args, 570 struct nfs4_sequence_res *res, int cache_reply) 571 { 572 args->sa_slot = NULL; 573 args->sa_cache_this = cache_reply; 574 args->sa_privileged = 0; 575 576 res->sr_slot = NULL; 577 } 578 579 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args) 580 { 581 args->sa_privileged = 1; 582 } 583 584 int nfs40_setup_sequence(struct nfs4_slot_table *tbl, 585 struct nfs4_sequence_args *args, 586 struct nfs4_sequence_res *res, 587 struct rpc_task *task) 588 { 589 struct nfs4_slot *slot; 590 591 /* slot already allocated? */ 592 if (res->sr_slot != NULL) 593 goto out_start; 594 595 spin_lock(&tbl->slot_tbl_lock); 596 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) 597 goto out_sleep; 598 599 slot = nfs4_alloc_slot(tbl); 600 if (IS_ERR(slot)) { 601 if (slot == ERR_PTR(-ENOMEM)) 602 task->tk_timeout = HZ >> 2; 603 goto out_sleep; 604 } 605 spin_unlock(&tbl->slot_tbl_lock); 606 607 args->sa_slot = slot; 608 res->sr_slot = slot; 609 610 out_start: 611 rpc_call_start(task); 612 return 0; 613 614 out_sleep: 615 if (args->sa_privileged) 616 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, 617 NULL, RPC_PRIORITY_PRIVILEGED); 618 else 619 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 620 spin_unlock(&tbl->slot_tbl_lock); 621 return -EAGAIN; 622 } 623 EXPORT_SYMBOL_GPL(nfs40_setup_sequence); 624 625 static int nfs40_sequence_done(struct rpc_task *task, 626 struct nfs4_sequence_res *res) 627 { 628 struct nfs4_slot *slot = res->sr_slot; 629 struct nfs4_slot_table *tbl; 630 631 if (slot == NULL) 632 goto out; 633 634 tbl = slot->table; 635 spin_lock(&tbl->slot_tbl_lock); 636 if (!nfs41_wake_and_assign_slot(tbl, slot)) 637 nfs4_free_slot(tbl, slot); 638 spin_unlock(&tbl->slot_tbl_lock); 639 640 res->sr_slot = NULL; 641 out: 642 return 1; 643 } 644 645 #if defined(CONFIG_NFS_V4_1) 646 647 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) 648 { 649 struct nfs4_session *session; 650 struct nfs4_slot_table *tbl; 651 struct nfs4_slot *slot = res->sr_slot; 652 bool send_new_highest_used_slotid = false; 653 654 tbl = slot->table; 655 session = tbl->session; 656 657 spin_lock(&tbl->slot_tbl_lock); 658 /* Be nice to the server: try to ensure that the last transmitted 659 * value for highest_user_slotid <= target_highest_slotid 660 */ 661 if (tbl->highest_used_slotid > tbl->target_highest_slotid) 662 send_new_highest_used_slotid = true; 663 664 if (nfs41_wake_and_assign_slot(tbl, slot)) { 665 send_new_highest_used_slotid = false; 666 goto out_unlock; 667 } 668 nfs4_free_slot(tbl, slot); 669 670 if 
(tbl->highest_used_slotid != NFS4_NO_SLOT) 671 send_new_highest_used_slotid = false; 672 out_unlock: 673 spin_unlock(&tbl->slot_tbl_lock); 674 res->sr_slot = NULL; 675 if (send_new_highest_used_slotid) 676 nfs41_notify_server(session->clp); 677 } 678 679 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 680 { 681 struct nfs4_session *session; 682 struct nfs4_slot *slot = res->sr_slot; 683 struct nfs_client *clp; 684 bool interrupted = false; 685 int ret = 1; 686 687 if (slot == NULL) 688 goto out_noaction; 689 /* don't increment the sequence number if the task wasn't sent */ 690 if (!RPC_WAS_SENT(task)) 691 goto out; 692 693 session = slot->table->session; 694 695 if (slot->interrupted) { 696 slot->interrupted = 0; 697 interrupted = true; 698 } 699 700 trace_nfs4_sequence_done(session, res); 701 /* Check the SEQUENCE operation status */ 702 switch (res->sr_status) { 703 case 0: 704 /* Update the slot's sequence and clientid lease timer */ 705 ++slot->seq_nr; 706 clp = session->clp; 707 do_renew_lease(clp, res->sr_timestamp); 708 /* Check sequence flags */ 709 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 710 nfs41_update_target_slotid(slot->table, slot, res); 711 break; 712 case 1: 713 /* 714 * sr_status remains 1 if an RPC level error occurred. 715 * The server may or may not have processed the sequence 716 * operation.. 717 * Mark the slot as having hosted an interrupted RPC call. 718 */ 719 slot->interrupted = 1; 720 goto out; 721 case -NFS4ERR_DELAY: 722 /* The server detected a resend of the RPC call and 723 * returned NFS4ERR_DELAY as per Section 2.10.6.2 724 * of RFC5661. 725 */ 726 dprintk("%s: slot=%u seq=%u: Operation in progress\n", 727 __func__, 728 slot->slot_nr, 729 slot->seq_nr); 730 goto out_retry; 731 case -NFS4ERR_BADSLOT: 732 /* 733 * The slot id we used was probably retired. Try again 734 * using a different slot id. 735 */ 736 goto retry_nowait; 737 case -NFS4ERR_SEQ_MISORDERED: 738 /* 739 * Was the last operation on this sequence interrupted? 740 * If so, retry after bumping the sequence number. 741 */ 742 if (interrupted) { 743 ++slot->seq_nr; 744 goto retry_nowait; 745 } 746 /* 747 * Could this slot have been previously retired? 748 * If so, then the server may be expecting seq_nr = 1! 749 */ 750 if (slot->seq_nr != 1) { 751 slot->seq_nr = 1; 752 goto retry_nowait; 753 } 754 break; 755 case -NFS4ERR_SEQ_FALSE_RETRY: 756 ++slot->seq_nr; 757 goto retry_nowait; 758 default: 759 /* Just update the slot sequence no. */ 760 ++slot->seq_nr; 761 } 762 out: 763 /* The session may be reset by one of the error handlers. 
*/ 764 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status); 765 nfs41_sequence_free_slot(res); 766 out_noaction: 767 return ret; 768 retry_nowait: 769 if (rpc_restart_call_prepare(task)) { 770 task->tk_status = 0; 771 ret = 0; 772 } 773 goto out; 774 out_retry: 775 if (!rpc_restart_call(task)) 776 goto out; 777 rpc_delay(task, NFS4_POLL_RETRY_MAX); 778 return 0; 779 } 780 EXPORT_SYMBOL_GPL(nfs41_sequence_done); 781 782 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 783 { 784 if (res->sr_slot == NULL) 785 return 1; 786 if (!res->sr_slot->table->session) 787 return nfs40_sequence_done(task, res); 788 return nfs41_sequence_done(task, res); 789 } 790 EXPORT_SYMBOL_GPL(nfs4_sequence_done); 791 792 int nfs41_setup_sequence(struct nfs4_session *session, 793 struct nfs4_sequence_args *args, 794 struct nfs4_sequence_res *res, 795 struct rpc_task *task) 796 { 797 struct nfs4_slot *slot; 798 struct nfs4_slot_table *tbl; 799 800 dprintk("--> %s\n", __func__); 801 /* slot already allocated? */ 802 if (res->sr_slot != NULL) 803 goto out_success; 804 805 tbl = &session->fc_slot_table; 806 807 task->tk_timeout = 0; 808 809 spin_lock(&tbl->slot_tbl_lock); 810 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) && 811 !args->sa_privileged) { 812 /* The state manager will wait until the slot table is empty */ 813 dprintk("%s session is draining\n", __func__); 814 goto out_sleep; 815 } 816 817 slot = nfs4_alloc_slot(tbl); 818 if (IS_ERR(slot)) { 819 /* If out of memory, try again in 1/4 second */ 820 if (slot == ERR_PTR(-ENOMEM)) 821 task->tk_timeout = HZ >> 2; 822 dprintk("<-- %s: no free slots\n", __func__); 823 goto out_sleep; 824 } 825 spin_unlock(&tbl->slot_tbl_lock); 826 827 args->sa_slot = slot; 828 829 dprintk("<-- %s slotid=%u seqid=%u\n", __func__, 830 slot->slot_nr, slot->seq_nr); 831 832 res->sr_slot = slot; 833 res->sr_timestamp = jiffies; 834 res->sr_status_flags = 0; 835 /* 836 * sr_status is only set in decode_sequence, and so will remain 837 * set to 1 if an rpc level failure occurs. 838 */ 839 res->sr_status = 1; 840 trace_nfs4_setup_sequence(session, args); 841 out_success: 842 rpc_call_start(task); 843 return 0; 844 out_sleep: 845 /* Privileged tasks are queued with top priority */ 846 if (args->sa_privileged) 847 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, 848 NULL, RPC_PRIORITY_PRIVILEGED); 849 else 850 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 851 spin_unlock(&tbl->slot_tbl_lock); 852 return -EAGAIN; 853 } 854 EXPORT_SYMBOL_GPL(nfs41_setup_sequence); 855 856 static int nfs4_setup_sequence(const struct nfs_server *server, 857 struct nfs4_sequence_args *args, 858 struct nfs4_sequence_res *res, 859 struct rpc_task *task) 860 { 861 struct nfs4_session *session = nfs4_get_session(server); 862 int ret = 0; 863 864 if (!session) 865 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 866 args, res, task); 867 868 dprintk("--> %s clp %p session %p sr_slot %u\n", 869 __func__, session->clp, session, res->sr_slot ? 
870 res->sr_slot->slot_nr : NFS4_NO_SLOT); 871 872 ret = nfs41_setup_sequence(session, args, res, task); 873 874 dprintk("<-- %s status=%d\n", __func__, ret); 875 return ret; 876 } 877 878 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) 879 { 880 struct nfs4_call_sync_data *data = calldata; 881 struct nfs4_session *session = nfs4_get_session(data->seq_server); 882 883 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); 884 885 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task); 886 } 887 888 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) 889 { 890 struct nfs4_call_sync_data *data = calldata; 891 892 nfs41_sequence_done(task, data->seq_res); 893 } 894 895 static const struct rpc_call_ops nfs41_call_sync_ops = { 896 .rpc_call_prepare = nfs41_call_sync_prepare, 897 .rpc_call_done = nfs41_call_sync_done, 898 }; 899 900 #else /* !CONFIG_NFS_V4_1 */ 901 902 static int nfs4_setup_sequence(const struct nfs_server *server, 903 struct nfs4_sequence_args *args, 904 struct nfs4_sequence_res *res, 905 struct rpc_task *task) 906 { 907 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 908 args, res, task); 909 } 910 911 int nfs4_sequence_done(struct rpc_task *task, 912 struct nfs4_sequence_res *res) 913 { 914 return nfs40_sequence_done(task, res); 915 } 916 EXPORT_SYMBOL_GPL(nfs4_sequence_done); 917 918 #endif /* !CONFIG_NFS_V4_1 */ 919 920 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata) 921 { 922 struct nfs4_call_sync_data *data = calldata; 923 nfs4_setup_sequence(data->seq_server, 924 data->seq_args, data->seq_res, task); 925 } 926 927 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata) 928 { 929 struct nfs4_call_sync_data *data = calldata; 930 nfs4_sequence_done(task, data->seq_res); 931 } 932 933 static const struct rpc_call_ops nfs40_call_sync_ops = { 934 .rpc_call_prepare = nfs40_call_sync_prepare, 935 .rpc_call_done = nfs40_call_sync_done, 936 }; 937 938 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, 939 struct nfs_server *server, 940 struct rpc_message *msg, 941 struct nfs4_sequence_args *args, 942 struct nfs4_sequence_res *res) 943 { 944 int ret; 945 struct rpc_task *task; 946 struct nfs_client *clp = server->nfs_client; 947 struct nfs4_call_sync_data data = { 948 .seq_server = server, 949 .seq_args = args, 950 .seq_res = res, 951 }; 952 struct rpc_task_setup task_setup = { 953 .rpc_client = clnt, 954 .rpc_message = msg, 955 .callback_ops = clp->cl_mvops->call_sync_ops, 956 .callback_data = &data 957 }; 958 959 task = rpc_run_task(&task_setup); 960 if (IS_ERR(task)) 961 ret = PTR_ERR(task); 962 else { 963 ret = task->tk_status; 964 rpc_put_task(task); 965 } 966 return ret; 967 } 968 969 int nfs4_call_sync(struct rpc_clnt *clnt, 970 struct nfs_server *server, 971 struct rpc_message *msg, 972 struct nfs4_sequence_args *args, 973 struct nfs4_sequence_res *res, 974 int cache_reply) 975 { 976 nfs4_init_sequence(args, res, cache_reply); 977 return nfs4_call_sync_sequence(clnt, server, msg, args, res); 978 } 979 980 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) 981 { 982 struct nfs_inode *nfsi = NFS_I(dir); 983 984 spin_lock(&dir->i_lock); 985 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 986 if (!cinfo->atomic || cinfo->before != dir->i_version) 987 nfs_force_lookup_revalidate(dir); 988 dir->i_version = cinfo->after; 989 nfsi->attr_gencount = nfs_inc_attr_generation_counter(); 990 
nfs_fscache_invalidate(dir); 991 spin_unlock(&dir->i_lock); 992 } 993 994 struct nfs4_opendata { 995 struct kref kref; 996 struct nfs_openargs o_arg; 997 struct nfs_openres o_res; 998 struct nfs_open_confirmargs c_arg; 999 struct nfs_open_confirmres c_res; 1000 struct nfs4_string owner_name; 1001 struct nfs4_string group_name; 1002 struct nfs4_label *a_label; 1003 struct nfs_fattr f_attr; 1004 struct nfs4_label *f_label; 1005 struct dentry *dir; 1006 struct dentry *dentry; 1007 struct nfs4_state_owner *owner; 1008 struct nfs4_state *state; 1009 struct iattr attrs; 1010 unsigned long timestamp; 1011 unsigned int rpc_done : 1; 1012 unsigned int file_created : 1; 1013 unsigned int is_recover : 1; 1014 int rpc_status; 1015 int cancelled; 1016 }; 1017 1018 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server, 1019 int err, struct nfs4_exception *exception) 1020 { 1021 if (err != -EINVAL) 1022 return false; 1023 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) 1024 return false; 1025 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1; 1026 exception->retry = 1; 1027 return true; 1028 } 1029 1030 static u32 1031 nfs4_map_atomic_open_share(struct nfs_server *server, 1032 fmode_t fmode, int openflags) 1033 { 1034 u32 res = 0; 1035 1036 switch (fmode & (FMODE_READ | FMODE_WRITE)) { 1037 case FMODE_READ: 1038 res = NFS4_SHARE_ACCESS_READ; 1039 break; 1040 case FMODE_WRITE: 1041 res = NFS4_SHARE_ACCESS_WRITE; 1042 break; 1043 case FMODE_READ|FMODE_WRITE: 1044 res = NFS4_SHARE_ACCESS_BOTH; 1045 } 1046 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) 1047 goto out; 1048 /* Want no delegation if we're using O_DIRECT */ 1049 if (openflags & O_DIRECT) 1050 res |= NFS4_SHARE_WANT_NO_DELEG; 1051 out: 1052 return res; 1053 } 1054 1055 static enum open_claim_type4 1056 nfs4_map_atomic_open_claim(struct nfs_server *server, 1057 enum open_claim_type4 claim) 1058 { 1059 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1) 1060 return claim; 1061 switch (claim) { 1062 default: 1063 return claim; 1064 case NFS4_OPEN_CLAIM_FH: 1065 return NFS4_OPEN_CLAIM_NULL; 1066 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1067 return NFS4_OPEN_CLAIM_DELEGATE_CUR; 1068 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1069 return NFS4_OPEN_CLAIM_DELEGATE_PREV; 1070 } 1071 } 1072 1073 static void nfs4_init_opendata_res(struct nfs4_opendata *p) 1074 { 1075 p->o_res.f_attr = &p->f_attr; 1076 p->o_res.f_label = p->f_label; 1077 p->o_res.seqid = p->o_arg.seqid; 1078 p->c_res.seqid = p->c_arg.seqid; 1079 p->o_res.server = p->o_arg.server; 1080 p->o_res.access_request = p->o_arg.access; 1081 nfs_fattr_init(&p->f_attr); 1082 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name); 1083 } 1084 1085 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, 1086 struct nfs4_state_owner *sp, fmode_t fmode, int flags, 1087 const struct iattr *attrs, 1088 struct nfs4_label *label, 1089 enum open_claim_type4 claim, 1090 gfp_t gfp_mask) 1091 { 1092 struct dentry *parent = dget_parent(dentry); 1093 struct inode *dir = d_inode(parent); 1094 struct nfs_server *server = NFS_SERVER(dir); 1095 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 1096 struct nfs4_opendata *p; 1097 1098 p = kzalloc(sizeof(*p), gfp_mask); 1099 if (p == NULL) 1100 goto err; 1101 1102 p->f_label = nfs4_label_alloc(server, gfp_mask); 1103 if (IS_ERR(p->f_label)) 1104 goto err_free_p; 1105 1106 p->a_label = nfs4_label_alloc(server, gfp_mask); 1107 if (IS_ERR(p->a_label)) 1108 goto err_free_f; 1109 1110 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 1111 
p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); 1112 if (IS_ERR(p->o_arg.seqid)) 1113 goto err_free_label; 1114 nfs_sb_active(dentry->d_sb); 1115 p->dentry = dget(dentry); 1116 p->dir = parent; 1117 p->owner = sp; 1118 atomic_inc(&sp->so_count); 1119 p->o_arg.open_flags = flags; 1120 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); 1121 p->o_arg.share_access = nfs4_map_atomic_open_share(server, 1122 fmode, flags); 1123 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS 1124 * will return permission denied for all bits until close */ 1125 if (!(flags & O_EXCL)) { 1126 /* ask server to check for all possible rights as results 1127 * are cached */ 1128 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY | 1129 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE; 1130 } 1131 p->o_arg.clientid = server->nfs_client->cl_clientid; 1132 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time); 1133 p->o_arg.id.uniquifier = sp->so_seqid.owner_id; 1134 p->o_arg.name = &dentry->d_name; 1135 p->o_arg.server = server; 1136 p->o_arg.bitmask = nfs4_bitmask(server, label); 1137 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 1138 p->o_arg.label = nfs4_label_copy(p->a_label, label); 1139 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim); 1140 switch (p->o_arg.claim) { 1141 case NFS4_OPEN_CLAIM_NULL: 1142 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 1143 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 1144 p->o_arg.fh = NFS_FH(dir); 1145 break; 1146 case NFS4_OPEN_CLAIM_PREVIOUS: 1147 case NFS4_OPEN_CLAIM_FH: 1148 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1149 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1150 p->o_arg.fh = NFS_FH(d_inode(dentry)); 1151 } 1152 if (attrs != NULL && attrs->ia_valid != 0) { 1153 __u32 verf[2]; 1154 1155 p->o_arg.u.attrs = &p->attrs; 1156 memcpy(&p->attrs, attrs, sizeof(p->attrs)); 1157 1158 verf[0] = jiffies; 1159 verf[1] = current->pid; 1160 memcpy(p->o_arg.u.verifier.data, verf, 1161 sizeof(p->o_arg.u.verifier.data)); 1162 } 1163 p->c_arg.fh = &p->o_res.fh; 1164 p->c_arg.stateid = &p->o_res.stateid; 1165 p->c_arg.seqid = p->o_arg.seqid; 1166 nfs4_init_opendata_res(p); 1167 kref_init(&p->kref); 1168 return p; 1169 1170 err_free_label: 1171 nfs4_label_free(p->a_label); 1172 err_free_f: 1173 nfs4_label_free(p->f_label); 1174 err_free_p: 1175 kfree(p); 1176 err: 1177 dput(parent); 1178 return NULL; 1179 } 1180 1181 static void nfs4_opendata_free(struct kref *kref) 1182 { 1183 struct nfs4_opendata *p = container_of(kref, 1184 struct nfs4_opendata, kref); 1185 struct super_block *sb = p->dentry->d_sb; 1186 1187 nfs_free_seqid(p->o_arg.seqid); 1188 if (p->state != NULL) 1189 nfs4_put_open_state(p->state); 1190 nfs4_put_state_owner(p->owner); 1191 1192 nfs4_label_free(p->a_label); 1193 nfs4_label_free(p->f_label); 1194 1195 dput(p->dir); 1196 dput(p->dentry); 1197 nfs_sb_deactive(sb); 1198 nfs_fattr_free_names(&p->f_attr); 1199 kfree(p->f_attr.mdsthreshold); 1200 kfree(p); 1201 } 1202 1203 static void nfs4_opendata_put(struct nfs4_opendata *p) 1204 { 1205 if (p != NULL) 1206 kref_put(&p->kref, nfs4_opendata_free); 1207 } 1208 1209 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) 1210 { 1211 int ret; 1212 1213 ret = rpc_wait_for_completion_task(task); 1214 return ret; 1215 } 1216 1217 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state, 1218 fmode_t fmode) 1219 { 1220 switch(fmode & (FMODE_READ|FMODE_WRITE)) { 1221 case FMODE_READ|FMODE_WRITE: 1222 return state->n_rdwr != 0; 1223 case FMODE_WRITE: 1224 return state->n_wronly != 0; 1225 case FMODE_READ: 1226 
return state->n_rdonly != 0; 1227 } 1228 WARN_ON_ONCE(1); 1229 return false; 1230 } 1231 1232 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode) 1233 { 1234 int ret = 0; 1235 1236 if (open_mode & (O_EXCL|O_TRUNC)) 1237 goto out; 1238 switch (mode & (FMODE_READ|FMODE_WRITE)) { 1239 case FMODE_READ: 1240 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 1241 && state->n_rdonly != 0; 1242 break; 1243 case FMODE_WRITE: 1244 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 1245 && state->n_wronly != 0; 1246 break; 1247 case FMODE_READ|FMODE_WRITE: 1248 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 1249 && state->n_rdwr != 0; 1250 } 1251 out: 1252 return ret; 1253 } 1254 1255 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode, 1256 enum open_claim_type4 claim) 1257 { 1258 if (delegation == NULL) 1259 return 0; 1260 if ((delegation->type & fmode) != fmode) 1261 return 0; 1262 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) 1263 return 0; 1264 switch (claim) { 1265 case NFS4_OPEN_CLAIM_NULL: 1266 case NFS4_OPEN_CLAIM_FH: 1267 break; 1268 case NFS4_OPEN_CLAIM_PREVIOUS: 1269 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) 1270 break; 1271 default: 1272 return 0; 1273 } 1274 nfs_mark_delegation_referenced(delegation); 1275 return 1; 1276 } 1277 1278 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode) 1279 { 1280 switch (fmode) { 1281 case FMODE_WRITE: 1282 state->n_wronly++; 1283 break; 1284 case FMODE_READ: 1285 state->n_rdonly++; 1286 break; 1287 case FMODE_READ|FMODE_WRITE: 1288 state->n_rdwr++; 1289 } 1290 nfs4_state_set_mode_locked(state, state->state | fmode); 1291 } 1292 1293 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state) 1294 { 1295 struct nfs_client *clp = state->owner->so_server->nfs_client; 1296 bool need_recover = false; 1297 1298 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly) 1299 need_recover = true; 1300 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly) 1301 need_recover = true; 1302 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr) 1303 need_recover = true; 1304 if (need_recover) 1305 nfs4_state_mark_reclaim_nograce(clp, state); 1306 } 1307 1308 static bool nfs_need_update_open_stateid(struct nfs4_state *state, 1309 nfs4_stateid *stateid) 1310 { 1311 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0) 1312 return true; 1313 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) { 1314 nfs_test_and_clear_all_open_stateid(state); 1315 return true; 1316 } 1317 if (nfs4_stateid_is_newer(stateid, &state->open_stateid)) 1318 return true; 1319 return false; 1320 } 1321 1322 static void nfs_resync_open_stateid_locked(struct nfs4_state *state) 1323 { 1324 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr)) 1325 return; 1326 if (state->n_wronly) 1327 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1328 if (state->n_rdonly) 1329 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1330 if (state->n_rdwr) 1331 set_bit(NFS_O_RDWR_STATE, &state->flags); 1332 set_bit(NFS_OPEN_STATE, &state->flags); 1333 } 1334 1335 static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1336 nfs4_stateid *arg_stateid, 1337 nfs4_stateid *stateid, fmode_t fmode) 1338 { 1339 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1340 switch (fmode & (FMODE_READ|FMODE_WRITE)) { 1341 case FMODE_WRITE: 1342 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1343 break; 1344 case 
FMODE_READ: 1345 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1346 break; 1347 case 0: 1348 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1349 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1350 clear_bit(NFS_OPEN_STATE, &state->flags); 1351 } 1352 if (stateid == NULL) 1353 return; 1354 /* Handle races with OPEN */ 1355 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || 1356 (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1357 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) { 1358 nfs_resync_open_stateid_locked(state); 1359 return; 1360 } 1361 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1362 nfs4_stateid_copy(&state->stateid, stateid); 1363 nfs4_stateid_copy(&state->open_stateid, stateid); 1364 } 1365 1366 static void nfs_clear_open_stateid(struct nfs4_state *state, 1367 nfs4_stateid *arg_stateid, 1368 nfs4_stateid *stateid, fmode_t fmode) 1369 { 1370 write_seqlock(&state->seqlock); 1371 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); 1372 write_sequnlock(&state->seqlock); 1373 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1374 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1375 } 1376 1377 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) 1378 { 1379 switch (fmode) { 1380 case FMODE_READ: 1381 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1382 break; 1383 case FMODE_WRITE: 1384 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1385 break; 1386 case FMODE_READ|FMODE_WRITE: 1387 set_bit(NFS_O_RDWR_STATE, &state->flags); 1388 } 1389 if (!nfs_need_update_open_stateid(state, stateid)) 1390 return; 1391 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1392 nfs4_stateid_copy(&state->stateid, stateid); 1393 nfs4_stateid_copy(&state->open_stateid, stateid); 1394 } 1395 1396 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode) 1397 { 1398 /* 1399 * Protect the call to nfs4_state_set_mode_locked and 1400 * serialise the stateid update 1401 */ 1402 spin_lock(&state->owner->so_lock); 1403 write_seqlock(&state->seqlock); 1404 if (deleg_stateid != NULL) { 1405 nfs4_stateid_copy(&state->stateid, deleg_stateid); 1406 set_bit(NFS_DELEGATED_STATE, &state->flags); 1407 } 1408 if (open_stateid != NULL) 1409 nfs_set_open_stateid_locked(state, open_stateid, fmode); 1410 write_sequnlock(&state->seqlock); 1411 update_open_stateflags(state, fmode); 1412 spin_unlock(&state->owner->so_lock); 1413 } 1414 1415 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode) 1416 { 1417 struct nfs_inode *nfsi = NFS_I(state->inode); 1418 struct nfs_delegation *deleg_cur; 1419 int ret = 0; 1420 1421 fmode &= (FMODE_READ|FMODE_WRITE); 1422 1423 rcu_read_lock(); 1424 deleg_cur = rcu_dereference(nfsi->delegation); 1425 if (deleg_cur == NULL) 1426 goto no_delegation; 1427 1428 spin_lock(&deleg_cur->lock); 1429 if (rcu_dereference(nfsi->delegation) != deleg_cur || 1430 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) || 1431 (deleg_cur->type & fmode) != fmode) 1432 goto no_delegation_unlock; 1433 1434 if (delegation == NULL) 1435 delegation = &deleg_cur->stateid; 1436 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation)) 1437 goto no_delegation_unlock; 1438 1439 nfs_mark_delegation_referenced(deleg_cur); 1440 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode); 1441 ret = 1; 1442 no_delegation_unlock: 
1443 spin_unlock(&deleg_cur->lock); 1444 no_delegation: 1445 rcu_read_unlock(); 1446 1447 if (!ret && open_stateid != NULL) { 1448 __update_open_stateid(state, open_stateid, NULL, fmode); 1449 ret = 1; 1450 } 1451 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1452 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1453 1454 return ret; 1455 } 1456 1457 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp, 1458 const nfs4_stateid *stateid) 1459 { 1460 struct nfs4_state *state = lsp->ls_state; 1461 bool ret = false; 1462 1463 spin_lock(&state->state_lock); 1464 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid)) 1465 goto out_noupdate; 1466 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid)) 1467 goto out_noupdate; 1468 nfs4_stateid_copy(&lsp->ls_stateid, stateid); 1469 ret = true; 1470 out_noupdate: 1471 spin_unlock(&state->state_lock); 1472 return ret; 1473 } 1474 1475 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1476 { 1477 struct nfs_delegation *delegation; 1478 1479 rcu_read_lock(); 1480 delegation = rcu_dereference(NFS_I(inode)->delegation); 1481 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1482 rcu_read_unlock(); 1483 return; 1484 } 1485 rcu_read_unlock(); 1486 nfs4_inode_return_delegation(inode); 1487 } 1488 1489 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1490 { 1491 struct nfs4_state *state = opendata->state; 1492 struct nfs_inode *nfsi = NFS_I(state->inode); 1493 struct nfs_delegation *delegation; 1494 int open_mode = opendata->o_arg.open_flags; 1495 fmode_t fmode = opendata->o_arg.fmode; 1496 enum open_claim_type4 claim = opendata->o_arg.claim; 1497 nfs4_stateid stateid; 1498 int ret = -EAGAIN; 1499 1500 for (;;) { 1501 spin_lock(&state->owner->so_lock); 1502 if (can_open_cached(state, fmode, open_mode)) { 1503 update_open_stateflags(state, fmode); 1504 spin_unlock(&state->owner->so_lock); 1505 goto out_return_state; 1506 } 1507 spin_unlock(&state->owner->so_lock); 1508 rcu_read_lock(); 1509 delegation = rcu_dereference(nfsi->delegation); 1510 if (!can_open_delegated(delegation, fmode, claim)) { 1511 rcu_read_unlock(); 1512 break; 1513 } 1514 /* Save the delegation */ 1515 nfs4_stateid_copy(&stateid, &delegation->stateid); 1516 rcu_read_unlock(); 1517 nfs_release_seqid(opendata->o_arg.seqid); 1518 if (!opendata->is_recover) { 1519 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 1520 if (ret != 0) 1521 goto out; 1522 } 1523 ret = -EAGAIN; 1524 1525 /* Try to update the stateid using the delegation */ 1526 if (update_open_stateid(state, NULL, &stateid, fmode)) 1527 goto out_return_state; 1528 } 1529 out: 1530 return ERR_PTR(ret); 1531 out_return_state: 1532 atomic_inc(&state->count); 1533 return state; 1534 } 1535 1536 static void 1537 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state) 1538 { 1539 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client; 1540 struct nfs_delegation *delegation; 1541 int delegation_flags = 0; 1542 1543 rcu_read_lock(); 1544 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 1545 if (delegation) 1546 delegation_flags = delegation->flags; 1547 rcu_read_unlock(); 1548 switch (data->o_arg.claim) { 1549 default: 1550 break; 1551 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 1552 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1553 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 1554 "returning a delegation for " 1555 "OPEN(CLAIM_DELEGATE_CUR)\n", 1556 clp->cl_hostname); 
1557 return; 1558 } 1559 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) 1560 nfs_inode_set_delegation(state->inode, 1561 data->owner->so_cred, 1562 &data->o_res); 1563 else 1564 nfs_inode_reclaim_delegation(state->inode, 1565 data->owner->so_cred, 1566 &data->o_res); 1567 } 1568 1569 /* 1570 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 1571 * and update the nfs4_state. 1572 */ 1573 static struct nfs4_state * 1574 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 1575 { 1576 struct inode *inode = data->state->inode; 1577 struct nfs4_state *state = data->state; 1578 int ret; 1579 1580 if (!data->rpc_done) { 1581 if (data->rpc_status) { 1582 ret = data->rpc_status; 1583 goto err; 1584 } 1585 /* cached opens have already been processed */ 1586 goto update; 1587 } 1588 1589 ret = nfs_refresh_inode(inode, &data->f_attr); 1590 if (ret) 1591 goto err; 1592 1593 if (data->o_res.delegation_type != 0) 1594 nfs4_opendata_check_deleg(data, state); 1595 update: 1596 update_open_stateid(state, &data->o_res.stateid, NULL, 1597 data->o_arg.fmode); 1598 atomic_inc(&state->count); 1599 1600 return state; 1601 err: 1602 return ERR_PTR(ret); 1603 1604 } 1605 1606 static struct nfs4_state * 1607 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1608 { 1609 struct inode *inode; 1610 struct nfs4_state *state = NULL; 1611 int ret; 1612 1613 if (!data->rpc_done) { 1614 state = nfs4_try_open_cached(data); 1615 trace_nfs4_cached_open(data->state); 1616 goto out; 1617 } 1618 1619 ret = -EAGAIN; 1620 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 1621 goto err; 1622 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label); 1623 ret = PTR_ERR(inode); 1624 if (IS_ERR(inode)) 1625 goto err; 1626 ret = -ENOMEM; 1627 state = nfs4_get_open_state(inode, data->owner); 1628 if (state == NULL) 1629 goto err_put_inode; 1630 if (data->o_res.delegation_type != 0) 1631 nfs4_opendata_check_deleg(data, state); 1632 update_open_stateid(state, &data->o_res.stateid, NULL, 1633 data->o_arg.fmode); 1634 iput(inode); 1635 out: 1636 nfs_release_seqid(data->o_arg.seqid); 1637 return state; 1638 err_put_inode: 1639 iput(inode); 1640 err: 1641 return ERR_PTR(ret); 1642 } 1643 1644 static struct nfs4_state * 1645 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1646 { 1647 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 1648 return _nfs4_opendata_reclaim_to_nfs4_state(data); 1649 return _nfs4_opendata_to_nfs4_state(data); 1650 } 1651 1652 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) 1653 { 1654 struct nfs_inode *nfsi = NFS_I(state->inode); 1655 struct nfs_open_context *ctx; 1656 1657 spin_lock(&state->inode->i_lock); 1658 list_for_each_entry(ctx, &nfsi->open_files, list) { 1659 if (ctx->state != state) 1660 continue; 1661 get_nfs_open_context(ctx); 1662 spin_unlock(&state->inode->i_lock); 1663 return ctx; 1664 } 1665 spin_unlock(&state->inode->i_lock); 1666 return ERR_PTR(-ENOENT); 1667 } 1668 1669 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 1670 struct nfs4_state *state, enum open_claim_type4 claim) 1671 { 1672 struct nfs4_opendata *opendata; 1673 1674 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 1675 NULL, NULL, claim, GFP_NOFS); 1676 if (opendata == NULL) 1677 return ERR_PTR(-ENOMEM); 1678 opendata->state = state; 1679 atomic_inc(&state->count); 1680 return opendata; 1681 } 1682 1683 static int nfs4_open_recover_helper(struct nfs4_opendata 
*opendata, 1684 fmode_t fmode) 1685 { 1686 struct nfs4_state *newstate; 1687 int ret; 1688 1689 if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) 1690 return 0; 1691 opendata->o_arg.open_flags = 0; 1692 opendata->o_arg.fmode = fmode; 1693 opendata->o_arg.share_access = nfs4_map_atomic_open_share( 1694 NFS_SB(opendata->dentry->d_sb), 1695 fmode, 0); 1696 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 1697 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 1698 nfs4_init_opendata_res(opendata); 1699 ret = _nfs4_recover_proc_open(opendata); 1700 if (ret != 0) 1701 return ret; 1702 newstate = nfs4_opendata_to_nfs4_state(opendata); 1703 if (IS_ERR(newstate)) 1704 return PTR_ERR(newstate); 1705 if (newstate != opendata->state) 1706 ret = -ESTALE; 1707 nfs4_close_state(newstate, fmode); 1708 return ret; 1709 } 1710 1711 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 1712 { 1713 int ret; 1714 1715 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */ 1716 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1717 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1718 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1719 /* memory barrier prior to reading state->n_* */ 1720 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1721 clear_bit(NFS_OPEN_STATE, &state->flags); 1722 smp_rmb(); 1723 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 1724 if (ret != 0) 1725 return ret; 1726 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); 1727 if (ret != 0) 1728 return ret; 1729 ret = nfs4_open_recover_helper(opendata, FMODE_READ); 1730 if (ret != 0) 1731 return ret; 1732 /* 1733 * We may have performed cached opens for all three recoveries. 1734 * Check if we need to update the current stateid. 1735 */ 1736 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 1737 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 1738 write_seqlock(&state->seqlock); 1739 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1740 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1741 write_sequnlock(&state->seqlock); 1742 } 1743 return 0; 1744 } 1745 1746 /* 1747 * OPEN_RECLAIM: 1748 * reclaim state on the server after a reboot. 
1749 */ 1750 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 1751 { 1752 struct nfs_delegation *delegation; 1753 struct nfs4_opendata *opendata; 1754 fmode_t delegation_type = 0; 1755 int status; 1756 1757 opendata = nfs4_open_recoverdata_alloc(ctx, state, 1758 NFS4_OPEN_CLAIM_PREVIOUS); 1759 if (IS_ERR(opendata)) 1760 return PTR_ERR(opendata); 1761 rcu_read_lock(); 1762 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 1763 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) 1764 delegation_type = delegation->type; 1765 rcu_read_unlock(); 1766 opendata->o_arg.u.delegation_type = delegation_type; 1767 status = nfs4_open_recover(opendata, state); 1768 nfs4_opendata_put(opendata); 1769 return status; 1770 } 1771 1772 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 1773 { 1774 struct nfs_server *server = NFS_SERVER(state->inode); 1775 struct nfs4_exception exception = { }; 1776 int err; 1777 do { 1778 err = _nfs4_do_open_reclaim(ctx, state); 1779 trace_nfs4_open_reclaim(ctx, 0, err); 1780 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 1781 continue; 1782 if (err != -NFS4ERR_DELAY) 1783 break; 1784 nfs4_handle_exception(server, err, &exception); 1785 } while (exception.retry); 1786 return err; 1787 } 1788 1789 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 1790 { 1791 struct nfs_open_context *ctx; 1792 int ret; 1793 1794 ctx = nfs4_state_find_open_context(state); 1795 if (IS_ERR(ctx)) 1796 return -EAGAIN; 1797 ret = nfs4_do_open_reclaim(ctx, state); 1798 put_nfs_open_context(ctx); 1799 return ret; 1800 } 1801 1802 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err) 1803 { 1804 switch (err) { 1805 default: 1806 printk(KERN_ERR "NFS: %s: unhandled error " 1807 "%d.\n", __func__, err); 1808 case 0: 1809 case -ENOENT: 1810 case -EAGAIN: 1811 case -ESTALE: 1812 break; 1813 case -NFS4ERR_BADSESSION: 1814 case -NFS4ERR_BADSLOT: 1815 case -NFS4ERR_BAD_HIGH_SLOT: 1816 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1817 case -NFS4ERR_DEADSESSION: 1818 set_bit(NFS_DELEGATED_STATE, &state->flags); 1819 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); 1820 return -EAGAIN; 1821 case -NFS4ERR_STALE_CLIENTID: 1822 case -NFS4ERR_STALE_STATEID: 1823 set_bit(NFS_DELEGATED_STATE, &state->flags); 1824 case -NFS4ERR_EXPIRED: 1825 /* Don't recall a delegation if it was lost */ 1826 nfs4_schedule_lease_recovery(server->nfs_client); 1827 return -EAGAIN; 1828 case -NFS4ERR_MOVED: 1829 nfs4_schedule_migration_recovery(server); 1830 return -EAGAIN; 1831 case -NFS4ERR_LEASE_MOVED: 1832 nfs4_schedule_lease_moved_recovery(server->nfs_client); 1833 return -EAGAIN; 1834 case -NFS4ERR_DELEG_REVOKED: 1835 case -NFS4ERR_ADMIN_REVOKED: 1836 case -NFS4ERR_BAD_STATEID: 1837 case -NFS4ERR_OPENMODE: 1838 nfs_inode_find_state_and_recover(state->inode, 1839 stateid); 1840 nfs4_schedule_stateid_recovery(server, state); 1841 return -EAGAIN; 1842 case -NFS4ERR_DELAY: 1843 case -NFS4ERR_GRACE: 1844 set_bit(NFS_DELEGATED_STATE, &state->flags); 1845 ssleep(1); 1846 return -EAGAIN; 1847 case -ENOMEM: 1848 case -NFS4ERR_DENIED: 1849 /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 1850 return 0; 1851 } 1852 return err; 1853 } 1854 1855 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, 1856 struct nfs4_state *state, const nfs4_stateid *stateid, 1857 fmode_t type) 
1858 { 1859 struct nfs_server *server = NFS_SERVER(state->inode); 1860 struct nfs4_opendata *opendata; 1861 int err = 0; 1862 1863 opendata = nfs4_open_recoverdata_alloc(ctx, state, 1864 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 1865 if (IS_ERR(opendata)) 1866 return PTR_ERR(opendata); 1867 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 1868 write_seqlock(&state->seqlock); 1869 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1870 write_sequnlock(&state->seqlock); 1871 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1872 switch (type & (FMODE_READ|FMODE_WRITE)) { 1873 case FMODE_READ|FMODE_WRITE: 1874 case FMODE_WRITE: 1875 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 1876 if (err) 1877 break; 1878 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 1879 if (err) 1880 break; 1881 case FMODE_READ: 1882 err = nfs4_open_recover_helper(opendata, FMODE_READ); 1883 } 1884 nfs4_opendata_put(opendata); 1885 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 1886 } 1887 1888 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 1889 { 1890 struct nfs4_opendata *data = calldata; 1891 1892 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl, 1893 &data->c_arg.seq_args, &data->c_res.seq_res, task); 1894 } 1895 1896 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 1897 { 1898 struct nfs4_opendata *data = calldata; 1899 1900 nfs40_sequence_done(task, &data->c_res.seq_res); 1901 1902 data->rpc_status = task->tk_status; 1903 if (data->rpc_status == 0) { 1904 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 1905 nfs_confirm_seqid(&data->owner->so_seqid, 0); 1906 renew_lease(data->o_res.server, data->timestamp); 1907 data->rpc_done = 1; 1908 } 1909 } 1910 1911 static void nfs4_open_confirm_release(void *calldata) 1912 { 1913 struct nfs4_opendata *data = calldata; 1914 struct nfs4_state *state = NULL; 1915 1916 /* If this request hasn't been cancelled, do nothing */ 1917 if (data->cancelled == 0) 1918 goto out_free; 1919 /* In case of error, no cleanup! 
*/ 1920 if (!data->rpc_done) 1921 goto out_free; 1922 state = nfs4_opendata_to_nfs4_state(data); 1923 if (!IS_ERR(state)) 1924 nfs4_close_state(state, data->o_arg.fmode); 1925 out_free: 1926 nfs4_opendata_put(data); 1927 } 1928 1929 static const struct rpc_call_ops nfs4_open_confirm_ops = { 1930 .rpc_call_prepare = nfs4_open_confirm_prepare, 1931 .rpc_call_done = nfs4_open_confirm_done, 1932 .rpc_release = nfs4_open_confirm_release, 1933 }; 1934 1935 /* 1936 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 1937 */ 1938 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 1939 { 1940 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 1941 struct rpc_task *task; 1942 struct rpc_message msg = { 1943 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 1944 .rpc_argp = &data->c_arg, 1945 .rpc_resp = &data->c_res, 1946 .rpc_cred = data->owner->so_cred, 1947 }; 1948 struct rpc_task_setup task_setup_data = { 1949 .rpc_client = server->client, 1950 .rpc_message = &msg, 1951 .callback_ops = &nfs4_open_confirm_ops, 1952 .callback_data = data, 1953 .workqueue = nfsiod_workqueue, 1954 .flags = RPC_TASK_ASYNC, 1955 }; 1956 int status; 1957 1958 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1); 1959 kref_get(&data->kref); 1960 data->rpc_done = 0; 1961 data->rpc_status = 0; 1962 data->timestamp = jiffies; 1963 if (data->is_recover) 1964 nfs4_set_sequence_privileged(&data->c_arg.seq_args); 1965 task = rpc_run_task(&task_setup_data); 1966 if (IS_ERR(task)) 1967 return PTR_ERR(task); 1968 status = nfs4_wait_for_completion_rpc_task(task); 1969 if (status != 0) { 1970 data->cancelled = 1; 1971 smp_wmb(); 1972 } else 1973 status = data->rpc_status; 1974 rpc_put_task(task); 1975 return status; 1976 } 1977 1978 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 1979 { 1980 struct nfs4_opendata *data = calldata; 1981 struct nfs4_state_owner *sp = data->owner; 1982 struct nfs_client *clp = sp->so_server->nfs_client; 1983 enum open_claim_type4 claim = data->o_arg.claim; 1984 1985 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 1986 goto out_wait; 1987 /* 1988 * Check if we still need to send an OPEN call, or if we can use 1989 * a delegation instead. 1990 */ 1991 if (data->state != NULL) { 1992 struct nfs_delegation *delegation; 1993 1994 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) 1995 goto out_no_action; 1996 rcu_read_lock(); 1997 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); 1998 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 1999 goto unlock_no_action; 2000 rcu_read_unlock(); 2001 } 2002 /* Update client id. 
*/ 2003 data->o_arg.clientid = clp->cl_clientid; 2004 switch (claim) { 2005 default: 2006 break; 2007 case NFS4_OPEN_CLAIM_PREVIOUS: 2008 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2009 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2010 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2011 case NFS4_OPEN_CLAIM_FH: 2012 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2013 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); 2014 } 2015 data->timestamp = jiffies; 2016 if (nfs4_setup_sequence(data->o_arg.server, 2017 &data->o_arg.seq_args, 2018 &data->o_res.seq_res, 2019 task) != 0) 2020 nfs_release_seqid(data->o_arg.seqid); 2021 2022 /* Set the create mode (note dependency on the session type) */ 2023 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2024 if (data->o_arg.open_flags & O_EXCL) { 2025 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2026 if (nfs4_has_persistent_session(clp)) 2027 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2028 else if (clp->cl_mvops->minor_version > 0) 2029 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2030 } 2031 return; 2032 unlock_no_action: 2033 trace_nfs4_cached_open(data->state); 2034 rcu_read_unlock(); 2035 out_no_action: 2036 task->tk_action = NULL; 2037 out_wait: 2038 nfs4_sequence_done(task, &data->o_res.seq_res); 2039 } 2040 2041 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2042 { 2043 struct nfs4_opendata *data = calldata; 2044 2045 data->rpc_status = task->tk_status; 2046 2047 if (!nfs4_sequence_done(task, &data->o_res.seq_res)) 2048 return; 2049 2050 if (task->tk_status == 0) { 2051 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2052 switch (data->o_res.f_attr->mode & S_IFMT) { 2053 case S_IFREG: 2054 break; 2055 case S_IFLNK: 2056 data->rpc_status = -ELOOP; 2057 break; 2058 case S_IFDIR: 2059 data->rpc_status = -EISDIR; 2060 break; 2061 default: 2062 data->rpc_status = -ENOTDIR; 2063 } 2064 } 2065 renew_lease(data->o_res.server, data->timestamp); 2066 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2067 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2068 } 2069 data->rpc_done = 1; 2070 } 2071 2072 static void nfs4_open_release(void *calldata) 2073 { 2074 struct nfs4_opendata *data = calldata; 2075 struct nfs4_state *state = NULL; 2076 2077 /* If this request hasn't been cancelled, do nothing */ 2078 if (data->cancelled == 0) 2079 goto out_free; 2080 /* In case of error, no cleanup! */ 2081 if (data->rpc_status != 0 || !data->rpc_done) 2082 goto out_free; 2083 /* In case we need an open_confirm, no cleanup! 
*/ 2084 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2085 goto out_free; 2086 state = nfs4_opendata_to_nfs4_state(data); 2087 if (!IS_ERR(state)) 2088 nfs4_close_state(state, data->o_arg.fmode); 2089 out_free: 2090 nfs4_opendata_put(data); 2091 } 2092 2093 static const struct rpc_call_ops nfs4_open_ops = { 2094 .rpc_call_prepare = nfs4_open_prepare, 2095 .rpc_call_done = nfs4_open_done, 2096 .rpc_release = nfs4_open_release, 2097 }; 2098 2099 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) 2100 { 2101 struct inode *dir = d_inode(data->dir); 2102 struct nfs_server *server = NFS_SERVER(dir); 2103 struct nfs_openargs *o_arg = &data->o_arg; 2104 struct nfs_openres *o_res = &data->o_res; 2105 struct rpc_task *task; 2106 struct rpc_message msg = { 2107 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2108 .rpc_argp = o_arg, 2109 .rpc_resp = o_res, 2110 .rpc_cred = data->owner->so_cred, 2111 }; 2112 struct rpc_task_setup task_setup_data = { 2113 .rpc_client = server->client, 2114 .rpc_message = &msg, 2115 .callback_ops = &nfs4_open_ops, 2116 .callback_data = data, 2117 .workqueue = nfsiod_workqueue, 2118 .flags = RPC_TASK_ASYNC, 2119 }; 2120 int status; 2121 2122 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); 2123 kref_get(&data->kref); 2124 data->rpc_done = 0; 2125 data->rpc_status = 0; 2126 data->cancelled = 0; 2127 data->is_recover = 0; 2128 if (isrecover) { 2129 nfs4_set_sequence_privileged(&o_arg->seq_args); 2130 data->is_recover = 1; 2131 } 2132 task = rpc_run_task(&task_setup_data); 2133 if (IS_ERR(task)) 2134 return PTR_ERR(task); 2135 status = nfs4_wait_for_completion_rpc_task(task); 2136 if (status != 0) { 2137 data->cancelled = 1; 2138 smp_wmb(); 2139 } else 2140 status = data->rpc_status; 2141 rpc_put_task(task); 2142 2143 return status; 2144 } 2145 2146 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2147 { 2148 struct inode *dir = d_inode(data->dir); 2149 struct nfs_openres *o_res = &data->o_res; 2150 int status; 2151 2152 status = nfs4_run_open_task(data, 1); 2153 if (status != 0 || !data->rpc_done) 2154 return status; 2155 2156 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2157 2158 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2159 status = _nfs4_proc_open_confirm(data); 2160 if (status != 0) 2161 return status; 2162 } 2163 2164 return status; 2165 } 2166 2167 /* 2168 * Additional permission checks in order to distinguish between an 2169 * open for read, and an open for execute. This works around the 2170 * fact that NFSv4 OPEN treats read and execute permissions as being 2171 * the same. 2172 * Note that in the non-execute case, we want to turn off permission 2173 * checking if we just created a new file (POSIX open() semantics). 2174 */ 2175 static int nfs4_opendata_access(struct rpc_cred *cred, 2176 struct nfs4_opendata *opendata, 2177 struct nfs4_state *state, fmode_t fmode, 2178 int openflags) 2179 { 2180 struct nfs_access_entry cache; 2181 u32 mask; 2182 2183 /* access call failed or for some reason the server doesn't 2184 * support any access modes -- defer access call until later */ 2185 if (opendata->o_res.access_supported == 0) 2186 return 0; 2187 2188 mask = 0; 2189 /* 2190 * Use openflags to check for exec, because fmode won't 2191 * always have FMODE_EXEC set when file open for exec. 
2192 */ 2193 if (openflags & __FMODE_EXEC) { 2194 /* ONLY check for exec rights */ 2195 mask = MAY_EXEC; 2196 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2197 mask = MAY_READ; 2198 2199 cache.cred = cred; 2200 cache.jiffies = jiffies; 2201 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2202 nfs_access_add_cache(state->inode, &cache); 2203 2204 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 2205 return 0; 2206 2207 /* even though OPEN succeeded, access is denied. Close the file */ 2208 nfs4_close_state(state, fmode); 2209 return -EACCES; 2210 } 2211 2212 /* 2213 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2214 */ 2215 static int _nfs4_proc_open(struct nfs4_opendata *data) 2216 { 2217 struct inode *dir = d_inode(data->dir); 2218 struct nfs_server *server = NFS_SERVER(dir); 2219 struct nfs_openargs *o_arg = &data->o_arg; 2220 struct nfs_openres *o_res = &data->o_res; 2221 int status; 2222 2223 status = nfs4_run_open_task(data, 0); 2224 if (!data->rpc_done) 2225 return status; 2226 if (status != 0) { 2227 if (status == -NFS4ERR_BADNAME && 2228 !(o_arg->open_flags & O_CREAT)) 2229 return -ENOENT; 2230 return status; 2231 } 2232 2233 nfs_fattr_map_and_free_names(server, &data->f_attr); 2234 2235 if (o_arg->open_flags & O_CREAT) { 2236 update_changeattr(dir, &o_res->cinfo); 2237 if (o_arg->open_flags & O_EXCL) 2238 data->file_created = 1; 2239 else if (o_res->cinfo.before != o_res->cinfo.after) 2240 data->file_created = 1; 2241 } 2242 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2243 server->caps &= ~NFS_CAP_POSIX_LOCK; 2244 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2245 status = _nfs4_proc_open_confirm(data); 2246 if (status != 0) 2247 return status; 2248 } 2249 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 2250 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label); 2251 return 0; 2252 } 2253 2254 static int nfs4_recover_expired_lease(struct nfs_server *server) 2255 { 2256 return nfs4_client_recover_expired_lease(server->nfs_client); 2257 } 2258 2259 /* 2260 * OPEN_EXPIRED: 2261 * reclaim state on the server after a network partition. 
2262 * Assumes caller holds the appropriate lock 2263 */ 2264 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2265 { 2266 struct nfs4_opendata *opendata; 2267 int ret; 2268 2269 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2270 NFS4_OPEN_CLAIM_FH); 2271 if (IS_ERR(opendata)) 2272 return PTR_ERR(opendata); 2273 ret = nfs4_open_recover(opendata, state); 2274 if (ret == -ESTALE) 2275 d_drop(ctx->dentry); 2276 nfs4_opendata_put(opendata); 2277 return ret; 2278 } 2279 2280 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2281 { 2282 struct nfs_server *server = NFS_SERVER(state->inode); 2283 struct nfs4_exception exception = { }; 2284 int err; 2285 2286 do { 2287 err = _nfs4_open_expired(ctx, state); 2288 trace_nfs4_open_expired(ctx, 0, err); 2289 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2290 continue; 2291 switch (err) { 2292 default: 2293 goto out; 2294 case -NFS4ERR_GRACE: 2295 case -NFS4ERR_DELAY: 2296 nfs4_handle_exception(server, err, &exception); 2297 err = 0; 2298 } 2299 } while (exception.retry); 2300 out: 2301 return err; 2302 } 2303 2304 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2305 { 2306 struct nfs_open_context *ctx; 2307 int ret; 2308 2309 ctx = nfs4_state_find_open_context(state); 2310 if (IS_ERR(ctx)) 2311 return -EAGAIN; 2312 ret = nfs4_do_open_expired(ctx, state); 2313 put_nfs_open_context(ctx); 2314 return ret; 2315 } 2316 2317 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state) 2318 { 2319 nfs_remove_bad_delegation(state->inode); 2320 write_seqlock(&state->seqlock); 2321 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2322 write_sequnlock(&state->seqlock); 2323 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2324 } 2325 2326 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2327 { 2328 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2329 nfs_finish_clear_delegation_stateid(state); 2330 } 2331 2332 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2333 { 2334 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2335 nfs40_clear_delegation_stateid(state); 2336 return nfs4_open_expired(sp, state); 2337 } 2338 2339 #if defined(CONFIG_NFS_V4_1) 2340 static void nfs41_check_delegation_stateid(struct nfs4_state *state) 2341 { 2342 struct nfs_server *server = NFS_SERVER(state->inode); 2343 nfs4_stateid stateid; 2344 struct nfs_delegation *delegation; 2345 struct rpc_cred *cred; 2346 int status; 2347 2348 /* Get the delegation credential for use by test/free_stateid */ 2349 rcu_read_lock(); 2350 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2351 if (delegation == NULL) { 2352 rcu_read_unlock(); 2353 return; 2354 } 2355 2356 nfs4_stateid_copy(&stateid, &delegation->stateid); 2357 cred = get_rpccred(delegation->cred); 2358 rcu_read_unlock(); 2359 status = nfs41_test_stateid(server, &stateid, cred); 2360 trace_nfs4_test_delegation_stateid(state, NULL, status); 2361 2362 if (status != NFS_OK) { 2363 /* Free the stateid unless the server explicitly 2364 * informs us the stateid is unrecognized. 
*/ 2365 if (status != -NFS4ERR_BAD_STATEID) 2366 nfs41_free_stateid(server, &stateid, cred); 2367 nfs_finish_clear_delegation_stateid(state); 2368 } 2369 2370 put_rpccred(cred); 2371 } 2372 2373 /** 2374 * nfs41_check_open_stateid - possibly free an open stateid 2375 * 2376 * @state: NFSv4 state for an inode 2377 * 2378 * Returns NFS_OK if recovery for this stateid is now finished. 2379 * Otherwise a negative NFS4ERR value is returned. 2380 */ 2381 static int nfs41_check_open_stateid(struct nfs4_state *state) 2382 { 2383 struct nfs_server *server = NFS_SERVER(state->inode); 2384 nfs4_stateid *stateid = &state->open_stateid; 2385 struct rpc_cred *cred = state->owner->so_cred; 2386 int status; 2387 2388 /* If a state reset has been done, test_stateid is unneeded */ 2389 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) && 2390 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) && 2391 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0)) 2392 return -NFS4ERR_BAD_STATEID; 2393 2394 status = nfs41_test_stateid(server, stateid, cred); 2395 trace_nfs4_test_open_stateid(state, NULL, status); 2396 if (status != NFS_OK) { 2397 /* Free the stateid unless the server explicitly 2398 * informs us the stateid is unrecognized. */ 2399 if (status != -NFS4ERR_BAD_STATEID) 2400 nfs41_free_stateid(server, stateid, cred); 2401 2402 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2403 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2404 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2405 clear_bit(NFS_OPEN_STATE, &state->flags); 2406 } 2407 return status; 2408 } 2409 2410 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2411 { 2412 int status; 2413 2414 nfs41_check_delegation_stateid(state); 2415 status = nfs41_check_open_stateid(state); 2416 if (status != NFS_OK) 2417 status = nfs4_open_expired(sp, state); 2418 return status; 2419 } 2420 #endif 2421 2422 /* 2423 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 2424 * fields corresponding to attributes that were used to store the verifier. 2425 * Make sure we clobber those fields in the later setattr call 2426 */ 2427 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 2428 struct iattr *sattr, struct nfs4_label **label) 2429 { 2430 const u32 *attrset = opendata->o_res.attrset; 2431 2432 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 2433 !(sattr->ia_valid & ATTR_ATIME_SET)) 2434 sattr->ia_valid |= ATTR_ATIME; 2435 2436 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 2437 !(sattr->ia_valid & ATTR_MTIME_SET)) 2438 sattr->ia_valid |= ATTR_MTIME; 2439 2440 /* Except MODE, it seems harmless of setting twice. 
*/ 2441 if ((attrset[1] & FATTR4_WORD1_MODE)) 2442 sattr->ia_valid &= ~ATTR_MODE; 2443 2444 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) 2445 *label = NULL; 2446 } 2447 2448 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 2449 fmode_t fmode, 2450 int flags, 2451 struct nfs_open_context *ctx) 2452 { 2453 struct nfs4_state_owner *sp = opendata->owner; 2454 struct nfs_server *server = sp->so_server; 2455 struct dentry *dentry; 2456 struct nfs4_state *state; 2457 unsigned int seq; 2458 int ret; 2459 2460 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 2461 2462 ret = _nfs4_proc_open(opendata); 2463 if (ret != 0) 2464 goto out; 2465 2466 state = nfs4_opendata_to_nfs4_state(opendata); 2467 ret = PTR_ERR(state); 2468 if (IS_ERR(state)) 2469 goto out; 2470 if (server->caps & NFS_CAP_POSIX_LOCK) 2471 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 2472 2473 dentry = opendata->dentry; 2474 if (d_really_is_negative(dentry)) { 2475 struct dentry *alias; 2476 d_drop(dentry); 2477 alias = d_exact_alias(dentry, state->inode); 2478 if (!alias) 2479 alias = d_splice_alias(igrab(state->inode), dentry); 2480 /* d_splice_alias() can't fail here - it's a non-directory */ 2481 if (alias) { 2482 dput(ctx->dentry); 2483 ctx->dentry = dentry = alias; 2484 } 2485 nfs_set_verifier(dentry, 2486 nfs_save_change_attribute(d_inode(opendata->dir))); 2487 } 2488 2489 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags); 2490 if (ret != 0) 2491 goto out; 2492 2493 ctx->state = state; 2494 if (d_inode(dentry) == state->inode) { 2495 nfs_inode_attach_open_context(ctx); 2496 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 2497 nfs4_schedule_stateid_recovery(server, state); 2498 } 2499 out: 2500 return ret; 2501 } 2502 2503 /* 2504 * Returns a referenced nfs4_state 2505 */ 2506 static int _nfs4_do_open(struct inode *dir, 2507 struct nfs_open_context *ctx, 2508 int flags, 2509 struct iattr *sattr, 2510 struct nfs4_label *label, 2511 int *opened) 2512 { 2513 struct nfs4_state_owner *sp; 2514 struct nfs4_state *state = NULL; 2515 struct nfs_server *server = NFS_SERVER(dir); 2516 struct nfs4_opendata *opendata; 2517 struct dentry *dentry = ctx->dentry; 2518 struct rpc_cred *cred = ctx->cred; 2519 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 2520 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC); 2521 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 2522 struct nfs4_label *olabel = NULL; 2523 int status; 2524 2525 /* Protect against reboot recovery conflicts */ 2526 status = -ENOMEM; 2527 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 2528 if (sp == NULL) { 2529 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 2530 goto out_err; 2531 } 2532 status = nfs4_recover_expired_lease(server); 2533 if (status != 0) 2534 goto err_put_state_owner; 2535 if (d_really_is_positive(dentry)) 2536 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 2537 status = -ENOMEM; 2538 if (d_really_is_positive(dentry)) 2539 claim = NFS4_OPEN_CLAIM_FH; 2540 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, 2541 label, claim, GFP_KERNEL); 2542 if (opendata == NULL) 2543 goto err_put_state_owner; 2544 2545 if (label) { 2546 olabel = nfs4_label_alloc(server, GFP_KERNEL); 2547 if (IS_ERR(olabel)) { 2548 status = PTR_ERR(olabel); 2549 goto err_opendata_put; 2550 } 2551 } 2552 2553 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 2554 if (!opendata->f_attr.mdsthreshold) { 2555 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 2556 if 
(!opendata->f_attr.mdsthreshold) 2557 goto err_free_label; 2558 } 2559 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 2560 } 2561 if (d_really_is_positive(dentry)) 2562 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 2563 2564 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx); 2565 if (status != 0) 2566 goto err_free_label; 2567 state = ctx->state; 2568 2569 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 2570 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 2571 nfs4_exclusive_attrset(opendata, sattr, &label); 2572 /* 2573 * send create attributes which was not set by open 2574 * with an extra setattr. 2575 */ 2576 if (sattr->ia_valid & NFS4_VALID_ATTRS) { 2577 nfs_fattr_init(opendata->o_res.f_attr); 2578 status = nfs4_do_setattr(state->inode, cred, 2579 opendata->o_res.f_attr, sattr, 2580 state, label, olabel); 2581 if (status == 0) { 2582 nfs_setattr_update_inode(state->inode, sattr, 2583 opendata->o_res.f_attr); 2584 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2585 } 2586 } 2587 } 2588 if (opened && opendata->file_created) 2589 *opened |= FILE_CREATED; 2590 2591 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 2592 *ctx_th = opendata->f_attr.mdsthreshold; 2593 opendata->f_attr.mdsthreshold = NULL; 2594 } 2595 2596 nfs4_label_free(olabel); 2597 2598 nfs4_opendata_put(opendata); 2599 nfs4_put_state_owner(sp); 2600 return 0; 2601 err_free_label: 2602 nfs4_label_free(olabel); 2603 err_opendata_put: 2604 nfs4_opendata_put(opendata); 2605 err_put_state_owner: 2606 nfs4_put_state_owner(sp); 2607 out_err: 2608 return status; 2609 } 2610 2611 2612 static struct nfs4_state *nfs4_do_open(struct inode *dir, 2613 struct nfs_open_context *ctx, 2614 int flags, 2615 struct iattr *sattr, 2616 struct nfs4_label *label, 2617 int *opened) 2618 { 2619 struct nfs_server *server = NFS_SERVER(dir); 2620 struct nfs4_exception exception = { }; 2621 struct nfs4_state *res; 2622 int status; 2623 2624 do { 2625 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened); 2626 res = ctx->state; 2627 trace_nfs4_open_file(ctx, flags, status); 2628 if (status == 0) 2629 break; 2630 /* NOTE: BAD_SEQID means the server and client disagree about the 2631 * book-keeping w.r.t. state-changing operations 2632 * (OPEN/CLOSE/LOCK/LOCKU...) 2633 * It is actually a sign of a bug on the client or on the server. 2634 * 2635 * If we receive a BAD_SEQID error in the particular case of 2636 * doing an OPEN, we assume that nfs_increment_open_seqid() will 2637 * have unhashed the old state_owner for us, and that we can 2638 * therefore safely retry using a new one. We should still warn 2639 * the user though... 2640 */ 2641 if (status == -NFS4ERR_BAD_SEQID) { 2642 pr_warn_ratelimited("NFS: v4 server %s " 2643 " returned a bad sequence-id error!\n", 2644 NFS_SERVER(dir)->nfs_client->cl_hostname); 2645 exception.retry = 1; 2646 continue; 2647 } 2648 /* 2649 * BAD_STATEID on OPEN means that the server cancelled our 2650 * state before it received the OPEN_CONFIRM. 2651 * Recover by retrying the request as per the discussion 2652 * on Page 181 of RFC3530. 
2653 */ 2654 if (status == -NFS4ERR_BAD_STATEID) { 2655 exception.retry = 1; 2656 continue; 2657 } 2658 if (status == -EAGAIN) { 2659 /* We must have found a delegation */ 2660 exception.retry = 1; 2661 continue; 2662 } 2663 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 2664 continue; 2665 res = ERR_PTR(nfs4_handle_exception(server, 2666 status, &exception)); 2667 } while (exception.retry); 2668 return res; 2669 } 2670 2671 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2672 struct nfs_fattr *fattr, struct iattr *sattr, 2673 struct nfs4_state *state, struct nfs4_label *ilabel, 2674 struct nfs4_label *olabel) 2675 { 2676 struct nfs_server *server = NFS_SERVER(inode); 2677 struct nfs_setattrargs arg = { 2678 .fh = NFS_FH(inode), 2679 .iap = sattr, 2680 .server = server, 2681 .bitmask = server->attr_bitmask, 2682 .label = ilabel, 2683 }; 2684 struct nfs_setattrres res = { 2685 .fattr = fattr, 2686 .label = olabel, 2687 .server = server, 2688 }; 2689 struct rpc_message msg = { 2690 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2691 .rpc_argp = &arg, 2692 .rpc_resp = &res, 2693 .rpc_cred = cred, 2694 }; 2695 unsigned long timestamp = jiffies; 2696 fmode_t fmode; 2697 bool truncate; 2698 int status; 2699 2700 arg.bitmask = nfs4_bitmask(server, ilabel); 2701 if (ilabel) 2702 arg.bitmask = nfs4_bitmask(server, olabel); 2703 2704 nfs_fattr_init(fattr); 2705 2706 /* Servers should only apply open mode checks for file size changes */ 2707 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false; 2708 fmode = truncate ? FMODE_WRITE : FMODE_READ; 2709 2710 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) { 2711 /* Use that stateid */ 2712 } else if (truncate && state != NULL) { 2713 struct nfs_lockowner lockowner = { 2714 .l_owner = current->files, 2715 .l_pid = current->tgid, 2716 }; 2717 if (!nfs4_valid_open_stateid(state)) 2718 return -EBADF; 2719 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2720 &lockowner) == -EIO) 2721 return -EBADF; 2722 } else 2723 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2724 2725 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2726 if (status == 0 && state != NULL) 2727 renew_lease(server, timestamp); 2728 trace_nfs4_setattr(inode, &arg.stateid, status); 2729 return status; 2730 } 2731 2732 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2733 struct nfs_fattr *fattr, struct iattr *sattr, 2734 struct nfs4_state *state, struct nfs4_label *ilabel, 2735 struct nfs4_label *olabel) 2736 { 2737 struct nfs_server *server = NFS_SERVER(inode); 2738 struct nfs4_exception exception = { 2739 .state = state, 2740 .inode = inode, 2741 }; 2742 int err; 2743 do { 2744 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel); 2745 switch (err) { 2746 case -NFS4ERR_OPENMODE: 2747 if (!(sattr->ia_valid & ATTR_SIZE)) { 2748 pr_warn_once("NFSv4: server %s is incorrectly " 2749 "applying open mode checks to " 2750 "a SETATTR that is not " 2751 "changing file size.\n", 2752 server->nfs_client->cl_hostname); 2753 } 2754 if (state && !(state->state & FMODE_WRITE)) { 2755 err = -EBADF; 2756 if (sattr->ia_valid & ATTR_OPEN) 2757 err = -EACCES; 2758 goto out; 2759 } 2760 } 2761 err = nfs4_handle_exception(server, err, &exception); 2762 } while (exception.retry); 2763 out: 2764 return err; 2765 } 2766 2767 static bool 2768 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 2769 { 2770 if (inode == NULL || 
!nfs_have_layout(inode)) 2771 return false; 2772 2773 return pnfs_wait_on_layoutreturn(inode, task); 2774 } 2775 2776 struct nfs4_closedata { 2777 struct inode *inode; 2778 struct nfs4_state *state; 2779 struct nfs_closeargs arg; 2780 struct nfs_closeres res; 2781 struct nfs_fattr fattr; 2782 unsigned long timestamp; 2783 bool roc; 2784 u32 roc_barrier; 2785 }; 2786 2787 static void nfs4_free_closedata(void *data) 2788 { 2789 struct nfs4_closedata *calldata = data; 2790 struct nfs4_state_owner *sp = calldata->state->owner; 2791 struct super_block *sb = calldata->state->inode->i_sb; 2792 2793 if (calldata->roc) 2794 pnfs_roc_release(calldata->state->inode); 2795 nfs4_put_open_state(calldata->state); 2796 nfs_free_seqid(calldata->arg.seqid); 2797 nfs4_put_state_owner(sp); 2798 nfs_sb_deactive(sb); 2799 kfree(calldata); 2800 } 2801 2802 static void nfs4_close_done(struct rpc_task *task, void *data) 2803 { 2804 struct nfs4_closedata *calldata = data; 2805 struct nfs4_state *state = calldata->state; 2806 struct nfs_server *server = NFS_SERVER(calldata->inode); 2807 nfs4_stateid *res_stateid = NULL; 2808 2809 dprintk("%s: begin!\n", __func__); 2810 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2811 return; 2812 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 2813 /* hmm. we are done with the inode, and in the process of freeing 2814 * the state_owner. we keep this around to process errors 2815 */ 2816 switch (task->tk_status) { 2817 case 0: 2818 res_stateid = &calldata->res.stateid; 2819 if (calldata->roc) 2820 pnfs_roc_set_barrier(state->inode, 2821 calldata->roc_barrier); 2822 renew_lease(server, calldata->timestamp); 2823 break; 2824 case -NFS4ERR_ADMIN_REVOKED: 2825 case -NFS4ERR_STALE_STATEID: 2826 case -NFS4ERR_OLD_STATEID: 2827 case -NFS4ERR_BAD_STATEID: 2828 case -NFS4ERR_EXPIRED: 2829 if (!nfs4_stateid_match(&calldata->arg.stateid, 2830 &state->open_stateid)) { 2831 rpc_restart_call_prepare(task); 2832 goto out_release; 2833 } 2834 if (calldata->arg.fmode == 0) 2835 break; 2836 default: 2837 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) { 2838 rpc_restart_call_prepare(task); 2839 goto out_release; 2840 } 2841 } 2842 nfs_clear_open_stateid(state, &calldata->arg.stateid, 2843 res_stateid, calldata->arg.fmode); 2844 out_release: 2845 nfs_release_seqid(calldata->arg.seqid); 2846 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2847 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2848 } 2849 2850 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2851 { 2852 struct nfs4_closedata *calldata = data; 2853 struct nfs4_state *state = calldata->state; 2854 struct inode *inode = calldata->inode; 2855 bool is_rdonly, is_wronly, is_rdwr; 2856 int call_close = 0; 2857 2858 dprintk("%s: begin!\n", __func__); 2859 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2860 goto out_wait; 2861 2862 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2863 spin_lock(&state->owner->so_lock); 2864 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 2865 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 2866 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 2867 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid); 2868 /* Calculate the change in open mode */ 2869 calldata->arg.fmode = 0; 2870 if (state->n_rdwr == 0) { 2871 if (state->n_rdonly == 0) 2872 call_close |= is_rdonly; 2873 else if (is_rdonly) 2874 calldata->arg.fmode |= FMODE_READ; 2875 if (state->n_wronly == 
0) 2876 call_close |= is_wronly; 2877 else if (is_wronly) 2878 calldata->arg.fmode |= FMODE_WRITE; 2879 } else if (is_rdwr) 2880 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 2881 2882 if (calldata->arg.fmode == 0) 2883 call_close |= is_rdwr; 2884 2885 if (!nfs4_valid_open_stateid(state)) 2886 call_close = 0; 2887 spin_unlock(&state->owner->so_lock); 2888 2889 if (!call_close) { 2890 /* Note: exit _without_ calling nfs4_close_done */ 2891 goto out_no_action; 2892 } 2893 2894 if (nfs4_wait_on_layoutreturn(inode, task)) { 2895 nfs_release_seqid(calldata->arg.seqid); 2896 goto out_wait; 2897 } 2898 2899 if (calldata->arg.fmode == 0) 2900 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2901 if (calldata->roc) 2902 pnfs_roc_get_barrier(inode, &calldata->roc_barrier); 2903 2904 calldata->arg.share_access = 2905 nfs4_map_atomic_open_share(NFS_SERVER(inode), 2906 calldata->arg.fmode, 0); 2907 2908 nfs_fattr_init(calldata->res.fattr); 2909 calldata->timestamp = jiffies; 2910 if (nfs4_setup_sequence(NFS_SERVER(inode), 2911 &calldata->arg.seq_args, 2912 &calldata->res.seq_res, 2913 task) != 0) 2914 nfs_release_seqid(calldata->arg.seqid); 2915 dprintk("%s: done!\n", __func__); 2916 return; 2917 out_no_action: 2918 task->tk_action = NULL; 2919 out_wait: 2920 nfs4_sequence_done(task, &calldata->res.seq_res); 2921 } 2922 2923 static const struct rpc_call_ops nfs4_close_ops = { 2924 .rpc_call_prepare = nfs4_close_prepare, 2925 .rpc_call_done = nfs4_close_done, 2926 .rpc_release = nfs4_free_closedata, 2927 }; 2928 2929 static bool nfs4_roc(struct inode *inode) 2930 { 2931 if (!nfs_have_layout(inode)) 2932 return false; 2933 return pnfs_roc(inode); 2934 } 2935 2936 /* 2937 * It is possible for data to be read/written from a mem-mapped file 2938 * after the sys_close call (which hits the vfs layer as a flush). 2939 * This means that we can't safely call nfsv4 close on a file until 2940 * the inode is cleared. This in turn means that we are not good 2941 * NFSv4 citizens - we do not indicate to the server to update the file's 2942 * share state even when we are done with one of the three share 2943 * stateid's in the inode. 2944 * 2945 * NOTE: Caller must be holding the sp->so_owner semaphore! 
2946 */ 2947 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 2948 { 2949 struct nfs_server *server = NFS_SERVER(state->inode); 2950 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 2951 struct nfs4_closedata *calldata; 2952 struct nfs4_state_owner *sp = state->owner; 2953 struct rpc_task *task; 2954 struct rpc_message msg = { 2955 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2956 .rpc_cred = state->owner->so_cred, 2957 }; 2958 struct rpc_task_setup task_setup_data = { 2959 .rpc_client = server->client, 2960 .rpc_message = &msg, 2961 .callback_ops = &nfs4_close_ops, 2962 .workqueue = nfsiod_workqueue, 2963 .flags = RPC_TASK_ASYNC, 2964 }; 2965 int status = -ENOMEM; 2966 2967 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 2968 &task_setup_data.rpc_client, &msg); 2969 2970 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2971 if (calldata == NULL) 2972 goto out; 2973 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2974 calldata->inode = state->inode; 2975 calldata->state = state; 2976 calldata->arg.fh = NFS_FH(state->inode); 2977 /* Serialization for the sequence id */ 2978 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 2979 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 2980 if (IS_ERR(calldata->arg.seqid)) 2981 goto out_free_calldata; 2982 calldata->arg.fmode = 0; 2983 calldata->arg.bitmask = server->cache_consistency_bitmask; 2984 calldata->res.fattr = &calldata->fattr; 2985 calldata->res.seqid = calldata->arg.seqid; 2986 calldata->res.server = server; 2987 calldata->roc = nfs4_roc(state->inode); 2988 nfs_sb_active(calldata->inode->i_sb); 2989 2990 msg.rpc_argp = &calldata->arg; 2991 msg.rpc_resp = &calldata->res; 2992 task_setup_data.callback_data = calldata; 2993 task = rpc_run_task(&task_setup_data); 2994 if (IS_ERR(task)) 2995 return PTR_ERR(task); 2996 status = 0; 2997 if (wait) 2998 status = rpc_wait_for_completion_task(task); 2999 rpc_put_task(task); 3000 return status; 3001 out_free_calldata: 3002 kfree(calldata); 3003 out: 3004 nfs4_put_open_state(state); 3005 nfs4_put_state_owner(sp); 3006 return status; 3007 } 3008 3009 static struct inode * 3010 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3011 int open_flags, struct iattr *attr, int *opened) 3012 { 3013 struct nfs4_state *state; 3014 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; 3015 3016 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3017 3018 /* Protect against concurrent sillydeletes */ 3019 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3020 3021 nfs4_label_release_security(label); 3022 3023 if (IS_ERR(state)) 3024 return ERR_CAST(state); 3025 return state->inode; 3026 } 3027 3028 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3029 { 3030 if (ctx->state == NULL) 3031 return; 3032 if (is_sync) 3033 nfs4_close_sync(ctx->state, ctx->mode); 3034 else 3035 nfs4_close_state(ctx->state, ctx->mode); 3036 } 3037 3038 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3039 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3040 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL) 3041 3042 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3043 { 3044 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion; 3045 struct nfs4_server_caps_arg args = { 3046 .fhandle = fhandle, 3047 .bitmask = bitmask, 3048 }; 3049 
struct nfs4_server_caps_res res = {}; 3050 struct rpc_message msg = { 3051 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3052 .rpc_argp = &args, 3053 .rpc_resp = &res, 3054 }; 3055 int status; 3056 3057 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3058 FATTR4_WORD0_FH_EXPIRE_TYPE | 3059 FATTR4_WORD0_LINK_SUPPORT | 3060 FATTR4_WORD0_SYMLINK_SUPPORT | 3061 FATTR4_WORD0_ACLSUPPORT; 3062 if (minorversion) 3063 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3064 3065 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3066 if (status == 0) { 3067 /* Sanity check the server answers */ 3068 switch (minorversion) { 3069 case 0: 3070 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 3071 res.attr_bitmask[2] = 0; 3072 break; 3073 case 1: 3074 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 3075 break; 3076 case 2: 3077 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 3078 } 3079 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 3080 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 3081 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 3082 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 3083 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 3084 NFS_CAP_CTIME|NFS_CAP_MTIME| 3085 NFS_CAP_SECURITY_LABEL); 3086 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 3087 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3088 server->caps |= NFS_CAP_ACLS; 3089 if (res.has_links != 0) 3090 server->caps |= NFS_CAP_HARDLINKS; 3091 if (res.has_symlinks != 0) 3092 server->caps |= NFS_CAP_SYMLINKS; 3093 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 3094 server->caps |= NFS_CAP_FILEID; 3095 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 3096 server->caps |= NFS_CAP_MODE; 3097 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 3098 server->caps |= NFS_CAP_NLINK; 3099 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 3100 server->caps |= NFS_CAP_OWNER; 3101 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 3102 server->caps |= NFS_CAP_OWNER_GROUP; 3103 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 3104 server->caps |= NFS_CAP_ATIME; 3105 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 3106 server->caps |= NFS_CAP_CTIME; 3107 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 3108 server->caps |= NFS_CAP_MTIME; 3109 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 3110 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 3111 server->caps |= NFS_CAP_SECURITY_LABEL; 3112 #endif 3113 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 3114 sizeof(server->attr_bitmask)); 3115 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 3116 3117 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 3118 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 3119 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 3120 server->cache_consistency_bitmask[2] = 0; 3121 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 3122 sizeof(server->exclcreat_bitmask)); 3123 server->acl_bitmask = res.acl_bitmask; 3124 server->fh_expire_type = res.fh_expire_type; 3125 } 3126 3127 return status; 3128 } 3129 3130 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3131 { 3132 struct nfs4_exception exception = { }; 3133 int err; 3134 do { 3135 err = nfs4_handle_exception(server, 3136 _nfs4_server_capabilities(server, fhandle), 3137 &exception); 3138 } while (exception.retry); 3139 return err; 3140 } 3141 3142 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh 
*fhandle, 3143 struct nfs_fsinfo *info) 3144 { 3145 u32 bitmask[3]; 3146 struct nfs4_lookup_root_arg args = { 3147 .bitmask = bitmask, 3148 }; 3149 struct nfs4_lookup_res res = { 3150 .server = server, 3151 .fattr = info->fattr, 3152 .fh = fhandle, 3153 }; 3154 struct rpc_message msg = { 3155 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 3156 .rpc_argp = &args, 3157 .rpc_resp = &res, 3158 }; 3159 3160 bitmask[0] = nfs4_fattr_bitmap[0]; 3161 bitmask[1] = nfs4_fattr_bitmap[1]; 3162 /* 3163 * Process the label in the upcoming getfattr 3164 */ 3165 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 3166 3167 nfs_fattr_init(info->fattr); 3168 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3169 } 3170 3171 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 3172 struct nfs_fsinfo *info) 3173 { 3174 struct nfs4_exception exception = { }; 3175 int err; 3176 do { 3177 err = _nfs4_lookup_root(server, fhandle, info); 3178 trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 3179 switch (err) { 3180 case 0: 3181 case -NFS4ERR_WRONGSEC: 3182 goto out; 3183 default: 3184 err = nfs4_handle_exception(server, err, &exception); 3185 } 3186 } while (exception.retry); 3187 out: 3188 return err; 3189 } 3190 3191 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3192 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 3193 { 3194 struct rpc_auth_create_args auth_args = { 3195 .pseudoflavor = flavor, 3196 }; 3197 struct rpc_auth *auth; 3198 int ret; 3199 3200 auth = rpcauth_create(&auth_args, server->client); 3201 if (IS_ERR(auth)) { 3202 ret = -EACCES; 3203 goto out; 3204 } 3205 ret = nfs4_lookup_root(server, fhandle, info); 3206 out: 3207 return ret; 3208 } 3209 3210 /* 3211 * Retry pseudoroot lookup with various security flavors. We do this when: 3212 * 3213 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 3214 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 3215 * 3216 * Returns zero on success, or a negative NFS4ERR value, or a 3217 * negative errno value. 3218 */ 3219 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3220 struct nfs_fsinfo *info) 3221 { 3222 /* Per 3530bis 15.33.5 */ 3223 static const rpc_authflavor_t flav_array[] = { 3224 RPC_AUTH_GSS_KRB5P, 3225 RPC_AUTH_GSS_KRB5I, 3226 RPC_AUTH_GSS_KRB5, 3227 RPC_AUTH_UNIX, /* courtesy */ 3228 RPC_AUTH_NULL, 3229 }; 3230 int status = -EPERM; 3231 size_t i; 3232 3233 if (server->auth_info.flavor_len > 0) { 3234 /* try each flavor specified by user */ 3235 for (i = 0; i < server->auth_info.flavor_len; i++) { 3236 status = nfs4_lookup_root_sec(server, fhandle, info, 3237 server->auth_info.flavors[i]); 3238 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 3239 continue; 3240 break; 3241 } 3242 } else { 3243 /* no flavors specified by user, try default list */ 3244 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 3245 status = nfs4_lookup_root_sec(server, fhandle, info, 3246 flav_array[i]); 3247 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 3248 continue; 3249 break; 3250 } 3251 } 3252 3253 /* 3254 * -EACCESS could mean that the user doesn't have correct permissions 3255 * to access the mount. It could also mean that we tried to mount 3256 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 3257 * existing mount programs don't handle -EACCES very well so it should 3258 * be mapped to -EPERM instead. 
3259 */ 3260 if (status == -EACCES) 3261 status = -EPERM; 3262 return status; 3263 } 3264 3265 static int nfs4_do_find_root_sec(struct nfs_server *server, 3266 struct nfs_fh *fhandle, struct nfs_fsinfo *info) 3267 { 3268 int mv = server->nfs_client->cl_minorversion; 3269 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info); 3270 } 3271 3272 /** 3273 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 3274 * @server: initialized nfs_server handle 3275 * @fhandle: we fill in the pseudo-fs root file handle 3276 * @info: we fill in an FSINFO struct 3277 * @auth_probe: probe the auth flavours 3278 * 3279 * Returns zero on success, or a negative errno. 3280 */ 3281 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 3282 struct nfs_fsinfo *info, 3283 bool auth_probe) 3284 { 3285 int status = 0; 3286 3287 if (!auth_probe) 3288 status = nfs4_lookup_root(server, fhandle, info); 3289 3290 if (auth_probe || status == NFS4ERR_WRONGSEC) 3291 status = nfs4_do_find_root_sec(server, fhandle, info); 3292 3293 if (status == 0) 3294 status = nfs4_server_capabilities(server, fhandle); 3295 if (status == 0) 3296 status = nfs4_do_fsinfo(server, fhandle, info); 3297 3298 return nfs4_map_errors(status); 3299 } 3300 3301 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 3302 struct nfs_fsinfo *info) 3303 { 3304 int error; 3305 struct nfs_fattr *fattr = info->fattr; 3306 struct nfs4_label *label = NULL; 3307 3308 error = nfs4_server_capabilities(server, mntfh); 3309 if (error < 0) { 3310 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 3311 return error; 3312 } 3313 3314 label = nfs4_label_alloc(server, GFP_KERNEL); 3315 if (IS_ERR(label)) 3316 return PTR_ERR(label); 3317 3318 error = nfs4_proc_getattr(server, mntfh, fattr, label); 3319 if (error < 0) { 3320 dprintk("nfs4_get_root: getattr error = %d\n", -error); 3321 goto err_free_label; 3322 } 3323 3324 if (fattr->valid & NFS_ATTR_FATTR_FSID && 3325 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 3326 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 3327 3328 err_free_label: 3329 nfs4_label_free(label); 3330 3331 return error; 3332 } 3333 3334 /* 3335 * Get locations and (maybe) other attributes of a referral. 3336 * Note that we'll actually follow the referral later when 3337 * we detect fsid mismatch in inode revalidation 3338 */ 3339 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 3340 const struct qstr *name, struct nfs_fattr *fattr, 3341 struct nfs_fh *fhandle) 3342 { 3343 int status = -ENOMEM; 3344 struct page *page = NULL; 3345 struct nfs4_fs_locations *locations = NULL; 3346 3347 page = alloc_page(GFP_KERNEL); 3348 if (page == NULL) 3349 goto out; 3350 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 3351 if (locations == NULL) 3352 goto out; 3353 3354 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 3355 if (status != 0) 3356 goto out; 3357 3358 /* 3359 * If the fsid didn't change, this is a migration event, not a 3360 * referral. Cause us to drop into the exception handler, which 3361 * will kick off migration recovery. 
3362 */ 3363 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 3364 dprintk("%s: server did not return a different fsid for" 3365 " a referral at %s\n", __func__, name->name); 3366 status = -NFS4ERR_MOVED; 3367 goto out; 3368 } 3369 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 3370 nfs_fixup_referral_attributes(&locations->fattr); 3371 3372 /* replace the lookup nfs_fattr with the locations nfs_fattr */ 3373 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 3374 memset(fhandle, 0, sizeof(struct nfs_fh)); 3375 out: 3376 if (page) 3377 __free_page(page); 3378 kfree(locations); 3379 return status; 3380 } 3381 3382 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 3383 struct nfs_fattr *fattr, struct nfs4_label *label) 3384 { 3385 struct nfs4_getattr_arg args = { 3386 .fh = fhandle, 3387 .bitmask = server->attr_bitmask, 3388 }; 3389 struct nfs4_getattr_res res = { 3390 .fattr = fattr, 3391 .label = label, 3392 .server = server, 3393 }; 3394 struct rpc_message msg = { 3395 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 3396 .rpc_argp = &args, 3397 .rpc_resp = &res, 3398 }; 3399 3400 args.bitmask = nfs4_bitmask(server, label); 3401 3402 nfs_fattr_init(fattr); 3403 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3404 } 3405 3406 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 3407 struct nfs_fattr *fattr, struct nfs4_label *label) 3408 { 3409 struct nfs4_exception exception = { }; 3410 int err; 3411 do { 3412 err = _nfs4_proc_getattr(server, fhandle, fattr, label); 3413 trace_nfs4_getattr(server, fhandle, fattr, err); 3414 err = nfs4_handle_exception(server, err, 3415 &exception); 3416 } while (exception.retry); 3417 return err; 3418 } 3419 3420 /* 3421 * The file is not closed if it is opened due to the a request to change 3422 * the size of the file. The open call will not be needed once the 3423 * VFS layer lookup-intents are implemented. 3424 * 3425 * Close is called when the inode is destroyed. 3426 * If we haven't opened the file for O_WRONLY, we 3427 * need to in the size_change case to obtain a stateid. 3428 * 3429 * Got race? 3430 * Because OPEN is always done by name in nfsv4, it is 3431 * possible that we opened a different file by the same 3432 * name. We can recognize this race condition, but we 3433 * can't do anything about it besides returning an error. 3434 * 3435 * This will be fixed with VFS changes (lookup-intent). 
3436 */ 3437 static int 3438 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 3439 struct iattr *sattr) 3440 { 3441 struct inode *inode = d_inode(dentry); 3442 struct rpc_cred *cred = NULL; 3443 struct nfs4_state *state = NULL; 3444 struct nfs4_label *label = NULL; 3445 int status; 3446 3447 if (pnfs_ld_layoutret_on_setattr(inode) && 3448 sattr->ia_valid & ATTR_SIZE && 3449 sattr->ia_size < i_size_read(inode)) 3450 pnfs_commit_and_return_layout(inode); 3451 3452 nfs_fattr_init(fattr); 3453 3454 /* Deal with open(O_TRUNC) */ 3455 if (sattr->ia_valid & ATTR_OPEN) 3456 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 3457 3458 /* Optimization: if the end result is no change, don't RPC */ 3459 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 3460 return 0; 3461 3462 /* Search for an existing open(O_WRITE) file */ 3463 if (sattr->ia_valid & ATTR_FILE) { 3464 struct nfs_open_context *ctx; 3465 3466 ctx = nfs_file_open_context(sattr->ia_file); 3467 if (ctx) { 3468 cred = ctx->cred; 3469 state = ctx->state; 3470 } 3471 } 3472 3473 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 3474 if (IS_ERR(label)) 3475 return PTR_ERR(label); 3476 3477 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label); 3478 if (status == 0) { 3479 nfs_setattr_update_inode(inode, sattr, fattr); 3480 nfs_setsecurity(inode, fattr, label); 3481 } 3482 nfs4_label_free(label); 3483 return status; 3484 } 3485 3486 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 3487 const struct qstr *name, struct nfs_fh *fhandle, 3488 struct nfs_fattr *fattr, struct nfs4_label *label) 3489 { 3490 struct nfs_server *server = NFS_SERVER(dir); 3491 int status; 3492 struct nfs4_lookup_arg args = { 3493 .bitmask = server->attr_bitmask, 3494 .dir_fh = NFS_FH(dir), 3495 .name = name, 3496 }; 3497 struct nfs4_lookup_res res = { 3498 .server = server, 3499 .fattr = fattr, 3500 .label = label, 3501 .fh = fhandle, 3502 }; 3503 struct rpc_message msg = { 3504 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 3505 .rpc_argp = &args, 3506 .rpc_resp = &res, 3507 }; 3508 3509 args.bitmask = nfs4_bitmask(server, label); 3510 3511 nfs_fattr_init(fattr); 3512 3513 dprintk("NFS call lookup %s\n", name->name); 3514 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 3515 dprintk("NFS reply lookup: %d\n", status); 3516 return status; 3517 } 3518 3519 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 3520 { 3521 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 3522 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 3523 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 3524 fattr->nlink = 2; 3525 } 3526 3527 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 3528 struct qstr *name, struct nfs_fh *fhandle, 3529 struct nfs_fattr *fattr, struct nfs4_label *label) 3530 { 3531 struct nfs4_exception exception = { }; 3532 struct rpc_clnt *client = *clnt; 3533 int err; 3534 do { 3535 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label); 3536 trace_nfs4_lookup(dir, name, err); 3537 switch (err) { 3538 case -NFS4ERR_BADNAME: 3539 err = -ENOENT; 3540 goto out; 3541 case -NFS4ERR_MOVED: 3542 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 3543 if (err == -NFS4ERR_MOVED) 3544 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3545 goto out; 3546 case -NFS4ERR_WRONGSEC: 3547 err = -EPERM; 3548 if (client != *clnt) 3549 goto out; 3550 client = nfs4_negotiate_security(client, dir, name); 3551 if 
(IS_ERR(client)) 3552 return PTR_ERR(client); 3553 3554 exception.retry = 1; 3555 break; 3556 default: 3557 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3558 } 3559 } while (exception.retry); 3560 3561 out: 3562 if (err == 0) 3563 *clnt = client; 3564 else if (client != *clnt) 3565 rpc_shutdown_client(client); 3566 3567 return err; 3568 } 3569 3570 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 3571 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 3572 struct nfs4_label *label) 3573 { 3574 int status; 3575 struct rpc_clnt *client = NFS_CLIENT(dir); 3576 3577 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label); 3578 if (client != NFS_CLIENT(dir)) { 3579 rpc_shutdown_client(client); 3580 nfs_fixup_secinfo_attributes(fattr); 3581 } 3582 return status; 3583 } 3584 3585 struct rpc_clnt * 3586 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 3587 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 3588 { 3589 struct rpc_clnt *client = NFS_CLIENT(dir); 3590 int status; 3591 3592 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL); 3593 if (status < 0) 3594 return ERR_PTR(status); 3595 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client; 3596 } 3597 3598 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3599 { 3600 struct nfs_server *server = NFS_SERVER(inode); 3601 struct nfs4_accessargs args = { 3602 .fh = NFS_FH(inode), 3603 .bitmask = server->cache_consistency_bitmask, 3604 }; 3605 struct nfs4_accessres res = { 3606 .server = server, 3607 }; 3608 struct rpc_message msg = { 3609 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 3610 .rpc_argp = &args, 3611 .rpc_resp = &res, 3612 .rpc_cred = entry->cred, 3613 }; 3614 int mode = entry->mask; 3615 int status = 0; 3616 3617 /* 3618 * Determine which access bits we want to ask for... 3619 */ 3620 if (mode & MAY_READ) 3621 args.access |= NFS4_ACCESS_READ; 3622 if (S_ISDIR(inode->i_mode)) { 3623 if (mode & MAY_WRITE) 3624 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 3625 if (mode & MAY_EXEC) 3626 args.access |= NFS4_ACCESS_LOOKUP; 3627 } else { 3628 if (mode & MAY_WRITE) 3629 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 3630 if (mode & MAY_EXEC) 3631 args.access |= NFS4_ACCESS_EXECUTE; 3632 } 3633 3634 res.fattr = nfs_alloc_fattr(); 3635 if (res.fattr == NULL) 3636 return -ENOMEM; 3637 3638 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3639 if (!status) { 3640 nfs_access_set_mask(entry, res.access); 3641 nfs_refresh_inode(inode, res.fattr); 3642 } 3643 nfs_free_fattr(res.fattr); 3644 return status; 3645 } 3646 3647 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3648 { 3649 struct nfs4_exception exception = { }; 3650 int err; 3651 do { 3652 err = _nfs4_proc_access(inode, entry); 3653 trace_nfs4_access(inode, err); 3654 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3655 &exception); 3656 } while (exception.retry); 3657 return err; 3658 } 3659 3660 /* 3661 * TODO: For the time being, we don't try to get any attributes 3662 * along with any of the zero-copy operations READ, READDIR, 3663 * READLINK, WRITE. 3664 * 3665 * In the case of the first three, we want to put the GETATTR 3666 * after the read-type operation -- this is because it is hard 3667 * to predict the length of a GETATTR response in v4, and thus 3668 * align the READ data correctly. 
This means that the GETATTR 3669 * may end up partially falling into the page cache, and we should 3670 * shift it into the 'tail' of the xdr_buf before processing. 3671 * To do this efficiently, we need to know the total length 3672 * of data received, which doesn't seem to be available outside 3673 * of the RPC layer. 3674 * 3675 * In the case of WRITE, we also want to put the GETATTR after 3676 * the operation -- in this case because we want to make sure 3677 * we get the post-operation mtime and size. 3678 * 3679 * Both of these changes to the XDR layer would in fact be quite 3680 * minor, but I decided to leave them for a subsequent patch. 3681 */ 3682 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 3683 unsigned int pgbase, unsigned int pglen) 3684 { 3685 struct nfs4_readlink args = { 3686 .fh = NFS_FH(inode), 3687 .pgbase = pgbase, 3688 .pglen = pglen, 3689 .pages = &page, 3690 }; 3691 struct nfs4_readlink_res res; 3692 struct rpc_message msg = { 3693 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 3694 .rpc_argp = &args, 3695 .rpc_resp = &res, 3696 }; 3697 3698 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 3699 } 3700 3701 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 3702 unsigned int pgbase, unsigned int pglen) 3703 { 3704 struct nfs4_exception exception = { }; 3705 int err; 3706 do { 3707 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 3708 trace_nfs4_readlink(inode, err); 3709 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3710 &exception); 3711 } while (exception.retry); 3712 return err; 3713 } 3714 3715 /* 3716 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 3717 */ 3718 static int 3719 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 3720 int flags) 3721 { 3722 struct nfs4_label l, *ilabel = NULL; 3723 struct nfs_open_context *ctx; 3724 struct nfs4_state *state; 3725 int status = 0; 3726 3727 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 3728 if (IS_ERR(ctx)) 3729 return PTR_ERR(ctx); 3730 3731 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 3732 3733 sattr->ia_mode &= ~current_umask(); 3734 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 3735 if (IS_ERR(state)) { 3736 status = PTR_ERR(state); 3737 goto out; 3738 } 3739 out: 3740 nfs4_label_release_security(ilabel); 3741 put_nfs_open_context(ctx); 3742 return status; 3743 } 3744 3745 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 3746 { 3747 struct nfs_server *server = NFS_SERVER(dir); 3748 struct nfs_removeargs args = { 3749 .fh = NFS_FH(dir), 3750 .name = *name, 3751 }; 3752 struct nfs_removeres res = { 3753 .server = server, 3754 }; 3755 struct rpc_message msg = { 3756 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 3757 .rpc_argp = &args, 3758 .rpc_resp = &res, 3759 }; 3760 int status; 3761 3762 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 3763 if (status == 0) 3764 update_changeattr(dir, &res.cinfo); 3765 return status; 3766 } 3767 3768 static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 3769 { 3770 struct nfs4_exception exception = { }; 3771 int err; 3772 do { 3773 err = _nfs4_proc_remove(dir, name); 3774 trace_nfs4_remove(dir, name, err); 3775 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3776 &exception); 3777 } while (exception.retry); 3778 return err; 3779 } 3780 3781 static void nfs4_proc_unlink_setup(struct rpc_message 
*msg, struct inode *dir) 3782 { 3783 struct nfs_server *server = NFS_SERVER(dir); 3784 struct nfs_removeargs *args = msg->rpc_argp; 3785 struct nfs_removeres *res = msg->rpc_resp; 3786 3787 res->server = server; 3788 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 3789 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1); 3790 3791 nfs_fattr_init(res->dir_attr); 3792 } 3793 3794 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 3795 { 3796 nfs4_setup_sequence(NFS_SERVER(data->dir), 3797 &data->args.seq_args, 3798 &data->res.seq_res, 3799 task); 3800 } 3801 3802 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 3803 { 3804 struct nfs_unlinkdata *data = task->tk_calldata; 3805 struct nfs_removeres *res = &data->res; 3806 3807 if (!nfs4_sequence_done(task, &res->seq_res)) 3808 return 0; 3809 if (nfs4_async_handle_error(task, res->server, NULL, 3810 &data->timeout) == -EAGAIN) 3811 return 0; 3812 update_changeattr(dir, &res->cinfo); 3813 return 1; 3814 } 3815 3816 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 3817 { 3818 struct nfs_server *server = NFS_SERVER(dir); 3819 struct nfs_renameargs *arg = msg->rpc_argp; 3820 struct nfs_renameres *res = msg->rpc_resp; 3821 3822 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 3823 res->server = server; 3824 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1); 3825 } 3826 3827 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 3828 { 3829 nfs4_setup_sequence(NFS_SERVER(data->old_dir), 3830 &data->args.seq_args, 3831 &data->res.seq_res, 3832 task); 3833 } 3834 3835 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 3836 struct inode *new_dir) 3837 { 3838 struct nfs_renamedata *data = task->tk_calldata; 3839 struct nfs_renameres *res = &data->res; 3840 3841 if (!nfs4_sequence_done(task, &res->seq_res)) 3842 return 0; 3843 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 3844 return 0; 3845 3846 update_changeattr(old_dir, &res->old_cinfo); 3847 update_changeattr(new_dir, &res->new_cinfo); 3848 return 1; 3849 } 3850 3851 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3852 { 3853 struct nfs_server *server = NFS_SERVER(inode); 3854 struct nfs4_link_arg arg = { 3855 .fh = NFS_FH(inode), 3856 .dir_fh = NFS_FH(dir), 3857 .name = name, 3858 .bitmask = server->attr_bitmask, 3859 }; 3860 struct nfs4_link_res res = { 3861 .server = server, 3862 .label = NULL, 3863 }; 3864 struct rpc_message msg = { 3865 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3866 .rpc_argp = &arg, 3867 .rpc_resp = &res, 3868 }; 3869 int status = -ENOMEM; 3870 3871 res.fattr = nfs_alloc_fattr(); 3872 if (res.fattr == NULL) 3873 goto out; 3874 3875 res.label = nfs4_label_alloc(server, GFP_KERNEL); 3876 if (IS_ERR(res.label)) { 3877 status = PTR_ERR(res.label); 3878 goto out; 3879 } 3880 arg.bitmask = nfs4_bitmask(server, res.label); 3881 3882 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3883 if (!status) { 3884 update_changeattr(dir, &res.cinfo); 3885 status = nfs_post_op_update_inode(inode, res.fattr); 3886 if (!status) 3887 nfs_setsecurity(inode, res.fattr, res.label); 3888 } 3889 3890 3891 nfs4_label_free(res.label); 3892 3893 out: 3894 nfs_free_fattr(res.fattr); 3895 return status; 3896 } 3897 3898 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3899 { 
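/*
 * Standard NFSv4 retry wrapper: _nfs4_proc_link() issues the actual
 * LINK compound, and nfs4_handle_exception() decides whether the error
 * is fatal or whether the call should be retried (setting
 * exception.retry) after any required state recovery or delay.
 */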
3900 struct nfs4_exception exception = { }; 3901 int err; 3902 do { 3903 err = nfs4_handle_exception(NFS_SERVER(inode), 3904 _nfs4_proc_link(inode, dir, name), 3905 &exception); 3906 } while (exception.retry); 3907 return err; 3908 } 3909 3910 struct nfs4_createdata { 3911 struct rpc_message msg; 3912 struct nfs4_create_arg arg; 3913 struct nfs4_create_res res; 3914 struct nfs_fh fh; 3915 struct nfs_fattr fattr; 3916 struct nfs4_label *label; 3917 }; 3918 3919 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3920 struct qstr *name, struct iattr *sattr, u32 ftype) 3921 { 3922 struct nfs4_createdata *data; 3923 3924 data = kzalloc(sizeof(*data), GFP_KERNEL); 3925 if (data != NULL) { 3926 struct nfs_server *server = NFS_SERVER(dir); 3927 3928 data->label = nfs4_label_alloc(server, GFP_KERNEL); 3929 if (IS_ERR(data->label)) 3930 goto out_free; 3931 3932 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3933 data->msg.rpc_argp = &data->arg; 3934 data->msg.rpc_resp = &data->res; 3935 data->arg.dir_fh = NFS_FH(dir); 3936 data->arg.server = server; 3937 data->arg.name = name; 3938 data->arg.attrs = sattr; 3939 data->arg.ftype = ftype; 3940 data->arg.bitmask = nfs4_bitmask(server, data->label); 3941 data->res.server = server; 3942 data->res.fh = &data->fh; 3943 data->res.fattr = &data->fattr; 3944 data->res.label = data->label; 3945 nfs_fattr_init(data->res.fattr); 3946 } 3947 return data; 3948 out_free: 3949 kfree(data); 3950 return NULL; 3951 } 3952 3953 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3954 { 3955 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3956 &data->arg.seq_args, &data->res.seq_res, 1); 3957 if (status == 0) { 3958 update_changeattr(dir, &data->res.dir_cinfo); 3959 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); 3960 } 3961 return status; 3962 } 3963 3964 static void nfs4_free_createdata(struct nfs4_createdata *data) 3965 { 3966 nfs4_label_free(data->label); 3967 kfree(data); 3968 } 3969 3970 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3971 struct page *page, unsigned int len, struct iattr *sattr, 3972 struct nfs4_label *label) 3973 { 3974 struct nfs4_createdata *data; 3975 int status = -ENAMETOOLONG; 3976 3977 if (len > NFS4_MAXPATHLEN) 3978 goto out; 3979 3980 status = -ENOMEM; 3981 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3982 if (data == NULL) 3983 goto out; 3984 3985 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3986 data->arg.u.symlink.pages = &page; 3987 data->arg.u.symlink.len = len; 3988 data->arg.label = label; 3989 3990 status = nfs4_do_create(dir, dentry, data); 3991 3992 nfs4_free_createdata(data); 3993 out: 3994 return status; 3995 } 3996 3997 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3998 struct page *page, unsigned int len, struct iattr *sattr) 3999 { 4000 struct nfs4_exception exception = { }; 4001 struct nfs4_label l, *label = NULL; 4002 int err; 4003 4004 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4005 4006 do { 4007 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label); 4008 trace_nfs4_symlink(dir, &dentry->d_name, err); 4009 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4010 &exception); 4011 } while (exception.retry); 4012 4013 nfs4_label_release_security(label); 4014 return err; 4015 } 4016 4017 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 4018 struct 
iattr *sattr, struct nfs4_label *label) 4019 { 4020 struct nfs4_createdata *data; 4021 int status = -ENOMEM; 4022 4023 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 4024 if (data == NULL) 4025 goto out; 4026 4027 data->arg.label = label; 4028 status = nfs4_do_create(dir, dentry, data); 4029 4030 nfs4_free_createdata(data); 4031 out: 4032 return status; 4033 } 4034 4035 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 4036 struct iattr *sattr) 4037 { 4038 struct nfs4_exception exception = { }; 4039 struct nfs4_label l, *label = NULL; 4040 int err; 4041 4042 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4043 4044 sattr->ia_mode &= ~current_umask(); 4045 do { 4046 err = _nfs4_proc_mkdir(dir, dentry, sattr, label); 4047 trace_nfs4_mkdir(dir, &dentry->d_name, err); 4048 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4049 &exception); 4050 } while (exception.retry); 4051 nfs4_label_release_security(label); 4052 4053 return err; 4054 } 4055 4056 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 4057 u64 cookie, struct page **pages, unsigned int count, int plus) 4058 { 4059 struct inode *dir = d_inode(dentry); 4060 struct nfs4_readdir_arg args = { 4061 .fh = NFS_FH(dir), 4062 .pages = pages, 4063 .pgbase = 0, 4064 .count = count, 4065 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask, 4066 .plus = plus, 4067 }; 4068 struct nfs4_readdir_res res; 4069 struct rpc_message msg = { 4070 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 4071 .rpc_argp = &args, 4072 .rpc_resp = &res, 4073 .rpc_cred = cred, 4074 }; 4075 int status; 4076 4077 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__, 4078 dentry, 4079 (unsigned long long)cookie); 4080 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 4081 res.pgbase = args.pgbase; 4082 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 4083 if (status >= 0) { 4084 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 4085 status += args.pgbase; 4086 } 4087 4088 nfs_invalidate_atime(dir); 4089 4090 dprintk("%s: returns %d\n", __func__, status); 4091 return status; 4092 } 4093 4094 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 4095 u64 cookie, struct page **pages, unsigned int count, int plus) 4096 { 4097 struct nfs4_exception exception = { }; 4098 int err; 4099 do { 4100 err = _nfs4_proc_readdir(dentry, cred, cookie, 4101 pages, count, plus); 4102 trace_nfs4_readdir(d_inode(dentry), err); 4103 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err, 4104 &exception); 4105 } while (exception.retry); 4106 return err; 4107 } 4108 4109 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 4110 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 4111 { 4112 struct nfs4_createdata *data; 4113 int mode = sattr->ia_mode; 4114 int status = -ENOMEM; 4115 4116 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 4117 if (data == NULL) 4118 goto out; 4119 4120 if (S_ISFIFO(mode)) 4121 data->arg.ftype = NF4FIFO; 4122 else if (S_ISBLK(mode)) { 4123 data->arg.ftype = NF4BLK; 4124 data->arg.u.device.specdata1 = MAJOR(rdev); 4125 data->arg.u.device.specdata2 = MINOR(rdev); 4126 } 4127 else if (S_ISCHR(mode)) { 4128 data->arg.ftype = NF4CHR; 4129 data->arg.u.device.specdata1 = MAJOR(rdev); 4130 data->arg.u.device.specdata2 = MINOR(rdev); 4131 } else if (!S_ISSOCK(mode)) { 4132 status = -EINVAL; 4133 goto out_free; 4134 } 4135 4136 
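/*
 * At this point data->arg.ftype describes the object being created
 * (FIFO, block or character device with major/minor in specdata1/2,
 * or socket); attach the security label and send the CREATE compound.
 */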
data->arg.label = label; 4137 status = nfs4_do_create(dir, dentry, data); 4138 out_free: 4139 nfs4_free_createdata(data); 4140 out: 4141 return status; 4142 } 4143 4144 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 4145 struct iattr *sattr, dev_t rdev) 4146 { 4147 struct nfs4_exception exception = { }; 4148 struct nfs4_label l, *label = NULL; 4149 int err; 4150 4151 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4152 4153 sattr->ia_mode &= ~current_umask(); 4154 do { 4155 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 4156 trace_nfs4_mknod(dir, &dentry->d_name, err); 4157 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4158 &exception); 4159 } while (exception.retry); 4160 4161 nfs4_label_release_security(label); 4162 4163 return err; 4164 } 4165 4166 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 4167 struct nfs_fsstat *fsstat) 4168 { 4169 struct nfs4_statfs_arg args = { 4170 .fh = fhandle, 4171 .bitmask = server->attr_bitmask, 4172 }; 4173 struct nfs4_statfs_res res = { 4174 .fsstat = fsstat, 4175 }; 4176 struct rpc_message msg = { 4177 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 4178 .rpc_argp = &args, 4179 .rpc_resp = &res, 4180 }; 4181 4182 nfs_fattr_init(fsstat->fattr); 4183 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4184 } 4185 4186 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 4187 { 4188 struct nfs4_exception exception = { }; 4189 int err; 4190 do { 4191 err = nfs4_handle_exception(server, 4192 _nfs4_proc_statfs(server, fhandle, fsstat), 4193 &exception); 4194 } while (exception.retry); 4195 return err; 4196 } 4197 4198 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 4199 struct nfs_fsinfo *fsinfo) 4200 { 4201 struct nfs4_fsinfo_arg args = { 4202 .fh = fhandle, 4203 .bitmask = server->attr_bitmask, 4204 }; 4205 struct nfs4_fsinfo_res res = { 4206 .fsinfo = fsinfo, 4207 }; 4208 struct rpc_message msg = { 4209 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 4210 .rpc_argp = &args, 4211 .rpc_resp = &res, 4212 }; 4213 4214 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4215 } 4216 4217 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4218 { 4219 struct nfs4_exception exception = { }; 4220 unsigned long now = jiffies; 4221 int err; 4222 4223 do { 4224 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 4225 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 4226 if (err == 0) { 4227 struct nfs_client *clp = server->nfs_client; 4228 4229 spin_lock(&clp->cl_lock); 4230 clp->cl_lease_time = fsinfo->lease_time * HZ; 4231 clp->cl_last_renewal = now; 4232 spin_unlock(&clp->cl_lock); 4233 break; 4234 } 4235 err = nfs4_handle_exception(server, err, &exception); 4236 } while (exception.retry); 4237 return err; 4238 } 4239 4240 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4241 { 4242 int error; 4243 4244 nfs_fattr_init(fsinfo->fattr); 4245 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 4246 if (error == 0) { 4247 /* block layout checks this! 
*/
4248 server->pnfs_blksize = fsinfo->blksize;
4249 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4250 }
4251
4252 return error;
4253 }
4254
4255 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4256 struct nfs_pathconf *pathconf)
4257 {
4258 struct nfs4_pathconf_arg args = {
4259 .fh = fhandle,
4260 .bitmask = server->attr_bitmask,
4261 };
4262 struct nfs4_pathconf_res res = {
4263 .pathconf = pathconf,
4264 };
4265 struct rpc_message msg = {
4266 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4267 .rpc_argp = &args,
4268 .rpc_resp = &res,
4269 };
4270
4271 /* None of the pathconf attributes are mandatory to implement */
4272 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4273 memset(pathconf, 0, sizeof(*pathconf));
4274 return 0;
4275 }
4276
4277 nfs_fattr_init(pathconf->fattr);
4278 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4279 }
4280
4281 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4282 struct nfs_pathconf *pathconf)
4283 {
4284 struct nfs4_exception exception = { };
4285 int err;
4286
4287 do {
4288 err = nfs4_handle_exception(server,
4289 _nfs4_proc_pathconf(server, fhandle, pathconf),
4290 &exception);
4291 } while (exception.retry);
4292 return err;
4293 }
4294
4295 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4296 const struct nfs_open_context *ctx,
4297 const struct nfs_lock_context *l_ctx,
4298 fmode_t fmode)
4299 {
4300 const struct nfs_lockowner *lockowner = NULL;
4301
4302 if (l_ctx != NULL)
4303 lockowner = &l_ctx->lockowner;
4304 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
4305 }
4306 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4307
4308 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4309 const struct nfs_open_context *ctx,
4310 const struct nfs_lock_context *l_ctx,
4311 fmode_t fmode)
4312 {
4313 nfs4_stateid current_stateid;
4314
4315 /* If the current stateid represents a lost lock, then exit */
4316 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4317 return true;
4318 return nfs4_stateid_match(stateid, &current_stateid);
4319 }
4320
4321 static bool nfs4_error_stateid_expired(int err)
4322 {
4323 switch (err) {
4324 case -NFS4ERR_DELEG_REVOKED:
4325 case -NFS4ERR_ADMIN_REVOKED:
4326 case -NFS4ERR_BAD_STATEID:
4327 case -NFS4ERR_STALE_STATEID:
4328 case -NFS4ERR_OLD_STATEID:
4329 case -NFS4ERR_OPENMODE:
4330 case -NFS4ERR_EXPIRED:
4331 return true;
4332 }
4333 return false;
4334 }
4335
4336 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4337 {
4338 nfs_invalidate_atime(hdr->inode);
4339 }
4340
4341 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4342 {
4343 struct nfs_server *server = NFS_SERVER(hdr->inode);
4344
4345 trace_nfs4_read(hdr, task->tk_status);
4346 if (nfs4_async_handle_error(task, server,
4347 hdr->args.context->state,
4348 NULL) == -EAGAIN) {
4349 rpc_restart_call_prepare(task);
4350 return -EAGAIN;
4351 }
4352
4353 __nfs4_read_done_cb(hdr);
4354 if (task->tk_status > 0)
4355 renew_lease(server, hdr->timestamp);
4356 return 0;
4357 }
4358
4359 static bool nfs4_read_stateid_changed(struct rpc_task *task,
4360 struct nfs_pgio_args *args)
4361 {
4362
4363 if (!nfs4_error_stateid_expired(task->tk_status) ||
4364 nfs4_stateid_is_current(&args->stateid,
4365 args->context,
4366 args->lock_context,
4367 FMODE_READ))
4368 return false;
4369 rpc_restart_call_prepare(task);
4370 return true;
4371 }
4372
4373 static int
nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4374 { 4375 4376 dprintk("--> %s\n", __func__); 4377 4378 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4379 return -EAGAIN; 4380 if (nfs4_read_stateid_changed(task, &hdr->args)) 4381 return -EAGAIN; 4382 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 4383 nfs4_read_done_cb(task, hdr); 4384 } 4385 4386 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 4387 struct rpc_message *msg) 4388 { 4389 hdr->timestamp = jiffies; 4390 hdr->pgio_done_cb = nfs4_read_done_cb; 4391 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 4392 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0); 4393 } 4394 4395 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 4396 struct nfs_pgio_header *hdr) 4397 { 4398 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode), 4399 &hdr->args.seq_args, 4400 &hdr->res.seq_res, 4401 task)) 4402 return 0; 4403 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 4404 hdr->args.lock_context, 4405 hdr->rw_ops->rw_mode) == -EIO) 4406 return -EIO; 4407 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 4408 return -EIO; 4409 return 0; 4410 } 4411 4412 static int nfs4_write_done_cb(struct rpc_task *task, 4413 struct nfs_pgio_header *hdr) 4414 { 4415 struct inode *inode = hdr->inode; 4416 4417 trace_nfs4_write(hdr, task->tk_status); 4418 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4419 hdr->args.context->state, 4420 NULL) == -EAGAIN) { 4421 rpc_restart_call_prepare(task); 4422 return -EAGAIN; 4423 } 4424 if (task->tk_status >= 0) { 4425 renew_lease(NFS_SERVER(inode), hdr->timestamp); 4426 nfs_writeback_update_inode(hdr); 4427 } 4428 return 0; 4429 } 4430 4431 static bool nfs4_write_stateid_changed(struct rpc_task *task, 4432 struct nfs_pgio_args *args) 4433 { 4434 4435 if (!nfs4_error_stateid_expired(task->tk_status) || 4436 nfs4_stateid_is_current(&args->stateid, 4437 args->context, 4438 args->lock_context, 4439 FMODE_WRITE)) 4440 return false; 4441 rpc_restart_call_prepare(task); 4442 return true; 4443 } 4444 4445 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4446 { 4447 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4448 return -EAGAIN; 4449 if (nfs4_write_stateid_changed(task, &hdr->args)) 4450 return -EAGAIN; 4451 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 4452 nfs4_write_done_cb(task, hdr); 4453 } 4454 4455 static 4456 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 4457 { 4458 /* Don't request attributes for pNFS or O_DIRECT writes */ 4459 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 4460 return false; 4461 /* Otherwise, request attributes if and only if we don't hold 4462 * a delegation 4463 */ 4464 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 4465 } 4466 4467 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 4468 struct rpc_message *msg) 4469 { 4470 struct nfs_server *server = NFS_SERVER(hdr->inode); 4471 4472 if (!nfs4_write_need_cache_consistency_data(hdr)) { 4473 hdr->args.bitmask = NULL; 4474 hdr->res.fattr = NULL; 4475 } else 4476 hdr->args.bitmask = server->cache_consistency_bitmask; 4477 4478 if (!hdr->pgio_done_cb) 4479 hdr->pgio_done_cb = nfs4_write_done_cb; 4480 hdr->res.server = server; 4481 hdr->timestamp = jiffies; 4482 4483 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 4484 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1); 4485 } 4486 4487 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 4488 { 4489 nfs4_setup_sequence(NFS_SERVER(data->inode), 4490 &data->args.seq_args, 4491 &data->res.seq_res, 4492 task); 4493 } 4494 4495 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 4496 { 4497 struct inode *inode = data->inode; 4498 4499 trace_nfs4_commit(data, task->tk_status); 4500 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4501 NULL, NULL) == -EAGAIN) { 4502 rpc_restart_call_prepare(task); 4503 return -EAGAIN; 4504 } 4505 return 0; 4506 } 4507 4508 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 4509 { 4510 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4511 return -EAGAIN; 4512 return data->commit_done_cb(task, data); 4513 } 4514 4515 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 4516 { 4517 struct nfs_server *server = NFS_SERVER(data->inode); 4518 4519 if (data->commit_done_cb == NULL) 4520 data->commit_done_cb = nfs4_commit_done_cb; 4521 data->res.server = server; 4522 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 4523 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4524 } 4525 4526 struct nfs4_renewdata { 4527 struct nfs_client *client; 4528 unsigned long timestamp; 4529 }; 4530 4531 /* 4532 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 4533 * standalone procedure for queueing an asynchronous RENEW. 4534 */ 4535 static void nfs4_renew_release(void *calldata) 4536 { 4537 struct nfs4_renewdata *data = calldata; 4538 struct nfs_client *clp = data->client; 4539 4540 if (atomic_read(&clp->cl_count) > 1) 4541 nfs4_schedule_state_renewal(clp); 4542 nfs_put_client(clp); 4543 kfree(data); 4544 } 4545 4546 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 4547 { 4548 struct nfs4_renewdata *data = calldata; 4549 struct nfs_client *clp = data->client; 4550 unsigned long timestamp = data->timestamp; 4551 4552 trace_nfs4_renew_async(clp, task->tk_status); 4553 switch (task->tk_status) { 4554 case 0: 4555 break; 4556 case -NFS4ERR_LEASE_MOVED: 4557 nfs4_schedule_lease_moved_recovery(clp); 4558 break; 4559 default: 4560 /* Unless we're shutting down, schedule state recovery! 
*/ 4561 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 4562 return; 4563 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 4564 nfs4_schedule_lease_recovery(clp); 4565 return; 4566 } 4567 nfs4_schedule_path_down_recovery(clp); 4568 } 4569 do_renew_lease(clp, timestamp); 4570 } 4571 4572 static const struct rpc_call_ops nfs4_renew_ops = { 4573 .rpc_call_done = nfs4_renew_done, 4574 .rpc_release = nfs4_renew_release, 4575 }; 4576 4577 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 4578 { 4579 struct rpc_message msg = { 4580 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4581 .rpc_argp = clp, 4582 .rpc_cred = cred, 4583 }; 4584 struct nfs4_renewdata *data; 4585 4586 if (renew_flags == 0) 4587 return 0; 4588 if (!atomic_inc_not_zero(&clp->cl_count)) 4589 return -EIO; 4590 data = kmalloc(sizeof(*data), GFP_NOFS); 4591 if (data == NULL) 4592 return -ENOMEM; 4593 data->client = clp; 4594 data->timestamp = jiffies; 4595 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 4596 &nfs4_renew_ops, data); 4597 } 4598 4599 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 4600 { 4601 struct rpc_message msg = { 4602 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4603 .rpc_argp = clp, 4604 .rpc_cred = cred, 4605 }; 4606 unsigned long now = jiffies; 4607 int status; 4608 4609 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4610 if (status < 0) 4611 return status; 4612 do_renew_lease(clp, now); 4613 return 0; 4614 } 4615 4616 static inline int nfs4_server_supports_acls(struct nfs_server *server) 4617 { 4618 return server->caps & NFS_CAP_ACLS; 4619 } 4620 4621 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 4622 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 4623 * the stack. 
4624 */ 4625 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 4626 4627 static int buf_to_pages_noslab(const void *buf, size_t buflen, 4628 struct page **pages) 4629 { 4630 struct page *newpage, **spages; 4631 int rc = 0; 4632 size_t len; 4633 spages = pages; 4634 4635 do { 4636 len = min_t(size_t, PAGE_SIZE, buflen); 4637 newpage = alloc_page(GFP_KERNEL); 4638 4639 if (newpage == NULL) 4640 goto unwind; 4641 memcpy(page_address(newpage), buf, len); 4642 buf += len; 4643 buflen -= len; 4644 *pages++ = newpage; 4645 rc++; 4646 } while (buflen != 0); 4647 4648 return rc; 4649 4650 unwind: 4651 for(; rc > 0; rc--) 4652 __free_page(spages[rc-1]); 4653 return -ENOMEM; 4654 } 4655 4656 struct nfs4_cached_acl { 4657 int cached; 4658 size_t len; 4659 char data[0]; 4660 }; 4661 4662 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 4663 { 4664 struct nfs_inode *nfsi = NFS_I(inode); 4665 4666 spin_lock(&inode->i_lock); 4667 kfree(nfsi->nfs4_acl); 4668 nfsi->nfs4_acl = acl; 4669 spin_unlock(&inode->i_lock); 4670 } 4671 4672 static void nfs4_zap_acl_attr(struct inode *inode) 4673 { 4674 nfs4_set_cached_acl(inode, NULL); 4675 } 4676 4677 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 4678 { 4679 struct nfs_inode *nfsi = NFS_I(inode); 4680 struct nfs4_cached_acl *acl; 4681 int ret = -ENOENT; 4682 4683 spin_lock(&inode->i_lock); 4684 acl = nfsi->nfs4_acl; 4685 if (acl == NULL) 4686 goto out; 4687 if (buf == NULL) /* user is just asking for length */ 4688 goto out_len; 4689 if (acl->cached == 0) 4690 goto out; 4691 ret = -ERANGE; /* see getxattr(2) man page */ 4692 if (acl->len > buflen) 4693 goto out; 4694 memcpy(buf, acl->data, acl->len); 4695 out_len: 4696 ret = acl->len; 4697 out: 4698 spin_unlock(&inode->i_lock); 4699 return ret; 4700 } 4701 4702 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 4703 { 4704 struct nfs4_cached_acl *acl; 4705 size_t buflen = sizeof(*acl) + acl_len; 4706 4707 if (buflen <= PAGE_SIZE) { 4708 acl = kmalloc(buflen, GFP_KERNEL); 4709 if (acl == NULL) 4710 goto out; 4711 acl->cached = 1; 4712 _copy_from_pages(acl->data, pages, pgbase, acl_len); 4713 } else { 4714 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 4715 if (acl == NULL) 4716 goto out; 4717 acl->cached = 0; 4718 } 4719 acl->len = acl_len; 4720 out: 4721 nfs4_set_cached_acl(inode, acl); 4722 } 4723 4724 /* 4725 * The getxattr API returns the required buffer length when called with a 4726 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 4727 * the required buf. On a NULL buf, we send a page of data to the server 4728 * guessing that the ACL request can be serviced by a page. If so, we cache 4729 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 4730 * the cache. If not so, we throw away the page, and cache the required 4731 * length. The next getxattr call will then produce another round trip to 4732 * the server, this time with the input buf of the required size. 
4733 */ 4734 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4735 { 4736 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 4737 struct nfs_getaclargs args = { 4738 .fh = NFS_FH(inode), 4739 .acl_pages = pages, 4740 .acl_len = buflen, 4741 }; 4742 struct nfs_getaclres res = { 4743 .acl_len = buflen, 4744 }; 4745 struct rpc_message msg = { 4746 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 4747 .rpc_argp = &args, 4748 .rpc_resp = &res, 4749 }; 4750 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4751 int ret = -ENOMEM, i; 4752 4753 /* As long as we're doing a round trip to the server anyway, 4754 * let's be prepared for a page of acl data. */ 4755 if (npages == 0) 4756 npages = 1; 4757 if (npages > ARRAY_SIZE(pages)) 4758 return -ERANGE; 4759 4760 for (i = 0; i < npages; i++) { 4761 pages[i] = alloc_page(GFP_KERNEL); 4762 if (!pages[i]) 4763 goto out_free; 4764 } 4765 4766 /* for decoding across pages */ 4767 res.acl_scratch = alloc_page(GFP_KERNEL); 4768 if (!res.acl_scratch) 4769 goto out_free; 4770 4771 args.acl_len = npages * PAGE_SIZE; 4772 4773 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 4774 __func__, buf, buflen, npages, args.acl_len); 4775 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 4776 &msg, &args.seq_args, &res.seq_res, 0); 4777 if (ret) 4778 goto out_free; 4779 4780 /* Handle the case where the passed-in buffer is too short */ 4781 if (res.acl_flags & NFS4_ACL_TRUNC) { 4782 /* Did the user only issue a request for the acl length? */ 4783 if (buf == NULL) 4784 goto out_ok; 4785 ret = -ERANGE; 4786 goto out_free; 4787 } 4788 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 4789 if (buf) { 4790 if (res.acl_len > buflen) { 4791 ret = -ERANGE; 4792 goto out_free; 4793 } 4794 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 4795 } 4796 out_ok: 4797 ret = res.acl_len; 4798 out_free: 4799 for (i = 0; i < npages; i++) 4800 if (pages[i]) 4801 __free_page(pages[i]); 4802 if (res.acl_scratch) 4803 __free_page(res.acl_scratch); 4804 return ret; 4805 } 4806 4807 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4808 { 4809 struct nfs4_exception exception = { }; 4810 ssize_t ret; 4811 do { 4812 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 4813 trace_nfs4_get_acl(inode, ret); 4814 if (ret >= 0) 4815 break; 4816 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 4817 } while (exception.retry); 4818 return ret; 4819 } 4820 4821 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 4822 { 4823 struct nfs_server *server = NFS_SERVER(inode); 4824 int ret; 4825 4826 if (!nfs4_server_supports_acls(server)) 4827 return -EOPNOTSUPP; 4828 ret = nfs_revalidate_inode(server, inode); 4829 if (ret < 0) 4830 return ret; 4831 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 4832 nfs_zap_acl_cache(inode); 4833 ret = nfs4_read_cached_acl(inode, buf, buflen); 4834 if (ret != -ENOENT) 4835 /* -ENOENT is returned if there is no ACL or if there is an ACL 4836 * but no cached acl data, just the acl length */ 4837 return ret; 4838 return nfs4_get_acl_uncached(inode, buf, buflen); 4839 } 4840 4841 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4842 { 4843 struct nfs_server *server = NFS_SERVER(inode); 4844 struct page *pages[NFS4ACL_MAXPAGES]; 4845 struct nfs_setaclargs arg = { 4846 .fh = NFS_FH(inode), 4847 .acl_pages = pages, 4848 .acl_len = buflen, 4849 }; 4850 struct 
nfs_setaclres res; 4851 struct rpc_message msg = { 4852 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 4853 .rpc_argp = &arg, 4854 .rpc_resp = &res, 4855 }; 4856 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4857 int ret, i; 4858 4859 if (!nfs4_server_supports_acls(server)) 4860 return -EOPNOTSUPP; 4861 if (npages > ARRAY_SIZE(pages)) 4862 return -ERANGE; 4863 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages); 4864 if (i < 0) 4865 return i; 4866 nfs4_inode_return_delegation(inode); 4867 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4868 4869 /* 4870 * Free each page after tx, so the only ref left is 4871 * held by the network stack 4872 */ 4873 for (; i > 0; i--) 4874 put_page(pages[i-1]); 4875 4876 /* 4877 * Acl update can result in inode attribute update. 4878 * so mark the attribute cache invalid. 4879 */ 4880 spin_lock(&inode->i_lock); 4881 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 4882 spin_unlock(&inode->i_lock); 4883 nfs_access_zap_cache(inode); 4884 nfs_zap_acl_cache(inode); 4885 return ret; 4886 } 4887 4888 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4889 { 4890 struct nfs4_exception exception = { }; 4891 int err; 4892 do { 4893 err = __nfs4_proc_set_acl(inode, buf, buflen); 4894 trace_nfs4_set_acl(inode, err); 4895 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4896 &exception); 4897 } while (exception.retry); 4898 return err; 4899 } 4900 4901 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 4902 static int _nfs4_get_security_label(struct inode *inode, void *buf, 4903 size_t buflen) 4904 { 4905 struct nfs_server *server = NFS_SERVER(inode); 4906 struct nfs_fattr fattr; 4907 struct nfs4_label label = {0, 0, buflen, buf}; 4908 4909 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4910 struct nfs4_getattr_arg arg = { 4911 .fh = NFS_FH(inode), 4912 .bitmask = bitmask, 4913 }; 4914 struct nfs4_getattr_res res = { 4915 .fattr = &fattr, 4916 .label = &label, 4917 .server = server, 4918 }; 4919 struct rpc_message msg = { 4920 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4921 .rpc_argp = &arg, 4922 .rpc_resp = &res, 4923 }; 4924 int ret; 4925 4926 nfs_fattr_init(&fattr); 4927 4928 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 4929 if (ret) 4930 return ret; 4931 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 4932 return -ENOENT; 4933 if (buflen < label.len) 4934 return -ERANGE; 4935 return 0; 4936 } 4937 4938 static int nfs4_get_security_label(struct inode *inode, void *buf, 4939 size_t buflen) 4940 { 4941 struct nfs4_exception exception = { }; 4942 int err; 4943 4944 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 4945 return -EOPNOTSUPP; 4946 4947 do { 4948 err = _nfs4_get_security_label(inode, buf, buflen); 4949 trace_nfs4_get_security_label(inode, err); 4950 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4951 &exception); 4952 } while (exception.retry); 4953 return err; 4954 } 4955 4956 static int _nfs4_do_set_security_label(struct inode *inode, 4957 struct nfs4_label *ilabel, 4958 struct nfs_fattr *fattr, 4959 struct nfs4_label *olabel) 4960 { 4961 4962 struct iattr sattr = {0}; 4963 struct nfs_server *server = NFS_SERVER(inode); 4964 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4965 struct nfs_setattrargs arg = { 4966 .fh = NFS_FH(inode), 4967 .iap = &sattr, 4968 .server = server, 4969 .bitmask = bitmask, 4970 .label = ilabel, 4971 }; 4972 struct nfs_setattrres res = { 4973 .fattr = fattr, 4974 
.label = olabel, 4975 .server = server, 4976 }; 4977 struct rpc_message msg = { 4978 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 4979 .rpc_argp = &arg, 4980 .rpc_resp = &res, 4981 }; 4982 int status; 4983 4984 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 4985 4986 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4987 if (status) 4988 dprintk("%s failed: %d\n", __func__, status); 4989 4990 return status; 4991 } 4992 4993 static int nfs4_do_set_security_label(struct inode *inode, 4994 struct nfs4_label *ilabel, 4995 struct nfs_fattr *fattr, 4996 struct nfs4_label *olabel) 4997 { 4998 struct nfs4_exception exception = { }; 4999 int err; 5000 5001 do { 5002 err = _nfs4_do_set_security_label(inode, ilabel, 5003 fattr, olabel); 5004 trace_nfs4_set_security_label(inode, err); 5005 err = nfs4_handle_exception(NFS_SERVER(inode), err, 5006 &exception); 5007 } while (exception.retry); 5008 return err; 5009 } 5010 5011 static int 5012 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen) 5013 { 5014 struct nfs4_label ilabel, *olabel = NULL; 5015 struct nfs_fattr fattr; 5016 struct rpc_cred *cred; 5017 struct inode *inode = d_inode(dentry); 5018 int status; 5019 5020 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 5021 return -EOPNOTSUPP; 5022 5023 nfs_fattr_init(&fattr); 5024 5025 ilabel.pi = 0; 5026 ilabel.lfs = 0; 5027 ilabel.label = (char *)buf; 5028 ilabel.len = buflen; 5029 5030 cred = rpc_lookup_cred(); 5031 if (IS_ERR(cred)) 5032 return PTR_ERR(cred); 5033 5034 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 5035 if (IS_ERR(olabel)) { 5036 status = -PTR_ERR(olabel); 5037 goto out; 5038 } 5039 5040 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel); 5041 if (status == 0) 5042 nfs_setsecurity(inode, &fattr, olabel); 5043 5044 nfs4_label_free(olabel); 5045 out: 5046 put_rpccred(cred); 5047 return status; 5048 } 5049 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 5050 5051 5052 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 5053 nfs4_verifier *bootverf) 5054 { 5055 __be32 verf[2]; 5056 5057 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 5058 /* An impossible timestamp guarantees this value 5059 * will never match a generated boot time. */ 5060 verf[0] = 0; 5061 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1); 5062 } else { 5063 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 5064 verf[0] = cpu_to_be32(nn->boot_time.tv_sec); 5065 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec); 5066 } 5067 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 5068 } 5069 5070 static int 5071 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 5072 { 5073 size_t len; 5074 char *str; 5075 5076 if (clp->cl_owner_id != NULL) 5077 return 0; 5078 5079 rcu_read_lock(); 5080 len = 14 + strlen(clp->cl_ipaddr) + 1 + 5081 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 5082 1 + 5083 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) + 5084 1; 5085 rcu_read_unlock(); 5086 5087 if (len > NFS4_OPAQUE_LIMIT + 1) 5088 return -EINVAL; 5089 5090 /* 5091 * Since this string is allocated at mount time, and held until the 5092 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5093 * about a memory-reclaim deadlock. 
5094 */ 5095 str = kmalloc(len, GFP_KERNEL); 5096 if (!str) 5097 return -ENOMEM; 5098 5099 rcu_read_lock(); 5100 scnprintf(str, len, "Linux NFSv4.0 %s/%s %s", 5101 clp->cl_ipaddr, 5102 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR), 5103 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)); 5104 rcu_read_unlock(); 5105 5106 clp->cl_owner_id = str; 5107 return 0; 5108 } 5109 5110 static int 5111 nfs4_init_uniquifier_client_string(struct nfs_client *clp) 5112 { 5113 size_t len; 5114 char *str; 5115 5116 len = 10 + 10 + 1 + 10 + 1 + 5117 strlen(nfs4_client_id_uniquifier) + 1 + 5118 strlen(clp->cl_rpcclient->cl_nodename) + 1; 5119 5120 if (len > NFS4_OPAQUE_LIMIT + 1) 5121 return -EINVAL; 5122 5123 /* 5124 * Since this string is allocated at mount time, and held until the 5125 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5126 * about a memory-reclaim deadlock. 5127 */ 5128 str = kmalloc(len, GFP_KERNEL); 5129 if (!str) 5130 return -ENOMEM; 5131 5132 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 5133 clp->rpc_ops->version, clp->cl_minorversion, 5134 nfs4_client_id_uniquifier, 5135 clp->cl_rpcclient->cl_nodename); 5136 clp->cl_owner_id = str; 5137 return 0; 5138 } 5139 5140 static int 5141 nfs4_init_uniform_client_string(struct nfs_client *clp) 5142 { 5143 size_t len; 5144 char *str; 5145 5146 if (clp->cl_owner_id != NULL) 5147 return 0; 5148 5149 if (nfs4_client_id_uniquifier[0] != '\0') 5150 return nfs4_init_uniquifier_client_string(clp); 5151 5152 len = 10 + 10 + 1 + 10 + 1 + 5153 strlen(clp->cl_rpcclient->cl_nodename) + 1; 5154 5155 if (len > NFS4_OPAQUE_LIMIT + 1) 5156 return -EINVAL; 5157 5158 /* 5159 * Since this string is allocated at mount time, and held until the 5160 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5161 * about a memory-reclaim deadlock. 5162 */ 5163 str = kmalloc(len, GFP_KERNEL); 5164 if (!str) 5165 return -ENOMEM; 5166 5167 scnprintf(str, len, "Linux NFSv%u.%u %s", 5168 clp->rpc_ops->version, clp->cl_minorversion, 5169 clp->cl_rpcclient->cl_nodename); 5170 clp->cl_owner_id = str; 5171 return 0; 5172 } 5173 5174 /* 5175 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 5176 * services. Advertise one based on the address family of the 5177 * clientaddr. 5178 */ 5179 static unsigned int 5180 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 5181 { 5182 if (strchr(clp->cl_ipaddr, ':') != NULL) 5183 return scnprintf(buf, len, "tcp6"); 5184 else 5185 return scnprintf(buf, len, "tcp"); 5186 } 5187 5188 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 5189 { 5190 struct nfs4_setclientid *sc = calldata; 5191 5192 if (task->tk_status == 0) 5193 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 5194 } 5195 5196 static const struct rpc_call_ops nfs4_setclientid_ops = { 5197 .rpc_call_done = nfs4_setclientid_done, 5198 }; 5199 5200 /** 5201 * nfs4_proc_setclientid - Negotiate client ID 5202 * @clp: state data structure 5203 * @program: RPC program for NFSv4 callback service 5204 * @port: IP port number for NFS4 callback service 5205 * @cred: RPC credential to use for this call 5206 * @res: where to place the result 5207 * 5208 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
5209 */ 5210 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 5211 unsigned short port, struct rpc_cred *cred, 5212 struct nfs4_setclientid_res *res) 5213 { 5214 nfs4_verifier sc_verifier; 5215 struct nfs4_setclientid setclientid = { 5216 .sc_verifier = &sc_verifier, 5217 .sc_prog = program, 5218 .sc_clnt = clp, 5219 }; 5220 struct rpc_message msg = { 5221 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 5222 .rpc_argp = &setclientid, 5223 .rpc_resp = res, 5224 .rpc_cred = cred, 5225 }; 5226 struct rpc_task *task; 5227 struct rpc_task_setup task_setup_data = { 5228 .rpc_client = clp->cl_rpcclient, 5229 .rpc_message = &msg, 5230 .callback_ops = &nfs4_setclientid_ops, 5231 .callback_data = &setclientid, 5232 .flags = RPC_TASK_TIMEOUT, 5233 }; 5234 int status; 5235 5236 /* nfs_client_id4 */ 5237 nfs4_init_boot_verifier(clp, &sc_verifier); 5238 5239 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 5240 status = nfs4_init_uniform_client_string(clp); 5241 else 5242 status = nfs4_init_nonuniform_client_string(clp); 5243 5244 if (status) 5245 goto out; 5246 5247 /* cb_client4 */ 5248 setclientid.sc_netid_len = 5249 nfs4_init_callback_netid(clp, 5250 setclientid.sc_netid, 5251 sizeof(setclientid.sc_netid)); 5252 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 5253 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 5254 clp->cl_ipaddr, port >> 8, port & 255); 5255 5256 dprintk("NFS call setclientid auth=%s, '%s'\n", 5257 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5258 clp->cl_owner_id); 5259 task = rpc_run_task(&task_setup_data); 5260 if (IS_ERR(task)) { 5261 status = PTR_ERR(task); 5262 goto out; 5263 } 5264 status = task->tk_status; 5265 if (setclientid.sc_cred) { 5266 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 5267 put_rpccred(setclientid.sc_cred); 5268 } 5269 rpc_put_task(task); 5270 out: 5271 trace_nfs4_setclientid(clp, status); 5272 dprintk("NFS reply setclientid: %d\n", status); 5273 return status; 5274 } 5275 5276 /** 5277 * nfs4_proc_setclientid_confirm - Confirm client ID 5278 * @clp: state data structure 5279 * @res: result of a previous SETCLIENTID 5280 * @cred: RPC credential to use for this call 5281 * 5282 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
5283 */ 5284 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 5285 struct nfs4_setclientid_res *arg, 5286 struct rpc_cred *cred) 5287 { 5288 struct rpc_message msg = { 5289 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 5290 .rpc_argp = arg, 5291 .rpc_cred = cred, 5292 }; 5293 int status; 5294 5295 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 5296 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5297 clp->cl_clientid); 5298 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5299 trace_nfs4_setclientid_confirm(clp, status); 5300 dprintk("NFS reply setclientid_confirm: %d\n", status); 5301 return status; 5302 } 5303 5304 struct nfs4_delegreturndata { 5305 struct nfs4_delegreturnargs args; 5306 struct nfs4_delegreturnres res; 5307 struct nfs_fh fh; 5308 nfs4_stateid stateid; 5309 unsigned long timestamp; 5310 struct nfs_fattr fattr; 5311 int rpc_status; 5312 struct inode *inode; 5313 bool roc; 5314 u32 roc_barrier; 5315 }; 5316 5317 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 5318 { 5319 struct nfs4_delegreturndata *data = calldata; 5320 5321 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5322 return; 5323 5324 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 5325 switch (task->tk_status) { 5326 case 0: 5327 renew_lease(data->res.server, data->timestamp); 5328 case -NFS4ERR_ADMIN_REVOKED: 5329 case -NFS4ERR_DELEG_REVOKED: 5330 case -NFS4ERR_BAD_STATEID: 5331 case -NFS4ERR_OLD_STATEID: 5332 case -NFS4ERR_STALE_STATEID: 5333 case -NFS4ERR_EXPIRED: 5334 task->tk_status = 0; 5335 if (data->roc) 5336 pnfs_roc_set_barrier(data->inode, data->roc_barrier); 5337 break; 5338 default: 5339 if (nfs4_async_handle_error(task, data->res.server, 5340 NULL, NULL) == -EAGAIN) { 5341 rpc_restart_call_prepare(task); 5342 return; 5343 } 5344 } 5345 data->rpc_status = task->tk_status; 5346 } 5347 5348 static void nfs4_delegreturn_release(void *calldata) 5349 { 5350 struct nfs4_delegreturndata *data = calldata; 5351 struct inode *inode = data->inode; 5352 5353 if (inode) { 5354 if (data->roc) 5355 pnfs_roc_release(inode); 5356 nfs_iput_and_deactive(inode); 5357 } 5358 kfree(calldata); 5359 } 5360 5361 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 5362 { 5363 struct nfs4_delegreturndata *d_data; 5364 5365 d_data = (struct nfs4_delegreturndata *)data; 5366 5367 if (nfs4_wait_on_layoutreturn(d_data->inode, task)) 5368 return; 5369 5370 if (d_data->roc) 5371 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier); 5372 5373 nfs4_setup_sequence(d_data->res.server, 5374 &d_data->args.seq_args, 5375 &d_data->res.seq_res, 5376 task); 5377 } 5378 5379 static const struct rpc_call_ops nfs4_delegreturn_ops = { 5380 .rpc_call_prepare = nfs4_delegreturn_prepare, 5381 .rpc_call_done = nfs4_delegreturn_done, 5382 .rpc_release = nfs4_delegreturn_release, 5383 }; 5384 5385 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5386 { 5387 struct nfs4_delegreturndata *data; 5388 struct nfs_server *server = NFS_SERVER(inode); 5389 struct rpc_task *task; 5390 struct rpc_message msg = { 5391 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 5392 .rpc_cred = cred, 5393 }; 5394 struct rpc_task_setup task_setup_data = { 5395 .rpc_client = server->client, 5396 .rpc_message = &msg, 5397 .callback_ops = &nfs4_delegreturn_ops, 5398 .flags = RPC_TASK_ASYNC, 5399 }; 5400 int status = 0; 5401 5402 data = kzalloc(sizeof(*data), 
GFP_NOFS); 5403 if (data == NULL) 5404 return -ENOMEM; 5405 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 5406 5407 nfs4_state_protect(server->nfs_client, 5408 NFS_SP4_MACH_CRED_CLEANUP, 5409 &task_setup_data.rpc_client, &msg); 5410 5411 data->args.fhandle = &data->fh; 5412 data->args.stateid = &data->stateid; 5413 data->args.bitmask = server->cache_consistency_bitmask; 5414 nfs_copy_fh(&data->fh, NFS_FH(inode)); 5415 nfs4_stateid_copy(&data->stateid, stateid); 5416 data->res.fattr = &data->fattr; 5417 data->res.server = server; 5418 nfs_fattr_init(data->res.fattr); 5419 data->timestamp = jiffies; 5420 data->rpc_status = 0; 5421 data->inode = nfs_igrab_and_active(inode); 5422 if (data->inode) 5423 data->roc = nfs4_roc(inode); 5424 5425 task_setup_data.callback_data = data; 5426 msg.rpc_argp = &data->args; 5427 msg.rpc_resp = &data->res; 5428 task = rpc_run_task(&task_setup_data); 5429 if (IS_ERR(task)) 5430 return PTR_ERR(task); 5431 if (!issync) 5432 goto out; 5433 status = nfs4_wait_for_completion_rpc_task(task); 5434 if (status != 0) 5435 goto out; 5436 status = data->rpc_status; 5437 if (status == 0) 5438 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 5439 else 5440 nfs_refresh_inode(inode, &data->fattr); 5441 out: 5442 rpc_put_task(task); 5443 return status; 5444 } 5445 5446 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5447 { 5448 struct nfs_server *server = NFS_SERVER(inode); 5449 struct nfs4_exception exception = { }; 5450 int err; 5451 do { 5452 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 5453 trace_nfs4_delegreturn(inode, stateid, err); 5454 switch (err) { 5455 case -NFS4ERR_STALE_STATEID: 5456 case -NFS4ERR_EXPIRED: 5457 case 0: 5458 return 0; 5459 } 5460 err = nfs4_handle_exception(server, err, &exception); 5461 } while (exception.retry); 5462 return err; 5463 } 5464 5465 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 5466 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 5467 5468 /* 5469 * sleep, with exponential backoff, and retry the LOCK operation. 
5470 */ 5471 static unsigned long 5472 nfs4_set_lock_task_retry(unsigned long timeout) 5473 { 5474 freezable_schedule_timeout_killable_unsafe(timeout); 5475 timeout <<= 1; 5476 if (timeout > NFS4_LOCK_MAXTIMEOUT) 5477 return NFS4_LOCK_MAXTIMEOUT; 5478 return timeout; 5479 } 5480 5481 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5482 { 5483 struct inode *inode = state->inode; 5484 struct nfs_server *server = NFS_SERVER(inode); 5485 struct nfs_client *clp = server->nfs_client; 5486 struct nfs_lockt_args arg = { 5487 .fh = NFS_FH(inode), 5488 .fl = request, 5489 }; 5490 struct nfs_lockt_res res = { 5491 .denied = request, 5492 }; 5493 struct rpc_message msg = { 5494 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 5495 .rpc_argp = &arg, 5496 .rpc_resp = &res, 5497 .rpc_cred = state->owner->so_cred, 5498 }; 5499 struct nfs4_lock_state *lsp; 5500 int status; 5501 5502 arg.lock_owner.clientid = clp->cl_clientid; 5503 status = nfs4_set_lock_state(state, request); 5504 if (status != 0) 5505 goto out; 5506 lsp = request->fl_u.nfs4_fl.owner; 5507 arg.lock_owner.id = lsp->ls_seqid.owner_id; 5508 arg.lock_owner.s_dev = server->s_dev; 5509 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5510 switch (status) { 5511 case 0: 5512 request->fl_type = F_UNLCK; 5513 break; 5514 case -NFS4ERR_DENIED: 5515 status = 0; 5516 } 5517 request->fl_ops->fl_release_private(request); 5518 request->fl_ops = NULL; 5519 out: 5520 return status; 5521 } 5522 5523 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5524 { 5525 struct nfs4_exception exception = { }; 5526 int err; 5527 5528 do { 5529 err = _nfs4_proc_getlk(state, cmd, request); 5530 trace_nfs4_get_lock(request, state, cmd, err); 5531 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 5532 &exception); 5533 } while (exception.retry); 5534 return err; 5535 } 5536 5537 static int do_vfs_lock(struct inode *inode, struct file_lock *fl) 5538 { 5539 return locks_lock_inode_wait(inode, fl); 5540 } 5541 5542 struct nfs4_unlockdata { 5543 struct nfs_locku_args arg; 5544 struct nfs_locku_res res; 5545 struct nfs4_lock_state *lsp; 5546 struct nfs_open_context *ctx; 5547 struct file_lock fl; 5548 struct nfs_server *server; 5549 unsigned long timestamp; 5550 }; 5551 5552 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 5553 struct nfs_open_context *ctx, 5554 struct nfs4_lock_state *lsp, 5555 struct nfs_seqid *seqid) 5556 { 5557 struct nfs4_unlockdata *p; 5558 struct inode *inode = lsp->ls_state->inode; 5559 5560 p = kzalloc(sizeof(*p), GFP_NOFS); 5561 if (p == NULL) 5562 return NULL; 5563 p->arg.fh = NFS_FH(inode); 5564 p->arg.fl = &p->fl; 5565 p->arg.seqid = seqid; 5566 p->res.seqid = seqid; 5567 p->lsp = lsp; 5568 atomic_inc(&lsp->ls_count); 5569 /* Ensure we don't close file until we're done freeing locks! 
*/ 5570 p->ctx = get_nfs_open_context(ctx); 5571 memcpy(&p->fl, fl, sizeof(p->fl)); 5572 p->server = NFS_SERVER(inode); 5573 return p; 5574 } 5575 5576 static void nfs4_locku_release_calldata(void *data) 5577 { 5578 struct nfs4_unlockdata *calldata = data; 5579 nfs_free_seqid(calldata->arg.seqid); 5580 nfs4_put_lock_state(calldata->lsp); 5581 put_nfs_open_context(calldata->ctx); 5582 kfree(calldata); 5583 } 5584 5585 static void nfs4_locku_done(struct rpc_task *task, void *data) 5586 { 5587 struct nfs4_unlockdata *calldata = data; 5588 5589 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 5590 return; 5591 switch (task->tk_status) { 5592 case 0: 5593 renew_lease(calldata->server, calldata->timestamp); 5594 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl); 5595 if (nfs4_update_lock_stateid(calldata->lsp, 5596 &calldata->res.stateid)) 5597 break; 5598 case -NFS4ERR_BAD_STATEID: 5599 case -NFS4ERR_OLD_STATEID: 5600 case -NFS4ERR_STALE_STATEID: 5601 case -NFS4ERR_EXPIRED: 5602 if (!nfs4_stateid_match(&calldata->arg.stateid, 5603 &calldata->lsp->ls_stateid)) 5604 rpc_restart_call_prepare(task); 5605 break; 5606 default: 5607 if (nfs4_async_handle_error(task, calldata->server, 5608 NULL, NULL) == -EAGAIN) 5609 rpc_restart_call_prepare(task); 5610 } 5611 nfs_release_seqid(calldata->arg.seqid); 5612 } 5613 5614 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 5615 { 5616 struct nfs4_unlockdata *calldata = data; 5617 5618 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 5619 goto out_wait; 5620 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid); 5621 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 5622 /* Note: exit _without_ running nfs4_locku_done */ 5623 goto out_no_action; 5624 } 5625 calldata->timestamp = jiffies; 5626 if (nfs4_setup_sequence(calldata->server, 5627 &calldata->arg.seq_args, 5628 &calldata->res.seq_res, 5629 task) != 0) 5630 nfs_release_seqid(calldata->arg.seqid); 5631 return; 5632 out_no_action: 5633 task->tk_action = NULL; 5634 out_wait: 5635 nfs4_sequence_done(task, &calldata->res.seq_res); 5636 } 5637 5638 static const struct rpc_call_ops nfs4_locku_ops = { 5639 .rpc_call_prepare = nfs4_locku_prepare, 5640 .rpc_call_done = nfs4_locku_done, 5641 .rpc_release = nfs4_locku_release_calldata, 5642 }; 5643 5644 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 5645 struct nfs_open_context *ctx, 5646 struct nfs4_lock_state *lsp, 5647 struct nfs_seqid *seqid) 5648 { 5649 struct nfs4_unlockdata *data; 5650 struct rpc_message msg = { 5651 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 5652 .rpc_cred = ctx->cred, 5653 }; 5654 struct rpc_task_setup task_setup_data = { 5655 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 5656 .rpc_message = &msg, 5657 .callback_ops = &nfs4_locku_ops, 5658 .workqueue = nfsiod_workqueue, 5659 .flags = RPC_TASK_ASYNC, 5660 }; 5661 5662 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 5663 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 5664 5665 /* Ensure this is an unlock - when canceling a lock, the 5666 * canceled lock is passed in, and it won't be an unlock. 
5667 */ 5668 fl->fl_type = F_UNLCK; 5669 5670 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 5671 if (data == NULL) { 5672 nfs_free_seqid(seqid); 5673 return ERR_PTR(-ENOMEM); 5674 } 5675 5676 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5677 msg.rpc_argp = &data->arg; 5678 msg.rpc_resp = &data->res; 5679 task_setup_data.callback_data = data; 5680 return rpc_run_task(&task_setup_data); 5681 } 5682 5683 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 5684 { 5685 struct inode *inode = state->inode; 5686 struct nfs4_state_owner *sp = state->owner; 5687 struct nfs_inode *nfsi = NFS_I(inode); 5688 struct nfs_seqid *seqid; 5689 struct nfs4_lock_state *lsp; 5690 struct rpc_task *task; 5691 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5692 int status = 0; 5693 unsigned char fl_flags = request->fl_flags; 5694 5695 status = nfs4_set_lock_state(state, request); 5696 /* Unlock _before_ we do the RPC call */ 5697 request->fl_flags |= FL_EXISTS; 5698 /* Exclude nfs_delegation_claim_locks() */ 5699 mutex_lock(&sp->so_delegreturn_mutex); 5700 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 5701 down_read(&nfsi->rwsem); 5702 if (do_vfs_lock(inode, request) == -ENOENT) { 5703 up_read(&nfsi->rwsem); 5704 mutex_unlock(&sp->so_delegreturn_mutex); 5705 goto out; 5706 } 5707 up_read(&nfsi->rwsem); 5708 mutex_unlock(&sp->so_delegreturn_mutex); 5709 if (status != 0) 5710 goto out; 5711 /* Is this a delegated lock? */ 5712 lsp = request->fl_u.nfs4_fl.owner; 5713 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 5714 goto out; 5715 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 5716 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 5717 status = -ENOMEM; 5718 if (IS_ERR(seqid)) 5719 goto out; 5720 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 5721 status = PTR_ERR(task); 5722 if (IS_ERR(task)) 5723 goto out; 5724 status = nfs4_wait_for_completion_rpc_task(task); 5725 rpc_put_task(task); 5726 out: 5727 request->fl_flags = fl_flags; 5728 trace_nfs4_unlock(request, state, F_SETLK, status); 5729 return status; 5730 } 5731 5732 struct nfs4_lockdata { 5733 struct nfs_lock_args arg; 5734 struct nfs_lock_res res; 5735 struct nfs4_lock_state *lsp; 5736 struct nfs_open_context *ctx; 5737 struct file_lock fl; 5738 unsigned long timestamp; 5739 int rpc_status; 5740 int cancelled; 5741 struct nfs_server *server; 5742 }; 5743 5744 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 5745 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 5746 gfp_t gfp_mask) 5747 { 5748 struct nfs4_lockdata *p; 5749 struct inode *inode = lsp->ls_state->inode; 5750 struct nfs_server *server = NFS_SERVER(inode); 5751 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5752 5753 p = kzalloc(sizeof(*p), gfp_mask); 5754 if (p == NULL) 5755 return NULL; 5756 5757 p->arg.fh = NFS_FH(inode); 5758 p->arg.fl = &p->fl; 5759 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 5760 if (IS_ERR(p->arg.open_seqid)) 5761 goto out_free; 5762 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 5763 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 5764 if (IS_ERR(p->arg.lock_seqid)) 5765 goto out_free_seqid; 5766 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 5767 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 5768 p->arg.lock_owner.s_dev = server->s_dev; 5769 p->res.lock_seqid = p->arg.lock_seqid; 5770 
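/*
 * Pin everything the asynchronous LOCK call will need: the lock state
 * and open context are reference-counted, and the struct file is held
 * so that a later cancel/unlock can still reach the open context.
 */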
p->lsp = lsp; 5771 p->server = server; 5772 atomic_inc(&lsp->ls_count); 5773 p->ctx = get_nfs_open_context(ctx); 5774 get_file(fl->fl_file); 5775 memcpy(&p->fl, fl, sizeof(p->fl)); 5776 return p; 5777 out_free_seqid: 5778 nfs_free_seqid(p->arg.open_seqid); 5779 out_free: 5780 kfree(p); 5781 return NULL; 5782 } 5783 5784 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 5785 { 5786 struct nfs4_lockdata *data = calldata; 5787 struct nfs4_state *state = data->lsp->ls_state; 5788 5789 dprintk("%s: begin!\n", __func__); 5790 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 5791 goto out_wait; 5792 /* Do we need to do an open_to_lock_owner? */ 5793 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 5794 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 5795 goto out_release_lock_seqid; 5796 } 5797 nfs4_stateid_copy(&data->arg.open_stateid, 5798 &state->open_stateid); 5799 data->arg.new_lock_owner = 1; 5800 data->res.open_seqid = data->arg.open_seqid; 5801 } else { 5802 data->arg.new_lock_owner = 0; 5803 nfs4_stateid_copy(&data->arg.lock_stateid, 5804 &data->lsp->ls_stateid); 5805 } 5806 if (!nfs4_valid_open_stateid(state)) { 5807 data->rpc_status = -EBADF; 5808 task->tk_action = NULL; 5809 goto out_release_open_seqid; 5810 } 5811 data->timestamp = jiffies; 5812 if (nfs4_setup_sequence(data->server, 5813 &data->arg.seq_args, 5814 &data->res.seq_res, 5815 task) == 0) 5816 return; 5817 out_release_open_seqid: 5818 nfs_release_seqid(data->arg.open_seqid); 5819 out_release_lock_seqid: 5820 nfs_release_seqid(data->arg.lock_seqid); 5821 out_wait: 5822 nfs4_sequence_done(task, &data->res.seq_res); 5823 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 5824 } 5825 5826 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 5827 { 5828 struct nfs4_lockdata *data = calldata; 5829 struct nfs4_lock_state *lsp = data->lsp; 5830 5831 dprintk("%s: begin!\n", __func__); 5832 5833 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5834 return; 5835 5836 data->rpc_status = task->tk_status; 5837 switch (task->tk_status) { 5838 case 0: 5839 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 5840 data->timestamp); 5841 if (data->arg.new_lock) { 5842 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 5843 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) { 5844 rpc_restart_call_prepare(task); 5845 break; 5846 } 5847 } 5848 if (data->arg.new_lock_owner != 0) { 5849 nfs_confirm_seqid(&lsp->ls_seqid, 0); 5850 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 5851 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 5852 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 5853 rpc_restart_call_prepare(task); 5854 break; 5855 case -NFS4ERR_BAD_STATEID: 5856 case -NFS4ERR_OLD_STATEID: 5857 case -NFS4ERR_STALE_STATEID: 5858 case -NFS4ERR_EXPIRED: 5859 if (data->arg.new_lock_owner != 0) { 5860 if (!nfs4_stateid_match(&data->arg.open_stateid, 5861 &lsp->ls_state->open_stateid)) 5862 rpc_restart_call_prepare(task); 5863 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 5864 &lsp->ls_stateid)) 5865 rpc_restart_call_prepare(task); 5866 } 5867 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 5868 } 5869 5870 static void nfs4_lock_release(void *calldata) 5871 { 5872 struct nfs4_lockdata *data = calldata; 5873 5874 dprintk("%s: begin!\n", __func__); 5875 nfs_free_seqid(data->arg.open_seqid); 5876 if (data->cancelled != 0) { 5877 struct rpc_task *task; 5878 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 5879 
data->arg.lock_seqid); 5880 if (!IS_ERR(task)) 5881 rpc_put_task_async(task); 5882 dprintk("%s: cancelling lock!\n", __func__); 5883 } else 5884 nfs_free_seqid(data->arg.lock_seqid); 5885 nfs4_put_lock_state(data->lsp); 5886 put_nfs_open_context(data->ctx); 5887 fput(data->fl.fl_file); 5888 kfree(data); 5889 dprintk("%s: done!\n", __func__); 5890 } 5891 5892 static const struct rpc_call_ops nfs4_lock_ops = { 5893 .rpc_call_prepare = nfs4_lock_prepare, 5894 .rpc_call_done = nfs4_lock_done, 5895 .rpc_release = nfs4_lock_release, 5896 }; 5897 5898 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 5899 { 5900 switch (error) { 5901 case -NFS4ERR_ADMIN_REVOKED: 5902 case -NFS4ERR_BAD_STATEID: 5903 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5904 if (new_lock_owner != 0 || 5905 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 5906 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 5907 break; 5908 case -NFS4ERR_STALE_STATEID: 5909 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5910 case -NFS4ERR_EXPIRED: 5911 nfs4_schedule_lease_recovery(server->nfs_client); 5912 }; 5913 } 5914 5915 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 5916 { 5917 struct nfs4_lockdata *data; 5918 struct rpc_task *task; 5919 struct rpc_message msg = { 5920 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 5921 .rpc_cred = state->owner->so_cred, 5922 }; 5923 struct rpc_task_setup task_setup_data = { 5924 .rpc_client = NFS_CLIENT(state->inode), 5925 .rpc_message = &msg, 5926 .callback_ops = &nfs4_lock_ops, 5927 .workqueue = nfsiod_workqueue, 5928 .flags = RPC_TASK_ASYNC, 5929 }; 5930 int ret; 5931 5932 dprintk("%s: begin!\n", __func__); 5933 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 5934 fl->fl_u.nfs4_fl.owner, 5935 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 5936 if (data == NULL) 5937 return -ENOMEM; 5938 if (IS_SETLKW(cmd)) 5939 data->arg.block = 1; 5940 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5941 msg.rpc_argp = &data->arg; 5942 msg.rpc_resp = &data->res; 5943 task_setup_data.callback_data = data; 5944 if (recovery_type > NFS_LOCK_NEW) { 5945 if (recovery_type == NFS_LOCK_RECLAIM) 5946 data->arg.reclaim = NFS_LOCK_RECLAIM; 5947 nfs4_set_sequence_privileged(&data->arg.seq_args); 5948 } else 5949 data->arg.new_lock = 1; 5950 task = rpc_run_task(&task_setup_data); 5951 if (IS_ERR(task)) 5952 return PTR_ERR(task); 5953 ret = nfs4_wait_for_completion_rpc_task(task); 5954 if (ret == 0) { 5955 ret = data->rpc_status; 5956 if (ret) 5957 nfs4_handle_setlk_error(data->server, data->lsp, 5958 data->arg.new_lock_owner, ret); 5959 } else 5960 data->cancelled = 1; 5961 rpc_put_task(task); 5962 dprintk("%s: done, ret = %d!\n", __func__, ret); 5963 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 5964 return ret; 5965 } 5966 5967 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 5968 { 5969 struct nfs_server *server = NFS_SERVER(state->inode); 5970 struct nfs4_exception exception = { 5971 .inode = state->inode, 5972 }; 5973 int err; 5974 5975 do { 5976 /* Cache the lock if possible... 
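 * (while we still hold a delegation the lock only needs to exist
 * locally, so there is nothing to reclaim from the server)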
*/ 5977 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5978 return 0; 5979 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 5980 if (err != -NFS4ERR_DELAY) 5981 break; 5982 nfs4_handle_exception(server, err, &exception); 5983 } while (exception.retry); 5984 return err; 5985 } 5986 5987 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 5988 { 5989 struct nfs_server *server = NFS_SERVER(state->inode); 5990 struct nfs4_exception exception = { 5991 .inode = state->inode, 5992 }; 5993 int err; 5994 5995 err = nfs4_set_lock_state(state, request); 5996 if (err != 0) 5997 return err; 5998 if (!recover_lost_locks) { 5999 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 6000 return 0; 6001 } 6002 do { 6003 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 6004 return 0; 6005 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 6006 switch (err) { 6007 default: 6008 goto out; 6009 case -NFS4ERR_GRACE: 6010 case -NFS4ERR_DELAY: 6011 nfs4_handle_exception(server, err, &exception); 6012 err = 0; 6013 } 6014 } while (exception.retry); 6015 out: 6016 return err; 6017 } 6018 6019 #if defined(CONFIG_NFS_V4_1) 6020 /** 6021 * nfs41_check_expired_locks - possibly free a lock stateid 6022 * 6023 * @state: NFSv4 state for an inode 6024 * 6025 * Returns NFS_OK if recovery for this stateid is now finished. 6026 * Otherwise a negative NFS4ERR value is returned. 6027 */ 6028 static int nfs41_check_expired_locks(struct nfs4_state *state) 6029 { 6030 int status, ret = -NFS4ERR_BAD_STATEID; 6031 struct nfs4_lock_state *lsp; 6032 struct nfs_server *server = NFS_SERVER(state->inode); 6033 6034 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 6035 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 6036 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 6037 6038 status = nfs41_test_stateid(server, 6039 &lsp->ls_stateid, 6040 cred); 6041 trace_nfs4_test_lock_stateid(state, lsp, status); 6042 if (status != NFS_OK) { 6043 /* Free the stateid unless the server 6044 * informs us the stateid is unrecognized. */ 6045 if (status != -NFS4ERR_BAD_STATEID) 6046 nfs41_free_stateid(server, 6047 &lsp->ls_stateid, 6048 cred); 6049 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 6050 ret = status; 6051 } 6052 } 6053 }; 6054 6055 return ret; 6056 } 6057 6058 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 6059 { 6060 int status = NFS_OK; 6061 6062 if (test_bit(LK_STATE_IN_USE, &state->flags)) 6063 status = nfs41_check_expired_locks(state); 6064 if (status != NFS_OK) 6065 status = nfs4_lock_expired(state, request); 6066 return status; 6067 } 6068 #endif 6069 6070 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6071 { 6072 struct nfs_inode *nfsi = NFS_I(state->inode); 6073 struct nfs4_state_owner *sp = state->owner; 6074 unsigned char fl_flags = request->fl_flags; 6075 int status = -ENOLCK; 6076 6077 if ((fl_flags & FL_POSIX) && 6078 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 6079 goto out; 6080 /* Is this a delegated open? */ 6081 status = nfs4_set_lock_state(state, request); 6082 if (status != 0) 6083 goto out; 6084 request->fl_flags |= FL_ACCESS; 6085 status = do_vfs_lock(state->inode, request); 6086 if (status < 0) 6087 goto out; 6088 mutex_lock(&sp->so_delegreturn_mutex); 6089 down_read(&nfsi->rwsem); 6090 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 6091 /* Yes: cache locks! */ 6092 /* ...but avoid races with delegation recall... 
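 * by clearing FL_SLEEP below, so that do_vfs_lock() cannot block
 * while so_delegreturn_mutex and nfsi->rwsem are held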
*/ 6093 request->fl_flags = fl_flags & ~FL_SLEEP; 6094 status = do_vfs_lock(state->inode, request); 6095 up_read(&nfsi->rwsem); 6096 mutex_unlock(&sp->so_delegreturn_mutex); 6097 goto out; 6098 } 6099 up_read(&nfsi->rwsem); 6100 mutex_unlock(&sp->so_delegreturn_mutex); 6101 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 6102 out: 6103 request->fl_flags = fl_flags; 6104 return status; 6105 } 6106 6107 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6108 { 6109 struct nfs4_exception exception = { 6110 .state = state, 6111 .inode = state->inode, 6112 }; 6113 int err; 6114 6115 do { 6116 err = _nfs4_proc_setlk(state, cmd, request); 6117 if (err == -NFS4ERR_DENIED) 6118 err = -EAGAIN; 6119 err = nfs4_handle_exception(NFS_SERVER(state->inode), 6120 err, &exception); 6121 } while (exception.retry); 6122 return err; 6123 } 6124 6125 static int 6126 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 6127 { 6128 struct nfs_open_context *ctx; 6129 struct nfs4_state *state; 6130 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 6131 int status; 6132 6133 /* verify open state */ 6134 ctx = nfs_file_open_context(filp); 6135 state = ctx->state; 6136 6137 if (request->fl_start < 0 || request->fl_end < 0) 6138 return -EINVAL; 6139 6140 if (IS_GETLK(cmd)) { 6141 if (state != NULL) 6142 return nfs4_proc_getlk(state, F_GETLK, request); 6143 return 0; 6144 } 6145 6146 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 6147 return -EINVAL; 6148 6149 if (request->fl_type == F_UNLCK) { 6150 if (state != NULL) 6151 return nfs4_proc_unlck(state, cmd, request); 6152 return 0; 6153 } 6154 6155 if (state == NULL) 6156 return -ENOLCK; 6157 /* 6158 * Don't rely on the VFS having checked the file open mode, 6159 * since it won't do this for flock() locks. 
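 * For example, flock(fd, LOCK_SH) on a descriptor opened O_WRONLY
 * must still fail with -EBADF here even though the VFS let it through.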
6160 */ 6161 switch (request->fl_type) { 6162 case F_RDLCK: 6163 if (!(filp->f_mode & FMODE_READ)) 6164 return -EBADF; 6165 break; 6166 case F_WRLCK: 6167 if (!(filp->f_mode & FMODE_WRITE)) 6168 return -EBADF; 6169 } 6170 6171 do { 6172 status = nfs4_proc_setlk(state, cmd, request); 6173 if ((status != -EAGAIN) || IS_SETLK(cmd)) 6174 break; 6175 timeout = nfs4_set_lock_task_retry(timeout); 6176 status = -ERESTARTSYS; 6177 if (signalled()) 6178 break; 6179 } while(status < 0); 6180 return status; 6181 } 6182 6183 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 6184 { 6185 struct nfs_server *server = NFS_SERVER(state->inode); 6186 int err; 6187 6188 err = nfs4_set_lock_state(state, fl); 6189 if (err != 0) 6190 return err; 6191 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 6192 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 6193 } 6194 6195 struct nfs_release_lockowner_data { 6196 struct nfs4_lock_state *lsp; 6197 struct nfs_server *server; 6198 struct nfs_release_lockowner_args args; 6199 struct nfs_release_lockowner_res res; 6200 unsigned long timestamp; 6201 }; 6202 6203 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 6204 { 6205 struct nfs_release_lockowner_data *data = calldata; 6206 struct nfs_server *server = data->server; 6207 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 6208 &data->args.seq_args, &data->res.seq_res, task); 6209 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6210 data->timestamp = jiffies; 6211 } 6212 6213 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 6214 { 6215 struct nfs_release_lockowner_data *data = calldata; 6216 struct nfs_server *server = data->server; 6217 6218 nfs40_sequence_done(task, &data->res.seq_res); 6219 6220 switch (task->tk_status) { 6221 case 0: 6222 renew_lease(server, data->timestamp); 6223 break; 6224 case -NFS4ERR_STALE_CLIENTID: 6225 case -NFS4ERR_EXPIRED: 6226 nfs4_schedule_lease_recovery(server->nfs_client); 6227 break; 6228 case -NFS4ERR_LEASE_MOVED: 6229 case -NFS4ERR_DELAY: 6230 if (nfs4_async_handle_error(task, server, 6231 NULL, NULL) == -EAGAIN) 6232 rpc_restart_call_prepare(task); 6233 } 6234 } 6235 6236 static void nfs4_release_lockowner_release(void *calldata) 6237 { 6238 struct nfs_release_lockowner_data *data = calldata; 6239 nfs4_free_lock_state(data->server, data->lsp); 6240 kfree(calldata); 6241 } 6242 6243 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 6244 .rpc_call_prepare = nfs4_release_lockowner_prepare, 6245 .rpc_call_done = nfs4_release_lockowner_done, 6246 .rpc_release = nfs4_release_lockowner_release, 6247 }; 6248 6249 static void 6250 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 6251 { 6252 struct nfs_release_lockowner_data *data; 6253 struct rpc_message msg = { 6254 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 6255 }; 6256 6257 if (server->nfs_client->cl_mvops->minor_version != 0) 6258 return; 6259 6260 data = kmalloc(sizeof(*data), GFP_NOFS); 6261 if (!data) 6262 return; 6263 data->lsp = lsp; 6264 data->server = server; 6265 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6266 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 6267 data->args.lock_owner.s_dev = server->s_dev; 6268 6269 msg.rpc_argp = &data->args; 6270 msg.rpc_resp = &data->res; 6271 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 6272 
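/* Fire-and-forget: the ->rpc_release callback frees both the lock
 * state and this calldata once the RPC completes, and no status is
 * reported back to the caller.
 */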
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 6273 } 6274 6275 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 6276 6277 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 6278 struct dentry *dentry, const char *key, 6279 const void *buf, size_t buflen, 6280 int flags) 6281 { 6282 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen); 6283 } 6284 6285 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 6286 struct dentry *dentry, const char *key, 6287 void *buf, size_t buflen) 6288 { 6289 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen); 6290 } 6291 6292 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 6293 { 6294 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))); 6295 } 6296 6297 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6298 6299 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 6300 struct dentry *dentry, const char *key, 6301 const void *buf, size_t buflen, 6302 int flags) 6303 { 6304 if (security_ismaclabel(key)) 6305 return nfs4_set_security_label(dentry, buf, buflen); 6306 6307 return -EOPNOTSUPP; 6308 } 6309 6310 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 6311 struct dentry *dentry, const char *key, 6312 void *buf, size_t buflen) 6313 { 6314 if (security_ismaclabel(key)) 6315 return nfs4_get_security_label(d_inode(dentry), buf, buflen); 6316 return -EOPNOTSUPP; 6317 } 6318 6319 static ssize_t 6320 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 6321 { 6322 int len = 0; 6323 6324 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 6325 len = security_inode_listsecurity(inode, list, list_len); 6326 if (list_len && len > list_len) 6327 return -ERANGE; 6328 } 6329 return len; 6330 } 6331 6332 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 6333 .prefix = XATTR_SECURITY_PREFIX, 6334 .get = nfs4_xattr_get_nfs4_label, 6335 .set = nfs4_xattr_set_nfs4_label, 6336 }; 6337 6338 #else 6339 6340 static ssize_t 6341 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 6342 { 6343 return 0; 6344 } 6345 6346 #endif 6347 6348 /* 6349 * nfs_fhget will use either the mounted_on_fileid or the fileid 6350 */ 6351 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 6352 { 6353 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 6354 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 6355 (fattr->valid & NFS_ATTR_FATTR_FSID) && 6356 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 6357 return; 6358 6359 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 6360 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 6361 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 6362 fattr->nlink = 2; 6363 } 6364 6365 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6366 const struct qstr *name, 6367 struct nfs4_fs_locations *fs_locations, 6368 struct page *page) 6369 { 6370 struct nfs_server *server = NFS_SERVER(dir); 6371 u32 bitmask[3] = { 6372 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6373 }; 6374 struct nfs4_fs_locations_arg args = { 6375 .dir_fh = NFS_FH(dir), 6376 .name = name, 6377 .page = page, 6378 .bitmask = bitmask, 6379 }; 6380 struct nfs4_fs_locations_res res = { 6381 .fs_locations = fs_locations, 6382 }; 6383 struct rpc_message msg = { 6384 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6385 .rpc_argp = &args, 6386 .rpc_resp = &res, 6387 }; 6388 int status; 6389 6390 dprintk("%s: start\n", __func__); 6391 6392 /* Ask 
for the fileid of the absent filesystem if mounted_on_fileid 6393 * is not supported */ 6394 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 6395 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 6396 else 6397 bitmask[0] |= FATTR4_WORD0_FILEID; 6398 6399 nfs_fattr_init(&fs_locations->fattr); 6400 fs_locations->server = server; 6401 fs_locations->nlocations = 0; 6402 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 6403 dprintk("%s: returned status = %d\n", __func__, status); 6404 return status; 6405 } 6406 6407 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6408 const struct qstr *name, 6409 struct nfs4_fs_locations *fs_locations, 6410 struct page *page) 6411 { 6412 struct nfs4_exception exception = { }; 6413 int err; 6414 do { 6415 err = _nfs4_proc_fs_locations(client, dir, name, 6416 fs_locations, page); 6417 trace_nfs4_get_fs_locations(dir, name, err); 6418 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6419 &exception); 6420 } while (exception.retry); 6421 return err; 6422 } 6423 6424 /* 6425 * This operation also signals the server that this client is 6426 * performing migration recovery. The server can stop returning 6427 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 6428 * appended to this compound to identify the client ID which is 6429 * performing recovery. 6430 */ 6431 static int _nfs40_proc_get_locations(struct inode *inode, 6432 struct nfs4_fs_locations *locations, 6433 struct page *page, struct rpc_cred *cred) 6434 { 6435 struct nfs_server *server = NFS_SERVER(inode); 6436 struct rpc_clnt *clnt = server->client; 6437 u32 bitmask[2] = { 6438 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6439 }; 6440 struct nfs4_fs_locations_arg args = { 6441 .clientid = server->nfs_client->cl_clientid, 6442 .fh = NFS_FH(inode), 6443 .page = page, 6444 .bitmask = bitmask, 6445 .migration = 1, /* skip LOOKUP */ 6446 .renew = 1, /* append RENEW */ 6447 }; 6448 struct nfs4_fs_locations_res res = { 6449 .fs_locations = locations, 6450 .migration = 1, 6451 .renew = 1, 6452 }; 6453 struct rpc_message msg = { 6454 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6455 .rpc_argp = &args, 6456 .rpc_resp = &res, 6457 .rpc_cred = cred, 6458 }; 6459 unsigned long now = jiffies; 6460 int status; 6461 6462 nfs_fattr_init(&locations->fattr); 6463 locations->server = server; 6464 locations->nlocations = 0; 6465 6466 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6467 nfs4_set_sequence_privileged(&args.seq_args); 6468 status = nfs4_call_sync_sequence(clnt, server, &msg, 6469 &args.seq_args, &res.seq_res); 6470 if (status) 6471 return status; 6472 6473 renew_lease(server, now); 6474 return 0; 6475 } 6476 6477 #ifdef CONFIG_NFS_V4_1 6478 6479 /* 6480 * This operation also signals the server that this client is 6481 * performing migration recovery. The server can stop asserting 6482 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 6483 * performing this operation is identified in the SEQUENCE 6484 * operation in this compound. 6485 * 6486 * When the client supports GETATTR(fs_locations_info), it can 6487 * be plumbed in here. 
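 *
 * Roughly, the compound issued here is
 *
 *	SEQUENCE; PUTFH(fh); GETATTR(fsid, fs_locations)
 *
 * with args.migration telling the XDR layer to skip the LOOKUP that a
 * plain FS_LOCATIONS call would use.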
6488 */ 6489 static int _nfs41_proc_get_locations(struct inode *inode, 6490 struct nfs4_fs_locations *locations, 6491 struct page *page, struct rpc_cred *cred) 6492 { 6493 struct nfs_server *server = NFS_SERVER(inode); 6494 struct rpc_clnt *clnt = server->client; 6495 u32 bitmask[2] = { 6496 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6497 }; 6498 struct nfs4_fs_locations_arg args = { 6499 .fh = NFS_FH(inode), 6500 .page = page, 6501 .bitmask = bitmask, 6502 .migration = 1, /* skip LOOKUP */ 6503 }; 6504 struct nfs4_fs_locations_res res = { 6505 .fs_locations = locations, 6506 .migration = 1, 6507 }; 6508 struct rpc_message msg = { 6509 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6510 .rpc_argp = &args, 6511 .rpc_resp = &res, 6512 .rpc_cred = cred, 6513 }; 6514 int status; 6515 6516 nfs_fattr_init(&locations->fattr); 6517 locations->server = server; 6518 locations->nlocations = 0; 6519 6520 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6521 nfs4_set_sequence_privileged(&args.seq_args); 6522 status = nfs4_call_sync_sequence(clnt, server, &msg, 6523 &args.seq_args, &res.seq_res); 6524 if (status == NFS4_OK && 6525 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6526 status = -NFS4ERR_LEASE_MOVED; 6527 return status; 6528 } 6529 6530 #endif /* CONFIG_NFS_V4_1 */ 6531 6532 /** 6533 * nfs4_proc_get_locations - discover locations for a migrated FSID 6534 * @inode: inode on FSID that is migrating 6535 * @locations: result of query 6536 * @page: buffer 6537 * @cred: credential to use for this operation 6538 * 6539 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 6540 * operation failed, or a negative errno if a local error occurred. 6541 * 6542 * On success, "locations" is filled in, but if the server has 6543 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 6544 * asserted. 6545 * 6546 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 6547 * from this client that require migration recovery. 6548 */ 6549 int nfs4_proc_get_locations(struct inode *inode, 6550 struct nfs4_fs_locations *locations, 6551 struct page *page, struct rpc_cred *cred) 6552 { 6553 struct nfs_server *server = NFS_SERVER(inode); 6554 struct nfs_client *clp = server->nfs_client; 6555 const struct nfs4_mig_recovery_ops *ops = 6556 clp->cl_mvops->mig_recovery_ops; 6557 struct nfs4_exception exception = { }; 6558 int status; 6559 6560 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6561 (unsigned long long)server->fsid.major, 6562 (unsigned long long)server->fsid.minor, 6563 clp->cl_hostname); 6564 nfs_display_fhandle(NFS_FH(inode), __func__); 6565 6566 do { 6567 status = ops->get_locations(inode, locations, page, cred); 6568 if (status != -NFS4ERR_DELAY) 6569 break; 6570 nfs4_handle_exception(server, status, &exception); 6571 } while (exception.retry); 6572 return status; 6573 } 6574 6575 /* 6576 * This operation also signals the server that this client is 6577 * performing "lease moved" recovery. The server can stop 6578 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 6579 * is appended to this compound to identify the client ID which is 6580 * performing recovery. 
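 *
 * The request is therefore roughly PUTFH(fh); GETFH; RENEW(clientid);
 * the throwaway filehandle allocated for res.fh below seems to exist
 * only to absorb the GETFH reply.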
6581 */ 6582 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6583 { 6584 struct nfs_server *server = NFS_SERVER(inode); 6585 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 6586 struct rpc_clnt *clnt = server->client; 6587 struct nfs4_fsid_present_arg args = { 6588 .fh = NFS_FH(inode), 6589 .clientid = clp->cl_clientid, 6590 .renew = 1, /* append RENEW */ 6591 }; 6592 struct nfs4_fsid_present_res res = { 6593 .renew = 1, 6594 }; 6595 struct rpc_message msg = { 6596 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6597 .rpc_argp = &args, 6598 .rpc_resp = &res, 6599 .rpc_cred = cred, 6600 }; 6601 unsigned long now = jiffies; 6602 int status; 6603 6604 res.fh = nfs_alloc_fhandle(); 6605 if (res.fh == NULL) 6606 return -ENOMEM; 6607 6608 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6609 nfs4_set_sequence_privileged(&args.seq_args); 6610 status = nfs4_call_sync_sequence(clnt, server, &msg, 6611 &args.seq_args, &res.seq_res); 6612 nfs_free_fhandle(res.fh); 6613 if (status) 6614 return status; 6615 6616 do_renew_lease(clp, now); 6617 return 0; 6618 } 6619 6620 #ifdef CONFIG_NFS_V4_1 6621 6622 /* 6623 * This operation also signals the server that this client is 6624 * performing "lease moved" recovery. The server can stop asserting 6625 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 6626 * this operation is identified in the SEQUENCE operation in this 6627 * compound. 6628 */ 6629 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6630 { 6631 struct nfs_server *server = NFS_SERVER(inode); 6632 struct rpc_clnt *clnt = server->client; 6633 struct nfs4_fsid_present_arg args = { 6634 .fh = NFS_FH(inode), 6635 }; 6636 struct nfs4_fsid_present_res res = { 6637 }; 6638 struct rpc_message msg = { 6639 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6640 .rpc_argp = &args, 6641 .rpc_resp = &res, 6642 .rpc_cred = cred, 6643 }; 6644 int status; 6645 6646 res.fh = nfs_alloc_fhandle(); 6647 if (res.fh == NULL) 6648 return -ENOMEM; 6649 6650 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6651 nfs4_set_sequence_privileged(&args.seq_args); 6652 status = nfs4_call_sync_sequence(clnt, server, &msg, 6653 &args.seq_args, &res.seq_res); 6654 nfs_free_fhandle(res.fh); 6655 if (status == NFS4_OK && 6656 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6657 status = -NFS4ERR_LEASE_MOVED; 6658 return status; 6659 } 6660 6661 #endif /* CONFIG_NFS_V4_1 */ 6662 6663 /** 6664 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 6665 * @inode: inode on FSID to check 6666 * @cred: credential to use for this operation 6667 * 6668 * Server indicates whether the FSID is present, moved, or not 6669 * recognized. This operation is necessary to clear a LEASE_MOVED 6670 * condition for this client ID. 6671 * 6672 * Returns NFS4_OK if the FSID is present on this server, 6673 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 6674 * NFS4ERR code if some error occurred on the server, or a 6675 * negative errno if a local failure occurred. 
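 *
 * A minimal caller sketch (hypothetical; "nfs4_try_migration()" is an
 * assumed recovery helper, error handling elided):
 *
 *	status = nfs4_proc_fsid_present(inode, cred);
 *	if (status == -NFS4ERR_MOVED)
 *		status = nfs4_try_migration(server, cred);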
6676 */ 6677 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6678 { 6679 struct nfs_server *server = NFS_SERVER(inode); 6680 struct nfs_client *clp = server->nfs_client; 6681 const struct nfs4_mig_recovery_ops *ops = 6682 clp->cl_mvops->mig_recovery_ops; 6683 struct nfs4_exception exception = { }; 6684 int status; 6685 6686 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6687 (unsigned long long)server->fsid.major, 6688 (unsigned long long)server->fsid.minor, 6689 clp->cl_hostname); 6690 nfs_display_fhandle(NFS_FH(inode), __func__); 6691 6692 do { 6693 status = ops->fsid_present(inode, cred); 6694 if (status != -NFS4ERR_DELAY) 6695 break; 6696 nfs4_handle_exception(server, status, &exception); 6697 } while (exception.retry); 6698 return status; 6699 } 6700 6701 /** 6702 * If 'use_integrity' is true and the state management nfs_client 6703 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 6704 * and the machine credential as per RFC3530bis and RFC5661 Security 6705 * Considerations sections. Otherwise, just use the user cred with the 6706 * filesystem's rpc_client. 6707 */ 6708 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 6709 { 6710 int status; 6711 struct nfs4_secinfo_arg args = { 6712 .dir_fh = NFS_FH(dir), 6713 .name = name, 6714 }; 6715 struct nfs4_secinfo_res res = { 6716 .flavors = flavors, 6717 }; 6718 struct rpc_message msg = { 6719 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 6720 .rpc_argp = &args, 6721 .rpc_resp = &res, 6722 }; 6723 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 6724 struct rpc_cred *cred = NULL; 6725 6726 if (use_integrity) { 6727 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient; 6728 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client); 6729 msg.rpc_cred = cred; 6730 } 6731 6732 dprintk("NFS call secinfo %s\n", name->name); 6733 6734 nfs4_state_protect(NFS_SERVER(dir)->nfs_client, 6735 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 6736 6737 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args, 6738 &res.seq_res, 0); 6739 dprintk("NFS reply secinfo: %d\n", status); 6740 6741 if (cred) 6742 put_rpccred(cred); 6743 6744 return status; 6745 } 6746 6747 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 6748 struct nfs4_secinfo_flavors *flavors) 6749 { 6750 struct nfs4_exception exception = { }; 6751 int err; 6752 do { 6753 err = -NFS4ERR_WRONGSEC; 6754 6755 /* try to use integrity protection with machine cred */ 6756 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 6757 err = _nfs4_proc_secinfo(dir, name, flavors, true); 6758 6759 /* 6760 * if unable to use integrity protection, or SECINFO with 6761 * integrity protection returns NFS4ERR_WRONGSEC (which is 6762 * disallowed by spec, but exists in deployed servers) use 6763 * the current filesystem's rpc_client and the user cred. 6764 */ 6765 if (err == -NFS4ERR_WRONGSEC) 6766 err = _nfs4_proc_secinfo(dir, name, flavors, false); 6767 6768 trace_nfs4_secinfo(dir, name, err); 6769 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6770 &exception); 6771 } while (exception.retry); 6772 return err; 6773 } 6774 6775 #ifdef CONFIG_NFS_V4_1 6776 /* 6777 * Check the exchange flags returned by the server for invalid flags, having 6778 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 6779 * DS flags set.
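 *
 * e.g. a reply carrying both EXCHGID4_FLAG_USE_PNFS_MDS and
 * EXCHGID4_FLAG_USE_NON_PNFS, or one carrying none of the
 * EXCHGID4_FLAG_MASK_PNFS bits, is rejected with -NFS4ERR_INVAL.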
6780 */ 6781 static int nfs4_check_cl_exchange_flags(u32 flags) 6782 { 6783 if (flags & ~EXCHGID4_FLAG_MASK_R) 6784 goto out_inval; 6785 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 6786 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 6787 goto out_inval; 6788 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 6789 goto out_inval; 6790 return NFS_OK; 6791 out_inval: 6792 return -NFS4ERR_INVAL; 6793 } 6794 6795 static bool 6796 nfs41_same_server_scope(struct nfs41_server_scope *a, 6797 struct nfs41_server_scope *b) 6798 { 6799 if (a->server_scope_sz == b->server_scope_sz && 6800 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 6801 return true; 6802 6803 return false; 6804 } 6805 6806 static void 6807 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 6808 { 6809 } 6810 6811 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 6812 .rpc_call_done = &nfs4_bind_one_conn_to_session_done, 6813 }; 6814 6815 /* 6816 * nfs4_proc_bind_one_conn_to_session() 6817 * 6818 * The 4.1 client currently uses the same TCP connection for the 6819 * fore and backchannel. 6820 */ 6821 static 6822 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 6823 struct rpc_xprt *xprt, 6824 struct nfs_client *clp, 6825 struct rpc_cred *cred) 6826 { 6827 int status; 6828 struct nfs41_bind_conn_to_session_args args = { 6829 .client = clp, 6830 .dir = NFS4_CDFC4_FORE_OR_BOTH, 6831 }; 6832 struct nfs41_bind_conn_to_session_res res; 6833 struct rpc_message msg = { 6834 .rpc_proc = 6835 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 6836 .rpc_argp = &args, 6837 .rpc_resp = &res, 6838 .rpc_cred = cred, 6839 }; 6840 struct rpc_task_setup task_setup_data = { 6841 .rpc_client = clnt, 6842 .rpc_xprt = xprt, 6843 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 6844 .rpc_message = &msg, 6845 .flags = RPC_TASK_TIMEOUT, 6846 }; 6847 struct rpc_task *task; 6848 6849 dprintk("--> %s\n", __func__); 6850 6851 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 6852 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 6853 args.dir = NFS4_CDFC4_FORE; 6854 6855 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 6856 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 6857 args.dir = NFS4_CDFC4_FORE; 6858 6859 task = rpc_run_task(&task_setup_data); 6860 if (!IS_ERR(task)) { 6861 status = task->tk_status; 6862 rpc_put_task(task); 6863 } else 6864 status = PTR_ERR(task); 6865 trace_nfs4_bind_conn_to_session(clp, status); 6866 if (status == 0) { 6867 if (memcmp(res.sessionid.data, 6868 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 6869 dprintk("NFS: %s: Session ID mismatch\n", __func__); 6870 status = -EIO; 6871 goto out; 6872 } 6873 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 6874 dprintk("NFS: %s: Unexpected direction from server\n", 6875 __func__); 6876 status = -EIO; 6877 goto out; 6878 } 6879 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 6880 dprintk("NFS: %s: Server returned RDMA mode = true\n", 6881 __func__); 6882 status = -EIO; 6883 goto out; 6884 } 6885 } 6886 out: 6887 dprintk("<-- %s status= %d\n", __func__, status); 6888 return status; 6889 } 6890 6891 struct rpc_bind_conn_calldata { 6892 struct nfs_client *clp; 6893 struct rpc_cred *cred; 6894 }; 6895 6896 static int 6897 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 6898 struct rpc_xprt *xprt, 6899 void *calldata) 6900 { 6901 struct rpc_bind_conn_calldata *p = calldata; 6902 6903 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, 
p->cred); 6904 } 6905 6906 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 6907 { 6908 struct rpc_bind_conn_calldata data = { 6909 .clp = clp, 6910 .cred = cred, 6911 }; 6912 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 6913 nfs4_proc_bind_conn_to_session_callback, &data); 6914 } 6915 6916 /* 6917 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 6918 * and operations we'd like to see to enable certain features in the allow map 6919 */ 6920 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 6921 .how = SP4_MACH_CRED, 6922 .enforce.u.words = { 6923 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6924 1 << (OP_EXCHANGE_ID - 32) | 6925 1 << (OP_CREATE_SESSION - 32) | 6926 1 << (OP_DESTROY_SESSION - 32) | 6927 1 << (OP_DESTROY_CLIENTID - 32) 6928 }, 6929 .allow.u.words = { 6930 [0] = 1 << (OP_CLOSE) | 6931 1 << (OP_OPEN_DOWNGRADE) | 6932 1 << (OP_LOCKU) | 6933 1 << (OP_DELEGRETURN) | 6934 1 << (OP_COMMIT), 6935 [1] = 1 << (OP_SECINFO - 32) | 6936 1 << (OP_SECINFO_NO_NAME - 32) | 6937 1 << (OP_LAYOUTRETURN - 32) | 6938 1 << (OP_TEST_STATEID - 32) | 6939 1 << (OP_FREE_STATEID - 32) | 6940 1 << (OP_WRITE - 32) 6941 } 6942 }; 6943 6944 /* 6945 * Select the state protection mode for client `clp' given the server results 6946 * from exchange_id in `sp'. 6947 * 6948 * Returns 0 on success, negative errno otherwise. 6949 */ 6950 static int nfs4_sp4_select_mode(struct nfs_client *clp, 6951 struct nfs41_state_protection *sp) 6952 { 6953 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 6954 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6955 1 << (OP_EXCHANGE_ID - 32) | 6956 1 << (OP_CREATE_SESSION - 32) | 6957 1 << (OP_DESTROY_SESSION - 32) | 6958 1 << (OP_DESTROY_CLIENTID - 32) 6959 }; 6960 unsigned int i; 6961 6962 if (sp->how == SP4_MACH_CRED) { 6963 /* Print state protect result */ 6964 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 6965 for (i = 0; i <= LAST_NFS4_OP; i++) { 6966 if (test_bit(i, sp->enforce.u.longs)) 6967 dfprintk(MOUNT, " enforce op %d\n", i); 6968 if (test_bit(i, sp->allow.u.longs)) 6969 dfprintk(MOUNT, " allow op %d\n", i); 6970 } 6971 6972 /* make sure nothing is on enforce list that isn't supported */ 6973 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 6974 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 6975 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 6976 return -EINVAL; 6977 } 6978 } 6979 6980 /* 6981 * Minimal mode - state operations are allowed to use machine 6982 * credential. Note this already happens by default, so the 6983 * client doesn't have to do anything more than the negotiation. 6984 * 6985 * NOTE: we don't care if EXCHANGE_ID is in the list - 6986 * we're already using the machine cred for exchange_id 6987 * and will never use a different cred. 
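 * (so, of the enforce set, only BIND_CONN_TO_SESSION, CREATE_SESSION,
 * DESTROY_SESSION and DESTROY_CLIENTID are actually checked below)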
6988 */ 6989 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 6990 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 6991 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 6992 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 6993 dfprintk(MOUNT, "sp4_mach_cred:\n"); 6994 dfprintk(MOUNT, " minimal mode enabled\n"); 6995 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags); 6996 } else { 6997 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 6998 return -EINVAL; 6999 } 7000 7001 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 7002 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 7003 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 7004 test_bit(OP_LOCKU, sp->allow.u.longs)) { 7005 dfprintk(MOUNT, " cleanup mode enabled\n"); 7006 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags); 7007 } 7008 7009 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 7010 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 7011 set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, 7012 &clp->cl_sp4_flags); 7013 } 7014 7015 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 7016 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 7017 dfprintk(MOUNT, " secinfo mode enabled\n"); 7018 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags); 7019 } 7020 7021 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 7022 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 7023 dfprintk(MOUNT, " stateid mode enabled\n"); 7024 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags); 7025 } 7026 7027 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 7028 dfprintk(MOUNT, " write mode enabled\n"); 7029 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags); 7030 } 7031 7032 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 7033 dfprintk(MOUNT, " commit mode enabled\n"); 7034 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags); 7035 } 7036 } 7037 7038 return 0; 7039 } 7040 7041 /* 7042 * _nfs4_proc_exchange_id() 7043 * 7044 * Wrapper for EXCHANGE_ID operation. 
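 *
 * @sp4_how selects the requested state protection: SP4_NONE asks for
 * none, SP4_MACH_CRED sends the nfs4_sp4_mach_cred_request map defined
 * above; anything else trips the WARN_ON_ONCE() below.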
7045 */ 7046 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, 7047 u32 sp4_how) 7048 { 7049 nfs4_verifier verifier; 7050 struct nfs41_exchange_id_args args = { 7051 .verifier = &verifier, 7052 .client = clp, 7053 #ifdef CONFIG_NFS_V4_1_MIGRATION 7054 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 7055 EXCHGID4_FLAG_BIND_PRINC_STATEID | 7056 EXCHGID4_FLAG_SUPP_MOVED_MIGR, 7057 #else 7058 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 7059 EXCHGID4_FLAG_BIND_PRINC_STATEID, 7060 #endif 7061 }; 7062 struct nfs41_exchange_id_res res = { 7063 0 7064 }; 7065 int status; 7066 struct rpc_message msg = { 7067 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 7068 .rpc_argp = &args, 7069 .rpc_resp = &res, 7070 .rpc_cred = cred, 7071 }; 7072 7073 nfs4_init_boot_verifier(clp, &verifier); 7074 7075 status = nfs4_init_uniform_client_string(clp); 7076 if (status) 7077 goto out; 7078 7079 dprintk("NFS call exchange_id auth=%s, '%s'\n", 7080 clp->cl_rpcclient->cl_auth->au_ops->au_name, 7081 clp->cl_owner_id); 7082 7083 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 7084 GFP_NOFS); 7085 if (unlikely(res.server_owner == NULL)) { 7086 status = -ENOMEM; 7087 goto out; 7088 } 7089 7090 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 7091 GFP_NOFS); 7092 if (unlikely(res.server_scope == NULL)) { 7093 status = -ENOMEM; 7094 goto out_server_owner; 7095 } 7096 7097 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 7098 if (unlikely(res.impl_id == NULL)) { 7099 status = -ENOMEM; 7100 goto out_server_scope; 7101 } 7102 7103 switch (sp4_how) { 7104 case SP4_NONE: 7105 args.state_protect.how = SP4_NONE; 7106 break; 7107 7108 case SP4_MACH_CRED: 7109 args.state_protect = nfs4_sp4_mach_cred_request; 7110 break; 7111 7112 default: 7113 /* unsupported! 
*/ 7114 WARN_ON_ONCE(1); 7115 status = -EINVAL; 7116 goto out_impl_id; 7117 } 7118 7119 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7120 trace_nfs4_exchange_id(clp, status); 7121 if (status == 0) 7122 status = nfs4_check_cl_exchange_flags(res.flags); 7123 7124 if (status == 0) 7125 status = nfs4_sp4_select_mode(clp, &res.state_protect); 7126 7127 if (status == 0) { 7128 clp->cl_clientid = res.clientid; 7129 clp->cl_exchange_flags = res.flags; 7130 /* Client ID is not confirmed */ 7131 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) { 7132 clear_bit(NFS4_SESSION_ESTABLISHED, 7133 &clp->cl_session->session_state); 7134 clp->cl_seqid = res.seqid; 7135 } 7136 7137 kfree(clp->cl_serverowner); 7138 clp->cl_serverowner = res.server_owner; 7139 res.server_owner = NULL; 7140 7141 /* use the most recent implementation id */ 7142 kfree(clp->cl_implid); 7143 clp->cl_implid = res.impl_id; 7144 res.impl_id = NULL; 7145 7146 if (clp->cl_serverscope != NULL && 7147 !nfs41_same_server_scope(clp->cl_serverscope, 7148 res.server_scope)) { 7149 dprintk("%s: server_scope mismatch detected\n", 7150 __func__); 7151 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 7152 kfree(clp->cl_serverscope); 7153 clp->cl_serverscope = NULL; 7154 } 7155 7156 if (clp->cl_serverscope == NULL) { 7157 clp->cl_serverscope = res.server_scope; 7158 res.server_scope = NULL; 7159 } 7160 } 7161 7162 out_impl_id: 7163 kfree(res.impl_id); 7164 out_server_scope: 7165 kfree(res.server_scope); 7166 out_server_owner: 7167 kfree(res.server_owner); 7168 out: 7169 if (clp->cl_implid != NULL) 7170 dprintk("NFS reply exchange_id: Server Implementation ID: " 7171 "domain: %s, name: %s, date: %llu,%u\n", 7172 clp->cl_implid->domain, clp->cl_implid->name, 7173 clp->cl_implid->date.seconds, 7174 clp->cl_implid->date.nseconds); 7175 dprintk("NFS reply exchange_id: %d\n", status); 7176 return status; 7177 } 7178 7179 /* 7180 * nfs4_proc_exchange_id() 7181 * 7182 * Returns zero, a negative errno, or a negative NFS4ERR status code. 7183 * 7184 * Since the clientid has expired, all compounds using sessions 7185 * associated with the stale clientid will be returning 7186 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 7187 * be in some phase of session reset. 7188 * 7189 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
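 * If that first attempt fails for any reason, the call quietly falls
 * back to requesting SP4_NONE.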
7190 */ 7191 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 7192 { 7193 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 7194 int status; 7195 7196 /* try SP4_MACH_CRED if krb5i/p */ 7197 if (authflavor == RPC_AUTH_GSS_KRB5I || 7198 authflavor == RPC_AUTH_GSS_KRB5P) { 7199 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 7200 if (!status) 7201 return 0; 7202 } 7203 7204 /* try SP4_NONE */ 7205 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 7206 } 7207 7208 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 7209 struct rpc_cred *cred) 7210 { 7211 struct rpc_message msg = { 7212 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 7213 .rpc_argp = clp, 7214 .rpc_cred = cred, 7215 }; 7216 int status; 7217 7218 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7219 trace_nfs4_destroy_clientid(clp, status); 7220 if (status) 7221 dprintk("NFS: Got error %d from the server %s on " 7222 "DESTROY_CLIENTID.", status, clp->cl_hostname); 7223 return status; 7224 } 7225 7226 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 7227 struct rpc_cred *cred) 7228 { 7229 unsigned int loop; 7230 int ret; 7231 7232 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 7233 ret = _nfs4_proc_destroy_clientid(clp, cred); 7234 switch (ret) { 7235 case -NFS4ERR_DELAY: 7236 case -NFS4ERR_CLIENTID_BUSY: 7237 ssleep(1); 7238 break; 7239 default: 7240 return ret; 7241 } 7242 } 7243 return 0; 7244 } 7245 7246 int nfs4_destroy_clientid(struct nfs_client *clp) 7247 { 7248 struct rpc_cred *cred; 7249 int ret = 0; 7250 7251 if (clp->cl_mvops->minor_version < 1) 7252 goto out; 7253 if (clp->cl_exchange_flags == 0) 7254 goto out; 7255 if (clp->cl_preserve_clid) 7256 goto out; 7257 cred = nfs4_get_clid_cred(clp); 7258 ret = nfs4_proc_destroy_clientid(clp, cred); 7259 if (cred) 7260 put_rpccred(cred); 7261 switch (ret) { 7262 case 0: 7263 case -NFS4ERR_STALE_CLIENTID: 7264 clp->cl_exchange_flags = 0; 7265 } 7266 out: 7267 return ret; 7268 } 7269 7270 struct nfs4_get_lease_time_data { 7271 struct nfs4_get_lease_time_args *args; 7272 struct nfs4_get_lease_time_res *res; 7273 struct nfs_client *clp; 7274 }; 7275 7276 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 7277 void *calldata) 7278 { 7279 struct nfs4_get_lease_time_data *data = 7280 (struct nfs4_get_lease_time_data *)calldata; 7281 7282 dprintk("--> %s\n", __func__); 7283 /* just setup sequence, do not trigger session recovery 7284 since we're invoked within one */ 7285 nfs41_setup_sequence(data->clp->cl_session, 7286 &data->args->la_seq_args, 7287 &data->res->lr_seq_res, 7288 task); 7289 dprintk("<-- %s\n", __func__); 7290 } 7291 7292 /* 7293 * Called from nfs4_state_manager thread for session setup, so don't recover 7294 * from sequence operation or clientid errors. 
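 * The sequence arguments were marked privileged by the caller, so this
 * request may run even while the session's slot table is draining.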
7295 */ 7296 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 7297 { 7298 struct nfs4_get_lease_time_data *data = 7299 (struct nfs4_get_lease_time_data *)calldata; 7300 7301 dprintk("--> %s\n", __func__); 7302 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 7303 return; 7304 switch (task->tk_status) { 7305 case -NFS4ERR_DELAY: 7306 case -NFS4ERR_GRACE: 7307 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 7308 rpc_delay(task, NFS4_POLL_RETRY_MIN); 7309 task->tk_status = 0; 7310 /* fall through */ 7311 case -NFS4ERR_RETRY_UNCACHED_REP: 7312 rpc_restart_call_prepare(task); 7313 return; 7314 } 7315 dprintk("<-- %s\n", __func__); 7316 } 7317 7318 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 7319 .rpc_call_prepare = nfs4_get_lease_time_prepare, 7320 .rpc_call_done = nfs4_get_lease_time_done, 7321 }; 7322 7323 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 7324 { 7325 struct rpc_task *task; 7326 struct nfs4_get_lease_time_args args; 7327 struct nfs4_get_lease_time_res res = { 7328 .lr_fsinfo = fsinfo, 7329 }; 7330 struct nfs4_get_lease_time_data data = { 7331 .args = &args, 7332 .res = &res, 7333 .clp = clp, 7334 }; 7335 struct rpc_message msg = { 7336 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 7337 .rpc_argp = &args, 7338 .rpc_resp = &res, 7339 }; 7340 struct rpc_task_setup task_setup = { 7341 .rpc_client = clp->cl_rpcclient, 7342 .rpc_message = &msg, 7343 .callback_ops = &nfs4_get_lease_time_ops, 7344 .callback_data = &data, 7345 .flags = RPC_TASK_TIMEOUT, 7346 }; 7347 int status; 7348 7349 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 7350 nfs4_set_sequence_privileged(&args.la_seq_args); 7351 dprintk("--> %s\n", __func__); 7352 task = rpc_run_task(&task_setup); 7353 7354 if (IS_ERR(task)) 7355 status = PTR_ERR(task); 7356 else { 7357 status = task->tk_status; 7358 rpc_put_task(task); 7359 } 7360 dprintk("<-- %s return %d\n", __func__, status); 7361 7362 return status; 7363 } 7364 7365 /* 7366 * Initialize the values to be used by the client in CREATE_SESSION 7367 * If nfs4_init_session set the fore channel request and response sizes, 7368 * use them. 7369 * 7370 * Set the back channel max_resp_sz_cached to zero to force the client to 7371 * always set csa_cachethis to FALSE because the current implementation 7372 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
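 *
 * As a rough example, with the usual 1MB NFS_MAX_FILE_IO_SIZE the fore
 * channel ends up advertising max_rqst_sz/max_resp_sz of 1MB plus the
 * few hundred bytes of nfs41_maxwrite_overhead/nfs41_maxread_overhead
 * needed for the rest of the WRITE/READ compound.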
7373 */ 7374 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args, 7375 struct rpc_clnt *clnt) 7376 { 7377 unsigned int max_rqst_sz, max_resp_sz; 7378 unsigned int max_bc_payload = rpc_max_bc_payload(clnt); 7379 7380 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 7381 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 7382 7383 /* Fore channel attributes */ 7384 args->fc_attrs.max_rqst_sz = max_rqst_sz; 7385 args->fc_attrs.max_resp_sz = max_resp_sz; 7386 args->fc_attrs.max_ops = NFS4_MAX_OPS; 7387 args->fc_attrs.max_reqs = max_session_slots; 7388 7389 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 7390 "max_ops=%u max_reqs=%u\n", 7391 __func__, 7392 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 7393 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 7394 7395 /* Back channel attributes */ 7396 args->bc_attrs.max_rqst_sz = max_bc_payload; 7397 args->bc_attrs.max_resp_sz = max_bc_payload; 7398 args->bc_attrs.max_resp_sz_cached = 0; 7399 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 7400 args->bc_attrs.max_reqs = NFS41_BC_MAX_CALLBACKS; 7401 7402 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 7403 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 7404 __func__, 7405 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 7406 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 7407 args->bc_attrs.max_reqs); 7408 } 7409 7410 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 7411 struct nfs41_create_session_res *res) 7412 { 7413 struct nfs4_channel_attrs *sent = &args->fc_attrs; 7414 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 7415 7416 if (rcvd->max_resp_sz > sent->max_resp_sz) 7417 return -EINVAL; 7418 /* 7419 * Our requested max_ops is the minimum we need; we're not 7420 * prepared to break up compounds into smaller pieces than that. 
7421 * So, no point even trying to continue if the server won't 7422 * cooperate: 7423 */ 7424 if (rcvd->max_ops < sent->max_ops) 7425 return -EINVAL; 7426 if (rcvd->max_reqs == 0) 7427 return -EINVAL; 7428 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 7429 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 7430 return 0; 7431 } 7432 7433 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 7434 struct nfs41_create_session_res *res) 7435 { 7436 struct nfs4_channel_attrs *sent = &args->bc_attrs; 7437 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 7438 7439 if (!(res->flags & SESSION4_BACK_CHAN)) 7440 goto out; 7441 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 7442 return -EINVAL; 7443 if (rcvd->max_resp_sz < sent->max_resp_sz) 7444 return -EINVAL; 7445 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 7446 return -EINVAL; 7447 /* These would render the backchannel useless: */ 7448 if (rcvd->max_ops != sent->max_ops) 7449 return -EINVAL; 7450 if (rcvd->max_reqs != sent->max_reqs) 7451 return -EINVAL; 7452 out: 7453 return 0; 7454 } 7455 7456 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 7457 struct nfs41_create_session_res *res) 7458 { 7459 int ret; 7460 7461 ret = nfs4_verify_fore_channel_attrs(args, res); 7462 if (ret) 7463 return ret; 7464 return nfs4_verify_back_channel_attrs(args, res); 7465 } 7466 7467 static void nfs4_update_session(struct nfs4_session *session, 7468 struct nfs41_create_session_res *res) 7469 { 7470 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 7471 /* Mark client id and session as being confirmed */ 7472 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 7473 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 7474 session->flags = res->flags; 7475 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 7476 if (res->flags & SESSION4_BACK_CHAN) 7477 memcpy(&session->bc_attrs, &res->bc_attrs, 7478 sizeof(session->bc_attrs)); 7479 } 7480 7481 static int _nfs4_proc_create_session(struct nfs_client *clp, 7482 struct rpc_cred *cred) 7483 { 7484 struct nfs4_session *session = clp->cl_session; 7485 struct nfs41_create_session_args args = { 7486 .client = clp, 7487 .clientid = clp->cl_clientid, 7488 .seqid = clp->cl_seqid, 7489 .cb_program = NFS4_CALLBACK, 7490 }; 7491 struct nfs41_create_session_res res; 7492 7493 struct rpc_message msg = { 7494 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 7495 .rpc_argp = &args, 7496 .rpc_resp = &res, 7497 .rpc_cred = cred, 7498 }; 7499 int status; 7500 7501 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 7502 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 7503 7504 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7505 trace_nfs4_create_session(clp, status); 7506 7507 if (!status) { 7508 /* Verify the session's negotiated channel_attrs values */ 7509 status = nfs4_verify_channel_attrs(&args, &res); 7510 /* Increment the clientid slot sequence id */ 7511 if (clp->cl_seqid == res.seqid) 7512 clp->cl_seqid++; 7513 if (status) 7514 goto out; 7515 nfs4_update_session(session, &res); 7516 } 7517 out: 7518 return status; 7519 } 7520 7521 /* 7522 * Issues a CREATE_SESSION operation to the server. 7523 * It is the responsibility of the caller to verify the session is 7524 * expired before calling this routine. 
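 *
 * Note that _nfs4_proc_create_session() bumps clp->cl_seqid as soon as
 * the server echoes the sequence id it was sent, even if the negotiated
 * channel attributes are then rejected.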
7525 */ 7526 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred) 7527 { 7528 int status; 7529 unsigned *ptr; 7530 struct nfs4_session *session = clp->cl_session; 7531 7532 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 7533 7534 status = _nfs4_proc_create_session(clp, cred); 7535 if (status) 7536 goto out; 7537 7538 /* Init or reset the session slot tables */ 7539 status = nfs4_setup_session_slot_tables(session); 7540 dprintk("slot table setup returned %d\n", status); 7541 if (status) 7542 goto out; 7543 7544 ptr = (unsigned *)&session->sess_id.data[0]; 7545 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 7546 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 7547 out: 7548 dprintk("<-- %s\n", __func__); 7549 return status; 7550 } 7551 7552 /* 7553 * Issue the over-the-wire RPC DESTROY_SESSION. 7554 * The caller must serialize access to this routine. 7555 */ 7556 int nfs4_proc_destroy_session(struct nfs4_session *session, 7557 struct rpc_cred *cred) 7558 { 7559 struct rpc_message msg = { 7560 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 7561 .rpc_argp = session, 7562 .rpc_cred = cred, 7563 }; 7564 int status = 0; 7565 7566 dprintk("--> nfs4_proc_destroy_session\n"); 7567 7568 /* session is still being setup */ 7569 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 7570 return 0; 7571 7572 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7573 trace_nfs4_destroy_session(session->clp, status); 7574 7575 if (status) 7576 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 7577 "Session has been destroyed regardless...\n", status); 7578 7579 dprintk("<-- nfs4_proc_destroy_session\n"); 7580 return status; 7581 } 7582 7583 /* 7584 * Renew the cl_session lease. 
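 * NFSv4.1 has no RENEW operation; an otherwise empty SEQUENCE compound,
 * driven by the state renewal machinery via the callbacks below, keeps
 * the lease alive instead.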
7585 */ 7586 struct nfs4_sequence_data { 7587 struct nfs_client *clp; 7588 struct nfs4_sequence_args args; 7589 struct nfs4_sequence_res res; 7590 }; 7591 7592 static void nfs41_sequence_release(void *data) 7593 { 7594 struct nfs4_sequence_data *calldata = data; 7595 struct nfs_client *clp = calldata->clp; 7596 7597 if (atomic_read(&clp->cl_count) > 1) 7598 nfs4_schedule_state_renewal(clp); 7599 nfs_put_client(clp); 7600 kfree(calldata); 7601 } 7602 7603 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7604 { 7605 switch(task->tk_status) { 7606 case -NFS4ERR_DELAY: 7607 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7608 return -EAGAIN; 7609 default: 7610 nfs4_schedule_lease_recovery(clp); 7611 } 7612 return 0; 7613 } 7614 7615 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 7616 { 7617 struct nfs4_sequence_data *calldata = data; 7618 struct nfs_client *clp = calldata->clp; 7619 7620 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 7621 return; 7622 7623 trace_nfs4_sequence(clp, task->tk_status); 7624 if (task->tk_status < 0) { 7625 dprintk("%s ERROR %d\n", __func__, task->tk_status); 7626 if (atomic_read(&clp->cl_count) == 1) 7627 goto out; 7628 7629 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 7630 rpc_restart_call_prepare(task); 7631 return; 7632 } 7633 } 7634 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 7635 out: 7636 dprintk("<-- %s\n", __func__); 7637 } 7638 7639 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 7640 { 7641 struct nfs4_sequence_data *calldata = data; 7642 struct nfs_client *clp = calldata->clp; 7643 struct nfs4_sequence_args *args; 7644 struct nfs4_sequence_res *res; 7645 7646 args = task->tk_msg.rpc_argp; 7647 res = task->tk_msg.rpc_resp; 7648 7649 nfs41_setup_sequence(clp->cl_session, args, res, task); 7650 } 7651 7652 static const struct rpc_call_ops nfs41_sequence_ops = { 7653 .rpc_call_done = nfs41_sequence_call_done, 7654 .rpc_call_prepare = nfs41_sequence_prepare, 7655 .rpc_release = nfs41_sequence_release, 7656 }; 7657 7658 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 7659 struct rpc_cred *cred, 7660 bool is_privileged) 7661 { 7662 struct nfs4_sequence_data *calldata; 7663 struct rpc_message msg = { 7664 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 7665 .rpc_cred = cred, 7666 }; 7667 struct rpc_task_setup task_setup_data = { 7668 .rpc_client = clp->cl_rpcclient, 7669 .rpc_message = &msg, 7670 .callback_ops = &nfs41_sequence_ops, 7671 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 7672 }; 7673 7674 if (!atomic_inc_not_zero(&clp->cl_count)) 7675 return ERR_PTR(-EIO); 7676 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7677 if (calldata == NULL) { 7678 nfs_put_client(clp); 7679 return ERR_PTR(-ENOMEM); 7680 } 7681 nfs4_init_sequence(&calldata->args, &calldata->res, 0); 7682 if (is_privileged) 7683 nfs4_set_sequence_privileged(&calldata->args); 7684 msg.rpc_argp = &calldata->args; 7685 msg.rpc_resp = &calldata->res; 7686 calldata->clp = clp; 7687 task_setup_data.callback_data = calldata; 7688 7689 return rpc_run_task(&task_setup_data); 7690 } 7691 7692 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 7693 { 7694 struct rpc_task *task; 7695 int ret = 0; 7696 7697 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 7698 return -EAGAIN; 7699 task = _nfs41_proc_sequence(clp, cred, false); 7700 if (IS_ERR(task)) 7701 ret = PTR_ERR(task); 7702 else 7703 rpc_put_task_async(task); 
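	/* calldata (and the nfs_client reference) is released by
	 * nfs41_sequence_release() when the task completes */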
7704 dprintk("<-- %s status=%d\n", __func__, ret); 7705 return ret; 7706 } 7707 7708 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 7709 { 7710 struct rpc_task *task; 7711 int ret; 7712 7713 task = _nfs41_proc_sequence(clp, cred, true); 7714 if (IS_ERR(task)) { 7715 ret = PTR_ERR(task); 7716 goto out; 7717 } 7718 ret = rpc_wait_for_completion_task(task); 7719 if (!ret) 7720 ret = task->tk_status; 7721 rpc_put_task(task); 7722 out: 7723 dprintk("<-- %s status=%d\n", __func__, ret); 7724 return ret; 7725 } 7726 7727 struct nfs4_reclaim_complete_data { 7728 struct nfs_client *clp; 7729 struct nfs41_reclaim_complete_args arg; 7730 struct nfs41_reclaim_complete_res res; 7731 }; 7732 7733 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 7734 { 7735 struct nfs4_reclaim_complete_data *calldata = data; 7736 7737 nfs41_setup_sequence(calldata->clp->cl_session, 7738 &calldata->arg.seq_args, 7739 &calldata->res.seq_res, 7740 task); 7741 } 7742 7743 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7744 { 7745 switch(task->tk_status) { 7746 case 0: 7747 case -NFS4ERR_COMPLETE_ALREADY: 7748 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 7749 break; 7750 case -NFS4ERR_DELAY: 7751 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7752 /* fall through */ 7753 case -NFS4ERR_RETRY_UNCACHED_REP: 7754 return -EAGAIN; 7755 default: 7756 nfs4_schedule_lease_recovery(clp); 7757 } 7758 return 0; 7759 } 7760 7761 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 7762 { 7763 struct nfs4_reclaim_complete_data *calldata = data; 7764 struct nfs_client *clp = calldata->clp; 7765 struct nfs4_sequence_res *res = &calldata->res.seq_res; 7766 7767 dprintk("--> %s\n", __func__); 7768 if (!nfs41_sequence_done(task, res)) 7769 return; 7770 7771 trace_nfs4_reclaim_complete(clp, task->tk_status); 7772 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 7773 rpc_restart_call_prepare(task); 7774 return; 7775 } 7776 dprintk("<-- %s\n", __func__); 7777 } 7778 7779 static void nfs4_free_reclaim_complete_data(void *data) 7780 { 7781 struct nfs4_reclaim_complete_data *calldata = data; 7782 7783 kfree(calldata); 7784 } 7785 7786 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 7787 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 7788 .rpc_call_done = nfs4_reclaim_complete_done, 7789 .rpc_release = nfs4_free_reclaim_complete_data, 7790 }; 7791 7792 /* 7793 * Issue a global reclaim complete. 
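 * The one_fs argument is set to zero, so this tells the server that
 * reclaim is complete for all filesystems, not just the current one.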
7794 */ 7795 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 7796 struct rpc_cred *cred) 7797 { 7798 struct nfs4_reclaim_complete_data *calldata; 7799 struct rpc_task *task; 7800 struct rpc_message msg = { 7801 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 7802 .rpc_cred = cred, 7803 }; 7804 struct rpc_task_setup task_setup_data = { 7805 .rpc_client = clp->cl_rpcclient, 7806 .rpc_message = &msg, 7807 .callback_ops = &nfs4_reclaim_complete_call_ops, 7808 .flags = RPC_TASK_ASYNC, 7809 }; 7810 int status = -ENOMEM; 7811 7812 dprintk("--> %s\n", __func__); 7813 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7814 if (calldata == NULL) 7815 goto out; 7816 calldata->clp = clp; 7817 calldata->arg.one_fs = 0; 7818 7819 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0); 7820 nfs4_set_sequence_privileged(&calldata->arg.seq_args); 7821 msg.rpc_argp = &calldata->arg; 7822 msg.rpc_resp = &calldata->res; 7823 task_setup_data.callback_data = calldata; 7824 task = rpc_run_task(&task_setup_data); 7825 if (IS_ERR(task)) { 7826 status = PTR_ERR(task); 7827 goto out; 7828 } 7829 status = nfs4_wait_for_completion_rpc_task(task); 7830 if (status == 0) 7831 status = task->tk_status; 7832 rpc_put_task(task); 7833 return 0; 7834 out: 7835 dprintk("<-- %s status=%d\n", __func__, status); 7836 return status; 7837 } 7838 7839 static void 7840 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 7841 { 7842 struct nfs4_layoutget *lgp = calldata; 7843 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 7844 struct nfs4_session *session = nfs4_get_session(server); 7845 int ret; 7846 7847 dprintk("--> %s\n", __func__); 7848 /* Note there is a race here, where a CB_LAYOUTRECALL can come in 7849 * right now covering the LAYOUTGET we are about to send. 7850 * However, that is not so catastrophic, and there seems 7851 * to be no way to prevent it completely. 7852 */ 7853 if (nfs41_setup_sequence(session, &lgp->args.seq_args, 7854 &lgp->res.seq_res, task)) 7855 return; 7856 ret = pnfs_choose_layoutget_stateid(&lgp->args.stateid, 7857 NFS_I(lgp->args.inode)->layout, 7858 &lgp->args.range, 7859 lgp->args.ctx->state); 7860 if (ret < 0) 7861 rpc_exit(task, ret); 7862 } 7863 7864 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 7865 { 7866 struct nfs4_layoutget *lgp = calldata; 7867 struct inode *inode = lgp->args.inode; 7868 struct nfs_server *server = NFS_SERVER(inode); 7869 struct pnfs_layout_hdr *lo; 7870 struct nfs4_state *state = NULL; 7871 unsigned long timeo, now, giveup; 7872 7873 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 7874 7875 if (!nfs41_sequence_done(task, &lgp->res.seq_res)) 7876 goto out; 7877 7878 switch (task->tk_status) { 7879 case 0: 7880 goto out; 7881 7882 /* 7883 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs 7884 * on the file. Set tk_status to -ENODATA to tell the upper layer to 7885 * retry the I/O in-band through the MDS. 7886 */ 7887 case -NFS4ERR_LAYOUTUNAVAILABLE: 7888 task->tk_status = -ENODATA; 7889 goto out; 7890 /* 7891 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 7892 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 7893 */ 7894 case -NFS4ERR_BADLAYOUT: 7895 goto out_overflow; 7896 /* 7897 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 7898 * (or clients) writing to the same RAID stripe except when 7899 * the minlength argument is 0 (see RFC5661 section 18.43.3).
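 * When minlength is 0 the error is not retried here; it is mapped to
 * -EOVERFLOW below.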
7900 */ 7901 case -NFS4ERR_LAYOUTTRYLATER: 7902 if (lgp->args.minlength == 0) 7903 goto out_overflow; 7904 /* 7905 * Fall through: NFS4ERR_RECALLCONFLICT means a conflict with this client 7906 * itself (the existing layout must be returned before a new one can be granted). 7907 */ 7908 case -NFS4ERR_RECALLCONFLICT: 7909 timeo = rpc_get_timeout(task->tk_client); 7910 giveup = lgp->args.timestamp + timeo; 7911 now = jiffies; 7912 if (time_after(giveup, now)) { 7913 unsigned long delay; 7914 7915 /* Delay for: 7916 * - not less than NFS4_POLL_RETRY_MIN, 7917 * - at most one jiffy less than the time left before we give up, 7918 * - otherwise the time already spent on this attempt (now minus the start), 7919 * so the backoff grows as the attempt drags on. 7920 */ delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN, 7921 min((giveup - now - 1), 7922 now - lgp->args.timestamp)); 7923 7924 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n", 7925 __func__, delay); 7926 rpc_delay(task, delay); 7927 /* Do not call nfs4_async_handle_error() */ 7928 goto out_restart; 7929 } 7930 break; 7931 case -NFS4ERR_EXPIRED: 7932 case -NFS4ERR_BAD_STATEID: 7933 spin_lock(&inode->i_lock); 7934 if (nfs4_stateid_match(&lgp->args.stateid, 7935 &lgp->args.ctx->state->stateid)) { 7936 spin_unlock(&inode->i_lock); 7937 /* If the open stateid was bad, then recover it. */ 7938 state = lgp->args.ctx->state; 7939 break; 7940 } 7941 lo = NFS_I(inode)->layout; 7942 if (lo && nfs4_stateid_match(&lgp->args.stateid, 7943 &lo->plh_stateid)) { 7944 LIST_HEAD(head); 7945 7946 /* 7947 * Mark the bad layout state as invalid, then retry 7948 * with the current stateid. 7949 */ 7950 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); 7951 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL); 7952 spin_unlock(&inode->i_lock); 7953 pnfs_free_lseg_list(&head); 7954 } else 7955 spin_unlock(&inode->i_lock); 7956 goto out_restart; 7957 } 7958 if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN) 7959 goto out_restart; 7960 out: 7961 dprintk("<-- %s\n", __func__); 7962 return; 7963 out_restart: 7964 task->tk_status = 0; 7965 rpc_restart_call_prepare(task); 7966 return; 7967 out_overflow: 7968 task->tk_status = -EOVERFLOW; 7969 goto out; 7970 } 7971 7972 static size_t max_response_pages(struct nfs_server *server) 7973 { 7974 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 7975 return nfs_page_array_len(0, max_resp_sz); 7976 } 7977 7978 static void nfs4_free_pages(struct page **pages, size_t size) 7979 { 7980 int i; 7981 7982 if (!pages) 7983 return; 7984 7985 for (i = 0; i < size; i++) { 7986 if (!pages[i]) 7987 break; 7988 __free_page(pages[i]); 7989 } 7990 kfree(pages); 7991 } 7992 7993 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) 7994 { 7995 struct page **pages; 7996 int i; 7997 7998 pages = kcalloc(size, sizeof(struct page *), gfp_flags); 7999 if (!pages) { 8000 dprintk("%s: can't alloc array of %zu pages\n", __func__, size); 8001 return NULL; 8002 } 8003 8004 for (i = 0; i < size; i++) { 8005 pages[i] = alloc_page(gfp_flags); 8006 if (!pages[i]) { 8007 dprintk("%s: failed to allocate page\n", __func__); 8008 nfs4_free_pages(pages, size); 8009 return NULL; 8010 } 8011 } 8012 8013 return pages; 8014 } 8015 8016 static void nfs4_layoutget_release(void *calldata) 8017 { 8018 struct nfs4_layoutget *lgp = calldata; 8019 struct inode *inode = lgp->args.inode; 8020 struct nfs_server *server = NFS_SERVER(inode); 8021 size_t max_pages = max_response_pages(server); 8022 8023 dprintk("--> %s\n", __func__); 8024 nfs4_free_pages(lgp->args.layout.pages, max_pages); 8025 pnfs_put_layout_hdr(NFS_I(inode)->layout); 8026
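	/* drop the open context reference held in lgp->args */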
put_nfs_open_context(lgp->args.ctx); 8027 kfree(calldata); 8028 dprintk("<-- %s\n", __func__); 8029 } 8030 8031 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 8032 .rpc_call_prepare = nfs4_layoutget_prepare, 8033 .rpc_call_done = nfs4_layoutget_done, 8034 .rpc_release = nfs4_layoutget_release, 8035 }; 8036 8037 struct pnfs_layout_segment * 8038 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) 8039 { 8040 struct inode *inode = lgp->args.inode; 8041 struct nfs_server *server = NFS_SERVER(inode); 8042 size_t max_pages = max_response_pages(server); 8043 struct rpc_task *task; 8044 struct rpc_message msg = { 8045 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 8046 .rpc_argp = &lgp->args, 8047 .rpc_resp = &lgp->res, 8048 .rpc_cred = lgp->cred, 8049 }; 8050 struct rpc_task_setup task_setup_data = { 8051 .rpc_client = server->client, 8052 .rpc_message = &msg, 8053 .callback_ops = &nfs4_layoutget_call_ops, 8054 .callback_data = lgp, 8055 .flags = RPC_TASK_ASYNC, 8056 }; 8057 struct pnfs_layout_segment *lseg = NULL; 8058 int status = 0; 8059 8060 dprintk("--> %s\n", __func__); 8061 8062 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ 8063 pnfs_get_layout_hdr(NFS_I(inode)->layout); 8064 8065 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); 8066 if (!lgp->args.layout.pages) { 8067 nfs4_layoutget_release(lgp); 8068 return ERR_PTR(-ENOMEM); 8069 } 8070 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 8071 lgp->args.timestamp = jiffies; 8072 8073 lgp->res.layoutp = &lgp->args.layout; 8074 lgp->res.seq_res.sr_slot = NULL; 8075 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 8076 8077 task = rpc_run_task(&task_setup_data); 8078 if (IS_ERR(task)) 8079 return ERR_CAST(task); 8080 status = nfs4_wait_for_completion_rpc_task(task); 8081 if (status == 0) 8082 status = task->tk_status; 8083 trace_nfs4_layoutget(lgp->args.ctx, 8084 &lgp->args.range, 8085 &lgp->res.range, 8086 &lgp->res.stateid, 8087 status); 8088 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ 8089 if (status == 0 && lgp->res.layoutp->len) 8090 lseg = pnfs_layout_process(lgp); 8091 rpc_put_task(task); 8092 dprintk("<-- %s status=%d\n", __func__, status); 8093 if (status) 8094 return ERR_PTR(status); 8095 return lseg; 8096 } 8097 8098 static void 8099 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 8100 { 8101 struct nfs4_layoutreturn *lrp = calldata; 8102 8103 dprintk("--> %s\n", __func__); 8104 nfs41_setup_sequence(lrp->clp->cl_session, 8105 &lrp->args.seq_args, 8106 &lrp->res.seq_res, 8107 task); 8108 } 8109 8110 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 8111 { 8112 struct nfs4_layoutreturn *lrp = calldata; 8113 struct nfs_server *server; 8114 8115 dprintk("--> %s\n", __func__); 8116 8117 if (!nfs41_sequence_done(task, &lrp->res.seq_res)) 8118 return; 8119 8120 server = NFS_SERVER(lrp->args.inode); 8121 switch (task->tk_status) { 8122 default: 8123 task->tk_status = 0; 8124 case 0: 8125 break; 8126 case -NFS4ERR_DELAY: 8127 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) 8128 break; 8129 rpc_restart_call_prepare(task); 8130 return; 8131 } 8132 dprintk("<-- %s\n", __func__); 8133 } 8134 8135 static void nfs4_layoutreturn_release(void *calldata) 8136 { 8137 struct nfs4_layoutreturn *lrp = calldata; 8138 struct pnfs_layout_hdr *lo = lrp->args.layout; 8139 LIST_HEAD(freeme); 8140 8141 dprintk("--> %s\n", __func__); 8142 spin_lock(&lo->plh_inode->i_lock); 8143 
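	/* Under i_lock: invalidate the lsegs covered by the return range,
	 * record the new layout stateid if the server reported remaining
	 * layout state, and wake up layoutreturn waiters. */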
pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range); 8144 pnfs_mark_layout_returned_if_empty(lo); 8145 if (lrp->res.lrs_present) 8146 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 8147 pnfs_clear_layoutreturn_waitbit(lo); 8148 spin_unlock(&lo->plh_inode->i_lock); 8149 pnfs_free_lseg_list(&freeme); 8150 pnfs_put_layout_hdr(lrp->args.layout); 8151 nfs_iput_and_deactive(lrp->inode); 8152 kfree(calldata); 8153 dprintk("<-- %s\n", __func__); 8154 } 8155 8156 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 8157 .rpc_call_prepare = nfs4_layoutreturn_prepare, 8158 .rpc_call_done = nfs4_layoutreturn_done, 8159 .rpc_release = nfs4_layoutreturn_release, 8160 }; 8161 8162 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync) 8163 { 8164 struct rpc_task *task; 8165 struct rpc_message msg = { 8166 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 8167 .rpc_argp = &lrp->args, 8168 .rpc_resp = &lrp->res, 8169 .rpc_cred = lrp->cred, 8170 }; 8171 struct rpc_task_setup task_setup_data = { 8172 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 8173 .rpc_message = &msg, 8174 .callback_ops = &nfs4_layoutreturn_call_ops, 8175 .callback_data = lrp, 8176 }; 8177 int status = 0; 8178 8179 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 8180 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 8181 &task_setup_data.rpc_client, &msg); 8182 8183 dprintk("--> %s\n", __func__); 8184 if (!sync) { 8185 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 8186 if (!lrp->inode) { 8187 nfs4_layoutreturn_release(lrp); 8188 return -EAGAIN; 8189 } 8190 task_setup_data.flags |= RPC_TASK_ASYNC; 8191 } 8192 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); 8193 task = rpc_run_task(&task_setup_data); 8194 if (IS_ERR(task)) 8195 return PTR_ERR(task); 8196 if (sync) 8197 status = task->tk_status; 8198 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 8199 dprintk("<-- %s status=%d\n", __func__, status); 8200 rpc_put_task(task); 8201 return status; 8202 } 8203 8204 static int 8205 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 8206 struct pnfs_device *pdev, 8207 struct rpc_cred *cred) 8208 { 8209 struct nfs4_getdeviceinfo_args args = { 8210 .pdev = pdev, 8211 .notify_types = NOTIFY_DEVICEID4_CHANGE | 8212 NOTIFY_DEVICEID4_DELETE, 8213 }; 8214 struct nfs4_getdeviceinfo_res res = { 8215 .pdev = pdev, 8216 }; 8217 struct rpc_message msg = { 8218 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 8219 .rpc_argp = &args, 8220 .rpc_resp = &res, 8221 .rpc_cred = cred, 8222 }; 8223 int status; 8224 8225 dprintk("--> %s\n", __func__); 8226 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 8227 if (res.notification & ~args.notify_types) 8228 dprintk("%s: unsupported notification\n", __func__); 8229 if (res.notification != args.notify_types) 8230 pdev->nocache = 1; 8231 8232 dprintk("<-- %s status=%d\n", __func__, status); 8233 8234 return status; 8235 } 8236 8237 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 8238 struct pnfs_device *pdev, 8239 struct rpc_cred *cred) 8240 { 8241 struct nfs4_exception exception = { }; 8242 int err; 8243 8244 do { 8245 err = nfs4_handle_exception(server, 8246 _nfs4_proc_getdeviceinfo(server, pdev, cred), 8247 &exception); 8248 } while (exception.retry); 8249 return err; 8250 } 8251 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 8252 8253 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 8254 { 8255 struct nfs4_layoutcommit_data *data = calldata; 8256 
struct nfs_server *server = NFS_SERVER(data->args.inode); 8257 struct nfs4_session *session = nfs4_get_session(server); 8258 8259 nfs41_setup_sequence(session, 8260 &data->args.seq_args, 8261 &data->res.seq_res, 8262 task); 8263 } 8264 8265 static void 8266 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 8267 { 8268 struct nfs4_layoutcommit_data *data = calldata; 8269 struct nfs_server *server = NFS_SERVER(data->args.inode); 8270 8271 if (!nfs41_sequence_done(task, &data->res.seq_res)) 8272 return; 8273 8274 switch (task->tk_status) { /* Just ignore these failures */ 8275 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 8276 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 8277 case -NFS4ERR_BADLAYOUT: /* no layout */ 8278 case -NFS4ERR_GRACE: /* loca_reclaim is always false */ 8279 task->tk_status = 0; 8280 case 0: 8281 break; 8282 default: 8283 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 8284 rpc_restart_call_prepare(task); 8285 return; 8286 } 8287 } 8288 } 8289 8290 static void nfs4_layoutcommit_release(void *calldata) 8291 { 8292 struct nfs4_layoutcommit_data *data = calldata; 8293 8294 pnfs_cleanup_layoutcommit(data); 8295 nfs_post_op_update_inode_force_wcc(data->args.inode, 8296 data->res.fattr); 8297 put_rpccred(data->cred); 8298 nfs_iput_and_deactive(data->inode); 8299 kfree(data); 8300 } 8301 8302 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 8303 .rpc_call_prepare = nfs4_layoutcommit_prepare, 8304 .rpc_call_done = nfs4_layoutcommit_done, 8305 .rpc_release = nfs4_layoutcommit_release, 8306 }; 8307 8308 int 8309 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 8310 { 8311 struct rpc_message msg = { 8312 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 8313 .rpc_argp = &data->args, 8314 .rpc_resp = &data->res, 8315 .rpc_cred = data->cred, 8316 }; 8317 struct rpc_task_setup task_setup_data = { 8318 .task = &data->task, 8319 .rpc_client = NFS_CLIENT(data->args.inode), 8320 .rpc_message = &msg, 8321 .callback_ops = &nfs4_layoutcommit_ops, 8322 .callback_data = data, 8323 }; 8324 struct rpc_task *task; 8325 int status = 0; 8326 8327 dprintk("NFS: initiating layoutcommit call.
sync %d " 8328 "lbw: %llu inode %lu\n", sync, 8329 data->args.lastbytewritten, 8330 data->args.inode->i_ino); 8331 8332 if (!sync) { 8333 data->inode = nfs_igrab_and_active(data->args.inode); 8334 if (data->inode == NULL) { 8335 nfs4_layoutcommit_release(data); 8336 return -EAGAIN; 8337 } 8338 task_setup_data.flags = RPC_TASK_ASYNC; 8339 } 8340 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 8341 task = rpc_run_task(&task_setup_data); 8342 if (IS_ERR(task)) 8343 return PTR_ERR(task); 8344 if (sync) 8345 status = task->tk_status; 8346 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 8347 dprintk("%s: status %d\n", __func__, status); 8348 rpc_put_task(task); 8349 return status; 8350 } 8351 8352 /** 8353 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if 8354 * possible) as per the RFC3530bis and RFC5661 Security Considerations sections. 8355 */ 8356 static int 8357 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 8358 struct nfs_fsinfo *info, 8359 struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8360 { 8361 struct nfs41_secinfo_no_name_args args = { 8362 .style = SECINFO_STYLE_CURRENT_FH, 8363 }; 8364 struct nfs4_secinfo_res res = { 8365 .flavors = flavors, 8366 }; 8367 struct rpc_message msg = { 8368 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 8369 .rpc_argp = &args, 8370 .rpc_resp = &res, 8371 }; 8372 struct rpc_clnt *clnt = server->client; 8373 struct rpc_cred *cred = NULL; 8374 int status; 8375 8376 if (use_integrity) { 8377 clnt = server->nfs_client->cl_rpcclient; 8378 cred = nfs4_get_clid_cred(server->nfs_client); 8379 msg.rpc_cred = cred; 8380 } 8381 8382 dprintk("--> %s\n", __func__); 8383 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 8384 &res.seq_res, 0); 8385 dprintk("<-- %s status=%d\n", __func__, status); 8386 8387 if (cred) 8388 put_rpccred(cred); 8389 8390 return status; 8391 } 8392 8393 static int 8394 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 8395 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 8396 { 8397 struct nfs4_exception exception = { }; 8398 int err; 8399 do { 8400 /* first try using integrity protection */ 8401 err = -NFS4ERR_WRONGSEC; 8402 8403 /* try to use integrity protection with machine cred */ 8404 if (_nfs4_is_integrity_protected(server->nfs_client)) 8405 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8406 flavors, true); 8407 8408 /* 8409 * if unable to use integrity protection, or SECINFO with 8410 * integrity protection returns NFS4ERR_WRONGSEC (which is 8411 * disallowed by spec, but exists in deployed servers), use 8412 * the current filesystem's rpc_client and the user cred.
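 * A second NFS4ERR_WRONGSEC from that fallback is treated as final by
 * the switch statement below.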
8413 */ 8414 if (err == -NFS4ERR_WRONGSEC) 8415 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8416 flavors, false); 8417 8418 switch (err) { 8419 case 0: 8420 case -NFS4ERR_WRONGSEC: 8421 case -ENOTSUPP: 8422 goto out; 8423 default: 8424 err = nfs4_handle_exception(server, err, &exception); 8425 } 8426 } while (exception.retry); 8427 out: 8428 return err; 8429 } 8430 8431 static int 8432 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 8433 struct nfs_fsinfo *info) 8434 { 8435 int err; 8436 struct page *page; 8437 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 8438 struct nfs4_secinfo_flavors *flavors; 8439 struct nfs4_secinfo4 *secinfo; 8440 int i; 8441 8442 page = alloc_page(GFP_KERNEL); 8443 if (!page) { 8444 err = -ENOMEM; 8445 goto out; 8446 } 8447 8448 flavors = page_address(page); 8449 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 8450 8451 /* 8452 * Fall back on "guess and check" method if 8453 * the server doesn't support SECINFO_NO_NAME 8454 */ 8455 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 8456 err = nfs4_find_root_sec(server, fhandle, info); 8457 goto out_freepage; 8458 } 8459 if (err) 8460 goto out_freepage; 8461 8462 for (i = 0; i < flavors->num_flavors; i++) { 8463 secinfo = &flavors->flavors[i]; 8464 8465 switch (secinfo->flavor) { 8466 case RPC_AUTH_NULL: 8467 case RPC_AUTH_UNIX: 8468 case RPC_AUTH_GSS: 8469 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 8470 &secinfo->flavor_info); 8471 break; 8472 default: 8473 flavor = RPC_AUTH_MAXFLAVOR; 8474 break; 8475 } 8476 8477 if (!nfs_auth_info_match(&server->auth_info, flavor)) 8478 flavor = RPC_AUTH_MAXFLAVOR; 8479 8480 if (flavor != RPC_AUTH_MAXFLAVOR) { 8481 err = nfs4_lookup_root_sec(server, fhandle, 8482 info, flavor); 8483 if (!err) 8484 break; 8485 } 8486 } 8487 8488 if (flavor == RPC_AUTH_MAXFLAVOR) 8489 err = -EPERM; 8490 8491 out_freepage: 8492 put_page(page); 8493 if (err == -EACCES) 8494 return -EPERM; 8495 out: 8496 return err; 8497 } 8498 8499 static int _nfs41_test_stateid(struct nfs_server *server, 8500 nfs4_stateid *stateid, 8501 struct rpc_cred *cred) 8502 { 8503 int status; 8504 struct nfs41_test_stateid_args args = { 8505 .stateid = stateid, 8506 }; 8507 struct nfs41_test_stateid_res res; 8508 struct rpc_message msg = { 8509 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 8510 .rpc_argp = &args, 8511 .rpc_resp = &res, 8512 .rpc_cred = cred, 8513 }; 8514 struct rpc_clnt *rpc_client = server->client; 8515 8516 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8517 &rpc_client, &msg); 8518 8519 dprintk("NFS call test_stateid %p\n", stateid); 8520 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 8521 nfs4_set_sequence_privileged(&args.seq_args); 8522 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 8523 &args.seq_args, &res.seq_res); 8524 if (status != NFS_OK) { 8525 dprintk("NFS reply test_stateid: failed, %d\n", status); 8526 return status; 8527 } 8528 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 8529 return -res.status; 8530 } 8531 8532 /** 8533 * nfs41_test_stateid - perform a TEST_STATEID operation 8534 * 8535 * @server: server / transport on which to perform the operation 8536 * @stateid: state ID to test 8537 * @cred: credential 8538 * 8539 * Returns NFS_OK if the server recognizes that "stateid" is valid. 8540 * Otherwise a negative NFS4ERR value is returned if the operation 8541 * failed or the state ID is not currently valid. 
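 * Only NFS4ERR_DELAY is retried here; any other error is returned to
 * the caller, which decides whether the state ID needs to be recovered.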
8542 */ 8543 static int nfs41_test_stateid(struct nfs_server *server, 8544 nfs4_stateid *stateid, 8545 struct rpc_cred *cred) 8546 { 8547 struct nfs4_exception exception = { }; 8548 int err; 8549 do { 8550 err = _nfs41_test_stateid(server, stateid, cred); 8551 if (err != -NFS4ERR_DELAY) 8552 break; 8553 nfs4_handle_exception(server, err, &exception); 8554 } while (exception.retry); 8555 return err; 8556 } 8557 8558 struct nfs_free_stateid_data { 8559 struct nfs_server *server; 8560 struct nfs41_free_stateid_args args; 8561 struct nfs41_free_stateid_res res; 8562 }; 8563 8564 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 8565 { 8566 struct nfs_free_stateid_data *data = calldata; 8567 nfs41_setup_sequence(nfs4_get_session(data->server), 8568 &data->args.seq_args, 8569 &data->res.seq_res, 8570 task); 8571 } 8572 8573 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 8574 { 8575 struct nfs_free_stateid_data *data = calldata; 8576 8577 nfs41_sequence_done(task, &data->res.seq_res); 8578 8579 switch (task->tk_status) { 8580 case -NFS4ERR_DELAY: 8581 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 8582 rpc_restart_call_prepare(task); 8583 } 8584 } 8585 8586 static void nfs41_free_stateid_release(void *calldata) 8587 { 8588 kfree(calldata); 8589 } 8590 8591 static const struct rpc_call_ops nfs41_free_stateid_ops = { 8592 .rpc_call_prepare = nfs41_free_stateid_prepare, 8593 .rpc_call_done = nfs41_free_stateid_done, 8594 .rpc_release = nfs41_free_stateid_release, 8595 }; 8596 8597 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server, 8598 nfs4_stateid *stateid, 8599 struct rpc_cred *cred, 8600 bool privileged) 8601 { 8602 struct rpc_message msg = { 8603 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 8604 .rpc_cred = cred, 8605 }; 8606 struct rpc_task_setup task_setup = { 8607 .rpc_client = server->client, 8608 .rpc_message = &msg, 8609 .callback_ops = &nfs41_free_stateid_ops, 8610 .flags = RPC_TASK_ASYNC, 8611 }; 8612 struct nfs_free_stateid_data *data; 8613 8614 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8615 &task_setup.rpc_client, &msg); 8616 8617 dprintk("NFS call free_stateid %p\n", stateid); 8618 data = kmalloc(sizeof(*data), GFP_NOFS); 8619 if (!data) 8620 return ERR_PTR(-ENOMEM); 8621 data->server = server; 8622 nfs4_stateid_copy(&data->args.stateid, stateid); 8623 8624 task_setup.callback_data = data; 8625 8626 msg.rpc_argp = &data->args; 8627 msg.rpc_resp = &data->res; 8628 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 8629 if (privileged) 8630 nfs4_set_sequence_privileged(&data->args.seq_args); 8631 8632 return rpc_run_task(&task_setup); 8633 } 8634 8635 /** 8636 * nfs41_free_stateid - perform a FREE_STATEID operation 8637 * 8638 * @server: server / transport on which to perform the operation 8639 * @stateid: state ID to release 8640 * @cred: credential 8641 * 8642 * Returns NFS_OK if the server freed "stateid". Otherwise a 8643 * negative NFS4ERR value is returned. 
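 * The request is issued as a privileged operation (privileged == true),
 * so it should not be blocked behind session or state recovery.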
8644 */ 8645 static int nfs41_free_stateid(struct nfs_server *server, 8646 nfs4_stateid *stateid, 8647 struct rpc_cred *cred) 8648 { 8649 struct rpc_task *task; 8650 int ret; 8651 8652 task = _nfs41_free_stateid(server, stateid, cred, true); 8653 if (IS_ERR(task)) 8654 return PTR_ERR(task); 8655 ret = rpc_wait_for_completion_task(task); 8656 if (!ret) 8657 ret = task->tk_status; 8658 rpc_put_task(task); 8659 return ret; 8660 } 8661 8662 static void 8663 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 8664 { 8665 struct rpc_task *task; 8666 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 8667 8668 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 8669 nfs4_free_lock_state(server, lsp); 8670 if (IS_ERR(task)) 8671 return; 8672 rpc_put_task(task); 8673 } 8674 8675 static bool nfs41_match_stateid(const nfs4_stateid *s1, 8676 const nfs4_stateid *s2) 8677 { 8678 if (s1->type != s2->type) 8679 return false; 8680 8681 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 8682 return false; 8683 8684 if (s1->seqid == s2->seqid) 8685 return true; 8686 if (s1->seqid == 0 || s2->seqid == 0) 8687 return true; 8688 8689 return false; 8690 } 8691 8692 #endif /* CONFIG_NFS_V4_1 */ 8693 8694 static bool nfs4_match_stateid(const nfs4_stateid *s1, 8695 const nfs4_stateid *s2) 8696 { 8697 return nfs4_stateid_match(s1, s2); 8698 } 8699 8700 8701 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 8702 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8703 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8704 .recover_open = nfs4_open_reclaim, 8705 .recover_lock = nfs4_lock_reclaim, 8706 .establish_clid = nfs4_init_clientid, 8707 .detect_trunking = nfs40_discover_server_trunking, 8708 }; 8709 8710 #if defined(CONFIG_NFS_V4_1) 8711 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 8712 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8713 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8714 .recover_open = nfs4_open_reclaim, 8715 .recover_lock = nfs4_lock_reclaim, 8716 .establish_clid = nfs41_init_clientid, 8717 .reclaim_complete = nfs41_proc_reclaim_complete, 8718 .detect_trunking = nfs41_discover_server_trunking, 8719 }; 8720 #endif /* CONFIG_NFS_V4_1 */ 8721 8722 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 8723 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8724 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8725 .recover_open = nfs40_open_expired, 8726 .recover_lock = nfs4_lock_expired, 8727 .establish_clid = nfs4_init_clientid, 8728 }; 8729 8730 #if defined(CONFIG_NFS_V4_1) 8731 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 8732 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8733 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8734 .recover_open = nfs41_open_expired, 8735 .recover_lock = nfs41_lock_expired, 8736 .establish_clid = nfs41_init_clientid, 8737 }; 8738 #endif /* CONFIG_NFS_V4_1 */ 8739 8740 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 8741 .sched_state_renewal = nfs4_proc_async_renew, 8742 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, 8743 .renew_lease = nfs4_proc_renew, 8744 }; 8745 8746 #if defined(CONFIG_NFS_V4_1) 8747 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 8748 .sched_state_renewal = nfs41_proc_async_sequence, 8749 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, 8750 .renew_lease = nfs4_proc_sequence, 8751 }; 8752 #endif 8753 8754 static const struct 
nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 8755 .get_locations = _nfs40_proc_get_locations, 8756 .fsid_present = _nfs40_proc_fsid_present, 8757 }; 8758 8759 #if defined(CONFIG_NFS_V4_1) 8760 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 8761 .get_locations = _nfs41_proc_get_locations, 8762 .fsid_present = _nfs41_proc_fsid_present, 8763 }; 8764 #endif /* CONFIG_NFS_V4_1 */ 8765 8766 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 8767 .minor_version = 0, 8768 .init_caps = NFS_CAP_READDIRPLUS 8769 | NFS_CAP_ATOMIC_OPEN 8770 | NFS_CAP_POSIX_LOCK, 8771 .init_client = nfs40_init_client, 8772 .shutdown_client = nfs40_shutdown_client, 8773 .match_stateid = nfs4_match_stateid, 8774 .find_root_sec = nfs4_find_root_sec, 8775 .free_lock_state = nfs4_release_lockowner, 8776 .alloc_seqid = nfs_alloc_seqid, 8777 .call_sync_ops = &nfs40_call_sync_ops, 8778 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 8779 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 8780 .state_renewal_ops = &nfs40_state_renewal_ops, 8781 .mig_recovery_ops = &nfs40_mig_recovery_ops, 8782 }; 8783 8784 #if defined(CONFIG_NFS_V4_1) 8785 static struct nfs_seqid * 8786 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 8787 { 8788 return NULL; 8789 } 8790 8791 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 8792 .minor_version = 1, 8793 .init_caps = NFS_CAP_READDIRPLUS 8794 | NFS_CAP_ATOMIC_OPEN 8795 | NFS_CAP_POSIX_LOCK 8796 | NFS_CAP_STATEID_NFSV41 8797 | NFS_CAP_ATOMIC_OPEN_V1, 8798 .init_client = nfs41_init_client, 8799 .shutdown_client = nfs41_shutdown_client, 8800 .match_stateid = nfs41_match_stateid, 8801 .find_root_sec = nfs41_find_root_sec, 8802 .free_lock_state = nfs41_free_lock_state, 8803 .alloc_seqid = nfs_alloc_no_seqid, 8804 .call_sync_ops = &nfs41_call_sync_ops, 8805 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8806 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8807 .state_renewal_ops = &nfs41_state_renewal_ops, 8808 .mig_recovery_ops = &nfs41_mig_recovery_ops, 8809 }; 8810 #endif 8811 8812 #if defined(CONFIG_NFS_V4_2) 8813 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 8814 .minor_version = 2, 8815 .init_caps = NFS_CAP_READDIRPLUS 8816 | NFS_CAP_ATOMIC_OPEN 8817 | NFS_CAP_POSIX_LOCK 8818 | NFS_CAP_STATEID_NFSV41 8819 | NFS_CAP_ATOMIC_OPEN_V1 8820 | NFS_CAP_ALLOCATE 8821 | NFS_CAP_COPY 8822 | NFS_CAP_DEALLOCATE 8823 | NFS_CAP_SEEK 8824 | NFS_CAP_LAYOUTSTATS 8825 | NFS_CAP_CLONE, 8826 .init_client = nfs41_init_client, 8827 .shutdown_client = nfs41_shutdown_client, 8828 .match_stateid = nfs41_match_stateid, 8829 .find_root_sec = nfs41_find_root_sec, 8830 .free_lock_state = nfs41_free_lock_state, 8831 .call_sync_ops = &nfs41_call_sync_ops, 8832 .alloc_seqid = nfs_alloc_no_seqid, 8833 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8834 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8835 .state_renewal_ops = &nfs41_state_renewal_ops, 8836 .mig_recovery_ops = &nfs41_mig_recovery_ops, 8837 }; 8838 #endif 8839 8840 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 8841 [0] = &nfs_v4_0_minor_ops, 8842 #if defined(CONFIG_NFS_V4_1) 8843 [1] = &nfs_v4_1_minor_ops, 8844 #endif 8845 #if defined(CONFIG_NFS_V4_2) 8846 [2] = &nfs_v4_2_minor_ops, 8847 #endif 8848 }; 8849 8850 ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 8851 { 8852 ssize_t error, error2; 8853 8854 error = generic_listxattr(dentry, list, size); 8855 if (error < 0) 8856 return error; 8857 if (list) { 8858 list += 
error; 8859 size -= error; 8860 } 8861 8862 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size); 8863 if (error2 < 0) 8864 return error2; 8865 return error + error2; 8866 } 8867 8868 static const struct inode_operations nfs4_dir_inode_operations = { 8869 .create = nfs_create, 8870 .lookup = nfs_lookup, 8871 .atomic_open = nfs_atomic_open, 8872 .link = nfs_link, 8873 .unlink = nfs_unlink, 8874 .symlink = nfs_symlink, 8875 .mkdir = nfs_mkdir, 8876 .rmdir = nfs_rmdir, 8877 .mknod = nfs_mknod, 8878 .rename = nfs_rename, 8879 .permission = nfs_permission, 8880 .getattr = nfs_getattr, 8881 .setattr = nfs_setattr, 8882 .getxattr = generic_getxattr, 8883 .setxattr = generic_setxattr, 8884 .listxattr = nfs4_listxattr, 8885 .removexattr = generic_removexattr, 8886 }; 8887 8888 static const struct inode_operations nfs4_file_inode_operations = { 8889 .permission = nfs_permission, 8890 .getattr = nfs_getattr, 8891 .setattr = nfs_setattr, 8892 .getxattr = generic_getxattr, 8893 .setxattr = generic_setxattr, 8894 .listxattr = nfs4_listxattr, 8895 .removexattr = generic_removexattr, 8896 }; 8897 8898 const struct nfs_rpc_ops nfs_v4_clientops = { 8899 .version = 4, /* protocol version */ 8900 .dentry_ops = &nfs4_dentry_operations, 8901 .dir_inode_ops = &nfs4_dir_inode_operations, 8902 .file_inode_ops = &nfs4_file_inode_operations, 8903 .file_ops = &nfs4_file_operations, 8904 .getroot = nfs4_proc_get_root, 8905 .submount = nfs4_submount, 8906 .try_mount = nfs4_try_mount, 8907 .getattr = nfs4_proc_getattr, 8908 .setattr = nfs4_proc_setattr, 8909 .lookup = nfs4_proc_lookup, 8910 .access = nfs4_proc_access, 8911 .readlink = nfs4_proc_readlink, 8912 .create = nfs4_proc_create, 8913 .remove = nfs4_proc_remove, 8914 .unlink_setup = nfs4_proc_unlink_setup, 8915 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 8916 .unlink_done = nfs4_proc_unlink_done, 8917 .rename_setup = nfs4_proc_rename_setup, 8918 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 8919 .rename_done = nfs4_proc_rename_done, 8920 .link = nfs4_proc_link, 8921 .symlink = nfs4_proc_symlink, 8922 .mkdir = nfs4_proc_mkdir, 8923 .rmdir = nfs4_proc_remove, 8924 .readdir = nfs4_proc_readdir, 8925 .mknod = nfs4_proc_mknod, 8926 .statfs = nfs4_proc_statfs, 8927 .fsinfo = nfs4_proc_fsinfo, 8928 .pathconf = nfs4_proc_pathconf, 8929 .set_capabilities = nfs4_server_capabilities, 8930 .decode_dirent = nfs4_decode_dirent, 8931 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 8932 .read_setup = nfs4_proc_read_setup, 8933 .read_done = nfs4_read_done, 8934 .write_setup = nfs4_proc_write_setup, 8935 .write_done = nfs4_write_done, 8936 .commit_setup = nfs4_proc_commit_setup, 8937 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 8938 .commit_done = nfs4_commit_done, 8939 .lock = nfs4_proc_lock, 8940 .clear_acl_cache = nfs4_zap_acl_attr, 8941 .close_context = nfs4_close_context, 8942 .open_context = nfs4_atomic_open, 8943 .have_delegation = nfs4_have_delegation, 8944 .return_delegation = nfs4_inode_return_delegation, 8945 .alloc_client = nfs4_alloc_client, 8946 .init_client = nfs4_init_client, 8947 .free_client = nfs4_free_client, 8948 .create_server = nfs4_create_server, 8949 .clone_server = nfs_clone_server, 8950 }; 8951 8952 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 8953 .name = XATTR_NAME_NFSV4_ACL, 8954 .list = nfs4_xattr_list_nfs4_acl, 8955 .get = nfs4_xattr_get_nfs4_acl, 8956 .set = nfs4_xattr_set_nfs4_acl, 8957 }; 8958 8959 const struct xattr_handler *nfs4_xattr_handlers[] = { 8960 &nfs4_xattr_nfs4_acl_handler, 8961 
#ifdef CONFIG_NFS_V4_SECURITY_LABEL 8962 &nfs4_xattr_nfs4_label_handler, 8963 #endif 8964 NULL 8965 }; 8966 8967 /* 8968 * Local variables: 8969 * c-basic-offset: 8 8970 * End: 8971 */ 8972