/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)
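/*
 * Retry backoff note: nfs4_update_delay() below doubles the caller's
 * timeout on every pass, starting no lower than NFS4_POLL_RETRY_MIN and
 * clamped at NFS4_POLL_RETRY_MAX, i.e. 100ms, 200ms, 400ms, ... capped
 * at 15 seconds.
 */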
/* file attributes which can be mapped to nfs attributes */
#define NFS4_VALID_ATTRS (ATTR_MODE \
	| ATTR_UID \
	| ATTR_GID \
	| ATTR_SIZE \
	| ATTR_ATIME \
	| ATTR_MTIME \
	| ATTR_CTIME \
	| ATTR_ATIME_SET \
	| ATTR_MTIME_SET)

struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state, struct nfs4_label *ilabel,
			    struct nfs4_label *olabel);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
		struct rpc_cred *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
		struct rpc_cred *);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, (void **)&label->label, &label->len);
	if (err == 0)
		return label;

	return NULL;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	if (label)
		security_release_secctx(label->label, label->len);
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}
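/*
 * Illustrative examples of the mapping above: an ordinary errno such as
 * -ENOENT (>= -1000) passes through unchanged, -NFS4ERR_BADOWNER becomes
 * -EINVAL, and any NFSv4 error without an explicit mapping collapses to
 * -EIO so that raw protocol error numbers never reach userland.
 */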
/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
			| FATTR4_WORD2_CLONE_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};
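/*
 * Each bitmap array above supplies the three attribute words
 * (FATTR4_WORD0/1/2) requested on the wire for one class of operation.
 * Callers simply point their request at the appropriate array; for
 * example, the open path later in this file does:
 *
 *	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
 */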
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here. We let '.'
	 * have cookie 0 and '..' have cookie 1. Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
		*p++ = htonl(8);			/* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);		/* bitmap */
	*p++ = htonl(8);				/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	int res = 0;

	might_sleep();

	freezable_schedule_timeout_killable_unsafe(
		nfs4_update_delay(timeout));
	if (fatal_signal_pending(current))
		res = -ERESTARTSYS;
	return res;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->delay = 0;
	exception->recovering = 0;
	exception->retry = 0;
	switch(errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_OPENMODE:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (inode && nfs_async_inode_return_delegation(inode,
					NULL) == 0)
			goto wait_on_recovery;
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			ret = nfs4_schedule_stateid_recovery(server, state);
			if (ret < 0)
				break;
		}
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_MOVED:
		ret = nfs4_schedule_migration_recovery(server);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR: %d Reset session\n", __func__,
			errorcode);
		nfs4_schedule_session_recovery(clp->cl_session, errorcode);
		goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
	case -NFS4ERR_DELAY:
		nfs_inc_server_stats(server, NFSIOS_DELAY);
	case -NFS4ERR_GRACE:
	case -NFS4ERR_RECALLCONFLICT:
		exception->delay = 1;
		return 0;

	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
				"does not accept raw "
				"uid/gids. "
				"Reenabling the idmapper.\n",
				server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	exception->recovering = 1;
	return 0;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		ret = nfs4_delay(server->client, &exception->timeout);
		goto out_retry;
	}
	if (exception->recovering) {
		ret = nfs4_wait_clnt_recover(clp);
		if (test_bit(NFS_MIG_FAILED, &server->mig_status))
			return -EIO;
		goto out_retry;
	}
	return ret;
out_retry:
	if (ret == 0)
		exception->retry = 1;
	return ret;
}
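/*
 * Typical calling pattern for the helper above (illustrative sketch only;
 * the proc name is a placeholder, but synchronous callers elsewhere in
 * this file follow the same shape):
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = _nfs4_proc_foo(server, ...);
 *		err = nfs4_handle_exception(server, err, &exception);
 *	} while (exception.retry);
 *
 * nfs4_handle_exception() sleeps or waits for state recovery as required
 * and sets exception.retry when the compound should simply be re-sent.
 */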
static int
nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	int ret;

	ret = nfs4_do_handle_exception(server, errorcode, exception);
	if (exception->delay) {
		rpc_delay(task, nfs4_update_delay(&exception->timeout));
		goto out_retry;
	}
	if (exception->recovering) {
		rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
		if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
			rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
		goto out_retry;
	}
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		ret = -EIO;
	return ret;
out_retry:
	if (ret == 0)
		exception->retry = 1;
	return ret;
}

static int
nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
		struct nfs4_state *state, long *timeout)
{
	struct nfs4_exception exception = {
		.state = state,
	};

	if (task->tk_status >= 0)
		return 0;
	if (timeout)
		exception.timeout = *timeout;
	task->tk_status = nfs4_async_handle_exception(task, server,
			task->tk_status,
			&exception);
	if (exception.delay && timeout)
		*timeout = exception.timeout;
	if (exception.retry)
		return -EAGAIN;
	return 0;
}

/*
 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
 * or 'false' otherwise.
536 */ 537 static bool _nfs4_is_integrity_protected(struct nfs_client *clp) 538 { 539 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor; 540 541 if (flavor == RPC_AUTH_GSS_KRB5I || 542 flavor == RPC_AUTH_GSS_KRB5P) 543 return true; 544 545 return false; 546 } 547 548 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp) 549 { 550 spin_lock(&clp->cl_lock); 551 if (time_before(clp->cl_last_renewal,timestamp)) 552 clp->cl_last_renewal = timestamp; 553 spin_unlock(&clp->cl_lock); 554 } 555 556 static void renew_lease(const struct nfs_server *server, unsigned long timestamp) 557 { 558 struct nfs_client *clp = server->nfs_client; 559 560 if (!nfs4_has_session(clp)) 561 do_renew_lease(clp, timestamp); 562 } 563 564 struct nfs4_call_sync_data { 565 const struct nfs_server *seq_server; 566 struct nfs4_sequence_args *seq_args; 567 struct nfs4_sequence_res *seq_res; 568 }; 569 570 void nfs4_init_sequence(struct nfs4_sequence_args *args, 571 struct nfs4_sequence_res *res, int cache_reply) 572 { 573 args->sa_slot = NULL; 574 args->sa_cache_this = cache_reply; 575 args->sa_privileged = 0; 576 577 res->sr_slot = NULL; 578 } 579 580 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args) 581 { 582 args->sa_privileged = 1; 583 } 584 585 int nfs40_setup_sequence(struct nfs4_slot_table *tbl, 586 struct nfs4_sequence_args *args, 587 struct nfs4_sequence_res *res, 588 struct rpc_task *task) 589 { 590 struct nfs4_slot *slot; 591 592 /* slot already allocated? */ 593 if (res->sr_slot != NULL) 594 goto out_start; 595 596 spin_lock(&tbl->slot_tbl_lock); 597 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged) 598 goto out_sleep; 599 600 slot = nfs4_alloc_slot(tbl); 601 if (IS_ERR(slot)) { 602 if (slot == ERR_PTR(-ENOMEM)) 603 task->tk_timeout = HZ >> 2; 604 goto out_sleep; 605 } 606 spin_unlock(&tbl->slot_tbl_lock); 607 608 args->sa_slot = slot; 609 res->sr_slot = slot; 610 611 out_start: 612 rpc_call_start(task); 613 return 0; 614 615 out_sleep: 616 if (args->sa_privileged) 617 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, 618 NULL, RPC_PRIORITY_PRIVILEGED); 619 else 620 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 621 spin_unlock(&tbl->slot_tbl_lock); 622 return -EAGAIN; 623 } 624 EXPORT_SYMBOL_GPL(nfs40_setup_sequence); 625 626 static int nfs40_sequence_done(struct rpc_task *task, 627 struct nfs4_sequence_res *res) 628 { 629 struct nfs4_slot *slot = res->sr_slot; 630 struct nfs4_slot_table *tbl; 631 632 if (slot == NULL) 633 goto out; 634 635 tbl = slot->table; 636 spin_lock(&tbl->slot_tbl_lock); 637 if (!nfs41_wake_and_assign_slot(tbl, slot)) 638 nfs4_free_slot(tbl, slot); 639 spin_unlock(&tbl->slot_tbl_lock); 640 641 res->sr_slot = NULL; 642 out: 643 return 1; 644 } 645 646 #if defined(CONFIG_NFS_V4_1) 647 648 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) 649 { 650 struct nfs4_session *session; 651 struct nfs4_slot_table *tbl; 652 struct nfs4_slot *slot = res->sr_slot; 653 bool send_new_highest_used_slotid = false; 654 655 tbl = slot->table; 656 session = tbl->session; 657 658 spin_lock(&tbl->slot_tbl_lock); 659 /* Be nice to the server: try to ensure that the last transmitted 660 * value for highest_user_slotid <= target_highest_slotid 661 */ 662 if (tbl->highest_used_slotid > tbl->target_highest_slotid) 663 send_new_highest_used_slotid = true; 664 665 if (nfs41_wake_and_assign_slot(tbl, slot)) { 666 send_new_highest_used_slotid = false; 667 goto out_unlock; 668 } 669 nfs4_free_slot(tbl, slot); 670 671 if 
(tbl->highest_used_slotid != NFS4_NO_SLOT) 672 send_new_highest_used_slotid = false; 673 out_unlock: 674 spin_unlock(&tbl->slot_tbl_lock); 675 res->sr_slot = NULL; 676 if (send_new_highest_used_slotid) 677 nfs41_notify_server(session->clp); 678 } 679 680 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 681 { 682 struct nfs4_session *session; 683 struct nfs4_slot *slot = res->sr_slot; 684 struct nfs_client *clp; 685 bool interrupted = false; 686 int ret = 1; 687 688 if (slot == NULL) 689 goto out_noaction; 690 /* don't increment the sequence number if the task wasn't sent */ 691 if (!RPC_WAS_SENT(task)) 692 goto out; 693 694 session = slot->table->session; 695 696 if (slot->interrupted) { 697 slot->interrupted = 0; 698 interrupted = true; 699 } 700 701 trace_nfs4_sequence_done(session, res); 702 /* Check the SEQUENCE operation status */ 703 switch (res->sr_status) { 704 case 0: 705 /* Update the slot's sequence and clientid lease timer */ 706 ++slot->seq_nr; 707 clp = session->clp; 708 do_renew_lease(clp, res->sr_timestamp); 709 /* Check sequence flags */ 710 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 711 nfs41_update_target_slotid(slot->table, slot, res); 712 break; 713 case 1: 714 /* 715 * sr_status remains 1 if an RPC level error occurred. 716 * The server may or may not have processed the sequence 717 * operation.. 718 * Mark the slot as having hosted an interrupted RPC call. 719 */ 720 slot->interrupted = 1; 721 goto out; 722 case -NFS4ERR_DELAY: 723 /* The server detected a resend of the RPC call and 724 * returned NFS4ERR_DELAY as per Section 2.10.6.2 725 * of RFC5661. 726 */ 727 dprintk("%s: slot=%u seq=%u: Operation in progress\n", 728 __func__, 729 slot->slot_nr, 730 slot->seq_nr); 731 goto out_retry; 732 case -NFS4ERR_BADSLOT: 733 /* 734 * The slot id we used was probably retired. Try again 735 * using a different slot id. 736 */ 737 goto retry_nowait; 738 case -NFS4ERR_SEQ_MISORDERED: 739 /* 740 * Was the last operation on this sequence interrupted? 741 * If so, retry after bumping the sequence number. 742 */ 743 if (interrupted) { 744 ++slot->seq_nr; 745 goto retry_nowait; 746 } 747 /* 748 * Could this slot have been previously retired? 749 * If so, then the server may be expecting seq_nr = 1! 750 */ 751 if (slot->seq_nr != 1) { 752 slot->seq_nr = 1; 753 goto retry_nowait; 754 } 755 break; 756 case -NFS4ERR_SEQ_FALSE_RETRY: 757 ++slot->seq_nr; 758 goto retry_nowait; 759 default: 760 /* Just update the slot sequence no. */ 761 ++slot->seq_nr; 762 } 763 out: 764 /* The session may be reset by one of the error handlers. 
*/ 765 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status); 766 nfs41_sequence_free_slot(res); 767 out_noaction: 768 return ret; 769 retry_nowait: 770 if (rpc_restart_call_prepare(task)) { 771 task->tk_status = 0; 772 ret = 0; 773 } 774 goto out; 775 out_retry: 776 if (!rpc_restart_call(task)) 777 goto out; 778 rpc_delay(task, NFS4_POLL_RETRY_MAX); 779 return 0; 780 } 781 EXPORT_SYMBOL_GPL(nfs41_sequence_done); 782 783 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 784 { 785 if (res->sr_slot == NULL) 786 return 1; 787 if (!res->sr_slot->table->session) 788 return nfs40_sequence_done(task, res); 789 return nfs41_sequence_done(task, res); 790 } 791 EXPORT_SYMBOL_GPL(nfs4_sequence_done); 792 793 int nfs41_setup_sequence(struct nfs4_session *session, 794 struct nfs4_sequence_args *args, 795 struct nfs4_sequence_res *res, 796 struct rpc_task *task) 797 { 798 struct nfs4_slot *slot; 799 struct nfs4_slot_table *tbl; 800 801 dprintk("--> %s\n", __func__); 802 /* slot already allocated? */ 803 if (res->sr_slot != NULL) 804 goto out_success; 805 806 tbl = &session->fc_slot_table; 807 808 task->tk_timeout = 0; 809 810 spin_lock(&tbl->slot_tbl_lock); 811 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) && 812 !args->sa_privileged) { 813 /* The state manager will wait until the slot table is empty */ 814 dprintk("%s session is draining\n", __func__); 815 goto out_sleep; 816 } 817 818 slot = nfs4_alloc_slot(tbl); 819 if (IS_ERR(slot)) { 820 /* If out of memory, try again in 1/4 second */ 821 if (slot == ERR_PTR(-ENOMEM)) 822 task->tk_timeout = HZ >> 2; 823 dprintk("<-- %s: no free slots\n", __func__); 824 goto out_sleep; 825 } 826 spin_unlock(&tbl->slot_tbl_lock); 827 828 args->sa_slot = slot; 829 830 dprintk("<-- %s slotid=%u seqid=%u\n", __func__, 831 slot->slot_nr, slot->seq_nr); 832 833 res->sr_slot = slot; 834 res->sr_timestamp = jiffies; 835 res->sr_status_flags = 0; 836 /* 837 * sr_status is only set in decode_sequence, and so will remain 838 * set to 1 if an rpc level failure occurs. 839 */ 840 res->sr_status = 1; 841 trace_nfs4_setup_sequence(session, args); 842 out_success: 843 rpc_call_start(task); 844 return 0; 845 out_sleep: 846 /* Privileged tasks are queued with top priority */ 847 if (args->sa_privileged) 848 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, 849 NULL, RPC_PRIORITY_PRIVILEGED); 850 else 851 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 852 spin_unlock(&tbl->slot_tbl_lock); 853 return -EAGAIN; 854 } 855 EXPORT_SYMBOL_GPL(nfs41_setup_sequence); 856 857 static int nfs4_setup_sequence(const struct nfs_server *server, 858 struct nfs4_sequence_args *args, 859 struct nfs4_sequence_res *res, 860 struct rpc_task *task) 861 { 862 struct nfs4_session *session = nfs4_get_session(server); 863 int ret = 0; 864 865 if (!session) 866 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 867 args, res, task); 868 869 dprintk("--> %s clp %p session %p sr_slot %u\n", 870 __func__, session->clp, session, res->sr_slot ? 
871 res->sr_slot->slot_nr : NFS4_NO_SLOT); 872 873 ret = nfs41_setup_sequence(session, args, res, task); 874 875 dprintk("<-- %s status=%d\n", __func__, ret); 876 return ret; 877 } 878 879 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) 880 { 881 struct nfs4_call_sync_data *data = calldata; 882 struct nfs4_session *session = nfs4_get_session(data->seq_server); 883 884 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); 885 886 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task); 887 } 888 889 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) 890 { 891 struct nfs4_call_sync_data *data = calldata; 892 893 nfs41_sequence_done(task, data->seq_res); 894 } 895 896 static const struct rpc_call_ops nfs41_call_sync_ops = { 897 .rpc_call_prepare = nfs41_call_sync_prepare, 898 .rpc_call_done = nfs41_call_sync_done, 899 }; 900 901 #else /* !CONFIG_NFS_V4_1 */ 902 903 static int nfs4_setup_sequence(const struct nfs_server *server, 904 struct nfs4_sequence_args *args, 905 struct nfs4_sequence_res *res, 906 struct rpc_task *task) 907 { 908 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 909 args, res, task); 910 } 911 912 int nfs4_sequence_done(struct rpc_task *task, 913 struct nfs4_sequence_res *res) 914 { 915 return nfs40_sequence_done(task, res); 916 } 917 EXPORT_SYMBOL_GPL(nfs4_sequence_done); 918 919 #endif /* !CONFIG_NFS_V4_1 */ 920 921 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata) 922 { 923 struct nfs4_call_sync_data *data = calldata; 924 nfs4_setup_sequence(data->seq_server, 925 data->seq_args, data->seq_res, task); 926 } 927 928 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata) 929 { 930 struct nfs4_call_sync_data *data = calldata; 931 nfs4_sequence_done(task, data->seq_res); 932 } 933 934 static const struct rpc_call_ops nfs40_call_sync_ops = { 935 .rpc_call_prepare = nfs40_call_sync_prepare, 936 .rpc_call_done = nfs40_call_sync_done, 937 }; 938 939 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, 940 struct nfs_server *server, 941 struct rpc_message *msg, 942 struct nfs4_sequence_args *args, 943 struct nfs4_sequence_res *res) 944 { 945 int ret; 946 struct rpc_task *task; 947 struct nfs_client *clp = server->nfs_client; 948 struct nfs4_call_sync_data data = { 949 .seq_server = server, 950 .seq_args = args, 951 .seq_res = res, 952 }; 953 struct rpc_task_setup task_setup = { 954 .rpc_client = clnt, 955 .rpc_message = msg, 956 .callback_ops = clp->cl_mvops->call_sync_ops, 957 .callback_data = &data 958 }; 959 960 task = rpc_run_task(&task_setup); 961 if (IS_ERR(task)) 962 ret = PTR_ERR(task); 963 else { 964 ret = task->tk_status; 965 rpc_put_task(task); 966 } 967 return ret; 968 } 969 970 int nfs4_call_sync(struct rpc_clnt *clnt, 971 struct nfs_server *server, 972 struct rpc_message *msg, 973 struct nfs4_sequence_args *args, 974 struct nfs4_sequence_res *res, 975 int cache_reply) 976 { 977 nfs4_init_sequence(args, res, cache_reply); 978 return nfs4_call_sync_sequence(clnt, server, msg, args, res); 979 } 980 981 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) 982 { 983 struct nfs_inode *nfsi = NFS_I(dir); 984 985 spin_lock(&dir->i_lock); 986 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 987 if (!cinfo->atomic || cinfo->before != dir->i_version) 988 nfs_force_lookup_revalidate(dir); 989 dir->i_version = cinfo->after; 990 nfsi->attr_gencount = nfs_inc_attr_generation_counter(); 991 
nfs_fscache_invalidate(dir); 992 spin_unlock(&dir->i_lock); 993 } 994 995 struct nfs4_opendata { 996 struct kref kref; 997 struct nfs_openargs o_arg; 998 struct nfs_openres o_res; 999 struct nfs_open_confirmargs c_arg; 1000 struct nfs_open_confirmres c_res; 1001 struct nfs4_string owner_name; 1002 struct nfs4_string group_name; 1003 struct nfs4_label *a_label; 1004 struct nfs_fattr f_attr; 1005 struct nfs4_label *f_label; 1006 struct dentry *dir; 1007 struct dentry *dentry; 1008 struct nfs4_state_owner *owner; 1009 struct nfs4_state *state; 1010 struct iattr attrs; 1011 unsigned long timestamp; 1012 unsigned int rpc_done : 1; 1013 unsigned int file_created : 1; 1014 unsigned int is_recover : 1; 1015 int rpc_status; 1016 int cancelled; 1017 }; 1018 1019 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server, 1020 int err, struct nfs4_exception *exception) 1021 { 1022 if (err != -EINVAL) 1023 return false; 1024 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) 1025 return false; 1026 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1; 1027 exception->retry = 1; 1028 return true; 1029 } 1030 1031 static u32 1032 nfs4_map_atomic_open_share(struct nfs_server *server, 1033 fmode_t fmode, int openflags) 1034 { 1035 u32 res = 0; 1036 1037 switch (fmode & (FMODE_READ | FMODE_WRITE)) { 1038 case FMODE_READ: 1039 res = NFS4_SHARE_ACCESS_READ; 1040 break; 1041 case FMODE_WRITE: 1042 res = NFS4_SHARE_ACCESS_WRITE; 1043 break; 1044 case FMODE_READ|FMODE_WRITE: 1045 res = NFS4_SHARE_ACCESS_BOTH; 1046 } 1047 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) 1048 goto out; 1049 /* Want no delegation if we're using O_DIRECT */ 1050 if (openflags & O_DIRECT) 1051 res |= NFS4_SHARE_WANT_NO_DELEG; 1052 out: 1053 return res; 1054 } 1055 1056 static enum open_claim_type4 1057 nfs4_map_atomic_open_claim(struct nfs_server *server, 1058 enum open_claim_type4 claim) 1059 { 1060 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1) 1061 return claim; 1062 switch (claim) { 1063 default: 1064 return claim; 1065 case NFS4_OPEN_CLAIM_FH: 1066 return NFS4_OPEN_CLAIM_NULL; 1067 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1068 return NFS4_OPEN_CLAIM_DELEGATE_CUR; 1069 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1070 return NFS4_OPEN_CLAIM_DELEGATE_PREV; 1071 } 1072 } 1073 1074 static void nfs4_init_opendata_res(struct nfs4_opendata *p) 1075 { 1076 p->o_res.f_attr = &p->f_attr; 1077 p->o_res.f_label = p->f_label; 1078 p->o_res.seqid = p->o_arg.seqid; 1079 p->c_res.seqid = p->c_arg.seqid; 1080 p->o_res.server = p->o_arg.server; 1081 p->o_res.access_request = p->o_arg.access; 1082 nfs_fattr_init(&p->f_attr); 1083 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name); 1084 } 1085 1086 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, 1087 struct nfs4_state_owner *sp, fmode_t fmode, int flags, 1088 const struct iattr *attrs, 1089 struct nfs4_label *label, 1090 enum open_claim_type4 claim, 1091 gfp_t gfp_mask) 1092 { 1093 struct dentry *parent = dget_parent(dentry); 1094 struct inode *dir = d_inode(parent); 1095 struct nfs_server *server = NFS_SERVER(dir); 1096 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 1097 struct nfs4_opendata *p; 1098 1099 p = kzalloc(sizeof(*p), gfp_mask); 1100 if (p == NULL) 1101 goto err; 1102 1103 p->f_label = nfs4_label_alloc(server, gfp_mask); 1104 if (IS_ERR(p->f_label)) 1105 goto err_free_p; 1106 1107 p->a_label = nfs4_label_alloc(server, gfp_mask); 1108 if (IS_ERR(p->a_label)) 1109 goto err_free_f; 1110 1111 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 1112 
p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); 1113 if (IS_ERR(p->o_arg.seqid)) 1114 goto err_free_label; 1115 nfs_sb_active(dentry->d_sb); 1116 p->dentry = dget(dentry); 1117 p->dir = parent; 1118 p->owner = sp; 1119 atomic_inc(&sp->so_count); 1120 p->o_arg.open_flags = flags; 1121 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); 1122 p->o_arg.share_access = nfs4_map_atomic_open_share(server, 1123 fmode, flags); 1124 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS 1125 * will return permission denied for all bits until close */ 1126 if (!(flags & O_EXCL)) { 1127 /* ask server to check for all possible rights as results 1128 * are cached */ 1129 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY | 1130 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE; 1131 } 1132 p->o_arg.clientid = server->nfs_client->cl_clientid; 1133 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time); 1134 p->o_arg.id.uniquifier = sp->so_seqid.owner_id; 1135 p->o_arg.name = &dentry->d_name; 1136 p->o_arg.server = server; 1137 p->o_arg.bitmask = nfs4_bitmask(server, label); 1138 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 1139 p->o_arg.label = nfs4_label_copy(p->a_label, label); 1140 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim); 1141 switch (p->o_arg.claim) { 1142 case NFS4_OPEN_CLAIM_NULL: 1143 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 1144 case NFS4_OPEN_CLAIM_DELEGATE_PREV: 1145 p->o_arg.fh = NFS_FH(dir); 1146 break; 1147 case NFS4_OPEN_CLAIM_PREVIOUS: 1148 case NFS4_OPEN_CLAIM_FH: 1149 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1150 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1151 p->o_arg.fh = NFS_FH(d_inode(dentry)); 1152 } 1153 if (attrs != NULL && attrs->ia_valid != 0) { 1154 __u32 verf[2]; 1155 1156 p->o_arg.u.attrs = &p->attrs; 1157 memcpy(&p->attrs, attrs, sizeof(p->attrs)); 1158 1159 verf[0] = jiffies; 1160 verf[1] = current->pid; 1161 memcpy(p->o_arg.u.verifier.data, verf, 1162 sizeof(p->o_arg.u.verifier.data)); 1163 } 1164 p->c_arg.fh = &p->o_res.fh; 1165 p->c_arg.stateid = &p->o_res.stateid; 1166 p->c_arg.seqid = p->o_arg.seqid; 1167 nfs4_init_opendata_res(p); 1168 kref_init(&p->kref); 1169 return p; 1170 1171 err_free_label: 1172 nfs4_label_free(p->a_label); 1173 err_free_f: 1174 nfs4_label_free(p->f_label); 1175 err_free_p: 1176 kfree(p); 1177 err: 1178 dput(parent); 1179 return NULL; 1180 } 1181 1182 static void nfs4_opendata_free(struct kref *kref) 1183 { 1184 struct nfs4_opendata *p = container_of(kref, 1185 struct nfs4_opendata, kref); 1186 struct super_block *sb = p->dentry->d_sb; 1187 1188 nfs_free_seqid(p->o_arg.seqid); 1189 if (p->state != NULL) 1190 nfs4_put_open_state(p->state); 1191 nfs4_put_state_owner(p->owner); 1192 1193 nfs4_label_free(p->a_label); 1194 nfs4_label_free(p->f_label); 1195 1196 dput(p->dir); 1197 dput(p->dentry); 1198 nfs_sb_deactive(sb); 1199 nfs_fattr_free_names(&p->f_attr); 1200 kfree(p->f_attr.mdsthreshold); 1201 kfree(p); 1202 } 1203 1204 static void nfs4_opendata_put(struct nfs4_opendata *p) 1205 { 1206 if (p != NULL) 1207 kref_put(&p->kref, nfs4_opendata_free); 1208 } 1209 1210 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) 1211 { 1212 int ret; 1213 1214 ret = rpc_wait_for_completion_task(task); 1215 return ret; 1216 } 1217 1218 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state, 1219 fmode_t fmode) 1220 { 1221 switch(fmode & (FMODE_READ|FMODE_WRITE)) { 1222 case FMODE_READ|FMODE_WRITE: 1223 return state->n_rdwr != 0; 1224 case FMODE_WRITE: 1225 return state->n_wronly != 0; 1226 case FMODE_READ: 1227 
return state->n_rdonly != 0; 1228 } 1229 WARN_ON_ONCE(1); 1230 return false; 1231 } 1232 1233 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode) 1234 { 1235 int ret = 0; 1236 1237 if (open_mode & (O_EXCL|O_TRUNC)) 1238 goto out; 1239 switch (mode & (FMODE_READ|FMODE_WRITE)) { 1240 case FMODE_READ: 1241 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 1242 && state->n_rdonly != 0; 1243 break; 1244 case FMODE_WRITE: 1245 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 1246 && state->n_wronly != 0; 1247 break; 1248 case FMODE_READ|FMODE_WRITE: 1249 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 1250 && state->n_rdwr != 0; 1251 } 1252 out: 1253 return ret; 1254 } 1255 1256 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode, 1257 enum open_claim_type4 claim) 1258 { 1259 if (delegation == NULL) 1260 return 0; 1261 if ((delegation->type & fmode) != fmode) 1262 return 0; 1263 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) 1264 return 0; 1265 switch (claim) { 1266 case NFS4_OPEN_CLAIM_NULL: 1267 case NFS4_OPEN_CLAIM_FH: 1268 break; 1269 case NFS4_OPEN_CLAIM_PREVIOUS: 1270 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) 1271 break; 1272 default: 1273 return 0; 1274 } 1275 nfs_mark_delegation_referenced(delegation); 1276 return 1; 1277 } 1278 1279 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode) 1280 { 1281 switch (fmode) { 1282 case FMODE_WRITE: 1283 state->n_wronly++; 1284 break; 1285 case FMODE_READ: 1286 state->n_rdonly++; 1287 break; 1288 case FMODE_READ|FMODE_WRITE: 1289 state->n_rdwr++; 1290 } 1291 nfs4_state_set_mode_locked(state, state->state | fmode); 1292 } 1293 1294 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state) 1295 { 1296 struct nfs_client *clp = state->owner->so_server->nfs_client; 1297 bool need_recover = false; 1298 1299 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly) 1300 need_recover = true; 1301 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly) 1302 need_recover = true; 1303 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr) 1304 need_recover = true; 1305 if (need_recover) 1306 nfs4_state_mark_reclaim_nograce(clp, state); 1307 } 1308 1309 static bool nfs_need_update_open_stateid(struct nfs4_state *state, 1310 nfs4_stateid *stateid) 1311 { 1312 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0) 1313 return true; 1314 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) { 1315 nfs_test_and_clear_all_open_stateid(state); 1316 return true; 1317 } 1318 if (nfs4_stateid_is_newer(stateid, &state->open_stateid)) 1319 return true; 1320 return false; 1321 } 1322 1323 static void nfs_resync_open_stateid_locked(struct nfs4_state *state) 1324 { 1325 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr)) 1326 return; 1327 if (state->n_wronly) 1328 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1329 if (state->n_rdonly) 1330 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1331 if (state->n_rdwr) 1332 set_bit(NFS_O_RDWR_STATE, &state->flags); 1333 set_bit(NFS_OPEN_STATE, &state->flags); 1334 } 1335 1336 static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1337 nfs4_stateid *arg_stateid, 1338 nfs4_stateid *stateid, fmode_t fmode) 1339 { 1340 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1341 switch (fmode & (FMODE_READ|FMODE_WRITE)) { 1342 case FMODE_WRITE: 1343 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1344 break; 1345 case 
FMODE_READ: 1346 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1347 break; 1348 case 0: 1349 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1350 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1351 clear_bit(NFS_OPEN_STATE, &state->flags); 1352 } 1353 if (stateid == NULL) 1354 return; 1355 /* Handle races with OPEN */ 1356 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || 1357 (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1358 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) { 1359 nfs_resync_open_stateid_locked(state); 1360 return; 1361 } 1362 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1363 nfs4_stateid_copy(&state->stateid, stateid); 1364 nfs4_stateid_copy(&state->open_stateid, stateid); 1365 } 1366 1367 static void nfs_clear_open_stateid(struct nfs4_state *state, 1368 nfs4_stateid *arg_stateid, 1369 nfs4_stateid *stateid, fmode_t fmode) 1370 { 1371 write_seqlock(&state->seqlock); 1372 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); 1373 write_sequnlock(&state->seqlock); 1374 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1375 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1376 } 1377 1378 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) 1379 { 1380 switch (fmode) { 1381 case FMODE_READ: 1382 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1383 break; 1384 case FMODE_WRITE: 1385 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1386 break; 1387 case FMODE_READ|FMODE_WRITE: 1388 set_bit(NFS_O_RDWR_STATE, &state->flags); 1389 } 1390 if (!nfs_need_update_open_stateid(state, stateid)) 1391 return; 1392 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1393 nfs4_stateid_copy(&state->stateid, stateid); 1394 nfs4_stateid_copy(&state->open_stateid, stateid); 1395 } 1396 1397 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode) 1398 { 1399 /* 1400 * Protect the call to nfs4_state_set_mode_locked and 1401 * serialise the stateid update 1402 */ 1403 spin_lock(&state->owner->so_lock); 1404 write_seqlock(&state->seqlock); 1405 if (deleg_stateid != NULL) { 1406 nfs4_stateid_copy(&state->stateid, deleg_stateid); 1407 set_bit(NFS_DELEGATED_STATE, &state->flags); 1408 } 1409 if (open_stateid != NULL) 1410 nfs_set_open_stateid_locked(state, open_stateid, fmode); 1411 write_sequnlock(&state->seqlock); 1412 update_open_stateflags(state, fmode); 1413 spin_unlock(&state->owner->so_lock); 1414 } 1415 1416 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode) 1417 { 1418 struct nfs_inode *nfsi = NFS_I(state->inode); 1419 struct nfs_delegation *deleg_cur; 1420 int ret = 0; 1421 1422 fmode &= (FMODE_READ|FMODE_WRITE); 1423 1424 rcu_read_lock(); 1425 deleg_cur = rcu_dereference(nfsi->delegation); 1426 if (deleg_cur == NULL) 1427 goto no_delegation; 1428 1429 spin_lock(&deleg_cur->lock); 1430 if (rcu_dereference(nfsi->delegation) != deleg_cur || 1431 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) || 1432 (deleg_cur->type & fmode) != fmode) 1433 goto no_delegation_unlock; 1434 1435 if (delegation == NULL) 1436 delegation = &deleg_cur->stateid; 1437 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation)) 1438 goto no_delegation_unlock; 1439 1440 nfs_mark_delegation_referenced(deleg_cur); 1441 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode); 1442 ret = 1; 1443 no_delegation_unlock: 
1444 spin_unlock(&deleg_cur->lock); 1445 no_delegation: 1446 rcu_read_unlock(); 1447 1448 if (!ret && open_stateid != NULL) { 1449 __update_open_stateid(state, open_stateid, NULL, fmode); 1450 ret = 1; 1451 } 1452 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1453 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1454 1455 return ret; 1456 } 1457 1458 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp, 1459 const nfs4_stateid *stateid) 1460 { 1461 struct nfs4_state *state = lsp->ls_state; 1462 bool ret = false; 1463 1464 spin_lock(&state->state_lock); 1465 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid)) 1466 goto out_noupdate; 1467 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid)) 1468 goto out_noupdate; 1469 nfs4_stateid_copy(&lsp->ls_stateid, stateid); 1470 ret = true; 1471 out_noupdate: 1472 spin_unlock(&state->state_lock); 1473 return ret; 1474 } 1475 1476 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1477 { 1478 struct nfs_delegation *delegation; 1479 1480 rcu_read_lock(); 1481 delegation = rcu_dereference(NFS_I(inode)->delegation); 1482 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1483 rcu_read_unlock(); 1484 return; 1485 } 1486 rcu_read_unlock(); 1487 nfs4_inode_return_delegation(inode); 1488 } 1489 1490 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1491 { 1492 struct nfs4_state *state = opendata->state; 1493 struct nfs_inode *nfsi = NFS_I(state->inode); 1494 struct nfs_delegation *delegation; 1495 int open_mode = opendata->o_arg.open_flags; 1496 fmode_t fmode = opendata->o_arg.fmode; 1497 enum open_claim_type4 claim = opendata->o_arg.claim; 1498 nfs4_stateid stateid; 1499 int ret = -EAGAIN; 1500 1501 for (;;) { 1502 spin_lock(&state->owner->so_lock); 1503 if (can_open_cached(state, fmode, open_mode)) { 1504 update_open_stateflags(state, fmode); 1505 spin_unlock(&state->owner->so_lock); 1506 goto out_return_state; 1507 } 1508 spin_unlock(&state->owner->so_lock); 1509 rcu_read_lock(); 1510 delegation = rcu_dereference(nfsi->delegation); 1511 if (!can_open_delegated(delegation, fmode, claim)) { 1512 rcu_read_unlock(); 1513 break; 1514 } 1515 /* Save the delegation */ 1516 nfs4_stateid_copy(&stateid, &delegation->stateid); 1517 rcu_read_unlock(); 1518 nfs_release_seqid(opendata->o_arg.seqid); 1519 if (!opendata->is_recover) { 1520 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 1521 if (ret != 0) 1522 goto out; 1523 } 1524 ret = -EAGAIN; 1525 1526 /* Try to update the stateid using the delegation */ 1527 if (update_open_stateid(state, NULL, &stateid, fmode)) 1528 goto out_return_state; 1529 } 1530 out: 1531 return ERR_PTR(ret); 1532 out_return_state: 1533 atomic_inc(&state->count); 1534 return state; 1535 } 1536 1537 static void 1538 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state) 1539 { 1540 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client; 1541 struct nfs_delegation *delegation; 1542 int delegation_flags = 0; 1543 1544 rcu_read_lock(); 1545 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 1546 if (delegation) 1547 delegation_flags = delegation->flags; 1548 rcu_read_unlock(); 1549 switch (data->o_arg.claim) { 1550 default: 1551 break; 1552 case NFS4_OPEN_CLAIM_DELEGATE_CUR: 1553 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1554 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 1555 "returning a delegation for " 1556 "OPEN(CLAIM_DELEGATE_CUR)\n", 1557 clp->cl_hostname); 
1558 return; 1559 } 1560 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) 1561 nfs_inode_set_delegation(state->inode, 1562 data->owner->so_cred, 1563 &data->o_res); 1564 else 1565 nfs_inode_reclaim_delegation(state->inode, 1566 data->owner->so_cred, 1567 &data->o_res); 1568 } 1569 1570 /* 1571 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 1572 * and update the nfs4_state. 1573 */ 1574 static struct nfs4_state * 1575 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 1576 { 1577 struct inode *inode = data->state->inode; 1578 struct nfs4_state *state = data->state; 1579 int ret; 1580 1581 if (!data->rpc_done) { 1582 if (data->rpc_status) { 1583 ret = data->rpc_status; 1584 goto err; 1585 } 1586 /* cached opens have already been processed */ 1587 goto update; 1588 } 1589 1590 ret = nfs_refresh_inode(inode, &data->f_attr); 1591 if (ret) 1592 goto err; 1593 1594 if (data->o_res.delegation_type != 0) 1595 nfs4_opendata_check_deleg(data, state); 1596 update: 1597 update_open_stateid(state, &data->o_res.stateid, NULL, 1598 data->o_arg.fmode); 1599 atomic_inc(&state->count); 1600 1601 return state; 1602 err: 1603 return ERR_PTR(ret); 1604 1605 } 1606 1607 static struct nfs4_state * 1608 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1609 { 1610 struct inode *inode; 1611 struct nfs4_state *state = NULL; 1612 int ret; 1613 1614 if (!data->rpc_done) { 1615 state = nfs4_try_open_cached(data); 1616 trace_nfs4_cached_open(data->state); 1617 goto out; 1618 } 1619 1620 ret = -EAGAIN; 1621 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 1622 goto err; 1623 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label); 1624 ret = PTR_ERR(inode); 1625 if (IS_ERR(inode)) 1626 goto err; 1627 ret = -ENOMEM; 1628 state = nfs4_get_open_state(inode, data->owner); 1629 if (state == NULL) 1630 goto err_put_inode; 1631 if (data->o_res.delegation_type != 0) 1632 nfs4_opendata_check_deleg(data, state); 1633 update_open_stateid(state, &data->o_res.stateid, NULL, 1634 data->o_arg.fmode); 1635 iput(inode); 1636 out: 1637 nfs_release_seqid(data->o_arg.seqid); 1638 return state; 1639 err_put_inode: 1640 iput(inode); 1641 err: 1642 return ERR_PTR(ret); 1643 } 1644 1645 static struct nfs4_state * 1646 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1647 { 1648 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 1649 return _nfs4_opendata_reclaim_to_nfs4_state(data); 1650 return _nfs4_opendata_to_nfs4_state(data); 1651 } 1652 1653 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) 1654 { 1655 struct nfs_inode *nfsi = NFS_I(state->inode); 1656 struct nfs_open_context *ctx; 1657 1658 spin_lock(&state->inode->i_lock); 1659 list_for_each_entry(ctx, &nfsi->open_files, list) { 1660 if (ctx->state != state) 1661 continue; 1662 get_nfs_open_context(ctx); 1663 spin_unlock(&state->inode->i_lock); 1664 return ctx; 1665 } 1666 spin_unlock(&state->inode->i_lock); 1667 return ERR_PTR(-ENOENT); 1668 } 1669 1670 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, 1671 struct nfs4_state *state, enum open_claim_type4 claim) 1672 { 1673 struct nfs4_opendata *opendata; 1674 1675 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, 1676 NULL, NULL, claim, GFP_NOFS); 1677 if (opendata == NULL) 1678 return ERR_PTR(-ENOMEM); 1679 opendata->state = state; 1680 atomic_inc(&state->count); 1681 return opendata; 1682 } 1683 1684 static int nfs4_open_recover_helper(struct nfs4_opendata 
*opendata, 1685 fmode_t fmode) 1686 { 1687 struct nfs4_state *newstate; 1688 int ret; 1689 1690 if (!nfs4_mode_match_open_stateid(opendata->state, fmode)) 1691 return 0; 1692 opendata->o_arg.open_flags = 0; 1693 opendata->o_arg.fmode = fmode; 1694 opendata->o_arg.share_access = nfs4_map_atomic_open_share( 1695 NFS_SB(opendata->dentry->d_sb), 1696 fmode, 0); 1697 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 1698 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 1699 nfs4_init_opendata_res(opendata); 1700 ret = _nfs4_recover_proc_open(opendata); 1701 if (ret != 0) 1702 return ret; 1703 newstate = nfs4_opendata_to_nfs4_state(opendata); 1704 if (IS_ERR(newstate)) 1705 return PTR_ERR(newstate); 1706 if (newstate != opendata->state) 1707 ret = -ESTALE; 1708 nfs4_close_state(newstate, fmode); 1709 return ret; 1710 } 1711 1712 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 1713 { 1714 int ret; 1715 1716 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */ 1717 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1718 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1719 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1720 /* memory barrier prior to reading state->n_* */ 1721 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1722 clear_bit(NFS_OPEN_STATE, &state->flags); 1723 smp_rmb(); 1724 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 1725 if (ret != 0) 1726 return ret; 1727 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE); 1728 if (ret != 0) 1729 return ret; 1730 ret = nfs4_open_recover_helper(opendata, FMODE_READ); 1731 if (ret != 0) 1732 return ret; 1733 /* 1734 * We may have performed cached opens for all three recoveries. 1735 * Check if we need to update the current stateid. 1736 */ 1737 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 1738 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 1739 write_seqlock(&state->seqlock); 1740 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1741 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1742 write_sequnlock(&state->seqlock); 1743 } 1744 return 0; 1745 } 1746 1747 /* 1748 * OPEN_RECLAIM: 1749 * reclaim state on the server after a reboot. 
1750 */ 1751 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 1752 { 1753 struct nfs_delegation *delegation; 1754 struct nfs4_opendata *opendata; 1755 fmode_t delegation_type = 0; 1756 int status; 1757 1758 opendata = nfs4_open_recoverdata_alloc(ctx, state, 1759 NFS4_OPEN_CLAIM_PREVIOUS); 1760 if (IS_ERR(opendata)) 1761 return PTR_ERR(opendata); 1762 rcu_read_lock(); 1763 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 1764 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) 1765 delegation_type = delegation->type; 1766 rcu_read_unlock(); 1767 opendata->o_arg.u.delegation_type = delegation_type; 1768 status = nfs4_open_recover(opendata, state); 1769 nfs4_opendata_put(opendata); 1770 return status; 1771 } 1772 1773 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 1774 { 1775 struct nfs_server *server = NFS_SERVER(state->inode); 1776 struct nfs4_exception exception = { }; 1777 int err; 1778 do { 1779 err = _nfs4_do_open_reclaim(ctx, state); 1780 trace_nfs4_open_reclaim(ctx, 0, err); 1781 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 1782 continue; 1783 if (err != -NFS4ERR_DELAY) 1784 break; 1785 nfs4_handle_exception(server, err, &exception); 1786 } while (exception.retry); 1787 return err; 1788 } 1789 1790 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 1791 { 1792 struct nfs_open_context *ctx; 1793 int ret; 1794 1795 ctx = nfs4_state_find_open_context(state); 1796 if (IS_ERR(ctx)) 1797 return -EAGAIN; 1798 ret = nfs4_do_open_reclaim(ctx, state); 1799 put_nfs_open_context(ctx); 1800 return ret; 1801 } 1802 1803 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err) 1804 { 1805 switch (err) { 1806 default: 1807 printk(KERN_ERR "NFS: %s: unhandled error " 1808 "%d.\n", __func__, err); 1809 case 0: 1810 case -ENOENT: 1811 case -EAGAIN: 1812 case -ESTALE: 1813 break; 1814 case -NFS4ERR_BADSESSION: 1815 case -NFS4ERR_BADSLOT: 1816 case -NFS4ERR_BAD_HIGH_SLOT: 1817 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1818 case -NFS4ERR_DEADSESSION: 1819 set_bit(NFS_DELEGATED_STATE, &state->flags); 1820 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); 1821 return -EAGAIN; 1822 case -NFS4ERR_STALE_CLIENTID: 1823 case -NFS4ERR_STALE_STATEID: 1824 set_bit(NFS_DELEGATED_STATE, &state->flags); 1825 case -NFS4ERR_EXPIRED: 1826 /* Don't recall a delegation if it was lost */ 1827 nfs4_schedule_lease_recovery(server->nfs_client); 1828 return -EAGAIN; 1829 case -NFS4ERR_MOVED: 1830 nfs4_schedule_migration_recovery(server); 1831 return -EAGAIN; 1832 case -NFS4ERR_LEASE_MOVED: 1833 nfs4_schedule_lease_moved_recovery(server->nfs_client); 1834 return -EAGAIN; 1835 case -NFS4ERR_DELEG_REVOKED: 1836 case -NFS4ERR_ADMIN_REVOKED: 1837 case -NFS4ERR_BAD_STATEID: 1838 case -NFS4ERR_OPENMODE: 1839 nfs_inode_find_state_and_recover(state->inode, 1840 stateid); 1841 nfs4_schedule_stateid_recovery(server, state); 1842 return -EAGAIN; 1843 case -NFS4ERR_DELAY: 1844 case -NFS4ERR_GRACE: 1845 set_bit(NFS_DELEGATED_STATE, &state->flags); 1846 ssleep(1); 1847 return -EAGAIN; 1848 case -ENOMEM: 1849 case -NFS4ERR_DENIED: 1850 /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 1851 return 0; 1852 } 1853 return err; 1854 } 1855 1856 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, 1857 struct nfs4_state *state, const nfs4_stateid *stateid, 1858 fmode_t type) 
1859 { 1860 struct nfs_server *server = NFS_SERVER(state->inode); 1861 struct nfs4_opendata *opendata; 1862 int err = 0; 1863 1864 opendata = nfs4_open_recoverdata_alloc(ctx, state, 1865 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 1866 if (IS_ERR(opendata)) 1867 return PTR_ERR(opendata); 1868 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 1869 write_seqlock(&state->seqlock); 1870 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1871 write_sequnlock(&state->seqlock); 1872 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1873 switch (type & (FMODE_READ|FMODE_WRITE)) { 1874 case FMODE_READ|FMODE_WRITE: 1875 case FMODE_WRITE: 1876 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 1877 if (err) 1878 break; 1879 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 1880 if (err) 1881 break; 1882 case FMODE_READ: 1883 err = nfs4_open_recover_helper(opendata, FMODE_READ); 1884 } 1885 nfs4_opendata_put(opendata); 1886 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 1887 } 1888 1889 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 1890 { 1891 struct nfs4_opendata *data = calldata; 1892 1893 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl, 1894 &data->c_arg.seq_args, &data->c_res.seq_res, task); 1895 } 1896 1897 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 1898 { 1899 struct nfs4_opendata *data = calldata; 1900 1901 nfs40_sequence_done(task, &data->c_res.seq_res); 1902 1903 data->rpc_status = task->tk_status; 1904 if (data->rpc_status == 0) { 1905 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 1906 nfs_confirm_seqid(&data->owner->so_seqid, 0); 1907 renew_lease(data->o_res.server, data->timestamp); 1908 data->rpc_done = 1; 1909 } 1910 } 1911 1912 static void nfs4_open_confirm_release(void *calldata) 1913 { 1914 struct nfs4_opendata *data = calldata; 1915 struct nfs4_state *state = NULL; 1916 1917 /* If this request hasn't been cancelled, do nothing */ 1918 if (data->cancelled == 0) 1919 goto out_free; 1920 /* In case of error, no cleanup! 
*/ 1921 if (!data->rpc_done) 1922 goto out_free; 1923 state = nfs4_opendata_to_nfs4_state(data); 1924 if (!IS_ERR(state)) 1925 nfs4_close_state(state, data->o_arg.fmode); 1926 out_free: 1927 nfs4_opendata_put(data); 1928 } 1929 1930 static const struct rpc_call_ops nfs4_open_confirm_ops = { 1931 .rpc_call_prepare = nfs4_open_confirm_prepare, 1932 .rpc_call_done = nfs4_open_confirm_done, 1933 .rpc_release = nfs4_open_confirm_release, 1934 }; 1935 1936 /* 1937 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 1938 */ 1939 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 1940 { 1941 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 1942 struct rpc_task *task; 1943 struct rpc_message msg = { 1944 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 1945 .rpc_argp = &data->c_arg, 1946 .rpc_resp = &data->c_res, 1947 .rpc_cred = data->owner->so_cred, 1948 }; 1949 struct rpc_task_setup task_setup_data = { 1950 .rpc_client = server->client, 1951 .rpc_message = &msg, 1952 .callback_ops = &nfs4_open_confirm_ops, 1953 .callback_data = data, 1954 .workqueue = nfsiod_workqueue, 1955 .flags = RPC_TASK_ASYNC, 1956 }; 1957 int status; 1958 1959 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1); 1960 kref_get(&data->kref); 1961 data->rpc_done = 0; 1962 data->rpc_status = 0; 1963 data->timestamp = jiffies; 1964 if (data->is_recover) 1965 nfs4_set_sequence_privileged(&data->c_arg.seq_args); 1966 task = rpc_run_task(&task_setup_data); 1967 if (IS_ERR(task)) 1968 return PTR_ERR(task); 1969 status = nfs4_wait_for_completion_rpc_task(task); 1970 if (status != 0) { 1971 data->cancelled = 1; 1972 smp_wmb(); 1973 } else 1974 status = data->rpc_status; 1975 rpc_put_task(task); 1976 return status; 1977 } 1978 1979 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 1980 { 1981 struct nfs4_opendata *data = calldata; 1982 struct nfs4_state_owner *sp = data->owner; 1983 struct nfs_client *clp = sp->so_server->nfs_client; 1984 enum open_claim_type4 claim = data->o_arg.claim; 1985 1986 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 1987 goto out_wait; 1988 /* 1989 * Check if we still need to send an OPEN call, or if we can use 1990 * a delegation instead. 1991 */ 1992 if (data->state != NULL) { 1993 struct nfs_delegation *delegation; 1994 1995 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) 1996 goto out_no_action; 1997 rcu_read_lock(); 1998 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); 1999 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 2000 goto unlock_no_action; 2001 rcu_read_unlock(); 2002 } 2003 /* Update client id. 
*/ 2004 data->o_arg.clientid = clp->cl_clientid; 2005 switch (claim) { 2006 default: 2007 break; 2008 case NFS4_OPEN_CLAIM_PREVIOUS: 2009 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2010 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2011 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2012 case NFS4_OPEN_CLAIM_FH: 2013 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2014 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); 2015 } 2016 data->timestamp = jiffies; 2017 if (nfs4_setup_sequence(data->o_arg.server, 2018 &data->o_arg.seq_args, 2019 &data->o_res.seq_res, 2020 task) != 0) 2021 nfs_release_seqid(data->o_arg.seqid); 2022 2023 /* Set the create mode (note dependency on the session type) */ 2024 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2025 if (data->o_arg.open_flags & O_EXCL) { 2026 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2027 if (nfs4_has_persistent_session(clp)) 2028 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2029 else if (clp->cl_mvops->minor_version > 0) 2030 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2031 } 2032 return; 2033 unlock_no_action: 2034 trace_nfs4_cached_open(data->state); 2035 rcu_read_unlock(); 2036 out_no_action: 2037 task->tk_action = NULL; 2038 out_wait: 2039 nfs4_sequence_done(task, &data->o_res.seq_res); 2040 } 2041 2042 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2043 { 2044 struct nfs4_opendata *data = calldata; 2045 2046 data->rpc_status = task->tk_status; 2047 2048 if (!nfs4_sequence_done(task, &data->o_res.seq_res)) 2049 return; 2050 2051 if (task->tk_status == 0) { 2052 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2053 switch (data->o_res.f_attr->mode & S_IFMT) { 2054 case S_IFREG: 2055 break; 2056 case S_IFLNK: 2057 data->rpc_status = -ELOOP; 2058 break; 2059 case S_IFDIR: 2060 data->rpc_status = -EISDIR; 2061 break; 2062 default: 2063 data->rpc_status = -ENOTDIR; 2064 } 2065 } 2066 renew_lease(data->o_res.server, data->timestamp); 2067 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2068 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2069 } 2070 data->rpc_done = 1; 2071 } 2072 2073 static void nfs4_open_release(void *calldata) 2074 { 2075 struct nfs4_opendata *data = calldata; 2076 struct nfs4_state *state = NULL; 2077 2078 /* If this request hasn't been cancelled, do nothing */ 2079 if (data->cancelled == 0) 2080 goto out_free; 2081 /* In case of error, no cleanup! */ 2082 if (data->rpc_status != 0 || !data->rpc_done) 2083 goto out_free; 2084 /* In case we need an open_confirm, no cleanup! 
*/ 2085 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2086 goto out_free; 2087 state = nfs4_opendata_to_nfs4_state(data); 2088 if (!IS_ERR(state)) 2089 nfs4_close_state(state, data->o_arg.fmode); 2090 out_free: 2091 nfs4_opendata_put(data); 2092 } 2093 2094 static const struct rpc_call_ops nfs4_open_ops = { 2095 .rpc_call_prepare = nfs4_open_prepare, 2096 .rpc_call_done = nfs4_open_done, 2097 .rpc_release = nfs4_open_release, 2098 }; 2099 2100 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) 2101 { 2102 struct inode *dir = d_inode(data->dir); 2103 struct nfs_server *server = NFS_SERVER(dir); 2104 struct nfs_openargs *o_arg = &data->o_arg; 2105 struct nfs_openres *o_res = &data->o_res; 2106 struct rpc_task *task; 2107 struct rpc_message msg = { 2108 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2109 .rpc_argp = o_arg, 2110 .rpc_resp = o_res, 2111 .rpc_cred = data->owner->so_cred, 2112 }; 2113 struct rpc_task_setup task_setup_data = { 2114 .rpc_client = server->client, 2115 .rpc_message = &msg, 2116 .callback_ops = &nfs4_open_ops, 2117 .callback_data = data, 2118 .workqueue = nfsiod_workqueue, 2119 .flags = RPC_TASK_ASYNC, 2120 }; 2121 int status; 2122 2123 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); 2124 kref_get(&data->kref); 2125 data->rpc_done = 0; 2126 data->rpc_status = 0; 2127 data->cancelled = 0; 2128 data->is_recover = 0; 2129 if (isrecover) { 2130 nfs4_set_sequence_privileged(&o_arg->seq_args); 2131 data->is_recover = 1; 2132 } 2133 task = rpc_run_task(&task_setup_data); 2134 if (IS_ERR(task)) 2135 return PTR_ERR(task); 2136 status = nfs4_wait_for_completion_rpc_task(task); 2137 if (status != 0) { 2138 data->cancelled = 1; 2139 smp_wmb(); 2140 } else 2141 status = data->rpc_status; 2142 rpc_put_task(task); 2143 2144 return status; 2145 } 2146 2147 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2148 { 2149 struct inode *dir = d_inode(data->dir); 2150 struct nfs_openres *o_res = &data->o_res; 2151 int status; 2152 2153 status = nfs4_run_open_task(data, 1); 2154 if (status != 0 || !data->rpc_done) 2155 return status; 2156 2157 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2158 2159 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2160 status = _nfs4_proc_open_confirm(data); 2161 if (status != 0) 2162 return status; 2163 } 2164 2165 return status; 2166 } 2167 2168 /* 2169 * Additional permission checks in order to distinguish between an 2170 * open for read, and an open for execute. This works around the 2171 * fact that NFSv4 OPEN treats read and execute permissions as being 2172 * the same. 2173 * Note that in the non-execute case, we want to turn off permission 2174 * checking if we just created a new file (POSIX open() semantics). 2175 */ 2176 static int nfs4_opendata_access(struct rpc_cred *cred, 2177 struct nfs4_opendata *opendata, 2178 struct nfs4_state *state, fmode_t fmode, 2179 int openflags) 2180 { 2181 struct nfs_access_entry cache; 2182 u32 mask; 2183 2184 /* access call failed or for some reason the server doesn't 2185 * support any access modes -- defer access call until later */ 2186 if (opendata->o_res.access_supported == 0) 2187 return 0; 2188 2189 mask = 0; 2190 /* 2191 * Use openflags to check for exec, because fmode won't 2192 * always have FMODE_EXEC set when file open for exec. 
2193 */ 2194 if (openflags & __FMODE_EXEC) { 2195 /* ONLY check for exec rights */ 2196 mask = MAY_EXEC; 2197 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2198 mask = MAY_READ; 2199 2200 cache.cred = cred; 2201 cache.jiffies = jiffies; 2202 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2203 nfs_access_add_cache(state->inode, &cache); 2204 2205 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 2206 return 0; 2207 2208 /* even though OPEN succeeded, access is denied. Close the file */ 2209 nfs4_close_state(state, fmode); 2210 return -EACCES; 2211 } 2212 2213 /* 2214 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2215 */ 2216 static int _nfs4_proc_open(struct nfs4_opendata *data) 2217 { 2218 struct inode *dir = d_inode(data->dir); 2219 struct nfs_server *server = NFS_SERVER(dir); 2220 struct nfs_openargs *o_arg = &data->o_arg; 2221 struct nfs_openres *o_res = &data->o_res; 2222 int status; 2223 2224 status = nfs4_run_open_task(data, 0); 2225 if (!data->rpc_done) 2226 return status; 2227 if (status != 0) { 2228 if (status == -NFS4ERR_BADNAME && 2229 !(o_arg->open_flags & O_CREAT)) 2230 return -ENOENT; 2231 return status; 2232 } 2233 2234 nfs_fattr_map_and_free_names(server, &data->f_attr); 2235 2236 if (o_arg->open_flags & O_CREAT) { 2237 update_changeattr(dir, &o_res->cinfo); 2238 if (o_arg->open_flags & O_EXCL) 2239 data->file_created = 1; 2240 else if (o_res->cinfo.before != o_res->cinfo.after) 2241 data->file_created = 1; 2242 } 2243 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2244 server->caps &= ~NFS_CAP_POSIX_LOCK; 2245 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2246 status = _nfs4_proc_open_confirm(data); 2247 if (status != 0) 2248 return status; 2249 } 2250 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 2251 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label); 2252 return 0; 2253 } 2254 2255 static int nfs4_recover_expired_lease(struct nfs_server *server) 2256 { 2257 return nfs4_client_recover_expired_lease(server->nfs_client); 2258 } 2259 2260 /* 2261 * OPEN_EXPIRED: 2262 * reclaim state on the server after a network partition. 
2263 * Assumes caller holds the appropriate lock 2264 */ 2265 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2266 { 2267 struct nfs4_opendata *opendata; 2268 int ret; 2269 2270 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2271 NFS4_OPEN_CLAIM_FH); 2272 if (IS_ERR(opendata)) 2273 return PTR_ERR(opendata); 2274 ret = nfs4_open_recover(opendata, state); 2275 if (ret == -ESTALE) 2276 d_drop(ctx->dentry); 2277 nfs4_opendata_put(opendata); 2278 return ret; 2279 } 2280 2281 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2282 { 2283 struct nfs_server *server = NFS_SERVER(state->inode); 2284 struct nfs4_exception exception = { }; 2285 int err; 2286 2287 do { 2288 err = _nfs4_open_expired(ctx, state); 2289 trace_nfs4_open_expired(ctx, 0, err); 2290 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2291 continue; 2292 switch (err) { 2293 default: 2294 goto out; 2295 case -NFS4ERR_GRACE: 2296 case -NFS4ERR_DELAY: 2297 nfs4_handle_exception(server, err, &exception); 2298 err = 0; 2299 } 2300 } while (exception.retry); 2301 out: 2302 return err; 2303 } 2304 2305 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2306 { 2307 struct nfs_open_context *ctx; 2308 int ret; 2309 2310 ctx = nfs4_state_find_open_context(state); 2311 if (IS_ERR(ctx)) 2312 return -EAGAIN; 2313 ret = nfs4_do_open_expired(ctx, state); 2314 put_nfs_open_context(ctx); 2315 return ret; 2316 } 2317 2318 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state) 2319 { 2320 nfs_remove_bad_delegation(state->inode); 2321 write_seqlock(&state->seqlock); 2322 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2323 write_sequnlock(&state->seqlock); 2324 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2325 } 2326 2327 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2328 { 2329 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2330 nfs_finish_clear_delegation_stateid(state); 2331 } 2332 2333 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2334 { 2335 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2336 nfs40_clear_delegation_stateid(state); 2337 return nfs4_open_expired(sp, state); 2338 } 2339 2340 #if defined(CONFIG_NFS_V4_1) 2341 static void nfs41_check_delegation_stateid(struct nfs4_state *state) 2342 { 2343 struct nfs_server *server = NFS_SERVER(state->inode); 2344 nfs4_stateid stateid; 2345 struct nfs_delegation *delegation; 2346 struct rpc_cred *cred; 2347 int status; 2348 2349 /* Get the delegation credential for use by test/free_stateid */ 2350 rcu_read_lock(); 2351 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2352 if (delegation == NULL) { 2353 rcu_read_unlock(); 2354 return; 2355 } 2356 2357 nfs4_stateid_copy(&stateid, &delegation->stateid); 2358 cred = get_rpccred(delegation->cred); 2359 rcu_read_unlock(); 2360 status = nfs41_test_stateid(server, &stateid, cred); 2361 trace_nfs4_test_delegation_stateid(state, NULL, status); 2362 2363 if (status != NFS_OK) { 2364 /* Free the stateid unless the server explicitly 2365 * informs us the stateid is unrecognized. 
*/ 2366 if (status != -NFS4ERR_BAD_STATEID) 2367 nfs41_free_stateid(server, &stateid, cred); 2368 nfs_finish_clear_delegation_stateid(state); 2369 } 2370 2371 put_rpccred(cred); 2372 } 2373 2374 /** 2375 * nfs41_check_open_stateid - possibly free an open stateid 2376 * 2377 * @state: NFSv4 state for an inode 2378 * 2379 * Returns NFS_OK if recovery for this stateid is now finished. 2380 * Otherwise a negative NFS4ERR value is returned. 2381 */ 2382 static int nfs41_check_open_stateid(struct nfs4_state *state) 2383 { 2384 struct nfs_server *server = NFS_SERVER(state->inode); 2385 nfs4_stateid *stateid = &state->open_stateid; 2386 struct rpc_cred *cred = state->owner->so_cred; 2387 int status; 2388 2389 /* If a state reset has been done, test_stateid is unneeded */ 2390 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) && 2391 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) && 2392 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0)) 2393 return -NFS4ERR_BAD_STATEID; 2394 2395 status = nfs41_test_stateid(server, stateid, cred); 2396 trace_nfs4_test_open_stateid(state, NULL, status); 2397 if (status != NFS_OK) { 2398 /* Free the stateid unless the server explicitly 2399 * informs us the stateid is unrecognized. */ 2400 if (status != -NFS4ERR_BAD_STATEID) 2401 nfs41_free_stateid(server, stateid, cred); 2402 2403 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2404 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2405 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2406 clear_bit(NFS_OPEN_STATE, &state->flags); 2407 } 2408 return status; 2409 } 2410 2411 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2412 { 2413 int status; 2414 2415 nfs41_check_delegation_stateid(state); 2416 status = nfs41_check_open_stateid(state); 2417 if (status != NFS_OK) 2418 status = nfs4_open_expired(sp, state); 2419 return status; 2420 } 2421 #endif 2422 2423 /* 2424 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 2425 * fields corresponding to attributes that were used to store the verifier. 2426 * Make sure we clobber those fields in the later setattr call 2427 */ 2428 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 2429 struct iattr *sattr, struct nfs4_label **label) 2430 { 2431 const u32 *attrset = opendata->o_res.attrset; 2432 2433 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 2434 !(sattr->ia_valid & ATTR_ATIME_SET)) 2435 sattr->ia_valid |= ATTR_ATIME; 2436 2437 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 2438 !(sattr->ia_valid & ATTR_MTIME_SET)) 2439 sattr->ia_valid |= ATTR_MTIME; 2440 2441 /* Except MODE, it seems harmless of setting twice. 
*/ 2442 if ((attrset[1] & FATTR4_WORD1_MODE)) 2443 sattr->ia_valid &= ~ATTR_MODE; 2444 2445 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) 2446 *label = NULL; 2447 } 2448 2449 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 2450 fmode_t fmode, 2451 int flags, 2452 struct nfs_open_context *ctx) 2453 { 2454 struct nfs4_state_owner *sp = opendata->owner; 2455 struct nfs_server *server = sp->so_server; 2456 struct dentry *dentry; 2457 struct nfs4_state *state; 2458 unsigned int seq; 2459 int ret; 2460 2461 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 2462 2463 ret = _nfs4_proc_open(opendata); 2464 if (ret != 0) 2465 goto out; 2466 2467 state = nfs4_opendata_to_nfs4_state(opendata); 2468 ret = PTR_ERR(state); 2469 if (IS_ERR(state)) 2470 goto out; 2471 if (server->caps & NFS_CAP_POSIX_LOCK) 2472 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 2473 2474 dentry = opendata->dentry; 2475 if (d_really_is_negative(dentry)) { 2476 struct dentry *alias; 2477 d_drop(dentry); 2478 alias = d_exact_alias(dentry, state->inode); 2479 if (!alias) 2480 alias = d_splice_alias(igrab(state->inode), dentry); 2481 /* d_splice_alias() can't fail here - it's a non-directory */ 2482 if (alias) { 2483 dput(ctx->dentry); 2484 ctx->dentry = dentry = alias; 2485 } 2486 nfs_set_verifier(dentry, 2487 nfs_save_change_attribute(d_inode(opendata->dir))); 2488 } 2489 2490 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags); 2491 if (ret != 0) 2492 goto out; 2493 2494 ctx->state = state; 2495 if (d_inode(dentry) == state->inode) { 2496 nfs_inode_attach_open_context(ctx); 2497 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 2498 nfs4_schedule_stateid_recovery(server, state); 2499 } 2500 out: 2501 return ret; 2502 } 2503 2504 /* 2505 * Returns a referenced nfs4_state 2506 */ 2507 static int _nfs4_do_open(struct inode *dir, 2508 struct nfs_open_context *ctx, 2509 int flags, 2510 struct iattr *sattr, 2511 struct nfs4_label *label, 2512 int *opened) 2513 { 2514 struct nfs4_state_owner *sp; 2515 struct nfs4_state *state = NULL; 2516 struct nfs_server *server = NFS_SERVER(dir); 2517 struct nfs4_opendata *opendata; 2518 struct dentry *dentry = ctx->dentry; 2519 struct rpc_cred *cred = ctx->cred; 2520 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 2521 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC); 2522 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 2523 struct nfs4_label *olabel = NULL; 2524 int status; 2525 2526 /* Protect against reboot recovery conflicts */ 2527 status = -ENOMEM; 2528 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 2529 if (sp == NULL) { 2530 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 2531 goto out_err; 2532 } 2533 status = nfs4_recover_expired_lease(server); 2534 if (status != 0) 2535 goto err_put_state_owner; 2536 if (d_really_is_positive(dentry)) 2537 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 2538 status = -ENOMEM; 2539 if (d_really_is_positive(dentry)) 2540 claim = NFS4_OPEN_CLAIM_FH; 2541 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, 2542 label, claim, GFP_KERNEL); 2543 if (opendata == NULL) 2544 goto err_put_state_owner; 2545 2546 if (label) { 2547 olabel = nfs4_label_alloc(server, GFP_KERNEL); 2548 if (IS_ERR(olabel)) { 2549 status = PTR_ERR(olabel); 2550 goto err_opendata_put; 2551 } 2552 } 2553 2554 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 2555 if (!opendata->f_attr.mdsthreshold) { 2556 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 2557 if 
(!opendata->f_attr.mdsthreshold) 2558 goto err_free_label; 2559 } 2560 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 2561 } 2562 if (d_really_is_positive(dentry)) 2563 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 2564 2565 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx); 2566 if (status != 0) 2567 goto err_free_label; 2568 state = ctx->state; 2569 2570 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 2571 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 2572 nfs4_exclusive_attrset(opendata, sattr, &label); 2573 /* 2574 * send create attributes which was not set by open 2575 * with an extra setattr. 2576 */ 2577 if (sattr->ia_valid & NFS4_VALID_ATTRS) { 2578 nfs_fattr_init(opendata->o_res.f_attr); 2579 status = nfs4_do_setattr(state->inode, cred, 2580 opendata->o_res.f_attr, sattr, 2581 state, label, olabel); 2582 if (status == 0) { 2583 nfs_setattr_update_inode(state->inode, sattr, 2584 opendata->o_res.f_attr); 2585 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2586 } 2587 } 2588 } 2589 if (opened && opendata->file_created) 2590 *opened |= FILE_CREATED; 2591 2592 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 2593 *ctx_th = opendata->f_attr.mdsthreshold; 2594 opendata->f_attr.mdsthreshold = NULL; 2595 } 2596 2597 nfs4_label_free(olabel); 2598 2599 nfs4_opendata_put(opendata); 2600 nfs4_put_state_owner(sp); 2601 return 0; 2602 err_free_label: 2603 nfs4_label_free(olabel); 2604 err_opendata_put: 2605 nfs4_opendata_put(opendata); 2606 err_put_state_owner: 2607 nfs4_put_state_owner(sp); 2608 out_err: 2609 return status; 2610 } 2611 2612 2613 static struct nfs4_state *nfs4_do_open(struct inode *dir, 2614 struct nfs_open_context *ctx, 2615 int flags, 2616 struct iattr *sattr, 2617 struct nfs4_label *label, 2618 int *opened) 2619 { 2620 struct nfs_server *server = NFS_SERVER(dir); 2621 struct nfs4_exception exception = { }; 2622 struct nfs4_state *res; 2623 int status; 2624 2625 do { 2626 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened); 2627 res = ctx->state; 2628 trace_nfs4_open_file(ctx, flags, status); 2629 if (status == 0) 2630 break; 2631 /* NOTE: BAD_SEQID means the server and client disagree about the 2632 * book-keeping w.r.t. state-changing operations 2633 * (OPEN/CLOSE/LOCK/LOCKU...) 2634 * It is actually a sign of a bug on the client or on the server. 2635 * 2636 * If we receive a BAD_SEQID error in the particular case of 2637 * doing an OPEN, we assume that nfs_increment_open_seqid() will 2638 * have unhashed the old state_owner for us, and that we can 2639 * therefore safely retry using a new one. We should still warn 2640 * the user though... 2641 */ 2642 if (status == -NFS4ERR_BAD_SEQID) { 2643 pr_warn_ratelimited("NFS: v4 server %s " 2644 " returned a bad sequence-id error!\n", 2645 NFS_SERVER(dir)->nfs_client->cl_hostname); 2646 exception.retry = 1; 2647 continue; 2648 } 2649 /* 2650 * BAD_STATEID on OPEN means that the server cancelled our 2651 * state before it received the OPEN_CONFIRM. 2652 * Recover by retrying the request as per the discussion 2653 * on Page 181 of RFC3530. 
2654 */ 2655 if (status == -NFS4ERR_BAD_STATEID) { 2656 exception.retry = 1; 2657 continue; 2658 } 2659 if (status == -EAGAIN) { 2660 /* We must have found a delegation */ 2661 exception.retry = 1; 2662 continue; 2663 } 2664 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 2665 continue; 2666 res = ERR_PTR(nfs4_handle_exception(server, 2667 status, &exception)); 2668 } while (exception.retry); 2669 return res; 2670 } 2671 2672 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2673 struct nfs_fattr *fattr, struct iattr *sattr, 2674 struct nfs4_state *state, struct nfs4_label *ilabel, 2675 struct nfs4_label *olabel) 2676 { 2677 struct nfs_server *server = NFS_SERVER(inode); 2678 struct nfs_setattrargs arg = { 2679 .fh = NFS_FH(inode), 2680 .iap = sattr, 2681 .server = server, 2682 .bitmask = server->attr_bitmask, 2683 .label = ilabel, 2684 }; 2685 struct nfs_setattrres res = { 2686 .fattr = fattr, 2687 .label = olabel, 2688 .server = server, 2689 }; 2690 struct rpc_message msg = { 2691 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2692 .rpc_argp = &arg, 2693 .rpc_resp = &res, 2694 .rpc_cred = cred, 2695 }; 2696 struct rpc_cred *delegation_cred = NULL; 2697 unsigned long timestamp = jiffies; 2698 fmode_t fmode; 2699 bool truncate; 2700 int status; 2701 2702 arg.bitmask = nfs4_bitmask(server, ilabel); 2703 if (ilabel) 2704 arg.bitmask = nfs4_bitmask(server, olabel); 2705 2706 nfs_fattr_init(fattr); 2707 2708 /* Servers should only apply open mode checks for file size changes */ 2709 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false; 2710 fmode = truncate ? FMODE_WRITE : FMODE_READ; 2711 2712 if (nfs4_copy_delegation_stateid(inode, fmode, &arg.stateid, &delegation_cred)) { 2713 /* Use that stateid */ 2714 } else if (truncate && state != NULL) { 2715 struct nfs_lockowner lockowner = { 2716 .l_owner = current->files, 2717 .l_pid = current->tgid, 2718 }; 2719 if (!nfs4_valid_open_stateid(state)) 2720 return -EBADF; 2721 if (nfs4_select_rw_stateid(state, FMODE_WRITE, &lockowner, 2722 &arg.stateid, &delegation_cred) == -EIO) 2723 return -EBADF; 2724 } else 2725 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2726 if (delegation_cred) 2727 msg.rpc_cred = delegation_cred; 2728 2729 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2730 2731 put_rpccred(delegation_cred); 2732 if (status == 0 && state != NULL) 2733 renew_lease(server, timestamp); 2734 trace_nfs4_setattr(inode, &arg.stateid, status); 2735 return status; 2736 } 2737 2738 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2739 struct nfs_fattr *fattr, struct iattr *sattr, 2740 struct nfs4_state *state, struct nfs4_label *ilabel, 2741 struct nfs4_label *olabel) 2742 { 2743 struct nfs_server *server = NFS_SERVER(inode); 2744 struct nfs4_exception exception = { 2745 .state = state, 2746 .inode = inode, 2747 }; 2748 int err; 2749 do { 2750 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel); 2751 switch (err) { 2752 case -NFS4ERR_OPENMODE: 2753 if (!(sattr->ia_valid & ATTR_SIZE)) { 2754 pr_warn_once("NFSv4: server %s is incorrectly " 2755 "applying open mode checks to " 2756 "a SETATTR that is not " 2757 "changing file size.\n", 2758 server->nfs_client->cl_hostname); 2759 } 2760 if (state && !(state->state & FMODE_WRITE)) { 2761 err = -EBADF; 2762 if (sattr->ia_valid & ATTR_OPEN) 2763 err = -EACCES; 2764 goto out; 2765 } 2766 } 2767 err = nfs4_handle_exception(server, err, &exception); 2768 } while (exception.retry); 
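/*
 * Note on the loop above: nfs4_handle_exception() is the common error
 * filter used throughout this file. For transient conditions such as
 * NFS4ERR_DELAY or NFS4ERR_GRACE it waits and sets exception.retry so
 * that _nfs4_do_setattr() is simply reissued; for stateid or lease
 * errors it typically triggers the relevant recovery before retrying;
 * any other error ends the loop and the (possibly remapped) result is
 * returned below.
 */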
2769 out: 2770 return err; 2771 } 2772 2773 static bool 2774 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 2775 { 2776 if (inode == NULL || !nfs_have_layout(inode)) 2777 return false; 2778 2779 return pnfs_wait_on_layoutreturn(inode, task); 2780 } 2781 2782 struct nfs4_closedata { 2783 struct inode *inode; 2784 struct nfs4_state *state; 2785 struct nfs_closeargs arg; 2786 struct nfs_closeres res; 2787 struct nfs_fattr fattr; 2788 unsigned long timestamp; 2789 bool roc; 2790 u32 roc_barrier; 2791 }; 2792 2793 static void nfs4_free_closedata(void *data) 2794 { 2795 struct nfs4_closedata *calldata = data; 2796 struct nfs4_state_owner *sp = calldata->state->owner; 2797 struct super_block *sb = calldata->state->inode->i_sb; 2798 2799 if (calldata->roc) 2800 pnfs_roc_release(calldata->state->inode); 2801 nfs4_put_open_state(calldata->state); 2802 nfs_free_seqid(calldata->arg.seqid); 2803 nfs4_put_state_owner(sp); 2804 nfs_sb_deactive(sb); 2805 kfree(calldata); 2806 } 2807 2808 static void nfs4_close_done(struct rpc_task *task, void *data) 2809 { 2810 struct nfs4_closedata *calldata = data; 2811 struct nfs4_state *state = calldata->state; 2812 struct nfs_server *server = NFS_SERVER(calldata->inode); 2813 nfs4_stateid *res_stateid = NULL; 2814 2815 dprintk("%s: begin!\n", __func__); 2816 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2817 return; 2818 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 2819 /* hmm. we are done with the inode, and in the process of freeing 2820 * the state_owner. we keep this around to process errors 2821 */ 2822 switch (task->tk_status) { 2823 case 0: 2824 res_stateid = &calldata->res.stateid; 2825 if (calldata->roc) 2826 pnfs_roc_set_barrier(state->inode, 2827 calldata->roc_barrier); 2828 renew_lease(server, calldata->timestamp); 2829 break; 2830 case -NFS4ERR_ADMIN_REVOKED: 2831 case -NFS4ERR_STALE_STATEID: 2832 case -NFS4ERR_OLD_STATEID: 2833 case -NFS4ERR_BAD_STATEID: 2834 case -NFS4ERR_EXPIRED: 2835 if (!nfs4_stateid_match(&calldata->arg.stateid, 2836 &state->open_stateid)) { 2837 rpc_restart_call_prepare(task); 2838 goto out_release; 2839 } 2840 if (calldata->arg.fmode == 0) 2841 break; 2842 default: 2843 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) { 2844 rpc_restart_call_prepare(task); 2845 goto out_release; 2846 } 2847 } 2848 nfs_clear_open_stateid(state, &calldata->arg.stateid, 2849 res_stateid, calldata->arg.fmode); 2850 out_release: 2851 nfs_release_seqid(calldata->arg.seqid); 2852 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2853 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2854 } 2855 2856 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2857 { 2858 struct nfs4_closedata *calldata = data; 2859 struct nfs4_state *state = calldata->state; 2860 struct inode *inode = calldata->inode; 2861 bool is_rdonly, is_wronly, is_rdwr; 2862 int call_close = 0; 2863 2864 dprintk("%s: begin!\n", __func__); 2865 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2866 goto out_wait; 2867 2868 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2869 spin_lock(&state->owner->so_lock); 2870 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 2871 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 2872 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 2873 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid); 2874 /* Calculate the change in open mode */ 2875 calldata->arg.fmode = 0; 2876 if 
(state->n_rdwr == 0) { 2877 if (state->n_rdonly == 0) 2878 call_close |= is_rdonly; 2879 else if (is_rdonly) 2880 calldata->arg.fmode |= FMODE_READ; 2881 if (state->n_wronly == 0) 2882 call_close |= is_wronly; 2883 else if (is_wronly) 2884 calldata->arg.fmode |= FMODE_WRITE; 2885 } else if (is_rdwr) 2886 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 2887 2888 if (calldata->arg.fmode == 0) 2889 call_close |= is_rdwr; 2890 2891 if (!nfs4_valid_open_stateid(state)) 2892 call_close = 0; 2893 spin_unlock(&state->owner->so_lock); 2894 2895 if (!call_close) { 2896 /* Note: exit _without_ calling nfs4_close_done */ 2897 goto out_no_action; 2898 } 2899 2900 if (nfs4_wait_on_layoutreturn(inode, task)) { 2901 nfs_release_seqid(calldata->arg.seqid); 2902 goto out_wait; 2903 } 2904 2905 if (calldata->arg.fmode == 0) 2906 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2907 if (calldata->roc) 2908 pnfs_roc_get_barrier(inode, &calldata->roc_barrier); 2909 2910 calldata->arg.share_access = 2911 nfs4_map_atomic_open_share(NFS_SERVER(inode), 2912 calldata->arg.fmode, 0); 2913 2914 nfs_fattr_init(calldata->res.fattr); 2915 calldata->timestamp = jiffies; 2916 if (nfs4_setup_sequence(NFS_SERVER(inode), 2917 &calldata->arg.seq_args, 2918 &calldata->res.seq_res, 2919 task) != 0) 2920 nfs_release_seqid(calldata->arg.seqid); 2921 dprintk("%s: done!\n", __func__); 2922 return; 2923 out_no_action: 2924 task->tk_action = NULL; 2925 out_wait: 2926 nfs4_sequence_done(task, &calldata->res.seq_res); 2927 } 2928 2929 static const struct rpc_call_ops nfs4_close_ops = { 2930 .rpc_call_prepare = nfs4_close_prepare, 2931 .rpc_call_done = nfs4_close_done, 2932 .rpc_release = nfs4_free_closedata, 2933 }; 2934 2935 static bool nfs4_roc(struct inode *inode) 2936 { 2937 if (!nfs_have_layout(inode)) 2938 return false; 2939 return pnfs_roc(inode); 2940 } 2941 2942 /* 2943 * It is possible for data to be read/written from a mem-mapped file 2944 * after the sys_close call (which hits the vfs layer as a flush). 2945 * This means that we can't safely call nfsv4 close on a file until 2946 * the inode is cleared. This in turn means that we are not good 2947 * NFSv4 citizens - we do not indicate to the server to update the file's 2948 * share state even when we are done with one of the three share 2949 * stateid's in the inode. 2950 * 2951 * NOTE: Caller must be holding the sp->so_owner semaphore! 
2952 */ 2953 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 2954 { 2955 struct nfs_server *server = NFS_SERVER(state->inode); 2956 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 2957 struct nfs4_closedata *calldata; 2958 struct nfs4_state_owner *sp = state->owner; 2959 struct rpc_task *task; 2960 struct rpc_message msg = { 2961 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2962 .rpc_cred = state->owner->so_cred, 2963 }; 2964 struct rpc_task_setup task_setup_data = { 2965 .rpc_client = server->client, 2966 .rpc_message = &msg, 2967 .callback_ops = &nfs4_close_ops, 2968 .workqueue = nfsiod_workqueue, 2969 .flags = RPC_TASK_ASYNC, 2970 }; 2971 int status = -ENOMEM; 2972 2973 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 2974 &task_setup_data.rpc_client, &msg); 2975 2976 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2977 if (calldata == NULL) 2978 goto out; 2979 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2980 calldata->inode = state->inode; 2981 calldata->state = state; 2982 calldata->arg.fh = NFS_FH(state->inode); 2983 /* Serialization for the sequence id */ 2984 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 2985 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 2986 if (IS_ERR(calldata->arg.seqid)) 2987 goto out_free_calldata; 2988 calldata->arg.fmode = 0; 2989 calldata->arg.bitmask = server->cache_consistency_bitmask; 2990 calldata->res.fattr = &calldata->fattr; 2991 calldata->res.seqid = calldata->arg.seqid; 2992 calldata->res.server = server; 2993 calldata->roc = nfs4_roc(state->inode); 2994 nfs_sb_active(calldata->inode->i_sb); 2995 2996 msg.rpc_argp = &calldata->arg; 2997 msg.rpc_resp = &calldata->res; 2998 task_setup_data.callback_data = calldata; 2999 task = rpc_run_task(&task_setup_data); 3000 if (IS_ERR(task)) 3001 return PTR_ERR(task); 3002 status = 0; 3003 if (wait) 3004 status = rpc_wait_for_completion_task(task); 3005 rpc_put_task(task); 3006 return status; 3007 out_free_calldata: 3008 kfree(calldata); 3009 out: 3010 nfs4_put_open_state(state); 3011 nfs4_put_state_owner(sp); 3012 return status; 3013 } 3014 3015 static struct inode * 3016 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3017 int open_flags, struct iattr *attr, int *opened) 3018 { 3019 struct nfs4_state *state; 3020 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; 3021 3022 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3023 3024 /* Protect against concurrent sillydeletes */ 3025 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3026 3027 nfs4_label_release_security(label); 3028 3029 if (IS_ERR(state)) 3030 return ERR_CAST(state); 3031 return state->inode; 3032 } 3033 3034 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3035 { 3036 if (ctx->state == NULL) 3037 return; 3038 if (is_sync) 3039 nfs4_close_sync(ctx->state, ctx->mode); 3040 else 3041 nfs4_close_state(ctx->state, ctx->mode); 3042 } 3043 3044 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3045 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3046 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL) 3047 3048 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3049 { 3050 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion; 3051 struct nfs4_server_caps_arg args = { 3052 .fhandle = fhandle, 3053 .bitmask = bitmask, 3054 }; 3055 
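/*
 * The capability probe below requests only the attributes needed here:
 * word 0 asks for supported_attrs, fh_expire_type, link/symlink support
 * and aclsupport, and on minor versions >= 1 word 2 additionally asks
 * for suppattr_exclcreat so that server->exclcreat_bitmask can be
 * filled in from the reply.
 */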
struct nfs4_server_caps_res res = {}; 3056 struct rpc_message msg = { 3057 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3058 .rpc_argp = &args, 3059 .rpc_resp = &res, 3060 }; 3061 int status; 3062 3063 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3064 FATTR4_WORD0_FH_EXPIRE_TYPE | 3065 FATTR4_WORD0_LINK_SUPPORT | 3066 FATTR4_WORD0_SYMLINK_SUPPORT | 3067 FATTR4_WORD0_ACLSUPPORT; 3068 if (minorversion) 3069 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3070 3071 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3072 if (status == 0) { 3073 /* Sanity check the server answers */ 3074 switch (minorversion) { 3075 case 0: 3076 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 3077 res.attr_bitmask[2] = 0; 3078 break; 3079 case 1: 3080 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 3081 break; 3082 case 2: 3083 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 3084 } 3085 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 3086 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 3087 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 3088 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 3089 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 3090 NFS_CAP_CTIME|NFS_CAP_MTIME| 3091 NFS_CAP_SECURITY_LABEL); 3092 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 3093 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3094 server->caps |= NFS_CAP_ACLS; 3095 if (res.has_links != 0) 3096 server->caps |= NFS_CAP_HARDLINKS; 3097 if (res.has_symlinks != 0) 3098 server->caps |= NFS_CAP_SYMLINKS; 3099 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 3100 server->caps |= NFS_CAP_FILEID; 3101 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 3102 server->caps |= NFS_CAP_MODE; 3103 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 3104 server->caps |= NFS_CAP_NLINK; 3105 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 3106 server->caps |= NFS_CAP_OWNER; 3107 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 3108 server->caps |= NFS_CAP_OWNER_GROUP; 3109 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 3110 server->caps |= NFS_CAP_ATIME; 3111 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 3112 server->caps |= NFS_CAP_CTIME; 3113 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 3114 server->caps |= NFS_CAP_MTIME; 3115 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 3116 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 3117 server->caps |= NFS_CAP_SECURITY_LABEL; 3118 #endif 3119 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 3120 sizeof(server->attr_bitmask)); 3121 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 3122 3123 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 3124 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 3125 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 3126 server->cache_consistency_bitmask[2] = 0; 3127 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 3128 sizeof(server->exclcreat_bitmask)); 3129 server->acl_bitmask = res.acl_bitmask; 3130 server->fh_expire_type = res.fh_expire_type; 3131 } 3132 3133 return status; 3134 } 3135 3136 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3137 { 3138 struct nfs4_exception exception = { }; 3139 int err; 3140 do { 3141 err = nfs4_handle_exception(server, 3142 _nfs4_server_capabilities(server, fhandle), 3143 &exception); 3144 } while (exception.retry); 3145 return err; 3146 } 3147 3148 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh 
*fhandle, 3149 struct nfs_fsinfo *info) 3150 { 3151 u32 bitmask[3]; 3152 struct nfs4_lookup_root_arg args = { 3153 .bitmask = bitmask, 3154 }; 3155 struct nfs4_lookup_res res = { 3156 .server = server, 3157 .fattr = info->fattr, 3158 .fh = fhandle, 3159 }; 3160 struct rpc_message msg = { 3161 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 3162 .rpc_argp = &args, 3163 .rpc_resp = &res, 3164 }; 3165 3166 bitmask[0] = nfs4_fattr_bitmap[0]; 3167 bitmask[1] = nfs4_fattr_bitmap[1]; 3168 /* 3169 * Process the label in the upcoming getfattr 3170 */ 3171 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 3172 3173 nfs_fattr_init(info->fattr); 3174 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3175 } 3176 3177 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 3178 struct nfs_fsinfo *info) 3179 { 3180 struct nfs4_exception exception = { }; 3181 int err; 3182 do { 3183 err = _nfs4_lookup_root(server, fhandle, info); 3184 trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 3185 switch (err) { 3186 case 0: 3187 case -NFS4ERR_WRONGSEC: 3188 goto out; 3189 default: 3190 err = nfs4_handle_exception(server, err, &exception); 3191 } 3192 } while (exception.retry); 3193 out: 3194 return err; 3195 } 3196 3197 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3198 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 3199 { 3200 struct rpc_auth_create_args auth_args = { 3201 .pseudoflavor = flavor, 3202 }; 3203 struct rpc_auth *auth; 3204 int ret; 3205 3206 auth = rpcauth_create(&auth_args, server->client); 3207 if (IS_ERR(auth)) { 3208 ret = -EACCES; 3209 goto out; 3210 } 3211 ret = nfs4_lookup_root(server, fhandle, info); 3212 out: 3213 return ret; 3214 } 3215 3216 /* 3217 * Retry pseudoroot lookup with various security flavors. We do this when: 3218 * 3219 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 3220 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 3221 * 3222 * Returns zero on success, or a negative NFS4ERR value, or a 3223 * negative errno value. 3224 */ 3225 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3226 struct nfs_fsinfo *info) 3227 { 3228 /* Per 3530bis 15.33.5 */ 3229 static const rpc_authflavor_t flav_array[] = { 3230 RPC_AUTH_GSS_KRB5P, 3231 RPC_AUTH_GSS_KRB5I, 3232 RPC_AUTH_GSS_KRB5, 3233 RPC_AUTH_UNIX, /* courtesy */ 3234 RPC_AUTH_NULL, 3235 }; 3236 int status = -EPERM; 3237 size_t i; 3238 3239 if (server->auth_info.flavor_len > 0) { 3240 /* try each flavor specified by user */ 3241 for (i = 0; i < server->auth_info.flavor_len; i++) { 3242 status = nfs4_lookup_root_sec(server, fhandle, info, 3243 server->auth_info.flavors[i]); 3244 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 3245 continue; 3246 break; 3247 } 3248 } else { 3249 /* no flavors specified by user, try default list */ 3250 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 3251 status = nfs4_lookup_root_sec(server, fhandle, info, 3252 flav_array[i]); 3253 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 3254 continue; 3255 break; 3256 } 3257 } 3258 3259 /* 3260 * -EACCESS could mean that the user doesn't have correct permissions 3261 * to access the mount. It could also mean that we tried to mount 3262 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 3263 * existing mount programs don't handle -EACCES very well so it should 3264 * be mapped to -EPERM instead. 
3265 */ 3266 if (status == -EACCES) 3267 status = -EPERM; 3268 return status; 3269 } 3270 3271 static int nfs4_do_find_root_sec(struct nfs_server *server, 3272 struct nfs_fh *fhandle, struct nfs_fsinfo *info) 3273 { 3274 int mv = server->nfs_client->cl_minorversion; 3275 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info); 3276 } 3277 3278 /** 3279 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 3280 * @server: initialized nfs_server handle 3281 * @fhandle: we fill in the pseudo-fs root file handle 3282 * @info: we fill in an FSINFO struct 3283 * @auth_probe: probe the auth flavours 3284 * 3285 * Returns zero on success, or a negative errno. 3286 */ 3287 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 3288 struct nfs_fsinfo *info, 3289 bool auth_probe) 3290 { 3291 int status = 0; 3292 3293 if (!auth_probe) 3294 status = nfs4_lookup_root(server, fhandle, info); 3295 3296 if (auth_probe || status == NFS4ERR_WRONGSEC) 3297 status = nfs4_do_find_root_sec(server, fhandle, info); 3298 3299 if (status == 0) 3300 status = nfs4_server_capabilities(server, fhandle); 3301 if (status == 0) 3302 status = nfs4_do_fsinfo(server, fhandle, info); 3303 3304 return nfs4_map_errors(status); 3305 } 3306 3307 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 3308 struct nfs_fsinfo *info) 3309 { 3310 int error; 3311 struct nfs_fattr *fattr = info->fattr; 3312 struct nfs4_label *label = NULL; 3313 3314 error = nfs4_server_capabilities(server, mntfh); 3315 if (error < 0) { 3316 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 3317 return error; 3318 } 3319 3320 label = nfs4_label_alloc(server, GFP_KERNEL); 3321 if (IS_ERR(label)) 3322 return PTR_ERR(label); 3323 3324 error = nfs4_proc_getattr(server, mntfh, fattr, label); 3325 if (error < 0) { 3326 dprintk("nfs4_get_root: getattr error = %d\n", -error); 3327 goto err_free_label; 3328 } 3329 3330 if (fattr->valid & NFS_ATTR_FATTR_FSID && 3331 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 3332 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 3333 3334 err_free_label: 3335 nfs4_label_free(label); 3336 3337 return error; 3338 } 3339 3340 /* 3341 * Get locations and (maybe) other attributes of a referral. 3342 * Note that we'll actually follow the referral later when 3343 * we detect fsid mismatch in inode revalidation 3344 */ 3345 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 3346 const struct qstr *name, struct nfs_fattr *fattr, 3347 struct nfs_fh *fhandle) 3348 { 3349 int status = -ENOMEM; 3350 struct page *page = NULL; 3351 struct nfs4_fs_locations *locations = NULL; 3352 3353 page = alloc_page(GFP_KERNEL); 3354 if (page == NULL) 3355 goto out; 3356 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 3357 if (locations == NULL) 3358 goto out; 3359 3360 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 3361 if (status != 0) 3362 goto out; 3363 3364 /* 3365 * If the fsid didn't change, this is a migration event, not a 3366 * referral. Cause us to drop into the exception handler, which 3367 * will kick off migration recovery. 
3368 */ 3369 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 3370 dprintk("%s: server did not return a different fsid for" 3371 " a referral at %s\n", __func__, name->name); 3372 status = -NFS4ERR_MOVED; 3373 goto out; 3374 } 3375 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 3376 nfs_fixup_referral_attributes(&locations->fattr); 3377 3378 /* replace the lookup nfs_fattr with the locations nfs_fattr */ 3379 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 3380 memset(fhandle, 0, sizeof(struct nfs_fh)); 3381 out: 3382 if (page) 3383 __free_page(page); 3384 kfree(locations); 3385 return status; 3386 } 3387 3388 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 3389 struct nfs_fattr *fattr, struct nfs4_label *label) 3390 { 3391 struct nfs4_getattr_arg args = { 3392 .fh = fhandle, 3393 .bitmask = server->attr_bitmask, 3394 }; 3395 struct nfs4_getattr_res res = { 3396 .fattr = fattr, 3397 .label = label, 3398 .server = server, 3399 }; 3400 struct rpc_message msg = { 3401 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 3402 .rpc_argp = &args, 3403 .rpc_resp = &res, 3404 }; 3405 3406 args.bitmask = nfs4_bitmask(server, label); 3407 3408 nfs_fattr_init(fattr); 3409 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3410 } 3411 3412 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 3413 struct nfs_fattr *fattr, struct nfs4_label *label) 3414 { 3415 struct nfs4_exception exception = { }; 3416 int err; 3417 do { 3418 err = _nfs4_proc_getattr(server, fhandle, fattr, label); 3419 trace_nfs4_getattr(server, fhandle, fattr, err); 3420 err = nfs4_handle_exception(server, err, 3421 &exception); 3422 } while (exception.retry); 3423 return err; 3424 } 3425 3426 /* 3427 * The file is not closed if it is opened due to a request to change 3428 * the size of the file. The open call will not be needed once the 3429 * VFS layer lookup-intents are implemented. 3430 * 3431 * Close is called when the inode is destroyed. 3432 * If we haven't opened the file for O_WRONLY, we 3433 * need to do so in the size_change case to obtain a stateid. 3434 * 3435 * Got race? 3436 * Because OPEN is always done by name in NFSv4, it is 3437 * possible that we opened a different file by the same 3438 * name. We can recognize this race condition, but we 3439 * can't do anything about it besides returning an error. 3440 * 3441 * This will be fixed with VFS changes (lookup-intent).
3442 */ 3443 static int 3444 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 3445 struct iattr *sattr) 3446 { 3447 struct inode *inode = d_inode(dentry); 3448 struct rpc_cred *cred = NULL; 3449 struct nfs4_state *state = NULL; 3450 struct nfs4_label *label = NULL; 3451 int status; 3452 3453 if (pnfs_ld_layoutret_on_setattr(inode) && 3454 sattr->ia_valid & ATTR_SIZE && 3455 sattr->ia_size < i_size_read(inode)) 3456 pnfs_commit_and_return_layout(inode); 3457 3458 nfs_fattr_init(fattr); 3459 3460 /* Deal with open(O_TRUNC) */ 3461 if (sattr->ia_valid & ATTR_OPEN) 3462 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 3463 3464 /* Optimization: if the end result is no change, don't RPC */ 3465 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 3466 return 0; 3467 3468 /* Search for an existing open(O_WRITE) file */ 3469 if (sattr->ia_valid & ATTR_FILE) { 3470 struct nfs_open_context *ctx; 3471 3472 ctx = nfs_file_open_context(sattr->ia_file); 3473 if (ctx) { 3474 cred = ctx->cred; 3475 state = ctx->state; 3476 } 3477 } 3478 3479 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 3480 if (IS_ERR(label)) 3481 return PTR_ERR(label); 3482 3483 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label); 3484 if (status == 0) { 3485 nfs_setattr_update_inode(inode, sattr, fattr); 3486 nfs_setsecurity(inode, fattr, label); 3487 } 3488 nfs4_label_free(label); 3489 return status; 3490 } 3491 3492 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 3493 const struct qstr *name, struct nfs_fh *fhandle, 3494 struct nfs_fattr *fattr, struct nfs4_label *label) 3495 { 3496 struct nfs_server *server = NFS_SERVER(dir); 3497 int status; 3498 struct nfs4_lookup_arg args = { 3499 .bitmask = server->attr_bitmask, 3500 .dir_fh = NFS_FH(dir), 3501 .name = name, 3502 }; 3503 struct nfs4_lookup_res res = { 3504 .server = server, 3505 .fattr = fattr, 3506 .label = label, 3507 .fh = fhandle, 3508 }; 3509 struct rpc_message msg = { 3510 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 3511 .rpc_argp = &args, 3512 .rpc_resp = &res, 3513 }; 3514 3515 args.bitmask = nfs4_bitmask(server, label); 3516 3517 nfs_fattr_init(fattr); 3518 3519 dprintk("NFS call lookup %s\n", name->name); 3520 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 3521 dprintk("NFS reply lookup: %d\n", status); 3522 return status; 3523 } 3524 3525 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 3526 { 3527 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 3528 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 3529 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 3530 fattr->nlink = 2; 3531 } 3532 3533 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 3534 struct qstr *name, struct nfs_fh *fhandle, 3535 struct nfs_fattr *fattr, struct nfs4_label *label) 3536 { 3537 struct nfs4_exception exception = { }; 3538 struct rpc_clnt *client = *clnt; 3539 int err; 3540 do { 3541 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label); 3542 trace_nfs4_lookup(dir, name, err); 3543 switch (err) { 3544 case -NFS4ERR_BADNAME: 3545 err = -ENOENT; 3546 goto out; 3547 case -NFS4ERR_MOVED: 3548 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 3549 if (err == -NFS4ERR_MOVED) 3550 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3551 goto out; 3552 case -NFS4ERR_WRONGSEC: 3553 err = -EPERM; 3554 if (client != *clnt) 3555 goto out; 3556 client = nfs4_negotiate_security(client, dir, name); 3557 if 
(IS_ERR(client)) 3558 return PTR_ERR(client); 3559 3560 exception.retry = 1; 3561 break; 3562 default: 3563 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3564 } 3565 } while (exception.retry); 3566 3567 out: 3568 if (err == 0) 3569 *clnt = client; 3570 else if (client != *clnt) 3571 rpc_shutdown_client(client); 3572 3573 return err; 3574 } 3575 3576 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 3577 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 3578 struct nfs4_label *label) 3579 { 3580 int status; 3581 struct rpc_clnt *client = NFS_CLIENT(dir); 3582 3583 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label); 3584 if (client != NFS_CLIENT(dir)) { 3585 rpc_shutdown_client(client); 3586 nfs_fixup_secinfo_attributes(fattr); 3587 } 3588 return status; 3589 } 3590 3591 struct rpc_clnt * 3592 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 3593 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 3594 { 3595 struct rpc_clnt *client = NFS_CLIENT(dir); 3596 int status; 3597 3598 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL); 3599 if (status < 0) 3600 return ERR_PTR(status); 3601 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client; 3602 } 3603 3604 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3605 { 3606 struct nfs_server *server = NFS_SERVER(inode); 3607 struct nfs4_accessargs args = { 3608 .fh = NFS_FH(inode), 3609 .bitmask = server->cache_consistency_bitmask, 3610 }; 3611 struct nfs4_accessres res = { 3612 .server = server, 3613 }; 3614 struct rpc_message msg = { 3615 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 3616 .rpc_argp = &args, 3617 .rpc_resp = &res, 3618 .rpc_cred = entry->cred, 3619 }; 3620 int mode = entry->mask; 3621 int status = 0; 3622 3623 /* 3624 * Determine which access bits we want to ask for... 3625 */ 3626 if (mode & MAY_READ) 3627 args.access |= NFS4_ACCESS_READ; 3628 if (S_ISDIR(inode->i_mode)) { 3629 if (mode & MAY_WRITE) 3630 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 3631 if (mode & MAY_EXEC) 3632 args.access |= NFS4_ACCESS_LOOKUP; 3633 } else { 3634 if (mode & MAY_WRITE) 3635 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 3636 if (mode & MAY_EXEC) 3637 args.access |= NFS4_ACCESS_EXECUTE; 3638 } 3639 3640 res.fattr = nfs_alloc_fattr(); 3641 if (res.fattr == NULL) 3642 return -ENOMEM; 3643 3644 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3645 if (!status) { 3646 nfs_access_set_mask(entry, res.access); 3647 nfs_refresh_inode(inode, res.fattr); 3648 } 3649 nfs_free_fattr(res.fattr); 3650 return status; 3651 } 3652 3653 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3654 { 3655 struct nfs4_exception exception = { }; 3656 int err; 3657 do { 3658 err = _nfs4_proc_access(inode, entry); 3659 trace_nfs4_access(inode, err); 3660 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3661 &exception); 3662 } while (exception.retry); 3663 return err; 3664 } 3665 3666 /* 3667 * TODO: For the time being, we don't try to get any attributes 3668 * along with any of the zero-copy operations READ, READDIR, 3669 * READLINK, WRITE. 3670 * 3671 * In the case of the first three, we want to put the GETATTR 3672 * after the read-type operation -- this is because it is hard 3673 * to predict the length of a GETATTR response in v4, and thus 3674 * align the READ data correctly. 
This means that the GETATTR 3675 * may end up partially falling into the page cache, and we should 3676 * shift it into the 'tail' of the xdr_buf before processing. 3677 * To do this efficiently, we need to know the total length 3678 * of data received, which doesn't seem to be available outside 3679 * of the RPC layer. 3680 * 3681 * In the case of WRITE, we also want to put the GETATTR after 3682 * the operation -- in this case because we want to make sure 3683 * we get the post-operation mtime and size. 3684 * 3685 * Both of these changes to the XDR layer would in fact be quite 3686 * minor, but I decided to leave them for a subsequent patch. 3687 */ 3688 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 3689 unsigned int pgbase, unsigned int pglen) 3690 { 3691 struct nfs4_readlink args = { 3692 .fh = NFS_FH(inode), 3693 .pgbase = pgbase, 3694 .pglen = pglen, 3695 .pages = &page, 3696 }; 3697 struct nfs4_readlink_res res; 3698 struct rpc_message msg = { 3699 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 3700 .rpc_argp = &args, 3701 .rpc_resp = &res, 3702 }; 3703 3704 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 3705 } 3706 3707 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 3708 unsigned int pgbase, unsigned int pglen) 3709 { 3710 struct nfs4_exception exception = { }; 3711 int err; 3712 do { 3713 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 3714 trace_nfs4_readlink(inode, err); 3715 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3716 &exception); 3717 } while (exception.retry); 3718 return err; 3719 } 3720 3721 /* 3722 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 3723 */ 3724 static int 3725 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 3726 int flags) 3727 { 3728 struct nfs4_label l, *ilabel = NULL; 3729 struct nfs_open_context *ctx; 3730 struct nfs4_state *state; 3731 int status = 0; 3732 3733 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 3734 if (IS_ERR(ctx)) 3735 return PTR_ERR(ctx); 3736 3737 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 3738 3739 sattr->ia_mode &= ~current_umask(); 3740 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 3741 if (IS_ERR(state)) { 3742 status = PTR_ERR(state); 3743 goto out; 3744 } 3745 out: 3746 nfs4_label_release_security(ilabel); 3747 put_nfs_open_context(ctx); 3748 return status; 3749 } 3750 3751 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 3752 { 3753 struct nfs_server *server = NFS_SERVER(dir); 3754 struct nfs_removeargs args = { 3755 .fh = NFS_FH(dir), 3756 .name = *name, 3757 }; 3758 struct nfs_removeres res = { 3759 .server = server, 3760 }; 3761 struct rpc_message msg = { 3762 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 3763 .rpc_argp = &args, 3764 .rpc_resp = &res, 3765 }; 3766 int status; 3767 3768 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 3769 if (status == 0) 3770 update_changeattr(dir, &res.cinfo); 3771 return status; 3772 } 3773 3774 static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 3775 { 3776 struct nfs4_exception exception = { }; 3777 int err; 3778 do { 3779 err = _nfs4_proc_remove(dir, name); 3780 trace_nfs4_remove(dir, name, err); 3781 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3782 &exception); 3783 } while (exception.retry); 3784 return err; 3785 } 3786 3787 static void nfs4_proc_unlink_setup(struct rpc_message 
*msg, struct inode *dir) 3788 { 3789 struct nfs_server *server = NFS_SERVER(dir); 3790 struct nfs_removeargs *args = msg->rpc_argp; 3791 struct nfs_removeres *res = msg->rpc_resp; 3792 3793 res->server = server; 3794 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 3795 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1); 3796 3797 nfs_fattr_init(res->dir_attr); 3798 } 3799 3800 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 3801 { 3802 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb), 3803 &data->args.seq_args, 3804 &data->res.seq_res, 3805 task); 3806 } 3807 3808 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 3809 { 3810 struct nfs_unlinkdata *data = task->tk_calldata; 3811 struct nfs_removeres *res = &data->res; 3812 3813 if (!nfs4_sequence_done(task, &res->seq_res)) 3814 return 0; 3815 if (nfs4_async_handle_error(task, res->server, NULL, 3816 &data->timeout) == -EAGAIN) 3817 return 0; 3818 update_changeattr(dir, &res->cinfo); 3819 return 1; 3820 } 3821 3822 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 3823 { 3824 struct nfs_server *server = NFS_SERVER(dir); 3825 struct nfs_renameargs *arg = msg->rpc_argp; 3826 struct nfs_renameres *res = msg->rpc_resp; 3827 3828 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 3829 res->server = server; 3830 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1); 3831 } 3832 3833 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 3834 { 3835 nfs4_setup_sequence(NFS_SERVER(data->old_dir), 3836 &data->args.seq_args, 3837 &data->res.seq_res, 3838 task); 3839 } 3840 3841 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 3842 struct inode *new_dir) 3843 { 3844 struct nfs_renamedata *data = task->tk_calldata; 3845 struct nfs_renameres *res = &data->res; 3846 3847 if (!nfs4_sequence_done(task, &res->seq_res)) 3848 return 0; 3849 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 3850 return 0; 3851 3852 update_changeattr(old_dir, &res->old_cinfo); 3853 update_changeattr(new_dir, &res->new_cinfo); 3854 return 1; 3855 } 3856 3857 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3858 { 3859 struct nfs_server *server = NFS_SERVER(inode); 3860 struct nfs4_link_arg arg = { 3861 .fh = NFS_FH(inode), 3862 .dir_fh = NFS_FH(dir), 3863 .name = name, 3864 .bitmask = server->attr_bitmask, 3865 }; 3866 struct nfs4_link_res res = { 3867 .server = server, 3868 .label = NULL, 3869 }; 3870 struct rpc_message msg = { 3871 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3872 .rpc_argp = &arg, 3873 .rpc_resp = &res, 3874 }; 3875 int status = -ENOMEM; 3876 3877 res.fattr = nfs_alloc_fattr(); 3878 if (res.fattr == NULL) 3879 goto out; 3880 3881 res.label = nfs4_label_alloc(server, GFP_KERNEL); 3882 if (IS_ERR(res.label)) { 3883 status = PTR_ERR(res.label); 3884 goto out; 3885 } 3886 arg.bitmask = nfs4_bitmask(server, res.label); 3887 3888 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3889 if (!status) { 3890 update_changeattr(dir, &res.cinfo); 3891 status = nfs_post_op_update_inode(inode, res.fattr); 3892 if (!status) 3893 nfs_setsecurity(inode, res.fattr, res.label); 3894 } 3895 3896 3897 nfs4_label_free(res.label); 3898 3899 out: 3900 nfs_free_fattr(res.fattr); 3901 return status; 3902 } 3903 3904 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 
3905 { 3906 struct nfs4_exception exception = { }; 3907 int err; 3908 do { 3909 err = nfs4_handle_exception(NFS_SERVER(inode), 3910 _nfs4_proc_link(inode, dir, name), 3911 &exception); 3912 } while (exception.retry); 3913 return err; 3914 } 3915 3916 struct nfs4_createdata { 3917 struct rpc_message msg; 3918 struct nfs4_create_arg arg; 3919 struct nfs4_create_res res; 3920 struct nfs_fh fh; 3921 struct nfs_fattr fattr; 3922 struct nfs4_label *label; 3923 }; 3924 3925 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3926 struct qstr *name, struct iattr *sattr, u32 ftype) 3927 { 3928 struct nfs4_createdata *data; 3929 3930 data = kzalloc(sizeof(*data), GFP_KERNEL); 3931 if (data != NULL) { 3932 struct nfs_server *server = NFS_SERVER(dir); 3933 3934 data->label = nfs4_label_alloc(server, GFP_KERNEL); 3935 if (IS_ERR(data->label)) 3936 goto out_free; 3937 3938 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3939 data->msg.rpc_argp = &data->arg; 3940 data->msg.rpc_resp = &data->res; 3941 data->arg.dir_fh = NFS_FH(dir); 3942 data->arg.server = server; 3943 data->arg.name = name; 3944 data->arg.attrs = sattr; 3945 data->arg.ftype = ftype; 3946 data->arg.bitmask = nfs4_bitmask(server, data->label); 3947 data->res.server = server; 3948 data->res.fh = &data->fh; 3949 data->res.fattr = &data->fattr; 3950 data->res.label = data->label; 3951 nfs_fattr_init(data->res.fattr); 3952 } 3953 return data; 3954 out_free: 3955 kfree(data); 3956 return NULL; 3957 } 3958 3959 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3960 { 3961 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3962 &data->arg.seq_args, &data->res.seq_res, 1); 3963 if (status == 0) { 3964 update_changeattr(dir, &data->res.dir_cinfo); 3965 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); 3966 } 3967 return status; 3968 } 3969 3970 static void nfs4_free_createdata(struct nfs4_createdata *data) 3971 { 3972 nfs4_label_free(data->label); 3973 kfree(data); 3974 } 3975 3976 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3977 struct page *page, unsigned int len, struct iattr *sattr, 3978 struct nfs4_label *label) 3979 { 3980 struct nfs4_createdata *data; 3981 int status = -ENAMETOOLONG; 3982 3983 if (len > NFS4_MAXPATHLEN) 3984 goto out; 3985 3986 status = -ENOMEM; 3987 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3988 if (data == NULL) 3989 goto out; 3990 3991 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3992 data->arg.u.symlink.pages = &page; 3993 data->arg.u.symlink.len = len; 3994 data->arg.label = label; 3995 3996 status = nfs4_do_create(dir, dentry, data); 3997 3998 nfs4_free_createdata(data); 3999 out: 4000 return status; 4001 } 4002 4003 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 4004 struct page *page, unsigned int len, struct iattr *sattr) 4005 { 4006 struct nfs4_exception exception = { }; 4007 struct nfs4_label l, *label = NULL; 4008 int err; 4009 4010 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4011 4012 do { 4013 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label); 4014 trace_nfs4_symlink(dir, &dentry->d_name, err); 4015 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4016 &exception); 4017 } while (exception.retry); 4018 4019 nfs4_label_release_security(label); 4020 return err; 4021 } 4022 4023 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 4024 
struct iattr *sattr, struct nfs4_label *label) 4025 { 4026 struct nfs4_createdata *data; 4027 int status = -ENOMEM; 4028 4029 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 4030 if (data == NULL) 4031 goto out; 4032 4033 data->arg.label = label; 4034 status = nfs4_do_create(dir, dentry, data); 4035 4036 nfs4_free_createdata(data); 4037 out: 4038 return status; 4039 } 4040 4041 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 4042 struct iattr *sattr) 4043 { 4044 struct nfs4_exception exception = { }; 4045 struct nfs4_label l, *label = NULL; 4046 int err; 4047 4048 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4049 4050 sattr->ia_mode &= ~current_umask(); 4051 do { 4052 err = _nfs4_proc_mkdir(dir, dentry, sattr, label); 4053 trace_nfs4_mkdir(dir, &dentry->d_name, err); 4054 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4055 &exception); 4056 } while (exception.retry); 4057 nfs4_label_release_security(label); 4058 4059 return err; 4060 } 4061 4062 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 4063 u64 cookie, struct page **pages, unsigned int count, int plus) 4064 { 4065 struct inode *dir = d_inode(dentry); 4066 struct nfs4_readdir_arg args = { 4067 .fh = NFS_FH(dir), 4068 .pages = pages, 4069 .pgbase = 0, 4070 .count = count, 4071 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask, 4072 .plus = plus, 4073 }; 4074 struct nfs4_readdir_res res; 4075 struct rpc_message msg = { 4076 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 4077 .rpc_argp = &args, 4078 .rpc_resp = &res, 4079 .rpc_cred = cred, 4080 }; 4081 int status; 4082 4083 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__, 4084 dentry, 4085 (unsigned long long)cookie); 4086 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 4087 res.pgbase = args.pgbase; 4088 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 4089 if (status >= 0) { 4090 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 4091 status += args.pgbase; 4092 } 4093 4094 nfs_invalidate_atime(dir); 4095 4096 dprintk("%s: returns %d\n", __func__, status); 4097 return status; 4098 } 4099 4100 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 4101 u64 cookie, struct page **pages, unsigned int count, int plus) 4102 { 4103 struct nfs4_exception exception = { }; 4104 int err; 4105 do { 4106 err = _nfs4_proc_readdir(dentry, cred, cookie, 4107 pages, count, plus); 4108 trace_nfs4_readdir(d_inode(dentry), err); 4109 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err, 4110 &exception); 4111 } while (exception.retry); 4112 return err; 4113 } 4114 4115 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 4116 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 4117 { 4118 struct nfs4_createdata *data; 4119 int mode = sattr->ia_mode; 4120 int status = -ENOMEM; 4121 4122 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 4123 if (data == NULL) 4124 goto out; 4125 4126 if (S_ISFIFO(mode)) 4127 data->arg.ftype = NF4FIFO; 4128 else if (S_ISBLK(mode)) { 4129 data->arg.ftype = NF4BLK; 4130 data->arg.u.device.specdata1 = MAJOR(rdev); 4131 data->arg.u.device.specdata2 = MINOR(rdev); 4132 } 4133 else if (S_ISCHR(mode)) { 4134 data->arg.ftype = NF4CHR; 4135 data->arg.u.device.specdata1 = MAJOR(rdev); 4136 data->arg.u.device.specdata2 = MINOR(rdev); 4137 } else if (!S_ISSOCK(mode)) { 4138 status = -EINVAL; 4139 goto out_free; 4140 } 4141 
4142 data->arg.label = label; 4143 status = nfs4_do_create(dir, dentry, data); 4144 out_free: 4145 nfs4_free_createdata(data); 4146 out: 4147 return status; 4148 } 4149 4150 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 4151 struct iattr *sattr, dev_t rdev) 4152 { 4153 struct nfs4_exception exception = { }; 4154 struct nfs4_label l, *label = NULL; 4155 int err; 4156 4157 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4158 4159 sattr->ia_mode &= ~current_umask(); 4160 do { 4161 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 4162 trace_nfs4_mknod(dir, &dentry->d_name, err); 4163 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4164 &exception); 4165 } while (exception.retry); 4166 4167 nfs4_label_release_security(label); 4168 4169 return err; 4170 } 4171 4172 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 4173 struct nfs_fsstat *fsstat) 4174 { 4175 struct nfs4_statfs_arg args = { 4176 .fh = fhandle, 4177 .bitmask = server->attr_bitmask, 4178 }; 4179 struct nfs4_statfs_res res = { 4180 .fsstat = fsstat, 4181 }; 4182 struct rpc_message msg = { 4183 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 4184 .rpc_argp = &args, 4185 .rpc_resp = &res, 4186 }; 4187 4188 nfs_fattr_init(fsstat->fattr); 4189 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4190 } 4191 4192 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 4193 { 4194 struct nfs4_exception exception = { }; 4195 int err; 4196 do { 4197 err = nfs4_handle_exception(server, 4198 _nfs4_proc_statfs(server, fhandle, fsstat), 4199 &exception); 4200 } while (exception.retry); 4201 return err; 4202 } 4203 4204 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 4205 struct nfs_fsinfo *fsinfo) 4206 { 4207 struct nfs4_fsinfo_arg args = { 4208 .fh = fhandle, 4209 .bitmask = server->attr_bitmask, 4210 }; 4211 struct nfs4_fsinfo_res res = { 4212 .fsinfo = fsinfo, 4213 }; 4214 struct rpc_message msg = { 4215 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 4216 .rpc_argp = &args, 4217 .rpc_resp = &res, 4218 }; 4219 4220 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4221 } 4222 4223 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4224 { 4225 struct nfs4_exception exception = { }; 4226 unsigned long now = jiffies; 4227 int err; 4228 4229 do { 4230 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 4231 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 4232 if (err == 0) { 4233 struct nfs_client *clp = server->nfs_client; 4234 4235 spin_lock(&clp->cl_lock); 4236 clp->cl_lease_time = fsinfo->lease_time * HZ; 4237 clp->cl_last_renewal = now; 4238 spin_unlock(&clp->cl_lock); 4239 break; 4240 } 4241 err = nfs4_handle_exception(server, err, &exception); 4242 } while (exception.retry); 4243 return err; 4244 } 4245 4246 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4247 { 4248 int error; 4249 4250 nfs_fattr_init(fsinfo->fattr); 4251 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 4252 if (error == 0) { 4253 /* block layout checks this! 
*/ 4254 server->pnfs_blksize = fsinfo->blksize; 4255 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 4256 } 4257 4258 return error; 4259 } 4260 4261 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4262 struct nfs_pathconf *pathconf) 4263 { 4264 struct nfs4_pathconf_arg args = { 4265 .fh = fhandle, 4266 .bitmask = server->attr_bitmask, 4267 }; 4268 struct nfs4_pathconf_res res = { 4269 .pathconf = pathconf, 4270 }; 4271 struct rpc_message msg = { 4272 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 4273 .rpc_argp = &args, 4274 .rpc_resp = &res, 4275 }; 4276 4277 /* None of the pathconf attributes are mandatory to implement */ 4278 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 4279 memset(pathconf, 0, sizeof(*pathconf)); 4280 return 0; 4281 } 4282 4283 nfs_fattr_init(pathconf->fattr); 4284 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4285 } 4286 4287 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4288 struct nfs_pathconf *pathconf) 4289 { 4290 struct nfs4_exception exception = { }; 4291 int err; 4292 4293 do { 4294 err = nfs4_handle_exception(server, 4295 _nfs4_proc_pathconf(server, fhandle, pathconf), 4296 &exception); 4297 } while (exception.retry); 4298 return err; 4299 } 4300 4301 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 4302 const struct nfs_open_context *ctx, 4303 const struct nfs_lock_context *l_ctx, 4304 fmode_t fmode) 4305 { 4306 const struct nfs_lockowner *lockowner = NULL; 4307 4308 if (l_ctx != NULL) 4309 lockowner = &l_ctx->lockowner; 4310 return nfs4_select_rw_stateid(ctx->state, fmode, lockowner, stateid, NULL); 4311 } 4312 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 4313 4314 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 4315 const struct nfs_open_context *ctx, 4316 const struct nfs_lock_context *l_ctx, 4317 fmode_t fmode) 4318 { 4319 nfs4_stateid current_stateid; 4320 4321 /* If the current stateid represents a lost lock, then exit */ 4322 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO) 4323 return true; 4324 return nfs4_stateid_match(stateid, &current_stateid); 4325 } 4326 4327 static bool nfs4_error_stateid_expired(int err) 4328 { 4329 switch (err) { 4330 case -NFS4ERR_DELEG_REVOKED: 4331 case -NFS4ERR_ADMIN_REVOKED: 4332 case -NFS4ERR_BAD_STATEID: 4333 case -NFS4ERR_STALE_STATEID: 4334 case -NFS4ERR_OLD_STATEID: 4335 case -NFS4ERR_OPENMODE: 4336 case -NFS4ERR_EXPIRED: 4337 return true; 4338 } 4339 return false; 4340 } 4341 4342 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr) 4343 { 4344 nfs_invalidate_atime(hdr->inode); 4345 } 4346 4347 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 4348 { 4349 struct nfs_server *server = NFS_SERVER(hdr->inode); 4350 4351 trace_nfs4_read(hdr, task->tk_status); 4352 if (nfs4_async_handle_error(task, server, 4353 hdr->args.context->state, 4354 NULL) == -EAGAIN) { 4355 rpc_restart_call_prepare(task); 4356 return -EAGAIN; 4357 } 4358 4359 __nfs4_read_done_cb(hdr); 4360 if (task->tk_status > 0) 4361 renew_lease(server, hdr->timestamp); 4362 return 0; 4363 } 4364 4365 static bool nfs4_read_stateid_changed(struct rpc_task *task, 4366 struct nfs_pgio_args *args) 4367 { 4368 4369 if (!nfs4_error_stateid_expired(task->tk_status) || 4370 nfs4_stateid_is_current(&args->stateid, 4371 args->context, 4372 args->lock_context, 4373 FMODE_READ)) 4374 return false; 4375 rpc_restart_call_prepare(task); 4376 return true; 4377 } 4378 4379 static int
nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4380 { 4381 4382 dprintk("--> %s\n", __func__); 4383 4384 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4385 return -EAGAIN; 4386 if (nfs4_read_stateid_changed(task, &hdr->args)) 4387 return -EAGAIN; 4388 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 4389 nfs4_read_done_cb(task, hdr); 4390 } 4391 4392 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 4393 struct rpc_message *msg) 4394 { 4395 hdr->timestamp = jiffies; 4396 hdr->pgio_done_cb = nfs4_read_done_cb; 4397 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 4398 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0); 4399 } 4400 4401 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 4402 struct nfs_pgio_header *hdr) 4403 { 4404 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode), 4405 &hdr->args.seq_args, 4406 &hdr->res.seq_res, 4407 task)) 4408 return 0; 4409 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 4410 hdr->args.lock_context, 4411 hdr->rw_ops->rw_mode) == -EIO) 4412 return -EIO; 4413 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 4414 return -EIO; 4415 return 0; 4416 } 4417 4418 static int nfs4_write_done_cb(struct rpc_task *task, 4419 struct nfs_pgio_header *hdr) 4420 { 4421 struct inode *inode = hdr->inode; 4422 4423 trace_nfs4_write(hdr, task->tk_status); 4424 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4425 hdr->args.context->state, 4426 NULL) == -EAGAIN) { 4427 rpc_restart_call_prepare(task); 4428 return -EAGAIN; 4429 } 4430 if (task->tk_status >= 0) { 4431 renew_lease(NFS_SERVER(inode), hdr->timestamp); 4432 nfs_writeback_update_inode(hdr); 4433 } 4434 return 0; 4435 } 4436 4437 static bool nfs4_write_stateid_changed(struct rpc_task *task, 4438 struct nfs_pgio_args *args) 4439 { 4440 4441 if (!nfs4_error_stateid_expired(task->tk_status) || 4442 nfs4_stateid_is_current(&args->stateid, 4443 args->context, 4444 args->lock_context, 4445 FMODE_WRITE)) 4446 return false; 4447 rpc_restart_call_prepare(task); 4448 return true; 4449 } 4450 4451 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4452 { 4453 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4454 return -EAGAIN; 4455 if (nfs4_write_stateid_changed(task, &hdr->args)) 4456 return -EAGAIN; 4457 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 4458 nfs4_write_done_cb(task, hdr); 4459 } 4460 4461 static 4462 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 4463 { 4464 /* Don't request attributes for pNFS or O_DIRECT writes */ 4465 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 4466 return false; 4467 /* Otherwise, request attributes if and only if we don't hold 4468 * a delegation 4469 */ 4470 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 4471 } 4472 4473 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 4474 struct rpc_message *msg) 4475 { 4476 struct nfs_server *server = NFS_SERVER(hdr->inode); 4477 4478 if (!nfs4_write_need_cache_consistency_data(hdr)) { 4479 hdr->args.bitmask = NULL; 4480 hdr->res.fattr = NULL; 4481 } else 4482 hdr->args.bitmask = server->cache_consistency_bitmask; 4483 4484 if (!hdr->pgio_done_cb) 4485 hdr->pgio_done_cb = nfs4_write_done_cb; 4486 hdr->res.server = server; 4487 hdr->timestamp = jiffies; 4488 4489 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 4490 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1); 4491 } 4492 4493 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 4494 { 4495 nfs4_setup_sequence(NFS_SERVER(data->inode), 4496 &data->args.seq_args, 4497 &data->res.seq_res, 4498 task); 4499 } 4500 4501 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 4502 { 4503 struct inode *inode = data->inode; 4504 4505 trace_nfs4_commit(data, task->tk_status); 4506 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4507 NULL, NULL) == -EAGAIN) { 4508 rpc_restart_call_prepare(task); 4509 return -EAGAIN; 4510 } 4511 return 0; 4512 } 4513 4514 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 4515 { 4516 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4517 return -EAGAIN; 4518 return data->commit_done_cb(task, data); 4519 } 4520 4521 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 4522 { 4523 struct nfs_server *server = NFS_SERVER(data->inode); 4524 4525 if (data->commit_done_cb == NULL) 4526 data->commit_done_cb = nfs4_commit_done_cb; 4527 data->res.server = server; 4528 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 4529 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4530 } 4531 4532 struct nfs4_renewdata { 4533 struct nfs_client *client; 4534 unsigned long timestamp; 4535 }; 4536 4537 /* 4538 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 4539 * standalone procedure for queueing an asynchronous RENEW. 4540 */ 4541 static void nfs4_renew_release(void *calldata) 4542 { 4543 struct nfs4_renewdata *data = calldata; 4544 struct nfs_client *clp = data->client; 4545 4546 if (atomic_read(&clp->cl_count) > 1) 4547 nfs4_schedule_state_renewal(clp); 4548 nfs_put_client(clp); 4549 kfree(data); 4550 } 4551 4552 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 4553 { 4554 struct nfs4_renewdata *data = calldata; 4555 struct nfs_client *clp = data->client; 4556 unsigned long timestamp = data->timestamp; 4557 4558 trace_nfs4_renew_async(clp, task->tk_status); 4559 switch (task->tk_status) { 4560 case 0: 4561 break; 4562 case -NFS4ERR_LEASE_MOVED: 4563 nfs4_schedule_lease_moved_recovery(clp); 4564 break; 4565 default: 4566 /* Unless we're shutting down, schedule state recovery! 
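* A failed RENEW normally forces a full lease recovery; the one exception,
* handled below, is NFS4ERR_CB_PATH_DOWN, which only tells us the server
* could not reach our callback channel, so the lighter path-down recovery
* is sufficient.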
*/ 4567 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 4568 return; 4569 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 4570 nfs4_schedule_lease_recovery(clp); 4571 return; 4572 } 4573 nfs4_schedule_path_down_recovery(clp); 4574 } 4575 do_renew_lease(clp, timestamp); 4576 } 4577 4578 static const struct rpc_call_ops nfs4_renew_ops = { 4579 .rpc_call_done = nfs4_renew_done, 4580 .rpc_release = nfs4_renew_release, 4581 }; 4582 4583 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 4584 { 4585 struct rpc_message msg = { 4586 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4587 .rpc_argp = clp, 4588 .rpc_cred = cred, 4589 }; 4590 struct nfs4_renewdata *data; 4591 4592 if (renew_flags == 0) 4593 return 0; 4594 if (!atomic_inc_not_zero(&clp->cl_count)) 4595 return -EIO; 4596 data = kmalloc(sizeof(*data), GFP_NOFS); 4597 if (data == NULL) 4598 return -ENOMEM; 4599 data->client = clp; 4600 data->timestamp = jiffies; 4601 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 4602 &nfs4_renew_ops, data); 4603 } 4604 4605 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 4606 { 4607 struct rpc_message msg = { 4608 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4609 .rpc_argp = clp, 4610 .rpc_cred = cred, 4611 }; 4612 unsigned long now = jiffies; 4613 int status; 4614 4615 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4616 if (status < 0) 4617 return status; 4618 do_renew_lease(clp, now); 4619 return 0; 4620 } 4621 4622 static inline int nfs4_server_supports_acls(struct nfs_server *server) 4623 { 4624 return server->caps & NFS_CAP_ACLS; 4625 } 4626 4627 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 4628 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 4629 * the stack. 
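* (Worked example, assuming the common 4 KiB page size: XATTR_SIZE_MAX is
* 64 KiB, so NFS4ACL_MAXPAGES below evaluates to 16, and each on-stack
* pages[] array is 16 struct page pointers, i.e. 128 bytes on a 64-bit
* build.)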
4630 */ 4631 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 4632 4633 static int buf_to_pages_noslab(const void *buf, size_t buflen, 4634 struct page **pages) 4635 { 4636 struct page *newpage, **spages; 4637 int rc = 0; 4638 size_t len; 4639 spages = pages; 4640 4641 do { 4642 len = min_t(size_t, PAGE_SIZE, buflen); 4643 newpage = alloc_page(GFP_KERNEL); 4644 4645 if (newpage == NULL) 4646 goto unwind; 4647 memcpy(page_address(newpage), buf, len); 4648 buf += len; 4649 buflen -= len; 4650 *pages++ = newpage; 4651 rc++; 4652 } while (buflen != 0); 4653 4654 return rc; 4655 4656 unwind: 4657 for(; rc > 0; rc--) 4658 __free_page(spages[rc-1]); 4659 return -ENOMEM; 4660 } 4661 4662 struct nfs4_cached_acl { 4663 int cached; 4664 size_t len; 4665 char data[0]; 4666 }; 4667 4668 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 4669 { 4670 struct nfs_inode *nfsi = NFS_I(inode); 4671 4672 spin_lock(&inode->i_lock); 4673 kfree(nfsi->nfs4_acl); 4674 nfsi->nfs4_acl = acl; 4675 spin_unlock(&inode->i_lock); 4676 } 4677 4678 static void nfs4_zap_acl_attr(struct inode *inode) 4679 { 4680 nfs4_set_cached_acl(inode, NULL); 4681 } 4682 4683 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 4684 { 4685 struct nfs_inode *nfsi = NFS_I(inode); 4686 struct nfs4_cached_acl *acl; 4687 int ret = -ENOENT; 4688 4689 spin_lock(&inode->i_lock); 4690 acl = nfsi->nfs4_acl; 4691 if (acl == NULL) 4692 goto out; 4693 if (buf == NULL) /* user is just asking for length */ 4694 goto out_len; 4695 if (acl->cached == 0) 4696 goto out; 4697 ret = -ERANGE; /* see getxattr(2) man page */ 4698 if (acl->len > buflen) 4699 goto out; 4700 memcpy(buf, acl->data, acl->len); 4701 out_len: 4702 ret = acl->len; 4703 out: 4704 spin_unlock(&inode->i_lock); 4705 return ret; 4706 } 4707 4708 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 4709 { 4710 struct nfs4_cached_acl *acl; 4711 size_t buflen = sizeof(*acl) + acl_len; 4712 4713 if (buflen <= PAGE_SIZE) { 4714 acl = kmalloc(buflen, GFP_KERNEL); 4715 if (acl == NULL) 4716 goto out; 4717 acl->cached = 1; 4718 _copy_from_pages(acl->data, pages, pgbase, acl_len); 4719 } else { 4720 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 4721 if (acl == NULL) 4722 goto out; 4723 acl->cached = 0; 4724 } 4725 acl->len = acl_len; 4726 out: 4727 nfs4_set_cached_acl(inode, acl); 4728 } 4729 4730 /* 4731 * The getxattr API returns the required buffer length when called with a 4732 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 4733 * the required buf. On a NULL buf, we send a page of data to the server 4734 * guessing that the ACL request can be serviced by a page. If so, we cache 4735 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 4736 * the cache. If not so, we throw away the page, and cache the required 4737 * length. The next getxattr call will then produce another round trip to 4738 * the server, this time with the input buf of the required size. 
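*
* A minimal userspace sketch of that two-call pattern (illustrative only;
* it assumes the ACL is read through the "system.nfs4_acl" xattr and needs
* <sys/xattr.h> plus <stdlib.h>):
*
*	ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
*	if (len > 0) {
*		char *aclbuf = malloc(len);
*		if (aclbuf != NULL)
*			len = getxattr(path, "system.nfs4_acl", aclbuf, len);
*	}
*
* The first getxattr() passes a NULL buffer and only retrieves the length;
* the second is satisfied from the cache filled in below whenever the ACL
* fit in a page, otherwise only the length was cached and another round
* trip to the server is needed.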
4739 */ 4740 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4741 { 4742 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 4743 struct nfs_getaclargs args = { 4744 .fh = NFS_FH(inode), 4745 .acl_pages = pages, 4746 .acl_len = buflen, 4747 }; 4748 struct nfs_getaclres res = { 4749 .acl_len = buflen, 4750 }; 4751 struct rpc_message msg = { 4752 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 4753 .rpc_argp = &args, 4754 .rpc_resp = &res, 4755 }; 4756 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4757 int ret = -ENOMEM, i; 4758 4759 /* As long as we're doing a round trip to the server anyway, 4760 * let's be prepared for a page of acl data. */ 4761 if (npages == 0) 4762 npages = 1; 4763 if (npages > ARRAY_SIZE(pages)) 4764 return -ERANGE; 4765 4766 for (i = 0; i < npages; i++) { 4767 pages[i] = alloc_page(GFP_KERNEL); 4768 if (!pages[i]) 4769 goto out_free; 4770 } 4771 4772 /* for decoding across pages */ 4773 res.acl_scratch = alloc_page(GFP_KERNEL); 4774 if (!res.acl_scratch) 4775 goto out_free; 4776 4777 args.acl_len = npages * PAGE_SIZE; 4778 4779 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 4780 __func__, buf, buflen, npages, args.acl_len); 4781 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 4782 &msg, &args.seq_args, &res.seq_res, 0); 4783 if (ret) 4784 goto out_free; 4785 4786 /* Handle the case where the passed-in buffer is too short */ 4787 if (res.acl_flags & NFS4_ACL_TRUNC) { 4788 /* Did the user only issue a request for the acl length? */ 4789 if (buf == NULL) 4790 goto out_ok; 4791 ret = -ERANGE; 4792 goto out_free; 4793 } 4794 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 4795 if (buf) { 4796 if (res.acl_len > buflen) { 4797 ret = -ERANGE; 4798 goto out_free; 4799 } 4800 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 4801 } 4802 out_ok: 4803 ret = res.acl_len; 4804 out_free: 4805 for (i = 0; i < npages; i++) 4806 if (pages[i]) 4807 __free_page(pages[i]); 4808 if (res.acl_scratch) 4809 __free_page(res.acl_scratch); 4810 return ret; 4811 } 4812 4813 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4814 { 4815 struct nfs4_exception exception = { }; 4816 ssize_t ret; 4817 do { 4818 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 4819 trace_nfs4_get_acl(inode, ret); 4820 if (ret >= 0) 4821 break; 4822 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 4823 } while (exception.retry); 4824 return ret; 4825 } 4826 4827 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 4828 { 4829 struct nfs_server *server = NFS_SERVER(inode); 4830 int ret; 4831 4832 if (!nfs4_server_supports_acls(server)) 4833 return -EOPNOTSUPP; 4834 ret = nfs_revalidate_inode(server, inode); 4835 if (ret < 0) 4836 return ret; 4837 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 4838 nfs_zap_acl_cache(inode); 4839 ret = nfs4_read_cached_acl(inode, buf, buflen); 4840 if (ret != -ENOENT) 4841 /* -ENOENT is returned if there is no ACL or if there is an ACL 4842 * but no cached acl data, just the acl length */ 4843 return ret; 4844 return nfs4_get_acl_uncached(inode, buf, buflen); 4845 } 4846 4847 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4848 { 4849 struct nfs_server *server = NFS_SERVER(inode); 4850 struct page *pages[NFS4ACL_MAXPAGES]; 4851 struct nfs_setaclargs arg = { 4852 .fh = NFS_FH(inode), 4853 .acl_pages = pages, 4854 .acl_len = buflen, 4855 }; 4856 struct 
nfs_setaclres res; 4857 struct rpc_message msg = { 4858 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 4859 .rpc_argp = &arg, 4860 .rpc_resp = &res, 4861 }; 4862 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4863 int ret, i; 4864 4865 if (!nfs4_server_supports_acls(server)) 4866 return -EOPNOTSUPP; 4867 if (npages > ARRAY_SIZE(pages)) 4868 return -ERANGE; 4869 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages); 4870 if (i < 0) 4871 return i; 4872 nfs4_inode_return_delegation(inode); 4873 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4874 4875 /* 4876 * Free each page after tx, so the only ref left is 4877 * held by the network stack 4878 */ 4879 for (; i > 0; i--) 4880 put_page(pages[i-1]); 4881 4882 /* 4883 * Acl update can result in inode attribute update. 4884 * so mark the attribute cache invalid. 4885 */ 4886 spin_lock(&inode->i_lock); 4887 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 4888 spin_unlock(&inode->i_lock); 4889 nfs_access_zap_cache(inode); 4890 nfs_zap_acl_cache(inode); 4891 return ret; 4892 } 4893 4894 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4895 { 4896 struct nfs4_exception exception = { }; 4897 int err; 4898 do { 4899 err = __nfs4_proc_set_acl(inode, buf, buflen); 4900 trace_nfs4_set_acl(inode, err); 4901 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4902 &exception); 4903 } while (exception.retry); 4904 return err; 4905 } 4906 4907 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 4908 static int _nfs4_get_security_label(struct inode *inode, void *buf, 4909 size_t buflen) 4910 { 4911 struct nfs_server *server = NFS_SERVER(inode); 4912 struct nfs_fattr fattr; 4913 struct nfs4_label label = {0, 0, buflen, buf}; 4914 4915 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4916 struct nfs4_getattr_arg arg = { 4917 .fh = NFS_FH(inode), 4918 .bitmask = bitmask, 4919 }; 4920 struct nfs4_getattr_res res = { 4921 .fattr = &fattr, 4922 .label = &label, 4923 .server = server, 4924 }; 4925 struct rpc_message msg = { 4926 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4927 .rpc_argp = &arg, 4928 .rpc_resp = &res, 4929 }; 4930 int ret; 4931 4932 nfs_fattr_init(&fattr); 4933 4934 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 4935 if (ret) 4936 return ret; 4937 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 4938 return -ENOENT; 4939 if (buflen < label.len) 4940 return -ERANGE; 4941 return 0; 4942 } 4943 4944 static int nfs4_get_security_label(struct inode *inode, void *buf, 4945 size_t buflen) 4946 { 4947 struct nfs4_exception exception = { }; 4948 int err; 4949 4950 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 4951 return -EOPNOTSUPP; 4952 4953 do { 4954 err = _nfs4_get_security_label(inode, buf, buflen); 4955 trace_nfs4_get_security_label(inode, err); 4956 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4957 &exception); 4958 } while (exception.retry); 4959 return err; 4960 } 4961 4962 static int _nfs4_do_set_security_label(struct inode *inode, 4963 struct nfs4_label *ilabel, 4964 struct nfs_fattr *fattr, 4965 struct nfs4_label *olabel) 4966 { 4967 4968 struct iattr sattr = {0}; 4969 struct nfs_server *server = NFS_SERVER(inode); 4970 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4971 struct nfs_setattrargs arg = { 4972 .fh = NFS_FH(inode), 4973 .iap = &sattr, 4974 .server = server, 4975 .bitmask = bitmask, 4976 .label = ilabel, 4977 }; 4978 struct nfs_setattrres res = { 4979 .fattr = fattr, 4980 
.label = olabel, 4981 .server = server, 4982 }; 4983 struct rpc_message msg = { 4984 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 4985 .rpc_argp = &arg, 4986 .rpc_resp = &res, 4987 }; 4988 int status; 4989 4990 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 4991 4992 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4993 if (status) 4994 dprintk("%s failed: %d\n", __func__, status); 4995 4996 return status; 4997 } 4998 4999 static int nfs4_do_set_security_label(struct inode *inode, 5000 struct nfs4_label *ilabel, 5001 struct nfs_fattr *fattr, 5002 struct nfs4_label *olabel) 5003 { 5004 struct nfs4_exception exception = { }; 5005 int err; 5006 5007 do { 5008 err = _nfs4_do_set_security_label(inode, ilabel, 5009 fattr, olabel); 5010 trace_nfs4_set_security_label(inode, err); 5011 err = nfs4_handle_exception(NFS_SERVER(inode), err, 5012 &exception); 5013 } while (exception.retry); 5014 return err; 5015 } 5016 5017 static int 5018 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 5019 { 5020 struct nfs4_label ilabel, *olabel = NULL; 5021 struct nfs_fattr fattr; 5022 struct rpc_cred *cred; 5023 int status; 5024 5025 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 5026 return -EOPNOTSUPP; 5027 5028 nfs_fattr_init(&fattr); 5029 5030 ilabel.pi = 0; 5031 ilabel.lfs = 0; 5032 ilabel.label = (char *)buf; 5033 ilabel.len = buflen; 5034 5035 cred = rpc_lookup_cred(); 5036 if (IS_ERR(cred)) 5037 return PTR_ERR(cred); 5038 5039 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 5040 if (IS_ERR(olabel)) { 5041 status = PTR_ERR(olabel); 5042 goto out; 5043 } 5044 5045 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel); 5046 if (status == 0) 5047 nfs_setsecurity(inode, &fattr, olabel); 5048 5049 nfs4_label_free(olabel); 5050 out: 5051 put_rpccred(cred); 5052 return status; 5053 } 5054 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 5055 5056 5057 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 5058 nfs4_verifier *bootverf) 5059 { 5060 __be32 verf[2]; 5061 5062 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 5063 /* An impossible timestamp guarantees this value 5064 * will never match a generated boot time. */ 5065 verf[0] = 0; 5066 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1); 5067 } else { 5068 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 5069 verf[0] = cpu_to_be32(nn->boot_time.tv_sec); 5070 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec); 5071 } 5072 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 5073 } 5074 5075 static int 5076 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 5077 { 5078 size_t len; 5079 char *str; 5080 5081 if (clp->cl_owner_id != NULL) 5082 return 0; 5083 5084 rcu_read_lock(); 5085 len = 14 + strlen(clp->cl_ipaddr) + 1 + 5086 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 5087 1 + 5088 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) + 5089 1; 5090 rcu_read_unlock(); 5091 5092 if (len > NFS4_OPAQUE_LIMIT + 1) 5093 return -EINVAL; 5094 5095 /* 5096 * Since this string is allocated at mount time, and held until the 5097 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5098 * about a memory-reclaim deadlock.
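* For reference, the identifier built below has the form
* "Linux NFSv4.0 <clientaddr>/<serveraddr> <transport>", for example
* "Linux NFSv4.0 192.0.2.10/192.0.2.1 tcp" (addresses illustrative).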
5099 */ 5100 str = kmalloc(len, GFP_KERNEL); 5101 if (!str) 5102 return -ENOMEM; 5103 5104 rcu_read_lock(); 5105 scnprintf(str, len, "Linux NFSv4.0 %s/%s %s", 5106 clp->cl_ipaddr, 5107 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR), 5108 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)); 5109 rcu_read_unlock(); 5110 5111 clp->cl_owner_id = str; 5112 return 0; 5113 } 5114 5115 static int 5116 nfs4_init_uniquifier_client_string(struct nfs_client *clp) 5117 { 5118 size_t len; 5119 char *str; 5120 5121 len = 10 + 10 + 1 + 10 + 1 + 5122 strlen(nfs4_client_id_uniquifier) + 1 + 5123 strlen(clp->cl_rpcclient->cl_nodename) + 1; 5124 5125 if (len > NFS4_OPAQUE_LIMIT + 1) 5126 return -EINVAL; 5127 5128 /* 5129 * Since this string is allocated at mount time, and held until the 5130 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5131 * about a memory-reclaim deadlock. 5132 */ 5133 str = kmalloc(len, GFP_KERNEL); 5134 if (!str) 5135 return -ENOMEM; 5136 5137 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 5138 clp->rpc_ops->version, clp->cl_minorversion, 5139 nfs4_client_id_uniquifier, 5140 clp->cl_rpcclient->cl_nodename); 5141 clp->cl_owner_id = str; 5142 return 0; 5143 } 5144 5145 static int 5146 nfs4_init_uniform_client_string(struct nfs_client *clp) 5147 { 5148 size_t len; 5149 char *str; 5150 5151 if (clp->cl_owner_id != NULL) 5152 return 0; 5153 5154 if (nfs4_client_id_uniquifier[0] != '\0') 5155 return nfs4_init_uniquifier_client_string(clp); 5156 5157 len = 10 + 10 + 1 + 10 + 1 + 5158 strlen(clp->cl_rpcclient->cl_nodename) + 1; 5159 5160 if (len > NFS4_OPAQUE_LIMIT + 1) 5161 return -EINVAL; 5162 5163 /* 5164 * Since this string is allocated at mount time, and held until the 5165 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5166 * about a memory-reclaim deadlock. 5167 */ 5168 str = kmalloc(len, GFP_KERNEL); 5169 if (!str) 5170 return -ENOMEM; 5171 5172 scnprintf(str, len, "Linux NFSv%u.%u %s", 5173 clp->rpc_ops->version, clp->cl_minorversion, 5174 clp->cl_rpcclient->cl_nodename); 5175 clp->cl_owner_id = str; 5176 return 0; 5177 } 5178 5179 /* 5180 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 5181 * services. Advertise one based on the address family of the 5182 * clientaddr. 5183 */ 5184 static unsigned int 5185 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 5186 { 5187 if (strchr(clp->cl_ipaddr, ':') != NULL) 5188 return scnprintf(buf, len, "tcp6"); 5189 else 5190 return scnprintf(buf, len, "tcp"); 5191 } 5192 5193 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 5194 { 5195 struct nfs4_setclientid *sc = calldata; 5196 5197 if (task->tk_status == 0) 5198 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 5199 } 5200 5201 static const struct rpc_call_ops nfs4_setclientid_ops = { 5202 .rpc_call_done = nfs4_setclientid_done, 5203 }; 5204 5205 /** 5206 * nfs4_proc_setclientid - Negotiate client ID 5207 * @clp: state data structure 5208 * @program: RPC program for NFSv4 callback service 5209 * @port: IP port number for NFS4 callback service 5210 * @cred: RPC credential to use for this call 5211 * @res: where to place the result 5212 * 5213 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
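*
* SETCLIENTID is only the first half of the NFSv4.0 client ID handshake:
* a successful result is normally handed straight to
* nfs4_proc_setclientid_confirm() below to complete establishment.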
5214 */ 5215 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 5216 unsigned short port, struct rpc_cred *cred, 5217 struct nfs4_setclientid_res *res) 5218 { 5219 nfs4_verifier sc_verifier; 5220 struct nfs4_setclientid setclientid = { 5221 .sc_verifier = &sc_verifier, 5222 .sc_prog = program, 5223 .sc_clnt = clp, 5224 }; 5225 struct rpc_message msg = { 5226 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 5227 .rpc_argp = &setclientid, 5228 .rpc_resp = res, 5229 .rpc_cred = cred, 5230 }; 5231 struct rpc_task *task; 5232 struct rpc_task_setup task_setup_data = { 5233 .rpc_client = clp->cl_rpcclient, 5234 .rpc_message = &msg, 5235 .callback_ops = &nfs4_setclientid_ops, 5236 .callback_data = &setclientid, 5237 .flags = RPC_TASK_TIMEOUT, 5238 }; 5239 int status; 5240 5241 /* nfs_client_id4 */ 5242 nfs4_init_boot_verifier(clp, &sc_verifier); 5243 5244 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 5245 status = nfs4_init_uniform_client_string(clp); 5246 else 5247 status = nfs4_init_nonuniform_client_string(clp); 5248 5249 if (status) 5250 goto out; 5251 5252 /* cb_client4 */ 5253 setclientid.sc_netid_len = 5254 nfs4_init_callback_netid(clp, 5255 setclientid.sc_netid, 5256 sizeof(setclientid.sc_netid)); 5257 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 5258 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 5259 clp->cl_ipaddr, port >> 8, port & 255); 5260 5261 dprintk("NFS call setclientid auth=%s, '%s'\n", 5262 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5263 clp->cl_owner_id); 5264 task = rpc_run_task(&task_setup_data); 5265 if (IS_ERR(task)) { 5266 status = PTR_ERR(task); 5267 goto out; 5268 } 5269 status = task->tk_status; 5270 if (setclientid.sc_cred) { 5271 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 5272 put_rpccred(setclientid.sc_cred); 5273 } 5274 rpc_put_task(task); 5275 out: 5276 trace_nfs4_setclientid(clp, status); 5277 dprintk("NFS reply setclientid: %d\n", status); 5278 return status; 5279 } 5280 5281 /** 5282 * nfs4_proc_setclientid_confirm - Confirm client ID 5283 * @clp: state data structure 5284 * @res: result of a previous SETCLIENTID 5285 * @cred: RPC credential to use for this call 5286 * 5287 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
5288 */ 5289 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 5290 struct nfs4_setclientid_res *arg, 5291 struct rpc_cred *cred) 5292 { 5293 struct rpc_message msg = { 5294 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 5295 .rpc_argp = arg, 5296 .rpc_cred = cred, 5297 }; 5298 int status; 5299 5300 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 5301 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5302 clp->cl_clientid); 5303 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5304 trace_nfs4_setclientid_confirm(clp, status); 5305 dprintk("NFS reply setclientid_confirm: %d\n", status); 5306 return status; 5307 } 5308 5309 struct nfs4_delegreturndata { 5310 struct nfs4_delegreturnargs args; 5311 struct nfs4_delegreturnres res; 5312 struct nfs_fh fh; 5313 nfs4_stateid stateid; 5314 unsigned long timestamp; 5315 struct nfs_fattr fattr; 5316 int rpc_status; 5317 struct inode *inode; 5318 bool roc; 5319 u32 roc_barrier; 5320 }; 5321 5322 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 5323 { 5324 struct nfs4_delegreturndata *data = calldata; 5325 5326 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5327 return; 5328 5329 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 5330 switch (task->tk_status) { 5331 case 0: 5332 renew_lease(data->res.server, data->timestamp); 5333 case -NFS4ERR_ADMIN_REVOKED: 5334 case -NFS4ERR_DELEG_REVOKED: 5335 case -NFS4ERR_BAD_STATEID: 5336 case -NFS4ERR_OLD_STATEID: 5337 case -NFS4ERR_STALE_STATEID: 5338 case -NFS4ERR_EXPIRED: 5339 task->tk_status = 0; 5340 if (data->roc) 5341 pnfs_roc_set_barrier(data->inode, data->roc_barrier); 5342 break; 5343 default: 5344 if (nfs4_async_handle_error(task, data->res.server, 5345 NULL, NULL) == -EAGAIN) { 5346 rpc_restart_call_prepare(task); 5347 return; 5348 } 5349 } 5350 data->rpc_status = task->tk_status; 5351 } 5352 5353 static void nfs4_delegreturn_release(void *calldata) 5354 { 5355 struct nfs4_delegreturndata *data = calldata; 5356 struct inode *inode = data->inode; 5357 5358 if (inode) { 5359 if (data->roc) 5360 pnfs_roc_release(inode); 5361 nfs_iput_and_deactive(inode); 5362 } 5363 kfree(calldata); 5364 } 5365 5366 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 5367 { 5368 struct nfs4_delegreturndata *d_data; 5369 5370 d_data = (struct nfs4_delegreturndata *)data; 5371 5372 if (nfs4_wait_on_layoutreturn(d_data->inode, task)) 5373 return; 5374 5375 if (d_data->roc) 5376 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier); 5377 5378 nfs4_setup_sequence(d_data->res.server, 5379 &d_data->args.seq_args, 5380 &d_data->res.seq_res, 5381 task); 5382 } 5383 5384 static const struct rpc_call_ops nfs4_delegreturn_ops = { 5385 .rpc_call_prepare = nfs4_delegreturn_prepare, 5386 .rpc_call_done = nfs4_delegreturn_done, 5387 .rpc_release = nfs4_delegreturn_release, 5388 }; 5389 5390 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5391 { 5392 struct nfs4_delegreturndata *data; 5393 struct nfs_server *server = NFS_SERVER(inode); 5394 struct rpc_task *task; 5395 struct rpc_message msg = { 5396 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 5397 .rpc_cred = cred, 5398 }; 5399 struct rpc_task_setup task_setup_data = { 5400 .rpc_client = server->client, 5401 .rpc_message = &msg, 5402 .callback_ops = &nfs4_delegreturn_ops, 5403 .flags = RPC_TASK_ASYNC, 5404 }; 5405 int status = 0; 5406 5407 data = kzalloc(sizeof(*data), 
GFP_NOFS); 5408 if (data == NULL) 5409 return -ENOMEM; 5410 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 5411 5412 nfs4_state_protect(server->nfs_client, 5413 NFS_SP4_MACH_CRED_CLEANUP, 5414 &task_setup_data.rpc_client, &msg); 5415 5416 data->args.fhandle = &data->fh; 5417 data->args.stateid = &data->stateid; 5418 data->args.bitmask = server->cache_consistency_bitmask; 5419 nfs_copy_fh(&data->fh, NFS_FH(inode)); 5420 nfs4_stateid_copy(&data->stateid, stateid); 5421 data->res.fattr = &data->fattr; 5422 data->res.server = server; 5423 nfs_fattr_init(data->res.fattr); 5424 data->timestamp = jiffies; 5425 data->rpc_status = 0; 5426 data->inode = nfs_igrab_and_active(inode); 5427 if (data->inode) 5428 data->roc = nfs4_roc(inode); 5429 5430 task_setup_data.callback_data = data; 5431 msg.rpc_argp = &data->args; 5432 msg.rpc_resp = &data->res; 5433 task = rpc_run_task(&task_setup_data); 5434 if (IS_ERR(task)) 5435 return PTR_ERR(task); 5436 if (!issync) 5437 goto out; 5438 status = nfs4_wait_for_completion_rpc_task(task); 5439 if (status != 0) 5440 goto out; 5441 status = data->rpc_status; 5442 if (status == 0) 5443 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 5444 else 5445 nfs_refresh_inode(inode, &data->fattr); 5446 out: 5447 rpc_put_task(task); 5448 return status; 5449 } 5450 5451 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5452 { 5453 struct nfs_server *server = NFS_SERVER(inode); 5454 struct nfs4_exception exception = { }; 5455 int err; 5456 do { 5457 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 5458 trace_nfs4_delegreturn(inode, stateid, err); 5459 switch (err) { 5460 case -NFS4ERR_STALE_STATEID: 5461 case -NFS4ERR_EXPIRED: 5462 case 0: 5463 return 0; 5464 } 5465 err = nfs4_handle_exception(server, err, &exception); 5466 } while (exception.retry); 5467 return err; 5468 } 5469 5470 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 5471 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 5472 5473 /* 5474 * sleep, with exponential backoff, and retry the LOCK operation. 
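* Assuming the caller seeds the delay with NFS4_LOCK_MINTIMEOUT, the waits
* work out to 1s, 2s, 4s, 8s, 16s and then 30s (NFS4_LOCK_MAXTIMEOUT) for
* every retry after that.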
5475 */ 5476 static unsigned long 5477 nfs4_set_lock_task_retry(unsigned long timeout) 5478 { 5479 freezable_schedule_timeout_killable_unsafe(timeout); 5480 timeout <<= 1; 5481 if (timeout > NFS4_LOCK_MAXTIMEOUT) 5482 return NFS4_LOCK_MAXTIMEOUT; 5483 return timeout; 5484 } 5485 5486 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5487 { 5488 struct inode *inode = state->inode; 5489 struct nfs_server *server = NFS_SERVER(inode); 5490 struct nfs_client *clp = server->nfs_client; 5491 struct nfs_lockt_args arg = { 5492 .fh = NFS_FH(inode), 5493 .fl = request, 5494 }; 5495 struct nfs_lockt_res res = { 5496 .denied = request, 5497 }; 5498 struct rpc_message msg = { 5499 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 5500 .rpc_argp = &arg, 5501 .rpc_resp = &res, 5502 .rpc_cred = state->owner->so_cred, 5503 }; 5504 struct nfs4_lock_state *lsp; 5505 int status; 5506 5507 arg.lock_owner.clientid = clp->cl_clientid; 5508 status = nfs4_set_lock_state(state, request); 5509 if (status != 0) 5510 goto out; 5511 lsp = request->fl_u.nfs4_fl.owner; 5512 arg.lock_owner.id = lsp->ls_seqid.owner_id; 5513 arg.lock_owner.s_dev = server->s_dev; 5514 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5515 switch (status) { 5516 case 0: 5517 request->fl_type = F_UNLCK; 5518 break; 5519 case -NFS4ERR_DENIED: 5520 status = 0; 5521 } 5522 request->fl_ops->fl_release_private(request); 5523 request->fl_ops = NULL; 5524 out: 5525 return status; 5526 } 5527 5528 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5529 { 5530 struct nfs4_exception exception = { }; 5531 int err; 5532 5533 do { 5534 err = _nfs4_proc_getlk(state, cmd, request); 5535 trace_nfs4_get_lock(request, state, cmd, err); 5536 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 5537 &exception); 5538 } while (exception.retry); 5539 return err; 5540 } 5541 5542 static int do_vfs_lock(struct inode *inode, struct file_lock *fl) 5543 { 5544 return locks_lock_inode_wait(inode, fl); 5545 } 5546 5547 struct nfs4_unlockdata { 5548 struct nfs_locku_args arg; 5549 struct nfs_locku_res res; 5550 struct nfs4_lock_state *lsp; 5551 struct nfs_open_context *ctx; 5552 struct file_lock fl; 5553 struct nfs_server *server; 5554 unsigned long timestamp; 5555 }; 5556 5557 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 5558 struct nfs_open_context *ctx, 5559 struct nfs4_lock_state *lsp, 5560 struct nfs_seqid *seqid) 5561 { 5562 struct nfs4_unlockdata *p; 5563 struct inode *inode = lsp->ls_state->inode; 5564 5565 p = kzalloc(sizeof(*p), GFP_NOFS); 5566 if (p == NULL) 5567 return NULL; 5568 p->arg.fh = NFS_FH(inode); 5569 p->arg.fl = &p->fl; 5570 p->arg.seqid = seqid; 5571 p->res.seqid = seqid; 5572 p->lsp = lsp; 5573 atomic_inc(&lsp->ls_count); 5574 /* Ensure we don't close file until we're done freeing locks! 
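* Taking a reference on the open context here keeps the open state (and
* therefore the file) pinned until nfs4_locku_release_calldata() drops it.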
*/ 5575 p->ctx = get_nfs_open_context(ctx); 5576 memcpy(&p->fl, fl, sizeof(p->fl)); 5577 p->server = NFS_SERVER(inode); 5578 return p; 5579 } 5580 5581 static void nfs4_locku_release_calldata(void *data) 5582 { 5583 struct nfs4_unlockdata *calldata = data; 5584 nfs_free_seqid(calldata->arg.seqid); 5585 nfs4_put_lock_state(calldata->lsp); 5586 put_nfs_open_context(calldata->ctx); 5587 kfree(calldata); 5588 } 5589 5590 static void nfs4_locku_done(struct rpc_task *task, void *data) 5591 { 5592 struct nfs4_unlockdata *calldata = data; 5593 5594 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 5595 return; 5596 switch (task->tk_status) { 5597 case 0: 5598 renew_lease(calldata->server, calldata->timestamp); 5599 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl); 5600 if (nfs4_update_lock_stateid(calldata->lsp, 5601 &calldata->res.stateid)) 5602 break; 5603 case -NFS4ERR_BAD_STATEID: 5604 case -NFS4ERR_OLD_STATEID: 5605 case -NFS4ERR_STALE_STATEID: 5606 case -NFS4ERR_EXPIRED: 5607 if (!nfs4_stateid_match(&calldata->arg.stateid, 5608 &calldata->lsp->ls_stateid)) 5609 rpc_restart_call_prepare(task); 5610 break; 5611 default: 5612 if (nfs4_async_handle_error(task, calldata->server, 5613 NULL, NULL) == -EAGAIN) 5614 rpc_restart_call_prepare(task); 5615 } 5616 nfs_release_seqid(calldata->arg.seqid); 5617 } 5618 5619 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 5620 { 5621 struct nfs4_unlockdata *calldata = data; 5622 5623 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 5624 goto out_wait; 5625 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid); 5626 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 5627 /* Note: exit _without_ running nfs4_locku_done */ 5628 goto out_no_action; 5629 } 5630 calldata->timestamp = jiffies; 5631 if (nfs4_setup_sequence(calldata->server, 5632 &calldata->arg.seq_args, 5633 &calldata->res.seq_res, 5634 task) != 0) 5635 nfs_release_seqid(calldata->arg.seqid); 5636 return; 5637 out_no_action: 5638 task->tk_action = NULL; 5639 out_wait: 5640 nfs4_sequence_done(task, &calldata->res.seq_res); 5641 } 5642 5643 static const struct rpc_call_ops nfs4_locku_ops = { 5644 .rpc_call_prepare = nfs4_locku_prepare, 5645 .rpc_call_done = nfs4_locku_done, 5646 .rpc_release = nfs4_locku_release_calldata, 5647 }; 5648 5649 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 5650 struct nfs_open_context *ctx, 5651 struct nfs4_lock_state *lsp, 5652 struct nfs_seqid *seqid) 5653 { 5654 struct nfs4_unlockdata *data; 5655 struct rpc_message msg = { 5656 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 5657 .rpc_cred = ctx->cred, 5658 }; 5659 struct rpc_task_setup task_setup_data = { 5660 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 5661 .rpc_message = &msg, 5662 .callback_ops = &nfs4_locku_ops, 5663 .workqueue = nfsiod_workqueue, 5664 .flags = RPC_TASK_ASYNC, 5665 }; 5666 5667 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 5668 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 5669 5670 /* Ensure this is an unlock - when canceling a lock, the 5671 * canceled lock is passed in, and it won't be an unlock. 
5672 */ 5673 fl->fl_type = F_UNLCK; 5674 5675 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 5676 if (data == NULL) { 5677 nfs_free_seqid(seqid); 5678 return ERR_PTR(-ENOMEM); 5679 } 5680 5681 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5682 msg.rpc_argp = &data->arg; 5683 msg.rpc_resp = &data->res; 5684 task_setup_data.callback_data = data; 5685 return rpc_run_task(&task_setup_data); 5686 } 5687 5688 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 5689 { 5690 struct inode *inode = state->inode; 5691 struct nfs4_state_owner *sp = state->owner; 5692 struct nfs_inode *nfsi = NFS_I(inode); 5693 struct nfs_seqid *seqid; 5694 struct nfs4_lock_state *lsp; 5695 struct rpc_task *task; 5696 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5697 int status = 0; 5698 unsigned char fl_flags = request->fl_flags; 5699 5700 status = nfs4_set_lock_state(state, request); 5701 /* Unlock _before_ we do the RPC call */ 5702 request->fl_flags |= FL_EXISTS; 5703 /* Exclude nfs_delegation_claim_locks() */ 5704 mutex_lock(&sp->so_delegreturn_mutex); 5705 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 5706 down_read(&nfsi->rwsem); 5707 if (do_vfs_lock(inode, request) == -ENOENT) { 5708 up_read(&nfsi->rwsem); 5709 mutex_unlock(&sp->so_delegreturn_mutex); 5710 goto out; 5711 } 5712 up_read(&nfsi->rwsem); 5713 mutex_unlock(&sp->so_delegreturn_mutex); 5714 if (status != 0) 5715 goto out; 5716 /* Is this a delegated lock? */ 5717 lsp = request->fl_u.nfs4_fl.owner; 5718 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 5719 goto out; 5720 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 5721 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 5722 status = -ENOMEM; 5723 if (IS_ERR(seqid)) 5724 goto out; 5725 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 5726 status = PTR_ERR(task); 5727 if (IS_ERR(task)) 5728 goto out; 5729 status = nfs4_wait_for_completion_rpc_task(task); 5730 rpc_put_task(task); 5731 out: 5732 request->fl_flags = fl_flags; 5733 trace_nfs4_unlock(request, state, F_SETLK, status); 5734 return status; 5735 } 5736 5737 struct nfs4_lockdata { 5738 struct nfs_lock_args arg; 5739 struct nfs_lock_res res; 5740 struct nfs4_lock_state *lsp; 5741 struct nfs_open_context *ctx; 5742 struct file_lock fl; 5743 unsigned long timestamp; 5744 int rpc_status; 5745 int cancelled; 5746 struct nfs_server *server; 5747 }; 5748 5749 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 5750 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 5751 gfp_t gfp_mask) 5752 { 5753 struct nfs4_lockdata *p; 5754 struct inode *inode = lsp->ls_state->inode; 5755 struct nfs_server *server = NFS_SERVER(inode); 5756 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5757 5758 p = kzalloc(sizeof(*p), gfp_mask); 5759 if (p == NULL) 5760 return NULL; 5761 5762 p->arg.fh = NFS_FH(inode); 5763 p->arg.fl = &p->fl; 5764 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 5765 if (IS_ERR(p->arg.open_seqid)) 5766 goto out_free; 5767 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 5768 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 5769 if (IS_ERR(p->arg.lock_seqid)) 5770 goto out_free_seqid; 5771 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 5772 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 5773 p->arg.lock_owner.s_dev = server->s_dev; 5774 p->res.lock_seqid = p->arg.lock_seqid; 5775 
p->lsp = lsp; 5776 p->server = server; 5777 atomic_inc(&lsp->ls_count); 5778 p->ctx = get_nfs_open_context(ctx); 5779 get_file(fl->fl_file); 5780 memcpy(&p->fl, fl, sizeof(p->fl)); 5781 return p; 5782 out_free_seqid: 5783 nfs_free_seqid(p->arg.open_seqid); 5784 out_free: 5785 kfree(p); 5786 return NULL; 5787 } 5788 5789 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 5790 { 5791 struct nfs4_lockdata *data = calldata; 5792 struct nfs4_state *state = data->lsp->ls_state; 5793 5794 dprintk("%s: begin!\n", __func__); 5795 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 5796 goto out_wait; 5797 /* Do we need to do an open_to_lock_owner? */ 5798 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 5799 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 5800 goto out_release_lock_seqid; 5801 } 5802 nfs4_stateid_copy(&data->arg.open_stateid, 5803 &state->open_stateid); 5804 data->arg.new_lock_owner = 1; 5805 data->res.open_seqid = data->arg.open_seqid; 5806 } else { 5807 data->arg.new_lock_owner = 0; 5808 nfs4_stateid_copy(&data->arg.lock_stateid, 5809 &data->lsp->ls_stateid); 5810 } 5811 if (!nfs4_valid_open_stateid(state)) { 5812 data->rpc_status = -EBADF; 5813 task->tk_action = NULL; 5814 goto out_release_open_seqid; 5815 } 5816 data->timestamp = jiffies; 5817 if (nfs4_setup_sequence(data->server, 5818 &data->arg.seq_args, 5819 &data->res.seq_res, 5820 task) == 0) 5821 return; 5822 out_release_open_seqid: 5823 nfs_release_seqid(data->arg.open_seqid); 5824 out_release_lock_seqid: 5825 nfs_release_seqid(data->arg.lock_seqid); 5826 out_wait: 5827 nfs4_sequence_done(task, &data->res.seq_res); 5828 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 5829 } 5830 5831 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 5832 { 5833 struct nfs4_lockdata *data = calldata; 5834 struct nfs4_lock_state *lsp = data->lsp; 5835 5836 dprintk("%s: begin!\n", __func__); 5837 5838 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5839 return; 5840 5841 data->rpc_status = task->tk_status; 5842 switch (task->tk_status) { 5843 case 0: 5844 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 5845 data->timestamp); 5846 if (data->arg.new_lock) { 5847 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 5848 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) { 5849 rpc_restart_call_prepare(task); 5850 break; 5851 } 5852 } 5853 if (data->arg.new_lock_owner != 0) { 5854 nfs_confirm_seqid(&lsp->ls_seqid, 0); 5855 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 5856 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 5857 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 5858 rpc_restart_call_prepare(task); 5859 break; 5860 case -NFS4ERR_BAD_STATEID: 5861 case -NFS4ERR_OLD_STATEID: 5862 case -NFS4ERR_STALE_STATEID: 5863 case -NFS4ERR_EXPIRED: 5864 if (data->arg.new_lock_owner != 0) { 5865 if (!nfs4_stateid_match(&data->arg.open_stateid, 5866 &lsp->ls_state->open_stateid)) 5867 rpc_restart_call_prepare(task); 5868 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 5869 &lsp->ls_stateid)) 5870 rpc_restart_call_prepare(task); 5871 } 5872 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 5873 } 5874 5875 static void nfs4_lock_release(void *calldata) 5876 { 5877 struct nfs4_lockdata *data = calldata; 5878 5879 dprintk("%s: begin!\n", __func__); 5880 nfs_free_seqid(data->arg.open_seqid); 5881 if (data->cancelled != 0) { 5882 struct rpc_task *task; 5883 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 5884 
data->arg.lock_seqid); 5885 if (!IS_ERR(task)) 5886 rpc_put_task_async(task); 5887 dprintk("%s: cancelling lock!\n", __func__); 5888 } else 5889 nfs_free_seqid(data->arg.lock_seqid); 5890 nfs4_put_lock_state(data->lsp); 5891 put_nfs_open_context(data->ctx); 5892 fput(data->fl.fl_file); 5893 kfree(data); 5894 dprintk("%s: done!\n", __func__); 5895 } 5896 5897 static const struct rpc_call_ops nfs4_lock_ops = { 5898 .rpc_call_prepare = nfs4_lock_prepare, 5899 .rpc_call_done = nfs4_lock_done, 5900 .rpc_release = nfs4_lock_release, 5901 }; 5902 5903 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 5904 { 5905 switch (error) { 5906 case -NFS4ERR_ADMIN_REVOKED: 5907 case -NFS4ERR_BAD_STATEID: 5908 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5909 if (new_lock_owner != 0 || 5910 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 5911 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 5912 break; 5913 case -NFS4ERR_STALE_STATEID: 5914 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5915 case -NFS4ERR_EXPIRED: 5916 nfs4_schedule_lease_recovery(server->nfs_client); 5917 }; 5918 } 5919 5920 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 5921 { 5922 struct nfs4_lockdata *data; 5923 struct rpc_task *task; 5924 struct rpc_message msg = { 5925 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 5926 .rpc_cred = state->owner->so_cred, 5927 }; 5928 struct rpc_task_setup task_setup_data = { 5929 .rpc_client = NFS_CLIENT(state->inode), 5930 .rpc_message = &msg, 5931 .callback_ops = &nfs4_lock_ops, 5932 .workqueue = nfsiod_workqueue, 5933 .flags = RPC_TASK_ASYNC, 5934 }; 5935 int ret; 5936 5937 dprintk("%s: begin!\n", __func__); 5938 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 5939 fl->fl_u.nfs4_fl.owner, 5940 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 5941 if (data == NULL) 5942 return -ENOMEM; 5943 if (IS_SETLKW(cmd)) 5944 data->arg.block = 1; 5945 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5946 msg.rpc_argp = &data->arg; 5947 msg.rpc_resp = &data->res; 5948 task_setup_data.callback_data = data; 5949 if (recovery_type > NFS_LOCK_NEW) { 5950 if (recovery_type == NFS_LOCK_RECLAIM) 5951 data->arg.reclaim = NFS_LOCK_RECLAIM; 5952 nfs4_set_sequence_privileged(&data->arg.seq_args); 5953 } else 5954 data->arg.new_lock = 1; 5955 task = rpc_run_task(&task_setup_data); 5956 if (IS_ERR(task)) 5957 return PTR_ERR(task); 5958 ret = nfs4_wait_for_completion_rpc_task(task); 5959 if (ret == 0) { 5960 ret = data->rpc_status; 5961 if (ret) 5962 nfs4_handle_setlk_error(data->server, data->lsp, 5963 data->arg.new_lock_owner, ret); 5964 } else 5965 data->cancelled = 1; 5966 rpc_put_task(task); 5967 dprintk("%s: done, ret = %d!\n", __func__, ret); 5968 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret); 5969 return ret; 5970 } 5971 5972 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 5973 { 5974 struct nfs_server *server = NFS_SERVER(state->inode); 5975 struct nfs4_exception exception = { 5976 .inode = state->inode, 5977 }; 5978 int err; 5979 5980 do { 5981 /* Cache the lock if possible... 
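 * If the open state is still covered by a delegation, the lock is already held locally and no LOCK reclaim needs to go on the wire.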
*/ 5982 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5983 return 0; 5984 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 5985 if (err != -NFS4ERR_DELAY) 5986 break; 5987 nfs4_handle_exception(server, err, &exception); 5988 } while (exception.retry); 5989 return err; 5990 } 5991 5992 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 5993 { 5994 struct nfs_server *server = NFS_SERVER(state->inode); 5995 struct nfs4_exception exception = { 5996 .inode = state->inode, 5997 }; 5998 int err; 5999 6000 err = nfs4_set_lock_state(state, request); 6001 if (err != 0) 6002 return err; 6003 if (!recover_lost_locks) { 6004 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 6005 return 0; 6006 } 6007 do { 6008 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 6009 return 0; 6010 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 6011 switch (err) { 6012 default: 6013 goto out; 6014 case -NFS4ERR_GRACE: 6015 case -NFS4ERR_DELAY: 6016 nfs4_handle_exception(server, err, &exception); 6017 err = 0; 6018 } 6019 } while (exception.retry); 6020 out: 6021 return err; 6022 } 6023 6024 #if defined(CONFIG_NFS_V4_1) 6025 /** 6026 * nfs41_check_expired_locks - possibly free a lock stateid 6027 * 6028 * @state: NFSv4 state for an inode 6029 * 6030 * Returns NFS_OK if recovery for this stateid is now finished. 6031 * Otherwise a negative NFS4ERR value is returned. 6032 */ 6033 static int nfs41_check_expired_locks(struct nfs4_state *state) 6034 { 6035 int status, ret = -NFS4ERR_BAD_STATEID; 6036 struct nfs4_lock_state *lsp; 6037 struct nfs_server *server = NFS_SERVER(state->inode); 6038 6039 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 6040 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 6041 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 6042 6043 status = nfs41_test_stateid(server, 6044 &lsp->ls_stateid, 6045 cred); 6046 trace_nfs4_test_lock_stateid(state, lsp, status); 6047 if (status != NFS_OK) { 6048 /* Free the stateid unless the server 6049 * informs us the stateid is unrecognized. */ 6050 if (status != -NFS4ERR_BAD_STATEID) 6051 nfs41_free_stateid(server, 6052 &lsp->ls_stateid, 6053 cred); 6054 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 6055 ret = status; 6056 } 6057 } 6058 }; 6059 6060 return ret; 6061 } 6062 6063 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 6064 { 6065 int status = NFS_OK; 6066 6067 if (test_bit(LK_STATE_IN_USE, &state->flags)) 6068 status = nfs41_check_expired_locks(state); 6069 if (status != NFS_OK) 6070 status = nfs4_lock_expired(state, request); 6071 return status; 6072 } 6073 #endif 6074 6075 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6076 { 6077 struct nfs_inode *nfsi = NFS_I(state->inode); 6078 struct nfs4_state_owner *sp = state->owner; 6079 unsigned char fl_flags = request->fl_flags; 6080 int status = -ENOLCK; 6081 6082 if ((fl_flags & FL_POSIX) && 6083 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 6084 goto out; 6085 /* Is this a delegated open? */ 6086 status = nfs4_set_lock_state(state, request); 6087 if (status != 0) 6088 goto out; 6089 request->fl_flags |= FL_ACCESS; 6090 status = do_vfs_lock(state->inode, request); 6091 if (status < 0) 6092 goto out; 6093 mutex_lock(&sp->so_delegreturn_mutex); 6094 down_read(&nfsi->rwsem); 6095 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 6096 /* Yes: cache locks! */ 6097 /* ...but avoid races with delegation recall... 
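 * by taking the lock locally with FL_SLEEP cleared while the delegation and recovery locks are still held.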
*/ 6098 request->fl_flags = fl_flags & ~FL_SLEEP; 6099 status = do_vfs_lock(state->inode, request); 6100 up_read(&nfsi->rwsem); 6101 mutex_unlock(&sp->so_delegreturn_mutex); 6102 goto out; 6103 } 6104 up_read(&nfsi->rwsem); 6105 mutex_unlock(&sp->so_delegreturn_mutex); 6106 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 6107 out: 6108 request->fl_flags = fl_flags; 6109 return status; 6110 } 6111 6112 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6113 { 6114 struct nfs4_exception exception = { 6115 .state = state, 6116 .inode = state->inode, 6117 }; 6118 int err; 6119 6120 do { 6121 err = _nfs4_proc_setlk(state, cmd, request); 6122 if (err == -NFS4ERR_DENIED) 6123 err = -EAGAIN; 6124 err = nfs4_handle_exception(NFS_SERVER(state->inode), 6125 err, &exception); 6126 } while (exception.retry); 6127 return err; 6128 } 6129 6130 static int 6131 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 6132 { 6133 struct nfs_open_context *ctx; 6134 struct nfs4_state *state; 6135 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 6136 int status; 6137 6138 /* verify open state */ 6139 ctx = nfs_file_open_context(filp); 6140 state = ctx->state; 6141 6142 if (request->fl_start < 0 || request->fl_end < 0) 6143 return -EINVAL; 6144 6145 if (IS_GETLK(cmd)) { 6146 if (state != NULL) 6147 return nfs4_proc_getlk(state, F_GETLK, request); 6148 return 0; 6149 } 6150 6151 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 6152 return -EINVAL; 6153 6154 if (request->fl_type == F_UNLCK) { 6155 if (state != NULL) 6156 return nfs4_proc_unlck(state, cmd, request); 6157 return 0; 6158 } 6159 6160 if (state == NULL) 6161 return -ENOLCK; 6162 /* 6163 * Don't rely on the VFS having checked the file open mode, 6164 * since it won't do this for flock() locks. 
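 * Check the requested lock type against the file's open mode here and return -EBADF on a mismatch.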
6165 */ 6166 switch (request->fl_type) { 6167 case F_RDLCK: 6168 if (!(filp->f_mode & FMODE_READ)) 6169 return -EBADF; 6170 break; 6171 case F_WRLCK: 6172 if (!(filp->f_mode & FMODE_WRITE)) 6173 return -EBADF; 6174 } 6175 6176 do { 6177 status = nfs4_proc_setlk(state, cmd, request); 6178 if ((status != -EAGAIN) || IS_SETLK(cmd)) 6179 break; 6180 timeout = nfs4_set_lock_task_retry(timeout); 6181 status = -ERESTARTSYS; 6182 if (signalled()) 6183 break; 6184 } while(status < 0); 6185 return status; 6186 } 6187 6188 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 6189 { 6190 struct nfs_server *server = NFS_SERVER(state->inode); 6191 int err; 6192 6193 err = nfs4_set_lock_state(state, fl); 6194 if (err != 0) 6195 return err; 6196 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 6197 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 6198 } 6199 6200 struct nfs_release_lockowner_data { 6201 struct nfs4_lock_state *lsp; 6202 struct nfs_server *server; 6203 struct nfs_release_lockowner_args args; 6204 struct nfs_release_lockowner_res res; 6205 unsigned long timestamp; 6206 }; 6207 6208 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 6209 { 6210 struct nfs_release_lockowner_data *data = calldata; 6211 struct nfs_server *server = data->server; 6212 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 6213 &data->args.seq_args, &data->res.seq_res, task); 6214 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6215 data->timestamp = jiffies; 6216 } 6217 6218 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 6219 { 6220 struct nfs_release_lockowner_data *data = calldata; 6221 struct nfs_server *server = data->server; 6222 6223 nfs40_sequence_done(task, &data->res.seq_res); 6224 6225 switch (task->tk_status) { 6226 case 0: 6227 renew_lease(server, data->timestamp); 6228 break; 6229 case -NFS4ERR_STALE_CLIENTID: 6230 case -NFS4ERR_EXPIRED: 6231 nfs4_schedule_lease_recovery(server->nfs_client); 6232 break; 6233 case -NFS4ERR_LEASE_MOVED: 6234 case -NFS4ERR_DELAY: 6235 if (nfs4_async_handle_error(task, server, 6236 NULL, NULL) == -EAGAIN) 6237 rpc_restart_call_prepare(task); 6238 } 6239 } 6240 6241 static void nfs4_release_lockowner_release(void *calldata) 6242 { 6243 struct nfs_release_lockowner_data *data = calldata; 6244 nfs4_free_lock_state(data->server, data->lsp); 6245 kfree(calldata); 6246 } 6247 6248 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 6249 .rpc_call_prepare = nfs4_release_lockowner_prepare, 6250 .rpc_call_done = nfs4_release_lockowner_done, 6251 .rpc_release = nfs4_release_lockowner_release, 6252 }; 6253 6254 static void 6255 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 6256 { 6257 struct nfs_release_lockowner_data *data; 6258 struct rpc_message msg = { 6259 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 6260 }; 6261 6262 if (server->nfs_client->cl_mvops->minor_version != 0) 6263 return; 6264 6265 data = kmalloc(sizeof(*data), GFP_NOFS); 6266 if (!data) 6267 return; 6268 data->lsp = lsp; 6269 data->server = server; 6270 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6271 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 6272 data->args.lock_owner.s_dev = server->s_dev; 6273 6274 msg.rpc_argp = &data->args; 6275 msg.rpc_resp = &data->res; 6276 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 6277 
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 6278 } 6279 6280 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 6281 6282 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 6283 struct dentry *unused, struct inode *inode, 6284 const char *key, const void *buf, 6285 size_t buflen, int flags) 6286 { 6287 return nfs4_proc_set_acl(inode, buf, buflen); 6288 } 6289 6290 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 6291 struct dentry *unused, struct inode *inode, 6292 const char *key, void *buf, size_t buflen) 6293 { 6294 return nfs4_proc_get_acl(inode, buf, buflen); 6295 } 6296 6297 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 6298 { 6299 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))); 6300 } 6301 6302 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6303 6304 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 6305 struct dentry *unused, struct inode *inode, 6306 const char *key, const void *buf, 6307 size_t buflen, int flags) 6308 { 6309 if (security_ismaclabel(key)) 6310 return nfs4_set_security_label(inode, buf, buflen); 6311 6312 return -EOPNOTSUPP; 6313 } 6314 6315 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 6316 struct dentry *unused, struct inode *inode, 6317 const char *key, void *buf, size_t buflen) 6318 { 6319 if (security_ismaclabel(key)) 6320 return nfs4_get_security_label(inode, buf, buflen); 6321 return -EOPNOTSUPP; 6322 } 6323 6324 static ssize_t 6325 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 6326 { 6327 int len = 0; 6328 6329 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 6330 len = security_inode_listsecurity(inode, list, list_len); 6331 if (list_len && len > list_len) 6332 return -ERANGE; 6333 } 6334 return len; 6335 } 6336 6337 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 6338 .prefix = XATTR_SECURITY_PREFIX, 6339 .get = nfs4_xattr_get_nfs4_label, 6340 .set = nfs4_xattr_set_nfs4_label, 6341 }; 6342 6343 #else 6344 6345 static ssize_t 6346 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 6347 { 6348 return 0; 6349 } 6350 6351 #endif 6352 6353 /* 6354 * nfs_fhget will use either the mounted_on_fileid or the fileid 6355 */ 6356 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 6357 { 6358 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 6359 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 6360 (fattr->valid & NFS_ATTR_FATTR_FSID) && 6361 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 6362 return; 6363 6364 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 6365 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 6366 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 6367 fattr->nlink = 2; 6368 } 6369 6370 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6371 const struct qstr *name, 6372 struct nfs4_fs_locations *fs_locations, 6373 struct page *page) 6374 { 6375 struct nfs_server *server = NFS_SERVER(dir); 6376 u32 bitmask[3] = { 6377 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6378 }; 6379 struct nfs4_fs_locations_arg args = { 6380 .dir_fh = NFS_FH(dir), 6381 .name = name, 6382 .page = page, 6383 .bitmask = bitmask, 6384 }; 6385 struct nfs4_fs_locations_res res = { 6386 .fs_locations = fs_locations, 6387 }; 6388 struct rpc_message msg = { 6389 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6390 .rpc_argp = &args, 6391 .rpc_resp = &res, 6392 }; 6393 int status; 6394 6395 
dprintk("%s: start\n", __func__); 6396 6397 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 6398 * is not supported */ 6399 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 6400 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 6401 else 6402 bitmask[0] |= FATTR4_WORD0_FILEID; 6403 6404 nfs_fattr_init(&fs_locations->fattr); 6405 fs_locations->server = server; 6406 fs_locations->nlocations = 0; 6407 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 6408 dprintk("%s: returned status = %d\n", __func__, status); 6409 return status; 6410 } 6411 6412 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6413 const struct qstr *name, 6414 struct nfs4_fs_locations *fs_locations, 6415 struct page *page) 6416 { 6417 struct nfs4_exception exception = { }; 6418 int err; 6419 do { 6420 err = _nfs4_proc_fs_locations(client, dir, name, 6421 fs_locations, page); 6422 trace_nfs4_get_fs_locations(dir, name, err); 6423 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6424 &exception); 6425 } while (exception.retry); 6426 return err; 6427 } 6428 6429 /* 6430 * This operation also signals the server that this client is 6431 * performing migration recovery. The server can stop returning 6432 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 6433 * appended to this compound to identify the client ID which is 6434 * performing recovery. 6435 */ 6436 static int _nfs40_proc_get_locations(struct inode *inode, 6437 struct nfs4_fs_locations *locations, 6438 struct page *page, struct rpc_cred *cred) 6439 { 6440 struct nfs_server *server = NFS_SERVER(inode); 6441 struct rpc_clnt *clnt = server->client; 6442 u32 bitmask[2] = { 6443 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6444 }; 6445 struct nfs4_fs_locations_arg args = { 6446 .clientid = server->nfs_client->cl_clientid, 6447 .fh = NFS_FH(inode), 6448 .page = page, 6449 .bitmask = bitmask, 6450 .migration = 1, /* skip LOOKUP */ 6451 .renew = 1, /* append RENEW */ 6452 }; 6453 struct nfs4_fs_locations_res res = { 6454 .fs_locations = locations, 6455 .migration = 1, 6456 .renew = 1, 6457 }; 6458 struct rpc_message msg = { 6459 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6460 .rpc_argp = &args, 6461 .rpc_resp = &res, 6462 .rpc_cred = cred, 6463 }; 6464 unsigned long now = jiffies; 6465 int status; 6466 6467 nfs_fattr_init(&locations->fattr); 6468 locations->server = server; 6469 locations->nlocations = 0; 6470 6471 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6472 nfs4_set_sequence_privileged(&args.seq_args); 6473 status = nfs4_call_sync_sequence(clnt, server, &msg, 6474 &args.seq_args, &res.seq_res); 6475 if (status) 6476 return status; 6477 6478 renew_lease(server, now); 6479 return 0; 6480 } 6481 6482 #ifdef CONFIG_NFS_V4_1 6483 6484 /* 6485 * This operation also signals the server that this client is 6486 * performing migration recovery. The server can stop asserting 6487 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 6488 * performing this operation is identified in the SEQUENCE 6489 * operation in this compound. 6490 * 6491 * When the client supports GETATTR(fs_locations_info), it can 6492 * be plumbed in here. 
6493 */ 6494 static int _nfs41_proc_get_locations(struct inode *inode, 6495 struct nfs4_fs_locations *locations, 6496 struct page *page, struct rpc_cred *cred) 6497 { 6498 struct nfs_server *server = NFS_SERVER(inode); 6499 struct rpc_clnt *clnt = server->client; 6500 u32 bitmask[2] = { 6501 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6502 }; 6503 struct nfs4_fs_locations_arg args = { 6504 .fh = NFS_FH(inode), 6505 .page = page, 6506 .bitmask = bitmask, 6507 .migration = 1, /* skip LOOKUP */ 6508 }; 6509 struct nfs4_fs_locations_res res = { 6510 .fs_locations = locations, 6511 .migration = 1, 6512 }; 6513 struct rpc_message msg = { 6514 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6515 .rpc_argp = &args, 6516 .rpc_resp = &res, 6517 .rpc_cred = cred, 6518 }; 6519 int status; 6520 6521 nfs_fattr_init(&locations->fattr); 6522 locations->server = server; 6523 locations->nlocations = 0; 6524 6525 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6526 nfs4_set_sequence_privileged(&args.seq_args); 6527 status = nfs4_call_sync_sequence(clnt, server, &msg, 6528 &args.seq_args, &res.seq_res); 6529 if (status == NFS4_OK && 6530 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6531 status = -NFS4ERR_LEASE_MOVED; 6532 return status; 6533 } 6534 6535 #endif /* CONFIG_NFS_V4_1 */ 6536 6537 /** 6538 * nfs4_proc_get_locations - discover locations for a migrated FSID 6539 * @inode: inode on FSID that is migrating 6540 * @locations: result of query 6541 * @page: buffer 6542 * @cred: credential to use for this operation 6543 * 6544 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 6545 * operation failed, or a negative errno if a local error occurred. 6546 * 6547 * On success, "locations" is filled in, but if the server has 6548 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 6549 * asserted. 6550 * 6551 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 6552 * from this client that require migration recovery. 6553 */ 6554 int nfs4_proc_get_locations(struct inode *inode, 6555 struct nfs4_fs_locations *locations, 6556 struct page *page, struct rpc_cred *cred) 6557 { 6558 struct nfs_server *server = NFS_SERVER(inode); 6559 struct nfs_client *clp = server->nfs_client; 6560 const struct nfs4_mig_recovery_ops *ops = 6561 clp->cl_mvops->mig_recovery_ops; 6562 struct nfs4_exception exception = { }; 6563 int status; 6564 6565 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6566 (unsigned long long)server->fsid.major, 6567 (unsigned long long)server->fsid.minor, 6568 clp->cl_hostname); 6569 nfs_display_fhandle(NFS_FH(inode), __func__); 6570 6571 do { 6572 status = ops->get_locations(inode, locations, page, cred); 6573 if (status != -NFS4ERR_DELAY) 6574 break; 6575 nfs4_handle_exception(server, status, &exception); 6576 } while (exception.retry); 6577 return status; 6578 } 6579 6580 /* 6581 * This operation also signals the server that this client is 6582 * performing "lease moved" recovery. The server can stop 6583 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 6584 * is appended to this compound to identify the client ID which is 6585 * performing recovery. 
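 * A successful reply also renews the client's lease.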
6586 */ 6587 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6588 { 6589 struct nfs_server *server = NFS_SERVER(inode); 6590 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 6591 struct rpc_clnt *clnt = server->client; 6592 struct nfs4_fsid_present_arg args = { 6593 .fh = NFS_FH(inode), 6594 .clientid = clp->cl_clientid, 6595 .renew = 1, /* append RENEW */ 6596 }; 6597 struct nfs4_fsid_present_res res = { 6598 .renew = 1, 6599 }; 6600 struct rpc_message msg = { 6601 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6602 .rpc_argp = &args, 6603 .rpc_resp = &res, 6604 .rpc_cred = cred, 6605 }; 6606 unsigned long now = jiffies; 6607 int status; 6608 6609 res.fh = nfs_alloc_fhandle(); 6610 if (res.fh == NULL) 6611 return -ENOMEM; 6612 6613 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6614 nfs4_set_sequence_privileged(&args.seq_args); 6615 status = nfs4_call_sync_sequence(clnt, server, &msg, 6616 &args.seq_args, &res.seq_res); 6617 nfs_free_fhandle(res.fh); 6618 if (status) 6619 return status; 6620 6621 do_renew_lease(clp, now); 6622 return 0; 6623 } 6624 6625 #ifdef CONFIG_NFS_V4_1 6626 6627 /* 6628 * This operation also signals the server that this client is 6629 * performing "lease moved" recovery. The server can stop asserting 6630 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 6631 * this operation is identified in the SEQUENCE operation in this 6632 * compound. 6633 */ 6634 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6635 { 6636 struct nfs_server *server = NFS_SERVER(inode); 6637 struct rpc_clnt *clnt = server->client; 6638 struct nfs4_fsid_present_arg args = { 6639 .fh = NFS_FH(inode), 6640 }; 6641 struct nfs4_fsid_present_res res = { 6642 }; 6643 struct rpc_message msg = { 6644 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6645 .rpc_argp = &args, 6646 .rpc_resp = &res, 6647 .rpc_cred = cred, 6648 }; 6649 int status; 6650 6651 res.fh = nfs_alloc_fhandle(); 6652 if (res.fh == NULL) 6653 return -ENOMEM; 6654 6655 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6656 nfs4_set_sequence_privileged(&args.seq_args); 6657 status = nfs4_call_sync_sequence(clnt, server, &msg, 6658 &args.seq_args, &res.seq_res); 6659 nfs_free_fhandle(res.fh); 6660 if (status == NFS4_OK && 6661 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6662 status = -NFS4ERR_LEASE_MOVED; 6663 return status; 6664 } 6665 6666 #endif /* CONFIG_NFS_V4_1 */ 6667 6668 /** 6669 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 6670 * @inode: inode on FSID to check 6671 * @cred: credential to use for this operation 6672 * 6673 * Server indicates whether the FSID is present, moved, or not 6674 * recognized. This operation is necessary to clear a LEASE_MOVED 6675 * condition for this client ID. 6676 * 6677 * Returns NFS4_OK if the FSID is present on this server, 6678 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 6679 * NFS4ERR code if some error occurred on the server, or a 6680 * negative errno if a local failure occurred. 
6681 */ 6682 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6683 { 6684 struct nfs_server *server = NFS_SERVER(inode); 6685 struct nfs_client *clp = server->nfs_client; 6686 const struct nfs4_mig_recovery_ops *ops = 6687 clp->cl_mvops->mig_recovery_ops; 6688 struct nfs4_exception exception = { }; 6689 int status; 6690 6691 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6692 (unsigned long long)server->fsid.major, 6693 (unsigned long long)server->fsid.minor, 6694 clp->cl_hostname); 6695 nfs_display_fhandle(NFS_FH(inode), __func__); 6696 6697 do { 6698 status = ops->fsid_present(inode, cred); 6699 if (status != -NFS4ERR_DELAY) 6700 break; 6701 nfs4_handle_exception(server, status, &exception); 6702 } while (exception.retry); 6703 return status; 6704 } 6705 6706 /** 6707 * If 'use_integrity' is true and the state managment nfs_client 6708 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 6709 * and the machine credential as per RFC3530bis and RFC5661 Security 6710 * Considerations sections. Otherwise, just use the user cred with the 6711 * filesystem's rpc_client. 6712 */ 6713 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 6714 { 6715 int status; 6716 struct nfs4_secinfo_arg args = { 6717 .dir_fh = NFS_FH(dir), 6718 .name = name, 6719 }; 6720 struct nfs4_secinfo_res res = { 6721 .flavors = flavors, 6722 }; 6723 struct rpc_message msg = { 6724 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 6725 .rpc_argp = &args, 6726 .rpc_resp = &res, 6727 }; 6728 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 6729 struct rpc_cred *cred = NULL; 6730 6731 if (use_integrity) { 6732 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient; 6733 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client); 6734 msg.rpc_cred = cred; 6735 } 6736 6737 dprintk("NFS call secinfo %s\n", name->name); 6738 6739 nfs4_state_protect(NFS_SERVER(dir)->nfs_client, 6740 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 6741 6742 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args, 6743 &res.seq_res, 0); 6744 dprintk("NFS reply secinfo: %d\n", status); 6745 6746 if (cred) 6747 put_rpccred(cred); 6748 6749 return status; 6750 } 6751 6752 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 6753 struct nfs4_secinfo_flavors *flavors) 6754 { 6755 struct nfs4_exception exception = { }; 6756 int err; 6757 do { 6758 err = -NFS4ERR_WRONGSEC; 6759 6760 /* try to use integrity protection with machine cred */ 6761 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 6762 err = _nfs4_proc_secinfo(dir, name, flavors, true); 6763 6764 /* 6765 * if unable to use integrity protection, or SECINFO with 6766 * integrity protection returns NFS4ERR_WRONGSEC (which is 6767 * disallowed by spec, but exists in deployed servers) use 6768 * the current filesystem's rpc_client and the user cred. 6769 */ 6770 if (err == -NFS4ERR_WRONGSEC) 6771 err = _nfs4_proc_secinfo(dir, name, flavors, false); 6772 6773 trace_nfs4_secinfo(dir, name, err); 6774 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6775 &exception); 6776 } while (exception.retry); 6777 return err; 6778 } 6779 6780 #ifdef CONFIG_NFS_V4_1 6781 /* 6782 * Check the exchange flags returned by the server for invalid flags, having 6783 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 6784 * DS flags set. 
6785 */ 6786 static int nfs4_check_cl_exchange_flags(u32 flags) 6787 { 6788 if (flags & ~EXCHGID4_FLAG_MASK_R) 6789 goto out_inval; 6790 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 6791 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 6792 goto out_inval; 6793 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 6794 goto out_inval; 6795 return NFS_OK; 6796 out_inval: 6797 return -NFS4ERR_INVAL; 6798 } 6799 6800 static bool 6801 nfs41_same_server_scope(struct nfs41_server_scope *a, 6802 struct nfs41_server_scope *b) 6803 { 6804 if (a->server_scope_sz == b->server_scope_sz && 6805 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 6806 return true; 6807 6808 return false; 6809 } 6810 6811 static void 6812 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 6813 { 6814 } 6815 6816 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 6817 .rpc_call_done = &nfs4_bind_one_conn_to_session_done, 6818 }; 6819 6820 /* 6821 * nfs4_proc_bind_one_conn_to_session() 6822 * 6823 * The 4.1 client currently uses the same TCP connection for the 6824 * fore and backchannel. 6825 */ 6826 static 6827 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 6828 struct rpc_xprt *xprt, 6829 struct nfs_client *clp, 6830 struct rpc_cred *cred) 6831 { 6832 int status; 6833 struct nfs41_bind_conn_to_session_args args = { 6834 .client = clp, 6835 .dir = NFS4_CDFC4_FORE_OR_BOTH, 6836 }; 6837 struct nfs41_bind_conn_to_session_res res; 6838 struct rpc_message msg = { 6839 .rpc_proc = 6840 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 6841 .rpc_argp = &args, 6842 .rpc_resp = &res, 6843 .rpc_cred = cred, 6844 }; 6845 struct rpc_task_setup task_setup_data = { 6846 .rpc_client = clnt, 6847 .rpc_xprt = xprt, 6848 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 6849 .rpc_message = &msg, 6850 .flags = RPC_TASK_TIMEOUT, 6851 }; 6852 struct rpc_task *task; 6853 6854 dprintk("--> %s\n", __func__); 6855 6856 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 6857 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 6858 args.dir = NFS4_CDFC4_FORE; 6859 6860 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 6861 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 6862 args.dir = NFS4_CDFC4_FORE; 6863 6864 task = rpc_run_task(&task_setup_data); 6865 if (!IS_ERR(task)) { 6866 status = task->tk_status; 6867 rpc_put_task(task); 6868 } else 6869 status = PTR_ERR(task); 6870 trace_nfs4_bind_conn_to_session(clp, status); 6871 if (status == 0) { 6872 if (memcmp(res.sessionid.data, 6873 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 6874 dprintk("NFS: %s: Session ID mismatch\n", __func__); 6875 status = -EIO; 6876 goto out; 6877 } 6878 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 6879 dprintk("NFS: %s: Unexpected direction from server\n", 6880 __func__); 6881 status = -EIO; 6882 goto out; 6883 } 6884 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 6885 dprintk("NFS: %s: Server returned RDMA mode = true\n", 6886 __func__); 6887 status = -EIO; 6888 goto out; 6889 } 6890 } 6891 out: 6892 dprintk("<-- %s status= %d\n", __func__, status); 6893 return status; 6894 } 6895 6896 struct rpc_bind_conn_calldata { 6897 struct nfs_client *clp; 6898 struct rpc_cred *cred; 6899 }; 6900 6901 static int 6902 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 6903 struct rpc_xprt *xprt, 6904 void *calldata) 6905 { 6906 struct rpc_bind_conn_calldata *p = calldata; 6907 6908 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, 
p->cred); 6909 } 6910 6911 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 6912 { 6913 struct rpc_bind_conn_calldata data = { 6914 .clp = clp, 6915 .cred = cred, 6916 }; 6917 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 6918 nfs4_proc_bind_conn_to_session_callback, &data); 6919 } 6920 6921 /* 6922 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 6923 * and operations we'd like to see to enable certain features in the allow map 6924 */ 6925 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 6926 .how = SP4_MACH_CRED, 6927 .enforce.u.words = { 6928 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6929 1 << (OP_EXCHANGE_ID - 32) | 6930 1 << (OP_CREATE_SESSION - 32) | 6931 1 << (OP_DESTROY_SESSION - 32) | 6932 1 << (OP_DESTROY_CLIENTID - 32) 6933 }, 6934 .allow.u.words = { 6935 [0] = 1 << (OP_CLOSE) | 6936 1 << (OP_OPEN_DOWNGRADE) | 6937 1 << (OP_LOCKU) | 6938 1 << (OP_DELEGRETURN) | 6939 1 << (OP_COMMIT), 6940 [1] = 1 << (OP_SECINFO - 32) | 6941 1 << (OP_SECINFO_NO_NAME - 32) | 6942 1 << (OP_LAYOUTRETURN - 32) | 6943 1 << (OP_TEST_STATEID - 32) | 6944 1 << (OP_FREE_STATEID - 32) | 6945 1 << (OP_WRITE - 32) 6946 } 6947 }; 6948 6949 /* 6950 * Select the state protection mode for client `clp' given the server results 6951 * from exchange_id in `sp'. 6952 * 6953 * Returns 0 on success, negative errno otherwise. 6954 */ 6955 static int nfs4_sp4_select_mode(struct nfs_client *clp, 6956 struct nfs41_state_protection *sp) 6957 { 6958 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 6959 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6960 1 << (OP_EXCHANGE_ID - 32) | 6961 1 << (OP_CREATE_SESSION - 32) | 6962 1 << (OP_DESTROY_SESSION - 32) | 6963 1 << (OP_DESTROY_CLIENTID - 32) 6964 }; 6965 unsigned int i; 6966 6967 if (sp->how == SP4_MACH_CRED) { 6968 /* Print state protect result */ 6969 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 6970 for (i = 0; i <= LAST_NFS4_OP; i++) { 6971 if (test_bit(i, sp->enforce.u.longs)) 6972 dfprintk(MOUNT, " enforce op %d\n", i); 6973 if (test_bit(i, sp->allow.u.longs)) 6974 dfprintk(MOUNT, " allow op %d\n", i); 6975 } 6976 6977 /* make sure nothing is on enforce list that isn't supported */ 6978 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 6979 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 6980 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 6981 return -EINVAL; 6982 } 6983 } 6984 6985 /* 6986 * Minimal mode - state operations are allowed to use machine 6987 * credential. Note this already happens by default, so the 6988 * client doesn't have to do anything more than the negotiation. 6989 * 6990 * NOTE: we don't care if EXCHANGE_ID is in the list - 6991 * we're already using the machine cred for exchange_id 6992 * and will never use a different cred. 
6993 */ 6994 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 6995 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 6996 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 6997 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 6998 dfprintk(MOUNT, "sp4_mach_cred:\n"); 6999 dfprintk(MOUNT, " minimal mode enabled\n"); 7000 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags); 7001 } else { 7002 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 7003 return -EINVAL; 7004 } 7005 7006 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 7007 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 7008 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 7009 test_bit(OP_LOCKU, sp->allow.u.longs)) { 7010 dfprintk(MOUNT, " cleanup mode enabled\n"); 7011 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags); 7012 } 7013 7014 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 7015 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 7016 set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, 7017 &clp->cl_sp4_flags); 7018 } 7019 7020 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 7021 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 7022 dfprintk(MOUNT, " secinfo mode enabled\n"); 7023 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags); 7024 } 7025 7026 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 7027 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 7028 dfprintk(MOUNT, " stateid mode enabled\n"); 7029 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags); 7030 } 7031 7032 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 7033 dfprintk(MOUNT, " write mode enabled\n"); 7034 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags); 7035 } 7036 7037 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 7038 dfprintk(MOUNT, " commit mode enabled\n"); 7039 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags); 7040 } 7041 } 7042 7043 return 0; 7044 } 7045 7046 /* 7047 * _nfs4_proc_exchange_id() 7048 * 7049 * Wrapper for EXCHANGE_ID operation. 
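 * Negotiates the requested state protection (sp4_how) and, on success, records the server's owner, scope and implementation id in the nfs_client.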
7050 */ 7051 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, 7052 u32 sp4_how) 7053 { 7054 nfs4_verifier verifier; 7055 struct nfs41_exchange_id_args args = { 7056 .verifier = &verifier, 7057 .client = clp, 7058 #ifdef CONFIG_NFS_V4_1_MIGRATION 7059 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 7060 EXCHGID4_FLAG_BIND_PRINC_STATEID | 7061 EXCHGID4_FLAG_SUPP_MOVED_MIGR, 7062 #else 7063 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 7064 EXCHGID4_FLAG_BIND_PRINC_STATEID, 7065 #endif 7066 }; 7067 struct nfs41_exchange_id_res res = { 7068 0 7069 }; 7070 int status; 7071 struct rpc_message msg = { 7072 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 7073 .rpc_argp = &args, 7074 .rpc_resp = &res, 7075 .rpc_cred = cred, 7076 }; 7077 7078 nfs4_init_boot_verifier(clp, &verifier); 7079 7080 status = nfs4_init_uniform_client_string(clp); 7081 if (status) 7082 goto out; 7083 7084 dprintk("NFS call exchange_id auth=%s, '%s'\n", 7085 clp->cl_rpcclient->cl_auth->au_ops->au_name, 7086 clp->cl_owner_id); 7087 7088 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 7089 GFP_NOFS); 7090 if (unlikely(res.server_owner == NULL)) { 7091 status = -ENOMEM; 7092 goto out; 7093 } 7094 7095 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 7096 GFP_NOFS); 7097 if (unlikely(res.server_scope == NULL)) { 7098 status = -ENOMEM; 7099 goto out_server_owner; 7100 } 7101 7102 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 7103 if (unlikely(res.impl_id == NULL)) { 7104 status = -ENOMEM; 7105 goto out_server_scope; 7106 } 7107 7108 switch (sp4_how) { 7109 case SP4_NONE: 7110 args.state_protect.how = SP4_NONE; 7111 break; 7112 7113 case SP4_MACH_CRED: 7114 args.state_protect = nfs4_sp4_mach_cred_request; 7115 break; 7116 7117 default: 7118 /* unsupported! 
*/ 7119 WARN_ON_ONCE(1); 7120 status = -EINVAL; 7121 goto out_impl_id; 7122 } 7123 7124 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7125 trace_nfs4_exchange_id(clp, status); 7126 if (status == 0) 7127 status = nfs4_check_cl_exchange_flags(res.flags); 7128 7129 if (status == 0) 7130 status = nfs4_sp4_select_mode(clp, &res.state_protect); 7131 7132 if (status == 0) { 7133 clp->cl_clientid = res.clientid; 7134 clp->cl_exchange_flags = res.flags; 7135 /* Client ID is not confirmed */ 7136 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) { 7137 clear_bit(NFS4_SESSION_ESTABLISHED, 7138 &clp->cl_session->session_state); 7139 clp->cl_seqid = res.seqid; 7140 } 7141 7142 kfree(clp->cl_serverowner); 7143 clp->cl_serverowner = res.server_owner; 7144 res.server_owner = NULL; 7145 7146 /* use the most recent implementation id */ 7147 kfree(clp->cl_implid); 7148 clp->cl_implid = res.impl_id; 7149 res.impl_id = NULL; 7150 7151 if (clp->cl_serverscope != NULL && 7152 !nfs41_same_server_scope(clp->cl_serverscope, 7153 res.server_scope)) { 7154 dprintk("%s: server_scope mismatch detected\n", 7155 __func__); 7156 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 7157 kfree(clp->cl_serverscope); 7158 clp->cl_serverscope = NULL; 7159 } 7160 7161 if (clp->cl_serverscope == NULL) { 7162 clp->cl_serverscope = res.server_scope; 7163 res.server_scope = NULL; 7164 } 7165 } 7166 7167 out_impl_id: 7168 kfree(res.impl_id); 7169 out_server_scope: 7170 kfree(res.server_scope); 7171 out_server_owner: 7172 kfree(res.server_owner); 7173 out: 7174 if (clp->cl_implid != NULL) 7175 dprintk("NFS reply exchange_id: Server Implementation ID: " 7176 "domain: %s, name: %s, date: %llu,%u\n", 7177 clp->cl_implid->domain, clp->cl_implid->name, 7178 clp->cl_implid->date.seconds, 7179 clp->cl_implid->date.nseconds); 7180 dprintk("NFS reply exchange_id: %d\n", status); 7181 return status; 7182 } 7183 7184 /* 7185 * nfs4_proc_exchange_id() 7186 * 7187 * Returns zero, a negative errno, or a negative NFS4ERR status code. 7188 * 7189 * Since the clientid has expired, all compounds using sessions 7190 * associated with the stale clientid will be returning 7191 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 7192 * be in some phase of session reset. 7193 * 7194 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
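 * If that negotiation fails, the client falls back to SP4_NONE.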
7195 */ 7196 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 7197 { 7198 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 7199 int status; 7200 7201 /* try SP4_MACH_CRED if krb5i/p */ 7202 if (authflavor == RPC_AUTH_GSS_KRB5I || 7203 authflavor == RPC_AUTH_GSS_KRB5P) { 7204 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 7205 if (!status) 7206 return 0; 7207 } 7208 7209 /* try SP4_NONE */ 7210 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 7211 } 7212 7213 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 7214 struct rpc_cred *cred) 7215 { 7216 struct rpc_message msg = { 7217 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 7218 .rpc_argp = clp, 7219 .rpc_cred = cred, 7220 }; 7221 int status; 7222 7223 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7224 trace_nfs4_destroy_clientid(clp, status); 7225 if (status) 7226 dprintk("NFS: Got error %d from the server %s on " 7227 "DESTROY_CLIENTID.", status, clp->cl_hostname); 7228 return status; 7229 } 7230 7231 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 7232 struct rpc_cred *cred) 7233 { 7234 unsigned int loop; 7235 int ret; 7236 7237 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 7238 ret = _nfs4_proc_destroy_clientid(clp, cred); 7239 switch (ret) { 7240 case -NFS4ERR_DELAY: 7241 case -NFS4ERR_CLIENTID_BUSY: 7242 ssleep(1); 7243 break; 7244 default: 7245 return ret; 7246 } 7247 } 7248 return 0; 7249 } 7250 7251 int nfs4_destroy_clientid(struct nfs_client *clp) 7252 { 7253 struct rpc_cred *cred; 7254 int ret = 0; 7255 7256 if (clp->cl_mvops->minor_version < 1) 7257 goto out; 7258 if (clp->cl_exchange_flags == 0) 7259 goto out; 7260 if (clp->cl_preserve_clid) 7261 goto out; 7262 cred = nfs4_get_clid_cred(clp); 7263 ret = nfs4_proc_destroy_clientid(clp, cred); 7264 if (cred) 7265 put_rpccred(cred); 7266 switch (ret) { 7267 case 0: 7268 case -NFS4ERR_STALE_CLIENTID: 7269 clp->cl_exchange_flags = 0; 7270 } 7271 out: 7272 return ret; 7273 } 7274 7275 struct nfs4_get_lease_time_data { 7276 struct nfs4_get_lease_time_args *args; 7277 struct nfs4_get_lease_time_res *res; 7278 struct nfs_client *clp; 7279 }; 7280 7281 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 7282 void *calldata) 7283 { 7284 struct nfs4_get_lease_time_data *data = 7285 (struct nfs4_get_lease_time_data *)calldata; 7286 7287 dprintk("--> %s\n", __func__); 7288 /* just setup sequence, do not trigger session recovery 7289 since we're invoked within one */ 7290 nfs41_setup_sequence(data->clp->cl_session, 7291 &data->args->la_seq_args, 7292 &data->res->lr_seq_res, 7293 task); 7294 dprintk("<-- %s\n", __func__); 7295 } 7296 7297 /* 7298 * Called from nfs4_state_manager thread for session setup, so don't recover 7299 * from sequence operation or clientid errors. 
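 * NFS4ERR_DELAY and NFS4ERR_GRACE replies are simply retried after a short delay instead.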
7300 */ 7301 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 7302 { 7303 struct nfs4_get_lease_time_data *data = 7304 (struct nfs4_get_lease_time_data *)calldata; 7305 7306 dprintk("--> %s\n", __func__); 7307 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 7308 return; 7309 switch (task->tk_status) { 7310 case -NFS4ERR_DELAY: 7311 case -NFS4ERR_GRACE: 7312 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 7313 rpc_delay(task, NFS4_POLL_RETRY_MIN); 7314 task->tk_status = 0; 7315 /* fall through */ 7316 case -NFS4ERR_RETRY_UNCACHED_REP: 7317 rpc_restart_call_prepare(task); 7318 return; 7319 } 7320 dprintk("<-- %s\n", __func__); 7321 } 7322 7323 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 7324 .rpc_call_prepare = nfs4_get_lease_time_prepare, 7325 .rpc_call_done = nfs4_get_lease_time_done, 7326 }; 7327 7328 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 7329 { 7330 struct rpc_task *task; 7331 struct nfs4_get_lease_time_args args; 7332 struct nfs4_get_lease_time_res res = { 7333 .lr_fsinfo = fsinfo, 7334 }; 7335 struct nfs4_get_lease_time_data data = { 7336 .args = &args, 7337 .res = &res, 7338 .clp = clp, 7339 }; 7340 struct rpc_message msg = { 7341 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 7342 .rpc_argp = &args, 7343 .rpc_resp = &res, 7344 }; 7345 struct rpc_task_setup task_setup = { 7346 .rpc_client = clp->cl_rpcclient, 7347 .rpc_message = &msg, 7348 .callback_ops = &nfs4_get_lease_time_ops, 7349 .callback_data = &data, 7350 .flags = RPC_TASK_TIMEOUT, 7351 }; 7352 int status; 7353 7354 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 7355 nfs4_set_sequence_privileged(&args.la_seq_args); 7356 dprintk("--> %s\n", __func__); 7357 task = rpc_run_task(&task_setup); 7358 7359 if (IS_ERR(task)) 7360 status = PTR_ERR(task); 7361 else { 7362 status = task->tk_status; 7363 rpc_put_task(task); 7364 } 7365 dprintk("<-- %s return %d\n", __func__, status); 7366 7367 return status; 7368 } 7369 7370 /* 7371 * Initialize the values to be used by the client in CREATE_SESSION 7372 * If nfs4_init_session set the fore channel request and response sizes, 7373 * use them. 7374 * 7375 * Set the back channel max_resp_sz_cached to zero to force the client to 7376 * always set csa_cachethis to FALSE because the current implementation 7377 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
7378 */ 7379 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args, 7380 struct rpc_clnt *clnt) 7381 { 7382 unsigned int max_rqst_sz, max_resp_sz; 7383 unsigned int max_bc_payload = rpc_max_bc_payload(clnt); 7384 7385 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 7386 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 7387 7388 /* Fore channel attributes */ 7389 args->fc_attrs.max_rqst_sz = max_rqst_sz; 7390 args->fc_attrs.max_resp_sz = max_resp_sz; 7391 args->fc_attrs.max_ops = NFS4_MAX_OPS; 7392 args->fc_attrs.max_reqs = max_session_slots; 7393 7394 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 7395 "max_ops=%u max_reqs=%u\n", 7396 __func__, 7397 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 7398 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 7399 7400 /* Back channel attributes */ 7401 args->bc_attrs.max_rqst_sz = max_bc_payload; 7402 args->bc_attrs.max_resp_sz = max_bc_payload; 7403 args->bc_attrs.max_resp_sz_cached = 0; 7404 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 7405 args->bc_attrs.max_reqs = NFS41_BC_MAX_CALLBACKS; 7406 7407 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 7408 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 7409 __func__, 7410 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 7411 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 7412 args->bc_attrs.max_reqs); 7413 } 7414 7415 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 7416 struct nfs41_create_session_res *res) 7417 { 7418 struct nfs4_channel_attrs *sent = &args->fc_attrs; 7419 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 7420 7421 if (rcvd->max_resp_sz > sent->max_resp_sz) 7422 return -EINVAL; 7423 /* 7424 * Our requested max_ops is the minimum we need; we're not 7425 * prepared to break up compounds into smaller pieces than that. 
7426 * So, no point even trying to continue if the server won't 7427 * cooperate: 7428 */ 7429 if (rcvd->max_ops < sent->max_ops) 7430 return -EINVAL; 7431 if (rcvd->max_reqs == 0) 7432 return -EINVAL; 7433 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 7434 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 7435 return 0; 7436 } 7437 7438 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 7439 struct nfs41_create_session_res *res) 7440 { 7441 struct nfs4_channel_attrs *sent = &args->bc_attrs; 7442 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 7443 7444 if (!(res->flags & SESSION4_BACK_CHAN)) 7445 goto out; 7446 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 7447 return -EINVAL; 7448 if (rcvd->max_resp_sz < sent->max_resp_sz) 7449 return -EINVAL; 7450 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 7451 return -EINVAL; 7452 /* These would render the backchannel useless: */ 7453 if (rcvd->max_ops != sent->max_ops) 7454 return -EINVAL; 7455 if (rcvd->max_reqs != sent->max_reqs) 7456 return -EINVAL; 7457 out: 7458 return 0; 7459 } 7460 7461 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 7462 struct nfs41_create_session_res *res) 7463 { 7464 int ret; 7465 7466 ret = nfs4_verify_fore_channel_attrs(args, res); 7467 if (ret) 7468 return ret; 7469 return nfs4_verify_back_channel_attrs(args, res); 7470 } 7471 7472 static void nfs4_update_session(struct nfs4_session *session, 7473 struct nfs41_create_session_res *res) 7474 { 7475 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 7476 /* Mark client id and session as being confirmed */ 7477 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 7478 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 7479 session->flags = res->flags; 7480 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 7481 if (res->flags & SESSION4_BACK_CHAN) 7482 memcpy(&session->bc_attrs, &res->bc_attrs, 7483 sizeof(session->bc_attrs)); 7484 } 7485 7486 static int _nfs4_proc_create_session(struct nfs_client *clp, 7487 struct rpc_cred *cred) 7488 { 7489 struct nfs4_session *session = clp->cl_session; 7490 struct nfs41_create_session_args args = { 7491 .client = clp, 7492 .clientid = clp->cl_clientid, 7493 .seqid = clp->cl_seqid, 7494 .cb_program = NFS4_CALLBACK, 7495 }; 7496 struct nfs41_create_session_res res; 7497 7498 struct rpc_message msg = { 7499 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 7500 .rpc_argp = &args, 7501 .rpc_resp = &res, 7502 .rpc_cred = cred, 7503 }; 7504 int status; 7505 7506 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 7507 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 7508 7509 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7510 trace_nfs4_create_session(clp, status); 7511 7512 if (!status) { 7513 /* Verify the session's negotiated channel_attrs values */ 7514 status = nfs4_verify_channel_attrs(&args, &res); 7515 /* Increment the clientid slot sequence id */ 7516 if (clp->cl_seqid == res.seqid) 7517 clp->cl_seqid++; 7518 if (status) 7519 goto out; 7520 nfs4_update_session(session, &res); 7521 } 7522 out: 7523 return status; 7524 } 7525 7526 /* 7527 * Issues a CREATE_SESSION operation to the server. 7528 * It is the responsibility of the caller to verify the session is 7529 * expired before calling this routine. 
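 * On success, the session slot tables are initialised or reset.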
7530 */ 7531 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred) 7532 { 7533 int status; 7534 unsigned *ptr; 7535 struct nfs4_session *session = clp->cl_session; 7536 7537 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 7538 7539 status = _nfs4_proc_create_session(clp, cred); 7540 if (status) 7541 goto out; 7542 7543 /* Init or reset the session slot tables */ 7544 status = nfs4_setup_session_slot_tables(session); 7545 dprintk("slot table setup returned %d\n", status); 7546 if (status) 7547 goto out; 7548 7549 ptr = (unsigned *)&session->sess_id.data[0]; 7550 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 7551 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 7552 out: 7553 dprintk("<-- %s\n", __func__); 7554 return status; 7555 } 7556 7557 /* 7558 * Issue the over-the-wire RPC DESTROY_SESSION. 7559 * The caller must serialize access to this routine. 7560 */ 7561 int nfs4_proc_destroy_session(struct nfs4_session *session, 7562 struct rpc_cred *cred) 7563 { 7564 struct rpc_message msg = { 7565 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 7566 .rpc_argp = session, 7567 .rpc_cred = cred, 7568 }; 7569 int status = 0; 7570 7571 dprintk("--> nfs4_proc_destroy_session\n"); 7572 7573 /* session is still being setup */ 7574 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 7575 return 0; 7576 7577 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7578 trace_nfs4_destroy_session(session->clp, status); 7579 7580 if (status) 7581 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 7582 "Session has been destroyed regardless...\n", status); 7583 7584 dprintk("<-- nfs4_proc_destroy_session\n"); 7585 return status; 7586 } 7587 7588 /* 7589 * Renew the cl_session lease. 
7590 */ 7591 struct nfs4_sequence_data { 7592 struct nfs_client *clp; 7593 struct nfs4_sequence_args args; 7594 struct nfs4_sequence_res res; 7595 }; 7596 7597 static void nfs41_sequence_release(void *data) 7598 { 7599 struct nfs4_sequence_data *calldata = data; 7600 struct nfs_client *clp = calldata->clp; 7601 7602 if (atomic_read(&clp->cl_count) > 1) 7603 nfs4_schedule_state_renewal(clp); 7604 nfs_put_client(clp); 7605 kfree(calldata); 7606 } 7607 7608 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7609 { 7610 switch(task->tk_status) { 7611 case -NFS4ERR_DELAY: 7612 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7613 return -EAGAIN; 7614 default: 7615 nfs4_schedule_lease_recovery(clp); 7616 } 7617 return 0; 7618 } 7619 7620 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 7621 { 7622 struct nfs4_sequence_data *calldata = data; 7623 struct nfs_client *clp = calldata->clp; 7624 7625 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 7626 return; 7627 7628 trace_nfs4_sequence(clp, task->tk_status); 7629 if (task->tk_status < 0) { 7630 dprintk("%s ERROR %d\n", __func__, task->tk_status); 7631 if (atomic_read(&clp->cl_count) == 1) 7632 goto out; 7633 7634 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 7635 rpc_restart_call_prepare(task); 7636 return; 7637 } 7638 } 7639 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 7640 out: 7641 dprintk("<-- %s\n", __func__); 7642 } 7643 7644 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 7645 { 7646 struct nfs4_sequence_data *calldata = data; 7647 struct nfs_client *clp = calldata->clp; 7648 struct nfs4_sequence_args *args; 7649 struct nfs4_sequence_res *res; 7650 7651 args = task->tk_msg.rpc_argp; 7652 res = task->tk_msg.rpc_resp; 7653 7654 nfs41_setup_sequence(clp->cl_session, args, res, task); 7655 } 7656 7657 static const struct rpc_call_ops nfs41_sequence_ops = { 7658 .rpc_call_done = nfs41_sequence_call_done, 7659 .rpc_call_prepare = nfs41_sequence_prepare, 7660 .rpc_release = nfs41_sequence_release, 7661 }; 7662 7663 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 7664 struct rpc_cred *cred, 7665 bool is_privileged) 7666 { 7667 struct nfs4_sequence_data *calldata; 7668 struct rpc_message msg = { 7669 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 7670 .rpc_cred = cred, 7671 }; 7672 struct rpc_task_setup task_setup_data = { 7673 .rpc_client = clp->cl_rpcclient, 7674 .rpc_message = &msg, 7675 .callback_ops = &nfs41_sequence_ops, 7676 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 7677 }; 7678 7679 if (!atomic_inc_not_zero(&clp->cl_count)) 7680 return ERR_PTR(-EIO); 7681 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7682 if (calldata == NULL) { 7683 nfs_put_client(clp); 7684 return ERR_PTR(-ENOMEM); 7685 } 7686 nfs4_init_sequence(&calldata->args, &calldata->res, 0); 7687 if (is_privileged) 7688 nfs4_set_sequence_privileged(&calldata->args); 7689 msg.rpc_argp = &calldata->args; 7690 msg.rpc_resp = &calldata->res; 7691 calldata->clp = clp; 7692 task_setup_data.callback_data = calldata; 7693 7694 return rpc_run_task(&task_setup_data); 7695 } 7696 7697 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 7698 { 7699 struct rpc_task *task; 7700 int ret = 0; 7701 7702 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 7703 return -EAGAIN; 7704 task = _nfs41_proc_sequence(clp, cred, false); 7705 if (IS_ERR(task)) 7706 ret = PTR_ERR(task); 7707 else 7708 rpc_put_task_async(task); 
7709 dprintk("<-- %s status=%d\n", __func__, ret); 7710 return ret; 7711 } 7712 7713 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 7714 { 7715 struct rpc_task *task; 7716 int ret; 7717 7718 task = _nfs41_proc_sequence(clp, cred, true); 7719 if (IS_ERR(task)) { 7720 ret = PTR_ERR(task); 7721 goto out; 7722 } 7723 ret = rpc_wait_for_completion_task(task); 7724 if (!ret) 7725 ret = task->tk_status; 7726 rpc_put_task(task); 7727 out: 7728 dprintk("<-- %s status=%d\n", __func__, ret); 7729 return ret; 7730 } 7731 7732 struct nfs4_reclaim_complete_data { 7733 struct nfs_client *clp; 7734 struct nfs41_reclaim_complete_args arg; 7735 struct nfs41_reclaim_complete_res res; 7736 }; 7737 7738 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 7739 { 7740 struct nfs4_reclaim_complete_data *calldata = data; 7741 7742 nfs41_setup_sequence(calldata->clp->cl_session, 7743 &calldata->arg.seq_args, 7744 &calldata->res.seq_res, 7745 task); 7746 } 7747 7748 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7749 { 7750 switch(task->tk_status) { 7751 case 0: 7752 case -NFS4ERR_COMPLETE_ALREADY: 7753 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 7754 break; 7755 case -NFS4ERR_DELAY: 7756 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7757 /* fall through */ 7758 case -NFS4ERR_RETRY_UNCACHED_REP: 7759 return -EAGAIN; 7760 default: 7761 nfs4_schedule_lease_recovery(clp); 7762 } 7763 return 0; 7764 } 7765 7766 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 7767 { 7768 struct nfs4_reclaim_complete_data *calldata = data; 7769 struct nfs_client *clp = calldata->clp; 7770 struct nfs4_sequence_res *res = &calldata->res.seq_res; 7771 7772 dprintk("--> %s\n", __func__); 7773 if (!nfs41_sequence_done(task, res)) 7774 return; 7775 7776 trace_nfs4_reclaim_complete(clp, task->tk_status); 7777 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 7778 rpc_restart_call_prepare(task); 7779 return; 7780 } 7781 dprintk("<-- %s\n", __func__); 7782 } 7783 7784 static void nfs4_free_reclaim_complete_data(void *data) 7785 { 7786 struct nfs4_reclaim_complete_data *calldata = data; 7787 7788 kfree(calldata); 7789 } 7790 7791 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 7792 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 7793 .rpc_call_done = nfs4_reclaim_complete_done, 7794 .rpc_release = nfs4_free_reclaim_complete_data, 7795 }; 7796 7797 /* 7798 * Issue a global reclaim complete. 
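 *
 * RECLAIM_COMPLETE with rca_one_fs = false tells the server that this
 * client has finished reclaiming state after a server reboot; it is
 * hooked up below as .reclaim_complete in nfs41_reboot_recovery_ops.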
7799 */ 7800 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 7801 struct rpc_cred *cred) 7802 { 7803 struct nfs4_reclaim_complete_data *calldata; 7804 struct rpc_task *task; 7805 struct rpc_message msg = { 7806 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 7807 .rpc_cred = cred, 7808 }; 7809 struct rpc_task_setup task_setup_data = { 7810 .rpc_client = clp->cl_rpcclient, 7811 .rpc_message = &msg, 7812 .callback_ops = &nfs4_reclaim_complete_call_ops, 7813 .flags = RPC_TASK_ASYNC, 7814 }; 7815 int status = -ENOMEM; 7816 7817 dprintk("--> %s\n", __func__); 7818 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7819 if (calldata == NULL) 7820 goto out; 7821 calldata->clp = clp; 7822 calldata->arg.one_fs = 0; 7823 7824 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0); 7825 nfs4_set_sequence_privileged(&calldata->arg.seq_args); 7826 msg.rpc_argp = &calldata->arg; 7827 msg.rpc_resp = &calldata->res; 7828 task_setup_data.callback_data = calldata; 7829 task = rpc_run_task(&task_setup_data); 7830 if (IS_ERR(task)) { 7831 status = PTR_ERR(task); 7832 goto out; 7833 } 7834 status = nfs4_wait_for_completion_rpc_task(task); 7835 if (status == 0) 7836 status = task->tk_status; 7837 rpc_put_task(task); 7838 return 0; 7839 out: 7840 dprintk("<-- %s status=%d\n", __func__, status); 7841 return status; 7842 } 7843 7844 static void 7845 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 7846 { 7847 struct nfs4_layoutget *lgp = calldata; 7848 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 7849 struct nfs4_session *session = nfs4_get_session(server); 7850 7851 dprintk("--> %s\n", __func__); 7852 nfs41_setup_sequence(session, &lgp->args.seq_args, 7853 &lgp->res.seq_res, task); 7854 dprintk("<-- %s\n", __func__); 7855 } 7856 7857 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 7858 { 7859 struct nfs4_layoutget *lgp = calldata; 7860 7861 dprintk("--> %s\n", __func__); 7862 nfs41_sequence_done(task, &lgp->res.seq_res); 7863 dprintk("<-- %s\n", __func__); 7864 } 7865 7866 static int 7867 nfs4_layoutget_handle_exception(struct rpc_task *task, 7868 struct nfs4_layoutget *lgp, struct nfs4_exception *exception) 7869 { 7870 struct inode *inode = lgp->args.inode; 7871 struct nfs_server *server = NFS_SERVER(inode); 7872 struct pnfs_layout_hdr *lo; 7873 int status = task->tk_status; 7874 7875 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 7876 7877 switch (status) { 7878 case 0: 7879 goto out; 7880 7881 /* 7882 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs 7883 * on the file. Set tk_status to -ENODATA to tell the upper layer 7884 * to fall back to in-band I/O through the MDS. 7885 */ 7886 case -NFS4ERR_LAYOUTUNAVAILABLE: 7887 status = -ENODATA; 7888 goto out; 7889 /* 7890 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of at 7891 * least lgp->args.minlength bytes (see RFC5661 section 18.43.3). 7892 */ 7893 case -NFS4ERR_BADLAYOUT: 7894 status = -EOVERFLOW; 7895 goto out; 7896 /* 7897 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 7898 * (or clients) writing to the same RAID stripe except when 7899 * the minlength argument is 0 (see RFC5661 section 18.43.3). 7900 * 7901 * Treat it like we would RECALLCONFLICT -- we retry for a little 7902 * while, and then eventually give up.
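 *
 * The backoff between attempts is driven by the caller:
 * nfs4_proc_layoutget() passes the updated exception->timeout back out
 * through its *timeout argument so the pnfs layer can delay before the
 * next LAYOUTGET.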
7903 */ 7904 case -NFS4ERR_LAYOUTTRYLATER: 7905 if (lgp->args.minlength == 0) { 7906 status = -EOVERFLOW; 7907 goto out; 7908 } 7909 /* Fallthrough */ 7910 case -NFS4ERR_RECALLCONFLICT: 7911 nfs4_handle_exception(server, -NFS4ERR_RECALLCONFLICT, 7912 exception); 7913 status = -ERECALLCONFLICT; 7914 goto out; 7915 case -NFS4ERR_EXPIRED: 7916 case -NFS4ERR_BAD_STATEID: 7917 exception->timeout = 0; 7918 spin_lock(&inode->i_lock); 7919 if (nfs4_stateid_match(&lgp->args.stateid, 7920 &lgp->args.ctx->state->stateid)) { 7921 spin_unlock(&inode->i_lock); 7922 /* If the open stateid was bad, then recover it. */ 7923 exception->state = lgp->args.ctx->state; 7924 break; 7925 } 7926 lo = NFS_I(inode)->layout; 7927 if (lo && nfs4_stateid_match(&lgp->args.stateid, 7928 &lo->plh_stateid)) { 7929 LIST_HEAD(head); 7930 7931 /* 7932 * Mark the bad layout state as invalid, then retry 7933 * with the current stateid. 7934 */ 7935 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); 7936 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0); 7937 spin_unlock(&inode->i_lock); 7938 pnfs_free_lseg_list(&head); 7939 } else 7940 spin_unlock(&inode->i_lock); 7941 status = -EAGAIN; 7942 goto out; 7943 } 7944 7945 status = nfs4_handle_exception(server, status, exception); 7946 if (exception->retry) 7947 status = -EAGAIN; 7948 out: 7949 dprintk("<-- %s\n", __func__); 7950 return status; 7951 } 7952 7953 static size_t max_response_pages(struct nfs_server *server) 7954 { 7955 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 7956 return nfs_page_array_len(0, max_resp_sz); 7957 } 7958 7959 static void nfs4_free_pages(struct page **pages, size_t size) 7960 { 7961 int i; 7962 7963 if (!pages) 7964 return; 7965 7966 for (i = 0; i < size; i++) { 7967 if (!pages[i]) 7968 break; 7969 __free_page(pages[i]); 7970 } 7971 kfree(pages); 7972 } 7973 7974 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) 7975 { 7976 struct page **pages; 7977 int i; 7978 7979 pages = kcalloc(size, sizeof(struct page *), gfp_flags); 7980 if (!pages) { 7981 dprintk("%s: can't alloc array of %zu pages\n", __func__, size); 7982 return NULL; 7983 } 7984 7985 for (i = 0; i < size; i++) { 7986 pages[i] = alloc_page(gfp_flags); 7987 if (!pages[i]) { 7988 dprintk("%s: failed to allocate page\n", __func__); 7989 nfs4_free_pages(pages, size); 7990 return NULL; 7991 } 7992 } 7993 7994 return pages; 7995 } 7996 7997 static void nfs4_layoutget_release(void *calldata) 7998 { 7999 struct nfs4_layoutget *lgp = calldata; 8000 struct inode *inode = lgp->args.inode; 8001 struct nfs_server *server = NFS_SERVER(inode); 8002 size_t max_pages = max_response_pages(server); 8003 8004 dprintk("--> %s\n", __func__); 8005 nfs4_free_pages(lgp->args.layout.pages, max_pages); 8006 pnfs_put_layout_hdr(NFS_I(inode)->layout); 8007 put_nfs_open_context(lgp->args.ctx); 8008 kfree(calldata); 8009 dprintk("<-- %s\n", __func__); 8010 } 8011 8012 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 8013 .rpc_call_prepare = nfs4_layoutget_prepare, 8014 .rpc_call_done = nfs4_layoutget_done, 8015 .rpc_release = nfs4_layoutget_release, 8016 }; 8017 8018 struct pnfs_layout_segment * 8019 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags) 8020 { 8021 struct inode *inode = lgp->args.inode; 8022 struct nfs_server *server = NFS_SERVER(inode); 8023 size_t max_pages = max_response_pages(server); 8024 struct rpc_task *task; 8025 struct rpc_message msg = { 8026 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 8027 .rpc_argp = 
&lgp->args, 8028 .rpc_resp = &lgp->res, 8029 .rpc_cred = lgp->cred, 8030 }; 8031 struct rpc_task_setup task_setup_data = { 8032 .rpc_client = server->client, 8033 .rpc_message = &msg, 8034 .callback_ops = &nfs4_layoutget_call_ops, 8035 .callback_data = lgp, 8036 .flags = RPC_TASK_ASYNC, 8037 }; 8038 struct pnfs_layout_segment *lseg = NULL; 8039 struct nfs4_exception exception = { .timeout = *timeout }; 8040 int status = 0; 8041 8042 dprintk("--> %s\n", __func__); 8043 8044 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ 8045 pnfs_get_layout_hdr(NFS_I(inode)->layout); 8046 8047 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); 8048 if (!lgp->args.layout.pages) { 8049 nfs4_layoutget_release(lgp); 8050 return ERR_PTR(-ENOMEM); 8051 } 8052 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 8053 8054 lgp->res.layoutp = &lgp->args.layout; 8055 lgp->res.seq_res.sr_slot = NULL; 8056 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 8057 8058 task = rpc_run_task(&task_setup_data); 8059 if (IS_ERR(task)) 8060 return ERR_CAST(task); 8061 status = nfs4_wait_for_completion_rpc_task(task); 8062 if (status == 0) { 8063 status = nfs4_layoutget_handle_exception(task, lgp, &exception); 8064 *timeout = exception.timeout; 8065 } 8066 8067 trace_nfs4_layoutget(lgp->args.ctx, 8068 &lgp->args.range, 8069 &lgp->res.range, 8070 &lgp->res.stateid, 8071 status); 8072 8073 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ 8074 if (status == 0 && lgp->res.layoutp->len) 8075 lseg = pnfs_layout_process(lgp); 8076 rpc_put_task(task); 8077 dprintk("<-- %s status=%d\n", __func__, status); 8078 if (status) 8079 return ERR_PTR(status); 8080 return lseg; 8081 } 8082 8083 static void 8084 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 8085 { 8086 struct nfs4_layoutreturn *lrp = calldata; 8087 8088 dprintk("--> %s\n", __func__); 8089 nfs41_setup_sequence(lrp->clp->cl_session, 8090 &lrp->args.seq_args, 8091 &lrp->res.seq_res, 8092 task); 8093 } 8094 8095 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 8096 { 8097 struct nfs4_layoutreturn *lrp = calldata; 8098 struct nfs_server *server; 8099 8100 dprintk("--> %s\n", __func__); 8101 8102 if (!nfs41_sequence_done(task, &lrp->res.seq_res)) 8103 return; 8104 8105 server = NFS_SERVER(lrp->args.inode); 8106 switch (task->tk_status) { 8107 default: 8108 task->tk_status = 0; 8109 case 0: 8110 break; 8111 case -NFS4ERR_DELAY: 8112 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) 8113 break; 8114 rpc_restart_call_prepare(task); 8115 return; 8116 } 8117 dprintk("<-- %s\n", __func__); 8118 } 8119 8120 static void nfs4_layoutreturn_release(void *calldata) 8121 { 8122 struct nfs4_layoutreturn *lrp = calldata; 8123 struct pnfs_layout_hdr *lo = lrp->args.layout; 8124 LIST_HEAD(freeme); 8125 8126 dprintk("--> %s\n", __func__); 8127 spin_lock(&lo->plh_inode->i_lock); 8128 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range, 8129 be32_to_cpu(lrp->args.stateid.seqid)); 8130 pnfs_mark_layout_returned_if_empty(lo); 8131 if (lrp->res.lrs_present) 8132 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 8133 pnfs_clear_layoutreturn_waitbit(lo); 8134 spin_unlock(&lo->plh_inode->i_lock); 8135 pnfs_free_lseg_list(&freeme); 8136 pnfs_put_layout_hdr(lrp->args.layout); 8137 nfs_iput_and_deactive(lrp->inode); 8138 kfree(calldata); 8139 dprintk("<-- %s\n", __func__); 8140 } 8141 8142 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 8143 .rpc_call_prepare = 
nfs4_layoutreturn_prepare, 8144 .rpc_call_done = nfs4_layoutreturn_done, 8145 .rpc_release = nfs4_layoutreturn_release, 8146 }; 8147 8148 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync) 8149 { 8150 struct rpc_task *task; 8151 struct rpc_message msg = { 8152 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 8153 .rpc_argp = &lrp->args, 8154 .rpc_resp = &lrp->res, 8155 .rpc_cred = lrp->cred, 8156 }; 8157 struct rpc_task_setup task_setup_data = { 8158 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 8159 .rpc_message = &msg, 8160 .callback_ops = &nfs4_layoutreturn_call_ops, 8161 .callback_data = lrp, 8162 }; 8163 int status = 0; 8164 8165 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 8166 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 8167 &task_setup_data.rpc_client, &msg); 8168 8169 dprintk("--> %s\n", __func__); 8170 if (!sync) { 8171 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 8172 if (!lrp->inode) { 8173 nfs4_layoutreturn_release(lrp); 8174 return -EAGAIN; 8175 } 8176 task_setup_data.flags |= RPC_TASK_ASYNC; 8177 } 8178 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); 8179 task = rpc_run_task(&task_setup_data); 8180 if (IS_ERR(task)) 8181 return PTR_ERR(task); 8182 if (sync) 8183 status = task->tk_status; 8184 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 8185 dprintk("<-- %s status=%d\n", __func__, status); 8186 rpc_put_task(task); 8187 return status; 8188 } 8189 8190 static int 8191 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 8192 struct pnfs_device *pdev, 8193 struct rpc_cred *cred) 8194 { 8195 struct nfs4_getdeviceinfo_args args = { 8196 .pdev = pdev, 8197 .notify_types = NOTIFY_DEVICEID4_CHANGE | 8198 NOTIFY_DEVICEID4_DELETE, 8199 }; 8200 struct nfs4_getdeviceinfo_res res = { 8201 .pdev = pdev, 8202 }; 8203 struct rpc_message msg = { 8204 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 8205 .rpc_argp = &args, 8206 .rpc_resp = &res, 8207 .rpc_cred = cred, 8208 }; 8209 int status; 8210 8211 dprintk("--> %s\n", __func__); 8212 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 8213 if (res.notification & ~args.notify_types) 8214 dprintk("%s: unsupported notification\n", __func__); 8215 if (res.notification != args.notify_types) 8216 pdev->nocache = 1; 8217 8218 dprintk("<-- %s status=%d\n", __func__, status); 8219 8220 return status; 8221 } 8222 8223 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 8224 struct pnfs_device *pdev, 8225 struct rpc_cred *cred) 8226 { 8227 struct nfs4_exception exception = { }; 8228 int err; 8229 8230 do { 8231 err = nfs4_handle_exception(server, 8232 _nfs4_proc_getdeviceinfo(server, pdev, cred), 8233 &exception); 8234 } while (exception.retry); 8235 return err; 8236 } 8237 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 8238 8239 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 8240 { 8241 struct nfs4_layoutcommit_data *data = calldata; 8242 struct nfs_server *server = NFS_SERVER(data->args.inode); 8243 struct nfs4_session *session = nfs4_get_session(server); 8244 8245 nfs41_setup_sequence(session, 8246 &data->args.seq_args, 8247 &data->res.seq_res, 8248 task); 8249 } 8250 8251 static void 8252 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 8253 { 8254 struct nfs4_layoutcommit_data *data = calldata; 8255 struct nfs_server *server = NFS_SERVER(data->args.inode); 8256 8257 if (!nfs41_sequence_done(task, &data->res.seq_res)) 8258 return; 8259 8260 switch (task->tk_status) { /* Just 
ignore these failures */ 8261 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 8262 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 8263 case -NFS4ERR_BADLAYOUT: /* no layout */ 8264 case -NFS4ERR_GRACE: /* loca_reclaim always false */ 8265 task->tk_status = 0; 8266 case 0: 8267 break; 8268 default: 8269 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 8270 rpc_restart_call_prepare(task); 8271 return; 8272 } 8273 } 8274 } 8275 8276 static void nfs4_layoutcommit_release(void *calldata) 8277 { 8278 struct nfs4_layoutcommit_data *data = calldata; 8279 8280 pnfs_cleanup_layoutcommit(data); 8281 nfs_post_op_update_inode_force_wcc(data->args.inode, 8282 data->res.fattr); 8283 put_rpccred(data->cred); 8284 nfs_iput_and_deactive(data->inode); 8285 kfree(data); 8286 } 8287 8288 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 8289 .rpc_call_prepare = nfs4_layoutcommit_prepare, 8290 .rpc_call_done = nfs4_layoutcommit_done, 8291 .rpc_release = nfs4_layoutcommit_release, 8292 }; 8293 8294 int 8295 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 8296 { 8297 struct rpc_message msg = { 8298 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 8299 .rpc_argp = &data->args, 8300 .rpc_resp = &data->res, 8301 .rpc_cred = data->cred, 8302 }; 8303 struct rpc_task_setup task_setup_data = { 8304 .task = &data->task, 8305 .rpc_client = NFS_CLIENT(data->args.inode), 8306 .rpc_message = &msg, 8307 .callback_ops = &nfs4_layoutcommit_ops, 8308 .callback_data = data, 8309 }; 8310 struct rpc_task *task; 8311 int status = 0; 8312 8313 dprintk("NFS: initiating layoutcommit call. sync %d " 8314 "lbw: %llu inode %lu\n", sync, 8315 data->args.lastbytewritten, 8316 data->args.inode->i_ino); 8317 8318 if (!sync) { 8319 data->inode = nfs_igrab_and_active(data->args.inode); 8320 if (data->inode == NULL) { 8321 nfs4_layoutcommit_release(data); 8322 return -EAGAIN; 8323 } 8324 task_setup_data.flags = RPC_TASK_ASYNC; 8325 } 8326 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 8327 task = rpc_run_task(&task_setup_data); 8328 if (IS_ERR(task)) 8329 return PTR_ERR(task); 8330 if (sync) 8331 status = task->tk_status; 8332 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 8333 dprintk("%s: status %d\n", __func__, status); 8334 rpc_put_task(task); 8335 return status; 8336 } 8337 8338 /** 8339 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if 8340 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 8341 */ 8342 static int 8343 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 8344 struct nfs_fsinfo *info, 8345 struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8346 { 8347 struct nfs41_secinfo_no_name_args args = { 8348 .style = SECINFO_STYLE_CURRENT_FH, 8349 }; 8350 struct nfs4_secinfo_res res = { 8351 .flavors = flavors, 8352 }; 8353 struct rpc_message msg = { 8354 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 8355 .rpc_argp = &args, 8356 .rpc_resp = &res, 8357 }; 8358 struct rpc_clnt *clnt = server->client; 8359 struct rpc_cred *cred = NULL; 8360 int status; 8361 8362 if (use_integrity) { 8363 clnt = server->nfs_client->cl_rpcclient; 8364 cred = nfs4_get_clid_cred(server->nfs_client); 8365 msg.rpc_cred = cred; 8366 } 8367 8368 dprintk("--> %s\n", __func__); 8369 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 8370 &res.seq_res, 0); 8371 dprintk("<-- %s status=%d\n", __func__, status); 8372 8373 if (cred) 8374
put_rpccred(cred); 8375 8376 return status; 8377 } 8378 8379 static int 8380 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 8381 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 8382 { 8383 struct nfs4_exception exception = { }; 8384 int err; 8385 do { 8386 /* first try using integrity protection */ 8387 err = -NFS4ERR_WRONGSEC; 8388 8389 /* try to use integrity protection with machine cred */ 8390 if (_nfs4_is_integrity_protected(server->nfs_client)) 8391 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8392 flavors, true); 8393 8394 /* 8395 * if unable to use integrity protection, or SECINFO with 8396 * integrity protection returns NFS4ERR_WRONGSEC (which is 8397 * disallowed by spec, but exists in deployed servers) use 8398 * the current filesystem's rpc_client and the user cred. 8399 */ 8400 if (err == -NFS4ERR_WRONGSEC) 8401 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8402 flavors, false); 8403 8404 switch (err) { 8405 case 0: 8406 case -NFS4ERR_WRONGSEC: 8407 case -ENOTSUPP: 8408 goto out; 8409 default: 8410 err = nfs4_handle_exception(server, err, &exception); 8411 } 8412 } while (exception.retry); 8413 out: 8414 return err; 8415 } 8416 8417 static int 8418 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 8419 struct nfs_fsinfo *info) 8420 { 8421 int err; 8422 struct page *page; 8423 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 8424 struct nfs4_secinfo_flavors *flavors; 8425 struct nfs4_secinfo4 *secinfo; 8426 int i; 8427 8428 page = alloc_page(GFP_KERNEL); 8429 if (!page) { 8430 err = -ENOMEM; 8431 goto out; 8432 } 8433 8434 flavors = page_address(page); 8435 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 8436 8437 /* 8438 * Fall back on "guess and check" method if 8439 * the server doesn't support SECINFO_NO_NAME 8440 */ 8441 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 8442 err = nfs4_find_root_sec(server, fhandle, info); 8443 goto out_freepage; 8444 } 8445 if (err) 8446 goto out_freepage; 8447 8448 for (i = 0; i < flavors->num_flavors; i++) { 8449 secinfo = &flavors->flavors[i]; 8450 8451 switch (secinfo->flavor) { 8452 case RPC_AUTH_NULL: 8453 case RPC_AUTH_UNIX: 8454 case RPC_AUTH_GSS: 8455 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 8456 &secinfo->flavor_info); 8457 break; 8458 default: 8459 flavor = RPC_AUTH_MAXFLAVOR; 8460 break; 8461 } 8462 8463 if (!nfs_auth_info_match(&server->auth_info, flavor)) 8464 flavor = RPC_AUTH_MAXFLAVOR; 8465 8466 if (flavor != RPC_AUTH_MAXFLAVOR) { 8467 err = nfs4_lookup_root_sec(server, fhandle, 8468 info, flavor); 8469 if (!err) 8470 break; 8471 } 8472 } 8473 8474 if (flavor == RPC_AUTH_MAXFLAVOR) 8475 err = -EPERM; 8476 8477 out_freepage: 8478 put_page(page); 8479 if (err == -EACCES) 8480 return -EPERM; 8481 out: 8482 return err; 8483 } 8484 8485 static int _nfs41_test_stateid(struct nfs_server *server, 8486 nfs4_stateid *stateid, 8487 struct rpc_cred *cred) 8488 { 8489 int status; 8490 struct nfs41_test_stateid_args args = { 8491 .stateid = stateid, 8492 }; 8493 struct nfs41_test_stateid_res res; 8494 struct rpc_message msg = { 8495 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 8496 .rpc_argp = &args, 8497 .rpc_resp = &res, 8498 .rpc_cred = cred, 8499 }; 8500 struct rpc_clnt *rpc_client = server->client; 8501 8502 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8503 &rpc_client, &msg); 8504 8505 dprintk("NFS call test_stateid %p\n", stateid); 8506 nfs4_init_sequence(&args.seq_args, 
&res.seq_res, 0); 8507 nfs4_set_sequence_privileged(&args.seq_args); 8508 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 8509 &args.seq_args, &res.seq_res); 8510 if (status != NFS_OK) { 8511 dprintk("NFS reply test_stateid: failed, %d\n", status); 8512 return status; 8513 } 8514 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 8515 return -res.status; 8516 } 8517 8518 /** 8519 * nfs41_test_stateid - perform a TEST_STATEID operation 8520 * 8521 * @server: server / transport on which to perform the operation 8522 * @stateid: state ID to test 8523 * @cred: credential 8524 * 8525 * Returns NFS_OK if the server recognizes that "stateid" is valid. 8526 * Otherwise a negative NFS4ERR value is returned if the operation 8527 * failed or the state ID is not currently valid. 8528 */ 8529 static int nfs41_test_stateid(struct nfs_server *server, 8530 nfs4_stateid *stateid, 8531 struct rpc_cred *cred) 8532 { 8533 struct nfs4_exception exception = { }; 8534 int err; 8535 do { 8536 err = _nfs41_test_stateid(server, stateid, cred); 8537 if (err != -NFS4ERR_DELAY) 8538 break; 8539 nfs4_handle_exception(server, err, &exception); 8540 } while (exception.retry); 8541 return err; 8542 } 8543 8544 struct nfs_free_stateid_data { 8545 struct nfs_server *server; 8546 struct nfs41_free_stateid_args args; 8547 struct nfs41_free_stateid_res res; 8548 }; 8549 8550 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 8551 { 8552 struct nfs_free_stateid_data *data = calldata; 8553 nfs41_setup_sequence(nfs4_get_session(data->server), 8554 &data->args.seq_args, 8555 &data->res.seq_res, 8556 task); 8557 } 8558 8559 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 8560 { 8561 struct nfs_free_stateid_data *data = calldata; 8562 8563 nfs41_sequence_done(task, &data->res.seq_res); 8564 8565 switch (task->tk_status) { 8566 case -NFS4ERR_DELAY: 8567 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 8568 rpc_restart_call_prepare(task); 8569 } 8570 } 8571 8572 static void nfs41_free_stateid_release(void *calldata) 8573 { 8574 kfree(calldata); 8575 } 8576 8577 static const struct rpc_call_ops nfs41_free_stateid_ops = { 8578 .rpc_call_prepare = nfs41_free_stateid_prepare, 8579 .rpc_call_done = nfs41_free_stateid_done, 8580 .rpc_release = nfs41_free_stateid_release, 8581 }; 8582 8583 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server, 8584 nfs4_stateid *stateid, 8585 struct rpc_cred *cred, 8586 bool privileged) 8587 { 8588 struct rpc_message msg = { 8589 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 8590 .rpc_cred = cred, 8591 }; 8592 struct rpc_task_setup task_setup = { 8593 .rpc_client = server->client, 8594 .rpc_message = &msg, 8595 .callback_ops = &nfs41_free_stateid_ops, 8596 .flags = RPC_TASK_ASYNC, 8597 }; 8598 struct nfs_free_stateid_data *data; 8599 8600 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8601 &task_setup.rpc_client, &msg); 8602 8603 dprintk("NFS call free_stateid %p\n", stateid); 8604 data = kmalloc(sizeof(*data), GFP_NOFS); 8605 if (!data) 8606 return ERR_PTR(-ENOMEM); 8607 data->server = server; 8608 nfs4_stateid_copy(&data->args.stateid, stateid); 8609 8610 task_setup.callback_data = data; 8611 8612 msg.rpc_argp = &data->args; 8613 msg.rpc_resp = &data->res; 8614 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 8615 if (privileged) 8616 nfs4_set_sequence_privileged(&data->args.seq_args); 8617 8618 return rpc_run_task(&task_setup); 8619 } 8620 
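/*
 * Minimal usage sketch (hypothetical helper, not part of this file):
 * when TEST_STATEID reports that a stateid is no longer valid, the
 * client frees it with FREE_STATEID so the server can release the
 * resources backing it.
 *
 *	static int example_check_and_free(struct nfs_server *server,
 *					  nfs4_stateid *stateid,
 *					  struct rpc_cred *cred)
 *	{
 *		int status = nfs41_test_stateid(server, stateid, cred);
 *
 *		if (status != NFS_OK)
 *			status = nfs41_free_stateid(server, stateid, cred);
 *		return status;
 *	}
 */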
8621 /** 8622 * nfs41_free_stateid - perform a FREE_STATEID operation 8623 * 8624 * @server: server / transport on which to perform the operation 8625 * @stateid: state ID to release 8626 * @cred: credential 8627 * 8628 * Returns NFS_OK if the server freed "stateid". Otherwise a 8629 * negative NFS4ERR value is returned. 8630 */ 8631 static int nfs41_free_stateid(struct nfs_server *server, 8632 nfs4_stateid *stateid, 8633 struct rpc_cred *cred) 8634 { 8635 struct rpc_task *task; 8636 int ret; 8637 8638 task = _nfs41_free_stateid(server, stateid, cred, true); 8639 if (IS_ERR(task)) 8640 return PTR_ERR(task); 8641 ret = rpc_wait_for_completion_task(task); 8642 if (!ret) 8643 ret = task->tk_status; 8644 rpc_put_task(task); 8645 return ret; 8646 } 8647 8648 static void 8649 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 8650 { 8651 struct rpc_task *task; 8652 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 8653 8654 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 8655 nfs4_free_lock_state(server, lsp); 8656 if (IS_ERR(task)) 8657 return; 8658 rpc_put_task(task); 8659 } 8660 8661 static bool nfs41_match_stateid(const nfs4_stateid *s1, 8662 const nfs4_stateid *s2) 8663 { 8664 if (s1->type != s2->type) 8665 return false; 8666 8667 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 8668 return false; 8669 8670 if (s1->seqid == s2->seqid) 8671 return true; 8672 if (s1->seqid == 0 || s2->seqid == 0) 8673 return true; 8674 8675 return false; 8676 } 8677 8678 #endif /* CONFIG_NFS_V4_1 */ 8679 8680 static bool nfs4_match_stateid(const nfs4_stateid *s1, 8681 const nfs4_stateid *s2) 8682 { 8683 return nfs4_stateid_match(s1, s2); 8684 } 8685 8686 8687 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 8688 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8689 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8690 .recover_open = nfs4_open_reclaim, 8691 .recover_lock = nfs4_lock_reclaim, 8692 .establish_clid = nfs4_init_clientid, 8693 .detect_trunking = nfs40_discover_server_trunking, 8694 }; 8695 8696 #if defined(CONFIG_NFS_V4_1) 8697 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 8698 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8699 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8700 .recover_open = nfs4_open_reclaim, 8701 .recover_lock = nfs4_lock_reclaim, 8702 .establish_clid = nfs41_init_clientid, 8703 .reclaim_complete = nfs41_proc_reclaim_complete, 8704 .detect_trunking = nfs41_discover_server_trunking, 8705 }; 8706 #endif /* CONFIG_NFS_V4_1 */ 8707 8708 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 8709 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8710 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8711 .recover_open = nfs40_open_expired, 8712 .recover_lock = nfs4_lock_expired, 8713 .establish_clid = nfs4_init_clientid, 8714 }; 8715 8716 #if defined(CONFIG_NFS_V4_1) 8717 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 8718 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8719 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8720 .recover_open = nfs41_open_expired, 8721 .recover_lock = nfs41_lock_expired, 8722 .establish_clid = nfs41_init_clientid, 8723 }; 8724 #endif /* CONFIG_NFS_V4_1 */ 8725 8726 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 8727 .sched_state_renewal = nfs4_proc_async_renew, 8728 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, 8729 .renew_lease = nfs4_proc_renew, 8730 }; 8731 8732 #if 
defined(CONFIG_NFS_V4_1) 8733 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 8734 .sched_state_renewal = nfs41_proc_async_sequence, 8735 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, 8736 .renew_lease = nfs4_proc_sequence, 8737 }; 8738 #endif 8739 8740 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 8741 .get_locations = _nfs40_proc_get_locations, 8742 .fsid_present = _nfs40_proc_fsid_present, 8743 }; 8744 8745 #if defined(CONFIG_NFS_V4_1) 8746 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 8747 .get_locations = _nfs41_proc_get_locations, 8748 .fsid_present = _nfs41_proc_fsid_present, 8749 }; 8750 #endif /* CONFIG_NFS_V4_1 */ 8751 8752 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 8753 .minor_version = 0, 8754 .init_caps = NFS_CAP_READDIRPLUS 8755 | NFS_CAP_ATOMIC_OPEN 8756 | NFS_CAP_POSIX_LOCK, 8757 .init_client = nfs40_init_client, 8758 .shutdown_client = nfs40_shutdown_client, 8759 .match_stateid = nfs4_match_stateid, 8760 .find_root_sec = nfs4_find_root_sec, 8761 .free_lock_state = nfs4_release_lockowner, 8762 .alloc_seqid = nfs_alloc_seqid, 8763 .call_sync_ops = &nfs40_call_sync_ops, 8764 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 8765 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 8766 .state_renewal_ops = &nfs40_state_renewal_ops, 8767 .mig_recovery_ops = &nfs40_mig_recovery_ops, 8768 }; 8769 8770 #if defined(CONFIG_NFS_V4_1) 8771 static struct nfs_seqid * 8772 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 8773 { 8774 return NULL; 8775 } 8776 8777 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 8778 .minor_version = 1, 8779 .init_caps = NFS_CAP_READDIRPLUS 8780 | NFS_CAP_ATOMIC_OPEN 8781 | NFS_CAP_POSIX_LOCK 8782 | NFS_CAP_STATEID_NFSV41 8783 | NFS_CAP_ATOMIC_OPEN_V1, 8784 .init_client = nfs41_init_client, 8785 .shutdown_client = nfs41_shutdown_client, 8786 .match_stateid = nfs41_match_stateid, 8787 .find_root_sec = nfs41_find_root_sec, 8788 .free_lock_state = nfs41_free_lock_state, 8789 .alloc_seqid = nfs_alloc_no_seqid, 8790 .call_sync_ops = &nfs41_call_sync_ops, 8791 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8792 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8793 .state_renewal_ops = &nfs41_state_renewal_ops, 8794 .mig_recovery_ops = &nfs41_mig_recovery_ops, 8795 }; 8796 #endif 8797 8798 #if defined(CONFIG_NFS_V4_2) 8799 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 8800 .minor_version = 2, 8801 .init_caps = NFS_CAP_READDIRPLUS 8802 | NFS_CAP_ATOMIC_OPEN 8803 | NFS_CAP_POSIX_LOCK 8804 | NFS_CAP_STATEID_NFSV41 8805 | NFS_CAP_ATOMIC_OPEN_V1 8806 | NFS_CAP_ALLOCATE 8807 | NFS_CAP_COPY 8808 | NFS_CAP_DEALLOCATE 8809 | NFS_CAP_SEEK 8810 | NFS_CAP_LAYOUTSTATS 8811 | NFS_CAP_CLONE, 8812 .init_client = nfs41_init_client, 8813 .shutdown_client = nfs41_shutdown_client, 8814 .match_stateid = nfs41_match_stateid, 8815 .find_root_sec = nfs41_find_root_sec, 8816 .free_lock_state = nfs41_free_lock_state, 8817 .call_sync_ops = &nfs41_call_sync_ops, 8818 .alloc_seqid = nfs_alloc_no_seqid, 8819 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8820 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8821 .state_renewal_ops = &nfs41_state_renewal_ops, 8822 .mig_recovery_ops = &nfs41_mig_recovery_ops, 8823 }; 8824 #endif 8825 8826 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 8827 [0] = &nfs_v4_0_minor_ops, 8828 #if defined(CONFIG_NFS_V4_1) 8829 [1] = &nfs_v4_1_minor_ops, 8830 #endif 8831 #if 
defined(CONFIG_NFS_V4_2) 8832 [2] = &nfs_v4_2_minor_ops, 8833 #endif 8834 }; 8835 8836 ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 8837 { 8838 ssize_t error, error2; 8839 8840 error = generic_listxattr(dentry, list, size); 8841 if (error < 0) 8842 return error; 8843 if (list) { 8844 list += error; 8845 size -= error; 8846 } 8847 8848 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size); 8849 if (error2 < 0) 8850 return error2; 8851 return error + error2; 8852 } 8853 8854 static const struct inode_operations nfs4_dir_inode_operations = { 8855 .create = nfs_create, 8856 .lookup = nfs_lookup, 8857 .atomic_open = nfs_atomic_open, 8858 .link = nfs_link, 8859 .unlink = nfs_unlink, 8860 .symlink = nfs_symlink, 8861 .mkdir = nfs_mkdir, 8862 .rmdir = nfs_rmdir, 8863 .mknod = nfs_mknod, 8864 .rename = nfs_rename, 8865 .permission = nfs_permission, 8866 .getattr = nfs_getattr, 8867 .setattr = nfs_setattr, 8868 .getxattr = generic_getxattr, 8869 .setxattr = generic_setxattr, 8870 .listxattr = nfs4_listxattr, 8871 .removexattr = generic_removexattr, 8872 }; 8873 8874 static const struct inode_operations nfs4_file_inode_operations = { 8875 .permission = nfs_permission, 8876 .getattr = nfs_getattr, 8877 .setattr = nfs_setattr, 8878 .getxattr = generic_getxattr, 8879 .setxattr = generic_setxattr, 8880 .listxattr = nfs4_listxattr, 8881 .removexattr = generic_removexattr, 8882 }; 8883 8884 const struct nfs_rpc_ops nfs_v4_clientops = { 8885 .version = 4, /* protocol version */ 8886 .dentry_ops = &nfs4_dentry_operations, 8887 .dir_inode_ops = &nfs4_dir_inode_operations, 8888 .file_inode_ops = &nfs4_file_inode_operations, 8889 .file_ops = &nfs4_file_operations, 8890 .getroot = nfs4_proc_get_root, 8891 .submount = nfs4_submount, 8892 .try_mount = nfs4_try_mount, 8893 .getattr = nfs4_proc_getattr, 8894 .setattr = nfs4_proc_setattr, 8895 .lookup = nfs4_proc_lookup, 8896 .access = nfs4_proc_access, 8897 .readlink = nfs4_proc_readlink, 8898 .create = nfs4_proc_create, 8899 .remove = nfs4_proc_remove, 8900 .unlink_setup = nfs4_proc_unlink_setup, 8901 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 8902 .unlink_done = nfs4_proc_unlink_done, 8903 .rename_setup = nfs4_proc_rename_setup, 8904 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 8905 .rename_done = nfs4_proc_rename_done, 8906 .link = nfs4_proc_link, 8907 .symlink = nfs4_proc_symlink, 8908 .mkdir = nfs4_proc_mkdir, 8909 .rmdir = nfs4_proc_remove, 8910 .readdir = nfs4_proc_readdir, 8911 .mknod = nfs4_proc_mknod, 8912 .statfs = nfs4_proc_statfs, 8913 .fsinfo = nfs4_proc_fsinfo, 8914 .pathconf = nfs4_proc_pathconf, 8915 .set_capabilities = nfs4_server_capabilities, 8916 .decode_dirent = nfs4_decode_dirent, 8917 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 8918 .read_setup = nfs4_proc_read_setup, 8919 .read_done = nfs4_read_done, 8920 .write_setup = nfs4_proc_write_setup, 8921 .write_done = nfs4_write_done, 8922 .commit_setup = nfs4_proc_commit_setup, 8923 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 8924 .commit_done = nfs4_commit_done, 8925 .lock = nfs4_proc_lock, 8926 .clear_acl_cache = nfs4_zap_acl_attr, 8927 .close_context = nfs4_close_context, 8928 .open_context = nfs4_atomic_open, 8929 .have_delegation = nfs4_have_delegation, 8930 .return_delegation = nfs4_inode_return_delegation, 8931 .alloc_client = nfs4_alloc_client, 8932 .init_client = nfs4_init_client, 8933 .free_client = nfs4_free_client, 8934 .create_server = nfs4_create_server, 8935 .clone_server = nfs_clone_server, 8936 }; 8937 8938 static 
const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 8939 .name = XATTR_NAME_NFSV4_ACL, 8940 .list = nfs4_xattr_list_nfs4_acl, 8941 .get = nfs4_xattr_get_nfs4_acl, 8942 .set = nfs4_xattr_set_nfs4_acl, 8943 }; 8944 8945 const struct xattr_handler *nfs4_xattr_handlers[] = { 8946 &nfs4_xattr_nfs4_acl_handler, 8947 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 8948 &nfs4_xattr_nfs4_label_handler, 8949 #endif 8950 NULL 8951 }; 8952 8953 /* 8954 * Local variables: 8955 * c-basic-offset: 8 8956 * End: 8957 */ 8958