/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson   <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

/* file attributes which can be mapped to nfs attributes */
#define NFS4_VALID_ATTRS (ATTR_MODE \
	| ATTR_UID \
	| ATTR_GID \
	| ATTR_SIZE \
	| ATTR_ATIME \
	| ATTR_MTIME \
	| ATTR_CTIME \
	| ATTR_ATIME_SET \
	| ATTR_MTIME_SET)

struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state, struct nfs4_label *ilabel,
			    struct nfs4_label *olabel);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
		struct rpc_cred *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
		struct rpc_cred *);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, (void **)&label->label, &label->len);
	if (err == 0)
		return label;

	return NULL;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	if (label)
		security_release_secctx(label->label, label->len);
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	| FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
			| FATTR4_WORD2_CLONE_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
		*p++ = htonl(8);			/* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);		/* bitmap */
	*p++ = htonl(8);				/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	int res = 0;

	might_sleep();

	freezable_schedule_timeout_killable_unsafe(
		nfs4_update_delay(timeout));
	if (fatal_signal_pending(current))
		res = -ERESTARTSYS;
	return res;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_do_handle_exception(struct nfs_server *server,
		int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	const nfs4_stateid *stateid = exception->stateid;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->delay = 0;
	exception->recovering = 0;
	exception->retry = 0;
	switch(errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_OPENMODE:
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (inode) {
			int err;

			err = nfs_async_inode_return_delegation(inode,
					stateid);
			if (err == 0)
				goto wait_on_recovery;
			if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
				exception->retry = 1;
				break;
			}
		}
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			ret = nfs4_schedule_stateid_recovery(server, state);
			if (ret < 0)
				break;
		}
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
	case -NFS4ERR_MOVED:
		ret = nfs4_schedule_migration_recovery(server);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR: %d Reset session\n", __func__, 425 errorcode); 426 nfs4_schedule_session_recovery(clp->cl_session, errorcode); 427 goto wait_on_recovery; 428 #endif /* defined(CONFIG_NFS_V4_1) */ 429 case -NFS4ERR_FILE_OPEN: 430 if (exception->timeout > HZ) { 431 /* We have retried a decent amount, time to 432 * fail 433 */ 434 ret = -EBUSY; 435 break; 436 } 437 case -NFS4ERR_DELAY: 438 nfs_inc_server_stats(server, NFSIOS_DELAY); 439 case -NFS4ERR_GRACE: 440 case -NFS4ERR_LAYOUTTRYLATER: 441 case -NFS4ERR_RECALLCONFLICT: 442 exception->delay = 1; 443 return 0; 444 445 case -NFS4ERR_RETRY_UNCACHED_REP: 446 case -NFS4ERR_OLD_STATEID: 447 exception->retry = 1; 448 break; 449 case -NFS4ERR_BADOWNER: 450 /* The following works around a Linux server bug! */ 451 case -NFS4ERR_BADNAME: 452 if (server->caps & NFS_CAP_UIDGID_NOMAP) { 453 server->caps &= ~NFS_CAP_UIDGID_NOMAP; 454 exception->retry = 1; 455 printk(KERN_WARNING "NFS: v4 server %s " 456 "does not accept raw " 457 "uid/gids. " 458 "Reenabling the idmapper.\n", 459 server->nfs_client->cl_hostname); 460 } 461 } 462 /* We failed to handle the error */ 463 return nfs4_map_errors(ret); 464 wait_on_recovery: 465 exception->recovering = 1; 466 return 0; 467 } 468 469 /* This is the error handling routine for processes that are allowed 470 * to sleep. 471 */ 472 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) 473 { 474 struct nfs_client *clp = server->nfs_client; 475 int ret; 476 477 ret = nfs4_do_handle_exception(server, errorcode, exception); 478 if (exception->delay) { 479 ret = nfs4_delay(server->client, &exception->timeout); 480 goto out_retry; 481 } 482 if (exception->recovering) { 483 ret = nfs4_wait_clnt_recover(clp); 484 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 485 return -EIO; 486 goto out_retry; 487 } 488 return ret; 489 out_retry: 490 if (ret == 0) 491 exception->retry = 1; 492 return ret; 493 } 494 495 static int 496 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server, 497 int errorcode, struct nfs4_exception *exception) 498 { 499 struct nfs_client *clp = server->nfs_client; 500 int ret; 501 502 ret = nfs4_do_handle_exception(server, errorcode, exception); 503 if (exception->delay) { 504 rpc_delay(task, nfs4_update_delay(&exception->timeout)); 505 goto out_retry; 506 } 507 if (exception->recovering) { 508 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 509 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 510 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 511 goto out_retry; 512 } 513 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 514 ret = -EIO; 515 return ret; 516 out_retry: 517 if (ret == 0) 518 exception->retry = 1; 519 return ret; 520 } 521 522 static int 523 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server, 524 struct nfs4_state *state, long *timeout) 525 { 526 struct nfs4_exception exception = { 527 .state = state, 528 }; 529 530 if (task->tk_status >= 0) 531 return 0; 532 if (timeout) 533 exception.timeout = *timeout; 534 task->tk_status = nfs4_async_handle_exception(task, server, 535 task->tk_status, 536 &exception); 537 if (exception.delay && timeout) 538 *timeout = exception.timeout; 539 if (exception.retry) 540 return -EAGAIN; 541 return 0; 542 } 543 544 /* 545 * Return 'true' if 'clp' is using an rpc_client that is integrity protected 546 * or 'false' otherwise. 
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;

	if (flavor == RPC_AUTH_GSS_KRB5I ||
	    flavor == RPC_AUTH_GSS_KRB5P)
		return true;

	return false;
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_has_session(clp))
		do_renew_lease(clp, timestamp);
}

struct nfs4_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

void nfs4_init_sequence(struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res, int cache_reply)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = 0;

	res->sr_slot = NULL;
}

static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
{
	args->sa_privileged = 1;
}

int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
			 struct nfs4_sequence_args *args,
			 struct nfs4_sequence_res *res,
			 struct rpc_task *task)
{
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	spin_lock(&tbl->slot_tbl_lock);
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			task->tk_timeout = HZ >> 2;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	slot->privileged = args->sa_privileged ? 1 : 0;
	args->sa_slot = slot;
	res->sr_slot = slot;

out_start:
	rpc_call_start(task);
	return 0;

out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				NULL, RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs40_setup_sequence);

static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);

	res->sr_slot = NULL;
}

static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot = res->sr_slot;
	bool send_new_highest_used_slotid = false;

	tbl = slot->table;
	session = tbl->session;

	/* Bump the slot sequence number */
	if (slot->seq_done)
		slot->seq_nr++;
	slot->seq_done = 0;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_user_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
	if (send_new_highest_used_slotid)
		nfs41_notify_server(session->clp);
	if (waitqueue_active(&tbl->slot_waitq))
		wake_up_all(&tbl->slot_waitq);
}

static int nfs41_sequence_process(struct rpc_task *task,
		struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	bool interrupted = false;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	session = slot->table->session;

	if (slot->interrupted) {
		slot->interrupted = 0;
		interrupted = true;
	}

	trace_nfs4_sequence_done(session, res);
	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		slot->seq_done = 1;
		clp = session->clp;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
				!!slot->privileged);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation..
		 * Mark the slot as having hosted an interrupted RPC call.
		 */
		slot->interrupted = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		/*
		 * Was the last operation on this sequence interrupted?
		 * If so, retry after bumping the sequence number.
		 */
		if (interrupted) {
			++slot->seq_nr;
			goto retry_nowait;
		}
		/*
		 * Could this slot have been previously retired?
		 * If so, then the server may be expecting seq_nr = 1!
		 */
		if (slot->seq_nr != 1) {
			slot->seq_nr = 1;
			goto retry_nowait;
		}
		break;
	case -NFS4ERR_SEQ_FALSE_RETRY:
		++slot->seq_nr;
		goto retry_nowait;
	default:
		/* Just update the slot sequence no. */
		slot->seq_done = 1;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
out_noaction:
	return ret;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		nfs41_sequence_free_slot(res);
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (!nfs41_sequence_process(task, res))
		return 0;
	if (res->sr_slot != NULL)
		nfs41_sequence_free_slot(res);
	return 1;

}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (res->sr_slot->table->session != NULL)
		return nfs41_sequence_process(task, res);
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL) {
		if (res->sr_slot->table->session != NULL)
			nfs41_sequence_free_slot(res);
		else
			nfs40_sequence_free_slot(res);
	}
}

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (!res->sr_slot->table->session)
		return nfs40_sequence_done(task, res);
	return nfs41_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

int nfs41_setup_sequence(struct nfs4_session *session,
			 struct nfs4_sequence_args *args,
			 struct nfs4_sequence_res *res,
			 struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_success;

	tbl = &session->fc_slot_table;

	task->tk_timeout = 0;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
	    !args->sa_privileged) {
		/* The state manager will wait until the slot table is empty */
		dprintk("%s session is draining\n", __func__);
		goto out_sleep;
	}

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		/* If out of memory, try again in 1/4 second */
		if (slot == ERR_PTR(-ENOMEM))
			task->tk_timeout = HZ >> 2;
		dprintk("<-- %s: no free slots\n", __func__);
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	slot->privileged = args->sa_privileged ? 1 : 0;
	args->sa_slot = slot;

	dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
		slot->slot_nr, slot->seq_nr);

	res->sr_slot = slot;
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
	trace_nfs4_setup_sequence(session, args);
out_success:
	rpc_call_start(task);
	return 0;
out_sleep:
	/* Privileged tasks are queued with top priority */
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				NULL, RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);

static int nfs4_setup_sequence(const struct nfs_server *server,
			       struct nfs4_sequence_args *args,
			       struct nfs4_sequence_res *res,
			       struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(server);
	int ret = 0;

	if (!session)
		return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
					    args, res, task);

	dprintk("--> %s clp %p session %p sr_slot %u\n",
		__func__, session->clp, session, res->sr_slot ?
			res->sr_slot->slot_nr : NFS4_NO_SLOT);
	ret = nfs41_setup_sequence(session, args, res, task);

	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	struct nfs4_session *session = nfs4_get_session(data->seq_server);

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

#else	/* !CONFIG_NFS_V4_1 */

static int nfs4_setup_sequence(const struct nfs_server *server,
			       struct nfs4_sequence_args *args,
			       struct nfs4_sequence_res *res,
			       struct rpc_task *task)
{
	return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
				    args, res, task);
}

static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}

static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
{
	if (res->sr_slot != NULL)
		nfs40_sequence_free_slot(res);
}

int nfs4_sequence_done(struct rpc_task *task,
		       struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

#endif	/* !CONFIG_NFS_V4_1 */

static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server,
				data->seq_args, data->seq_res, task);
}

static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	int ret;
	struct rpc_task *task;
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data
	};

	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else {
		ret = task->tk_status;
		rpc_put_task(task);
	}
	return ret;
}

int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	nfs_fscache_invalidate(dir);
	spin_unlock(&dir->i_lock);
}

struct nfs4_opendata {
	struct kref kref;
	struct nfs_openargs o_arg;
	struct nfs_openres o_res;
	struct nfs_open_confirmargs c_arg;
	struct nfs_open_confirmres c_res;
	struct nfs4_string owner_name;
	struct nfs4_string group_name;
	struct nfs4_label *a_label;
	struct nfs_fattr f_attr;
	struct nfs4_label *f_label;
	struct dentry *dir;
	struct dentry *dentry;
	struct nfs4_state_owner *owner;
	struct nfs4_state *state;
	struct iattr attrs;
	unsigned long timestamp;
	unsigned int rpc_done : 1;
	unsigned int file_created : 1;
	unsigned int is_recover : 1;
	int rpc_status;
	int cancelled;
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT)
		res |= NFS4_SHARE_WANT_NO_DELEG;
out:
	return res;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.f_label = p->f_label;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		struct nfs4_label *label,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
			fmode, flags);
	/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
	 * will return permission denied for all bits until close */
	if (!(flags & O_EXCL)) {
		/* ask server to check for all possible rights as results
		 * are cached */
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	p->o_arg.label = nfs4_label_copy(p->a_label, label);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	if (attrs != NULL && attrs->ia_valid != 0) {
		__u32 verf[2];

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));

		verf[0] = jiffies;
		verf[1] = current->pid;
		memcpy(p->o_arg.u.verifier.data, verf,
				sizeof(p->o_arg.u.verifier.data));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	nfs4_sequence_free_slot(&p->o_res.seq_res);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	int ret;

	ret = rpc_wait_for_completion_task(task);
	return ret;
}

static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
		fmode_t fmode)
{
	switch(fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ|FMODE_WRITE:
		return state->n_rdwr != 0;
	case FMODE_WRITE:
		return state->n_wronly != 0;
	case FMODE_READ:
		return state->n_rdonly != 0;
	}
	WARN_ON_ONCE(1);
	return false;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
		enum open_claim_type4 claim)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
	default:
		return 0;
	}
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

static bool nfs_need_update_open_stateid(struct nfs4_state *state,
		nfs4_stateid *stateid)
{
	if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
		return true;
	if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs_test_and_clear_all_open_stateid(state);
		return true;
	}
	if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
		return true;
	return false;
}

static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
		return;
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
&state->flags); 1398 set_bit(NFS_OPEN_STATE, &state->flags); 1399 } 1400 1401 static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1402 nfs4_stateid *arg_stateid, 1403 nfs4_stateid *stateid, fmode_t fmode) 1404 { 1405 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1406 switch (fmode & (FMODE_READ|FMODE_WRITE)) { 1407 case FMODE_WRITE: 1408 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1409 break; 1410 case FMODE_READ: 1411 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1412 break; 1413 case 0: 1414 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1415 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1416 clear_bit(NFS_OPEN_STATE, &state->flags); 1417 } 1418 if (stateid == NULL) 1419 return; 1420 /* Handle races with OPEN */ 1421 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || 1422 (nfs4_stateid_match_other(stateid, &state->open_stateid) && 1423 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) { 1424 nfs_resync_open_stateid_locked(state); 1425 return; 1426 } 1427 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1428 nfs4_stateid_copy(&state->stateid, stateid); 1429 nfs4_stateid_copy(&state->open_stateid, stateid); 1430 } 1431 1432 static void nfs_clear_open_stateid(struct nfs4_state *state, 1433 nfs4_stateid *arg_stateid, 1434 nfs4_stateid *stateid, fmode_t fmode) 1435 { 1436 write_seqlock(&state->seqlock); 1437 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); 1438 write_sequnlock(&state->seqlock); 1439 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) 1440 nfs4_schedule_state_manager(state->owner->so_server->nfs_client); 1441 } 1442 1443 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) 1444 { 1445 switch (fmode) { 1446 case FMODE_READ: 1447 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1448 break; 1449 case FMODE_WRITE: 1450 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1451 break; 1452 case FMODE_READ|FMODE_WRITE: 1453 set_bit(NFS_O_RDWR_STATE, &state->flags); 1454 } 1455 if (!nfs_need_update_open_stateid(state, stateid)) 1456 return; 1457 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1458 nfs4_stateid_copy(&state->stateid, stateid); 1459 nfs4_stateid_copy(&state->open_stateid, stateid); 1460 } 1461 1462 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode) 1463 { 1464 /* 1465 * Protect the call to nfs4_state_set_mode_locked and 1466 * serialise the stateid update 1467 */ 1468 spin_lock(&state->owner->so_lock); 1469 write_seqlock(&state->seqlock); 1470 if (deleg_stateid != NULL) { 1471 nfs4_stateid_copy(&state->stateid, deleg_stateid); 1472 set_bit(NFS_DELEGATED_STATE, &state->flags); 1473 } 1474 if (open_stateid != NULL) 1475 nfs_set_open_stateid_locked(state, open_stateid, fmode); 1476 write_sequnlock(&state->seqlock); 1477 update_open_stateflags(state, fmode); 1478 spin_unlock(&state->owner->so_lock); 1479 } 1480 1481 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode) 1482 { 1483 struct nfs_inode *nfsi = NFS_I(state->inode); 1484 struct nfs_delegation *deleg_cur; 1485 int ret = 0; 1486 1487 fmode &= (FMODE_READ|FMODE_WRITE); 1488 1489 rcu_read_lock(); 1490 deleg_cur = rcu_dereference(nfsi->delegation); 1491 if (deleg_cur == NULL) 1492 goto no_delegation; 1493 1494 spin_lock(&deleg_cur->lock); 1495 if (rcu_dereference(nfsi->delegation) != deleg_cur || 1496 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) 
	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	rcu_read_unlock();

	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);

	return ret;
}

static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
		const nfs4_stateid *stateid)
{
	struct nfs4_state *state = lsp->ls_state;
	bool ret = false;

	spin_lock(&state->state_lock);
	if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	nfs4_stateid_copy(&lsp->ls_stateid, stateid);
	ret = true;
out_noupdate:
	spin_unlock(&state->state_lock);
	return ret;
}

static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	nfs4_inode_return_delegation(inode);
}

static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags;
	fmode_t fmode = opendata->o_arg.fmode;
	enum open_claim_type4 claim = opendata->o_arg.claim;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		spin_lock(&state->owner->so_lock);
		if (can_open_cached(state, fmode, open_mode)) {
			update_open_stateflags(state, fmode);
			spin_unlock(&state->owner->so_lock);
			goto out_return_state;
		}
		spin_unlock(&state->owner->so_lock);
		rcu_read_lock();
		delegation = rcu_dereference(nfsi->delegation);
		if (!can_open_delegated(delegation, fmode, claim)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		rcu_read_unlock();
		nfs_release_seqid(opendata->o_arg.seqid);
		if (!opendata->is_recover) {
			ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
			if (ret != 0)
				goto out;
		}
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	atomic_inc(&state->count);
	return state;
}

static void
nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
{
	struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
	struct nfs_delegation *delegation;
	int delegation_flags = 0;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation)
		delegation_flags = delegation->flags;
	rcu_read_unlock();
	switch (data->o_arg.claim) {
	default:
		break;
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
				   "returning a delegation for "
				   "OPEN(CLAIM_DELEGATE_CUR)\n",
				   clp->cl_hostname);
		return;
	}
	if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
		nfs_inode_set_delegation(state->inode,
					 data->owner->so_cred,
					 &data->o_res);
	else
		nfs_inode_reclaim_delegation(state->inode,
					     data->owner->so_cred,
					     &data->o_res);
}

/*
 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
 * and update the nfs4_state.
 */
static struct nfs4_state *
_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode = data->state->inode;
	struct nfs4_state *state = data->state;
	int ret;

	if (!data->rpc_done) {
		if (data->rpc_status) {
			ret = data->rpc_status;
			goto err;
		}
		/* cached opens have already been processed */
		goto update;
	}

	ret = nfs_refresh_inode(inode, &data->f_attr);
	if (ret)
		goto err;

	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
update:
	update_open_stateid(state, &data->o_res.stateid, NULL,
			    data->o_arg.fmode);
	atomic_inc(&state->count);

	return state;
err:
	return ERR_PTR(ret);

}

static struct nfs4_state *
_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode;
	struct nfs4_state *state = NULL;
	int ret;

	if (!data->rpc_done) {
		state = nfs4_try_open_cached(data);
		trace_nfs4_cached_open(data->state);
		goto out;
	}

	ret = -EAGAIN;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
		goto err;
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
	ret = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto err;
	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err_put_inode;
	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
	update_open_stateid(state, &data->o_res.stateid, NULL,
			data->o_arg.fmode);
	iput(inode);
out:
	nfs_release_seqid(data->o_arg.seqid);
	return state;
err_put_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static struct nfs4_state *
nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct nfs4_state *ret;

	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
		ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
	else
		ret = _nfs4_opendata_to_nfs4_state(data);
	nfs4_sequence_free_slot(&data->o_res.seq_res);
	return ret;
}

static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_open_context *ctx;

	spin_lock(&state->inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&state->inode->i_lock);
		return ctx;
	}
	spin_unlock(&state->inode->i_lock);
	return ERR_PTR(-ENOENT);
}
static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
		struct nfs4_state *state, enum open_claim_type4 claim)
{
	struct nfs4_opendata *opendata;

	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
			NULL, NULL, claim, GFP_NOFS);
	if (opendata == NULL)
		return ERR_PTR(-ENOMEM);
	opendata->state = state;
	atomic_inc(&state->count);
	return opendata;
}

static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
		fmode_t fmode)
{
	struct nfs4_state *newstate;
	int ret;

	if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
		return 0;
	opendata->o_arg.open_flags = 0;
	opendata->o_arg.fmode = fmode;
	opendata->o_arg.share_access = nfs4_map_atomic_open_share(
			NFS_SB(opendata->dentry->d_sb),
			fmode, 0);
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	if (ret != 0)
		return ret;
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	if (newstate != opendata->state)
		ret = -ESTALE;
	nfs4_close_state(newstate, fmode);
	return ret;
}

static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	int ret;

	/* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
	smp_rmb();
	ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
	if (ret != 0)
		return ret;
	ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
	if (ret != 0)
		return ret;
	ret = nfs4_open_recover_helper(opendata, FMODE_READ);
	if (ret != 0)
		return ret;
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
	}
	return 0;
}

/*
 * OPEN_RECLAIM:
 *	reclaim state on the server after a reboot.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_PREVIOUS);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}

static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_do_open_reclaim(ctx, state);
		trace_nfs4_open_reclaim(ctx, 0, err);
		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
			continue;
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return -EAGAIN;
	ret = nfs4_do_open_reclaim(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}

static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
{
	switch (err) {
	default:
		printk(KERN_ERR "NFS: %s: unhandled error "
				"%d.\n", __func__, err);
	case 0:
	case -ENOENT:
	case -EAGAIN:
	case -ESTALE:
		break;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
		nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
		return -EAGAIN;
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_STALE_STATEID:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	case -NFS4ERR_EXPIRED:
		/* Don't recall a delegation if it was lost */
		nfs4_schedule_lease_recovery(server->nfs_client);
		return -EAGAIN;
	case -NFS4ERR_MOVED:
		nfs4_schedule_migration_recovery(server);
		return -EAGAIN;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(server->nfs_client);
		return -EAGAIN;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_OPENMODE:
		nfs_inode_find_state_and_recover(state->inode,
				stateid);
		nfs4_schedule_stateid_recovery(server, state);
		return -EAGAIN;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
		ssleep(1);
		return -EAGAIN;
	case -ENOMEM:
	case -NFS4ERR_DENIED:
		/* kill_proc(fl->fl_pid, SIGLOST, 1); */
		return 0;
	}
	return err;
}

int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
		struct nfs4_state *state, const nfs4_stateid *stateid,
		fmode_t type)
1929 { 1930 struct nfs_server *server = NFS_SERVER(state->inode); 1931 struct nfs4_opendata *opendata; 1932 int err = 0; 1933 1934 opendata = nfs4_open_recoverdata_alloc(ctx, state, 1935 NFS4_OPEN_CLAIM_DELEG_CUR_FH); 1936 if (IS_ERR(opendata)) 1937 return PTR_ERR(opendata); 1938 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 1939 write_seqlock(&state->seqlock); 1940 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1941 write_sequnlock(&state->seqlock); 1942 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1943 switch (type & (FMODE_READ|FMODE_WRITE)) { 1944 case FMODE_READ|FMODE_WRITE: 1945 case FMODE_WRITE: 1946 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); 1947 if (err) 1948 break; 1949 err = nfs4_open_recover_helper(opendata, FMODE_WRITE); 1950 if (err) 1951 break; 1952 case FMODE_READ: 1953 err = nfs4_open_recover_helper(opendata, FMODE_READ); 1954 } 1955 nfs4_opendata_put(opendata); 1956 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 1957 } 1958 1959 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) 1960 { 1961 struct nfs4_opendata *data = calldata; 1962 1963 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl, 1964 &data->c_arg.seq_args, &data->c_res.seq_res, task); 1965 } 1966 1967 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 1968 { 1969 struct nfs4_opendata *data = calldata; 1970 1971 nfs40_sequence_done(task, &data->c_res.seq_res); 1972 1973 data->rpc_status = task->tk_status; 1974 if (data->rpc_status == 0) { 1975 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 1976 nfs_confirm_seqid(&data->owner->so_seqid, 0); 1977 renew_lease(data->o_res.server, data->timestamp); 1978 data->rpc_done = 1; 1979 } 1980 } 1981 1982 static void nfs4_open_confirm_release(void *calldata) 1983 { 1984 struct nfs4_opendata *data = calldata; 1985 struct nfs4_state *state = NULL; 1986 1987 /* If this request hasn't been cancelled, do nothing */ 1988 if (data->cancelled == 0) 1989 goto out_free; 1990 /* In case of error, no cleanup! 
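 * If the confirm RPC never ran to completion there is no confirmed open
 * on the server that we know of, so the only safe action is to drop our
 * opendata reference below.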
*/ 1991 if (!data->rpc_done) 1992 goto out_free; 1993 state = nfs4_opendata_to_nfs4_state(data); 1994 if (!IS_ERR(state)) 1995 nfs4_close_state(state, data->o_arg.fmode); 1996 out_free: 1997 nfs4_opendata_put(data); 1998 } 1999 2000 static const struct rpc_call_ops nfs4_open_confirm_ops = { 2001 .rpc_call_prepare = nfs4_open_confirm_prepare, 2002 .rpc_call_done = nfs4_open_confirm_done, 2003 .rpc_release = nfs4_open_confirm_release, 2004 }; 2005 2006 /* 2007 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 2008 */ 2009 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 2010 { 2011 struct nfs_server *server = NFS_SERVER(d_inode(data->dir)); 2012 struct rpc_task *task; 2013 struct rpc_message msg = { 2014 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 2015 .rpc_argp = &data->c_arg, 2016 .rpc_resp = &data->c_res, 2017 .rpc_cred = data->owner->so_cred, 2018 }; 2019 struct rpc_task_setup task_setup_data = { 2020 .rpc_client = server->client, 2021 .rpc_message = &msg, 2022 .callback_ops = &nfs4_open_confirm_ops, 2023 .callback_data = data, 2024 .workqueue = nfsiod_workqueue, 2025 .flags = RPC_TASK_ASYNC, 2026 }; 2027 int status; 2028 2029 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1); 2030 kref_get(&data->kref); 2031 data->rpc_done = 0; 2032 data->rpc_status = 0; 2033 data->timestamp = jiffies; 2034 if (data->is_recover) 2035 nfs4_set_sequence_privileged(&data->c_arg.seq_args); 2036 task = rpc_run_task(&task_setup_data); 2037 if (IS_ERR(task)) 2038 return PTR_ERR(task); 2039 status = nfs4_wait_for_completion_rpc_task(task); 2040 if (status != 0) { 2041 data->cancelled = 1; 2042 smp_wmb(); 2043 } else 2044 status = data->rpc_status; 2045 rpc_put_task(task); 2046 return status; 2047 } 2048 2049 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 2050 { 2051 struct nfs4_opendata *data = calldata; 2052 struct nfs4_state_owner *sp = data->owner; 2053 struct nfs_client *clp = sp->so_server->nfs_client; 2054 enum open_claim_type4 claim = data->o_arg.claim; 2055 2056 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 2057 goto out_wait; 2058 /* 2059 * Check if we still need to send an OPEN call, or if we can use 2060 * a delegation instead. 2061 */ 2062 if (data->state != NULL) { 2063 struct nfs_delegation *delegation; 2064 2065 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) 2066 goto out_no_action; 2067 rcu_read_lock(); 2068 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); 2069 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 2070 goto unlock_no_action; 2071 rcu_read_unlock(); 2072 } 2073 /* Update client id. 
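 * The clientid is re-read from the nfs_client every time the request is
 * prepared, in case the lease was re-established after this opendata
 * was set up.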
*/ 2074 data->o_arg.clientid = clp->cl_clientid; 2075 switch (claim) { 2076 default: 2077 break; 2078 case NFS4_OPEN_CLAIM_PREVIOUS: 2079 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 2080 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 2081 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 2082 case NFS4_OPEN_CLAIM_FH: 2083 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 2084 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); 2085 } 2086 data->timestamp = jiffies; 2087 if (nfs4_setup_sequence(data->o_arg.server, 2088 &data->o_arg.seq_args, 2089 &data->o_res.seq_res, 2090 task) != 0) 2091 nfs_release_seqid(data->o_arg.seqid); 2092 2093 /* Set the create mode (note dependency on the session type) */ 2094 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 2095 if (data->o_arg.open_flags & O_EXCL) { 2096 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 2097 if (nfs4_has_persistent_session(clp)) 2098 data->o_arg.createmode = NFS4_CREATE_GUARDED; 2099 else if (clp->cl_mvops->minor_version > 0) 2100 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 2101 } 2102 return; 2103 unlock_no_action: 2104 trace_nfs4_cached_open(data->state); 2105 rcu_read_unlock(); 2106 out_no_action: 2107 task->tk_action = NULL; 2108 out_wait: 2109 nfs4_sequence_done(task, &data->o_res.seq_res); 2110 } 2111 2112 static void nfs4_open_done(struct rpc_task *task, void *calldata) 2113 { 2114 struct nfs4_opendata *data = calldata; 2115 2116 data->rpc_status = task->tk_status; 2117 2118 if (!nfs4_sequence_process(task, &data->o_res.seq_res)) 2119 return; 2120 2121 if (task->tk_status == 0) { 2122 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 2123 switch (data->o_res.f_attr->mode & S_IFMT) { 2124 case S_IFREG: 2125 break; 2126 case S_IFLNK: 2127 data->rpc_status = -ELOOP; 2128 break; 2129 case S_IFDIR: 2130 data->rpc_status = -EISDIR; 2131 break; 2132 default: 2133 data->rpc_status = -ENOTDIR; 2134 } 2135 } 2136 renew_lease(data->o_res.server, data->timestamp); 2137 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 2138 nfs_confirm_seqid(&data->owner->so_seqid, 0); 2139 } 2140 data->rpc_done = 1; 2141 } 2142 2143 static void nfs4_open_release(void *calldata) 2144 { 2145 struct nfs4_opendata *data = calldata; 2146 struct nfs4_state *state = NULL; 2147 2148 /* If this request hasn't been cancelled, do nothing */ 2149 if (data->cancelled == 0) 2150 goto out_free; 2151 /* In case of error, no cleanup! */ 2152 if (data->rpc_status != 0 || !data->rpc_done) 2153 goto out_free; 2154 /* In case we need an open_confirm, no cleanup! 
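 * An open that still requires an OPEN_CONFIRM has not been established
 * on the server yet, so there is nothing for us to CLOSE here; if the
 * confirm does get sent, nfs4_open_confirm_release() handles any cleanup.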
*/ 2155 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 2156 goto out_free; 2157 state = nfs4_opendata_to_nfs4_state(data); 2158 if (!IS_ERR(state)) 2159 nfs4_close_state(state, data->o_arg.fmode); 2160 out_free: 2161 nfs4_opendata_put(data); 2162 } 2163 2164 static const struct rpc_call_ops nfs4_open_ops = { 2165 .rpc_call_prepare = nfs4_open_prepare, 2166 .rpc_call_done = nfs4_open_done, 2167 .rpc_release = nfs4_open_release, 2168 }; 2169 2170 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) 2171 { 2172 struct inode *dir = d_inode(data->dir); 2173 struct nfs_server *server = NFS_SERVER(dir); 2174 struct nfs_openargs *o_arg = &data->o_arg; 2175 struct nfs_openres *o_res = &data->o_res; 2176 struct rpc_task *task; 2177 struct rpc_message msg = { 2178 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 2179 .rpc_argp = o_arg, 2180 .rpc_resp = o_res, 2181 .rpc_cred = data->owner->so_cred, 2182 }; 2183 struct rpc_task_setup task_setup_data = { 2184 .rpc_client = server->client, 2185 .rpc_message = &msg, 2186 .callback_ops = &nfs4_open_ops, 2187 .callback_data = data, 2188 .workqueue = nfsiod_workqueue, 2189 .flags = RPC_TASK_ASYNC, 2190 }; 2191 int status; 2192 2193 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); 2194 kref_get(&data->kref); 2195 data->rpc_done = 0; 2196 data->rpc_status = 0; 2197 data->cancelled = 0; 2198 data->is_recover = 0; 2199 if (isrecover) { 2200 nfs4_set_sequence_privileged(&o_arg->seq_args); 2201 data->is_recover = 1; 2202 } 2203 task = rpc_run_task(&task_setup_data); 2204 if (IS_ERR(task)) 2205 return PTR_ERR(task); 2206 status = nfs4_wait_for_completion_rpc_task(task); 2207 if (status != 0) { 2208 data->cancelled = 1; 2209 smp_wmb(); 2210 } else 2211 status = data->rpc_status; 2212 rpc_put_task(task); 2213 2214 return status; 2215 } 2216 2217 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2218 { 2219 struct inode *dir = d_inode(data->dir); 2220 struct nfs_openres *o_res = &data->o_res; 2221 int status; 2222 2223 status = nfs4_run_open_task(data, 1); 2224 if (status != 0 || !data->rpc_done) 2225 return status; 2226 2227 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2228 2229 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2230 status = _nfs4_proc_open_confirm(data); 2231 if (status != 0) 2232 return status; 2233 } 2234 2235 return status; 2236 } 2237 2238 /* 2239 * Additional permission checks in order to distinguish between an 2240 * open for read, and an open for execute. This works around the 2241 * fact that NFSv4 OPEN treats read and execute permissions as being 2242 * the same. 2243 * Note that in the non-execute case, we want to turn off permission 2244 * checking if we just created a new file (POSIX open() semantics). 2245 */ 2246 static int nfs4_opendata_access(struct rpc_cred *cred, 2247 struct nfs4_opendata *opendata, 2248 struct nfs4_state *state, fmode_t fmode, 2249 int openflags) 2250 { 2251 struct nfs_access_entry cache; 2252 u32 mask; 2253 2254 /* access call failed or for some reason the server doesn't 2255 * support any access modes -- defer access call until later */ 2256 if (opendata->o_res.access_supported == 0) 2257 return 0; 2258 2259 mask = 0; 2260 /* 2261 * Use openflags to check for exec, because fmode won't 2262 * always have FMODE_EXEC set when file open for exec. 
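 * (for instance, an open issued on behalf of execve() typically carries
 * __FMODE_EXEC in the open flags while fmode itself is just FMODE_READ).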
2263 */ 2264 if (openflags & __FMODE_EXEC) { 2265 /* ONLY check for exec rights */ 2266 mask = MAY_EXEC; 2267 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2268 mask = MAY_READ; 2269 2270 cache.cred = cred; 2271 cache.jiffies = jiffies; 2272 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2273 nfs_access_add_cache(state->inode, &cache); 2274 2275 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 2276 return 0; 2277 2278 /* even though OPEN succeeded, access is denied. Close the file */ 2279 nfs4_close_state(state, fmode); 2280 return -EACCES; 2281 } 2282 2283 /* 2284 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2285 */ 2286 static int _nfs4_proc_open(struct nfs4_opendata *data) 2287 { 2288 struct inode *dir = d_inode(data->dir); 2289 struct nfs_server *server = NFS_SERVER(dir); 2290 struct nfs_openargs *o_arg = &data->o_arg; 2291 struct nfs_openres *o_res = &data->o_res; 2292 int status; 2293 2294 status = nfs4_run_open_task(data, 0); 2295 if (!data->rpc_done) 2296 return status; 2297 if (status != 0) { 2298 if (status == -NFS4ERR_BADNAME && 2299 !(o_arg->open_flags & O_CREAT)) 2300 return -ENOENT; 2301 return status; 2302 } 2303 2304 nfs_fattr_map_and_free_names(server, &data->f_attr); 2305 2306 if (o_arg->open_flags & O_CREAT) { 2307 update_changeattr(dir, &o_res->cinfo); 2308 if (o_arg->open_flags & O_EXCL) 2309 data->file_created = 1; 2310 else if (o_res->cinfo.before != o_res->cinfo.after) 2311 data->file_created = 1; 2312 } 2313 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2314 server->caps &= ~NFS_CAP_POSIX_LOCK; 2315 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2316 status = _nfs4_proc_open_confirm(data); 2317 if (status != 0) 2318 return status; 2319 } 2320 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 2321 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label); 2322 return 0; 2323 } 2324 2325 static int nfs4_recover_expired_lease(struct nfs_server *server) 2326 { 2327 return nfs4_client_recover_expired_lease(server->nfs_client); 2328 } 2329 2330 /* 2331 * OPEN_EXPIRED: 2332 * reclaim state on the server after a network partition. 
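 * The open is replayed against the file handle we already hold
 * (NFS4_OPEN_CLAIM_FH); if the file turns out to be stale we drop
 * the dentry.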
2333 * Assumes caller holds the appropriate lock 2334 */ 2335 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2336 { 2337 struct nfs4_opendata *opendata; 2338 int ret; 2339 2340 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2341 NFS4_OPEN_CLAIM_FH); 2342 if (IS_ERR(opendata)) 2343 return PTR_ERR(opendata); 2344 ret = nfs4_open_recover(opendata, state); 2345 if (ret == -ESTALE) 2346 d_drop(ctx->dentry); 2347 nfs4_opendata_put(opendata); 2348 return ret; 2349 } 2350 2351 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2352 { 2353 struct nfs_server *server = NFS_SERVER(state->inode); 2354 struct nfs4_exception exception = { }; 2355 int err; 2356 2357 do { 2358 err = _nfs4_open_expired(ctx, state); 2359 trace_nfs4_open_expired(ctx, 0, err); 2360 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2361 continue; 2362 switch (err) { 2363 default: 2364 goto out; 2365 case -NFS4ERR_GRACE: 2366 case -NFS4ERR_DELAY: 2367 nfs4_handle_exception(server, err, &exception); 2368 err = 0; 2369 } 2370 } while (exception.retry); 2371 out: 2372 return err; 2373 } 2374 2375 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2376 { 2377 struct nfs_open_context *ctx; 2378 int ret; 2379 2380 ctx = nfs4_state_find_open_context(state); 2381 if (IS_ERR(ctx)) 2382 return -EAGAIN; 2383 ret = nfs4_do_open_expired(ctx, state); 2384 put_nfs_open_context(ctx); 2385 return ret; 2386 } 2387 2388 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state) 2389 { 2390 nfs_remove_bad_delegation(state->inode); 2391 write_seqlock(&state->seqlock); 2392 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2393 write_sequnlock(&state->seqlock); 2394 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2395 } 2396 2397 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2398 { 2399 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2400 nfs_finish_clear_delegation_stateid(state); 2401 } 2402 2403 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2404 { 2405 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2406 nfs40_clear_delegation_stateid(state); 2407 return nfs4_open_expired(sp, state); 2408 } 2409 2410 #if defined(CONFIG_NFS_V4_1) 2411 static void nfs41_check_delegation_stateid(struct nfs4_state *state) 2412 { 2413 struct nfs_server *server = NFS_SERVER(state->inode); 2414 nfs4_stateid stateid; 2415 struct nfs_delegation *delegation; 2416 struct rpc_cred *cred; 2417 int status; 2418 2419 /* Get the delegation credential for use by test/free_stateid */ 2420 rcu_read_lock(); 2421 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2422 if (delegation == NULL) { 2423 rcu_read_unlock(); 2424 return; 2425 } 2426 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { 2427 rcu_read_unlock(); 2428 nfs_finish_clear_delegation_stateid(state); 2429 return; 2430 } 2431 2432 nfs4_stateid_copy(&stateid, &delegation->stateid); 2433 cred = get_rpccred(delegation->cred); 2434 rcu_read_unlock(); 2435 status = nfs41_test_stateid(server, &stateid, cred); 2436 trace_nfs4_test_delegation_stateid(state, NULL, status); 2437 2438 if (status != NFS_OK) { 2439 /* Free the stateid unless the server explicitly 2440 * informs us the stateid is unrecognized. 
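 * An NFS4ERR_BAD_STATEID reply already means the server no longer
 * knows this stateid, so a FREE_STATEID would accomplish nothing.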
*/ 2441 if (status != -NFS4ERR_BAD_STATEID) 2442 nfs41_free_stateid(server, &stateid, cred); 2443 nfs_finish_clear_delegation_stateid(state); 2444 } 2445 2446 put_rpccred(cred); 2447 } 2448 2449 /** 2450 * nfs41_check_open_stateid - possibly free an open stateid 2451 * 2452 * @state: NFSv4 state for an inode 2453 * 2454 * Returns NFS_OK if recovery for this stateid is now finished. 2455 * Otherwise a negative NFS4ERR value is returned. 2456 */ 2457 static int nfs41_check_open_stateid(struct nfs4_state *state) 2458 { 2459 struct nfs_server *server = NFS_SERVER(state->inode); 2460 nfs4_stateid *stateid = &state->open_stateid; 2461 struct rpc_cred *cred = state->owner->so_cred; 2462 int status; 2463 2464 /* If a state reset has been done, test_stateid is unneeded */ 2465 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) && 2466 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) && 2467 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0)) 2468 return -NFS4ERR_BAD_STATEID; 2469 2470 status = nfs41_test_stateid(server, stateid, cred); 2471 trace_nfs4_test_open_stateid(state, NULL, status); 2472 if (status != NFS_OK) { 2473 /* Free the stateid unless the server explicitly 2474 * informs us the stateid is unrecognized. */ 2475 if (status != -NFS4ERR_BAD_STATEID) 2476 nfs41_free_stateid(server, stateid, cred); 2477 2478 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2479 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2480 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2481 clear_bit(NFS_OPEN_STATE, &state->flags); 2482 } 2483 return status; 2484 } 2485 2486 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2487 { 2488 int status; 2489 2490 nfs41_check_delegation_stateid(state); 2491 status = nfs41_check_open_stateid(state); 2492 if (status != NFS_OK) 2493 status = nfs4_open_expired(sp, state); 2494 return status; 2495 } 2496 #endif 2497 2498 /* 2499 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 2500 * fields corresponding to attributes that were used to store the verifier. 2501 * Make sure we clobber those fields in the later setattr call 2502 */ 2503 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 2504 struct iattr *sattr, struct nfs4_label **label) 2505 { 2506 const u32 *attrset = opendata->o_res.attrset; 2507 2508 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 2509 !(sattr->ia_valid & ATTR_ATIME_SET)) 2510 sattr->ia_valid |= ATTR_ATIME; 2511 2512 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 2513 !(sattr->ia_valid & ATTR_MTIME_SET)) 2514 sattr->ia_valid |= ATTR_MTIME; 2515 2516 /* Except MODE, it seems harmless of setting twice. 
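 * If the server reports that it already stored the mode at create time,
 * drop ATTR_MODE from the follow-up SETATTR instead of re-sending it.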
*/ 2517 if ((attrset[1] & FATTR4_WORD1_MODE)) 2518 sattr->ia_valid &= ~ATTR_MODE; 2519 2520 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) 2521 *label = NULL; 2522 } 2523 2524 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 2525 fmode_t fmode, 2526 int flags, 2527 struct nfs_open_context *ctx) 2528 { 2529 struct nfs4_state_owner *sp = opendata->owner; 2530 struct nfs_server *server = sp->so_server; 2531 struct dentry *dentry; 2532 struct nfs4_state *state; 2533 unsigned int seq; 2534 int ret; 2535 2536 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 2537 2538 ret = _nfs4_proc_open(opendata); 2539 if (ret != 0) 2540 goto out; 2541 2542 state = nfs4_opendata_to_nfs4_state(opendata); 2543 ret = PTR_ERR(state); 2544 if (IS_ERR(state)) 2545 goto out; 2546 if (server->caps & NFS_CAP_POSIX_LOCK) 2547 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 2548 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK) 2549 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags); 2550 2551 dentry = opendata->dentry; 2552 if (d_really_is_negative(dentry)) { 2553 struct dentry *alias; 2554 d_drop(dentry); 2555 alias = d_exact_alias(dentry, state->inode); 2556 if (!alias) 2557 alias = d_splice_alias(igrab(state->inode), dentry); 2558 /* d_splice_alias() can't fail here - it's a non-directory */ 2559 if (alias) { 2560 dput(ctx->dentry); 2561 ctx->dentry = dentry = alias; 2562 } 2563 nfs_set_verifier(dentry, 2564 nfs_save_change_attribute(d_inode(opendata->dir))); 2565 } 2566 2567 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags); 2568 if (ret != 0) 2569 goto out; 2570 2571 ctx->state = state; 2572 if (d_inode(dentry) == state->inode) { 2573 nfs_inode_attach_open_context(ctx); 2574 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 2575 nfs4_schedule_stateid_recovery(server, state); 2576 } 2577 out: 2578 return ret; 2579 } 2580 2581 /* 2582 * Returns a referenced nfs4_state 2583 */ 2584 static int _nfs4_do_open(struct inode *dir, 2585 struct nfs_open_context *ctx, 2586 int flags, 2587 struct iattr *sattr, 2588 struct nfs4_label *label, 2589 int *opened) 2590 { 2591 struct nfs4_state_owner *sp; 2592 struct nfs4_state *state = NULL; 2593 struct nfs_server *server = NFS_SERVER(dir); 2594 struct nfs4_opendata *opendata; 2595 struct dentry *dentry = ctx->dentry; 2596 struct rpc_cred *cred = ctx->cred; 2597 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 2598 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC); 2599 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 2600 struct nfs4_label *olabel = NULL; 2601 int status; 2602 2603 /* Protect against reboot recovery conflicts */ 2604 status = -ENOMEM; 2605 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 2606 if (sp == NULL) { 2607 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 2608 goto out_err; 2609 } 2610 status = nfs4_recover_expired_lease(server); 2611 if (status != 0) 2612 goto err_put_state_owner; 2613 if (d_really_is_positive(dentry)) 2614 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 2615 status = -ENOMEM; 2616 if (d_really_is_positive(dentry)) 2617 claim = NFS4_OPEN_CLAIM_FH; 2618 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, 2619 label, claim, GFP_KERNEL); 2620 if (opendata == NULL) 2621 goto err_put_state_owner; 2622 2623 if (label) { 2624 olabel = nfs4_label_alloc(server, GFP_KERNEL); 2625 if (IS_ERR(olabel)) { 2626 status = PTR_ERR(olabel); 2627 goto err_opendata_put; 2628 } 2629 } 2630 2631 if (server->attr_bitmask[2] & 
FATTR4_WORD2_MDSTHRESHOLD) { 2632 if (!opendata->f_attr.mdsthreshold) { 2633 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 2634 if (!opendata->f_attr.mdsthreshold) 2635 goto err_free_label; 2636 } 2637 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 2638 } 2639 if (d_really_is_positive(dentry)) 2640 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 2641 2642 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx); 2643 if (status != 0) 2644 goto err_free_label; 2645 state = ctx->state; 2646 2647 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 2648 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 2649 nfs4_exclusive_attrset(opendata, sattr, &label); 2650 /* 2651 * send create attributes which was not set by open 2652 * with an extra setattr. 2653 */ 2654 if (sattr->ia_valid & NFS4_VALID_ATTRS) { 2655 nfs_fattr_init(opendata->o_res.f_attr); 2656 status = nfs4_do_setattr(state->inode, cred, 2657 opendata->o_res.f_attr, sattr, 2658 state, label, olabel); 2659 if (status == 0) { 2660 nfs_setattr_update_inode(state->inode, sattr, 2661 opendata->o_res.f_attr); 2662 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2663 } 2664 } 2665 } 2666 if (opened && opendata->file_created) 2667 *opened |= FILE_CREATED; 2668 2669 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 2670 *ctx_th = opendata->f_attr.mdsthreshold; 2671 opendata->f_attr.mdsthreshold = NULL; 2672 } 2673 2674 nfs4_label_free(olabel); 2675 2676 nfs4_opendata_put(opendata); 2677 nfs4_put_state_owner(sp); 2678 return 0; 2679 err_free_label: 2680 nfs4_label_free(olabel); 2681 err_opendata_put: 2682 nfs4_opendata_put(opendata); 2683 err_put_state_owner: 2684 nfs4_put_state_owner(sp); 2685 out_err: 2686 return status; 2687 } 2688 2689 2690 static struct nfs4_state *nfs4_do_open(struct inode *dir, 2691 struct nfs_open_context *ctx, 2692 int flags, 2693 struct iattr *sattr, 2694 struct nfs4_label *label, 2695 int *opened) 2696 { 2697 struct nfs_server *server = NFS_SERVER(dir); 2698 struct nfs4_exception exception = { }; 2699 struct nfs4_state *res; 2700 int status; 2701 2702 do { 2703 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened); 2704 res = ctx->state; 2705 trace_nfs4_open_file(ctx, flags, status); 2706 if (status == 0) 2707 break; 2708 /* NOTE: BAD_SEQID means the server and client disagree about the 2709 * book-keeping w.r.t. state-changing operations 2710 * (OPEN/CLOSE/LOCK/LOCKU...) 2711 * It is actually a sign of a bug on the client or on the server. 2712 * 2713 * If we receive a BAD_SEQID error in the particular case of 2714 * doing an OPEN, we assume that nfs_increment_open_seqid() will 2715 * have unhashed the old state_owner for us, and that we can 2716 * therefore safely retry using a new one. We should still warn 2717 * the user though... 2718 */ 2719 if (status == -NFS4ERR_BAD_SEQID) { 2720 pr_warn_ratelimited("NFS: v4 server %s " 2721 " returned a bad sequence-id error!\n", 2722 NFS_SERVER(dir)->nfs_client->cl_hostname); 2723 exception.retry = 1; 2724 continue; 2725 } 2726 /* 2727 * BAD_STATEID on OPEN means that the server cancelled our 2728 * state before it received the OPEN_CONFIRM. 2729 * Recover by retrying the request as per the discussion 2730 * on Page 181 of RFC3530. 
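 * We simply flag exception.retry below so that the whole OPEN is
 * re-driven from scratch.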
2731 */ 2732 if (status == -NFS4ERR_BAD_STATEID) { 2733 exception.retry = 1; 2734 continue; 2735 } 2736 if (status == -EAGAIN) { 2737 /* We must have found a delegation */ 2738 exception.retry = 1; 2739 continue; 2740 } 2741 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 2742 continue; 2743 res = ERR_PTR(nfs4_handle_exception(server, 2744 status, &exception)); 2745 } while (exception.retry); 2746 return res; 2747 } 2748 2749 static int _nfs4_do_setattr(struct inode *inode, 2750 struct nfs_setattrargs *arg, 2751 struct nfs_setattrres *res, 2752 struct rpc_cred *cred, 2753 struct nfs4_state *state) 2754 { 2755 struct nfs_server *server = NFS_SERVER(inode); 2756 struct rpc_message msg = { 2757 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2758 .rpc_argp = arg, 2759 .rpc_resp = res, 2760 .rpc_cred = cred, 2761 }; 2762 struct rpc_cred *delegation_cred = NULL; 2763 unsigned long timestamp = jiffies; 2764 fmode_t fmode; 2765 bool truncate; 2766 int status; 2767 2768 nfs_fattr_init(res->fattr); 2769 2770 /* Servers should only apply open mode checks for file size changes */ 2771 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false; 2772 fmode = truncate ? FMODE_WRITE : FMODE_READ; 2773 2774 if (nfs4_copy_delegation_stateid(inode, fmode, &arg->stateid, &delegation_cred)) { 2775 /* Use that stateid */ 2776 } else if (truncate && state != NULL) { 2777 struct nfs_lockowner lockowner = { 2778 .l_owner = current->files, 2779 .l_pid = current->tgid, 2780 }; 2781 if (!nfs4_valid_open_stateid(state)) 2782 return -EBADF; 2783 if (nfs4_select_rw_stateid(state, FMODE_WRITE, &lockowner, 2784 &arg->stateid, &delegation_cred) == -EIO) 2785 return -EBADF; 2786 } else 2787 nfs4_stateid_copy(&arg->stateid, &zero_stateid); 2788 if (delegation_cred) 2789 msg.rpc_cred = delegation_cred; 2790 2791 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1); 2792 2793 put_rpccred(delegation_cred); 2794 if (status == 0 && state != NULL) 2795 renew_lease(server, timestamp); 2796 trace_nfs4_setattr(inode, &arg->stateid, status); 2797 return status; 2798 } 2799 2800 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2801 struct nfs_fattr *fattr, struct iattr *sattr, 2802 struct nfs4_state *state, struct nfs4_label *ilabel, 2803 struct nfs4_label *olabel) 2804 { 2805 struct nfs_server *server = NFS_SERVER(inode); 2806 struct nfs_setattrargs arg = { 2807 .fh = NFS_FH(inode), 2808 .iap = sattr, 2809 .server = server, 2810 .bitmask = server->attr_bitmask, 2811 .label = ilabel, 2812 }; 2813 struct nfs_setattrres res = { 2814 .fattr = fattr, 2815 .label = olabel, 2816 .server = server, 2817 }; 2818 struct nfs4_exception exception = { 2819 .state = state, 2820 .inode = inode, 2821 .stateid = &arg.stateid, 2822 }; 2823 int err; 2824 2825 arg.bitmask = nfs4_bitmask(server, ilabel); 2826 if (ilabel) 2827 arg.bitmask = nfs4_bitmask(server, olabel); 2828 2829 do { 2830 err = _nfs4_do_setattr(inode, &arg, &res, cred, state); 2831 switch (err) { 2832 case -NFS4ERR_OPENMODE: 2833 if (!(sattr->ia_valid & ATTR_SIZE)) { 2834 pr_warn_once("NFSv4: server %s is incorrectly " 2835 "applying open mode checks to " 2836 "a SETATTR that is not " 2837 "changing file size.\n", 2838 server->nfs_client->cl_hostname); 2839 } 2840 if (state && !(state->state & FMODE_WRITE)) { 2841 err = -EBADF; 2842 if (sattr->ia_valid & ATTR_OPEN) 2843 err = -EACCES; 2844 goto out; 2845 } 2846 } 2847 err = nfs4_handle_exception(server, err, &exception); 2848 } while (exception.retry); 2849 out: 
2850 return err; 2851 } 2852 2853 static bool 2854 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task) 2855 { 2856 if (inode == NULL || !nfs_have_layout(inode)) 2857 return false; 2858 2859 return pnfs_wait_on_layoutreturn(inode, task); 2860 } 2861 2862 struct nfs4_closedata { 2863 struct inode *inode; 2864 struct nfs4_state *state; 2865 struct nfs_closeargs arg; 2866 struct nfs_closeres res; 2867 struct nfs_fattr fattr; 2868 unsigned long timestamp; 2869 bool roc; 2870 u32 roc_barrier; 2871 }; 2872 2873 static void nfs4_free_closedata(void *data) 2874 { 2875 struct nfs4_closedata *calldata = data; 2876 struct nfs4_state_owner *sp = calldata->state->owner; 2877 struct super_block *sb = calldata->state->inode->i_sb; 2878 2879 if (calldata->roc) 2880 pnfs_roc_release(calldata->state->inode); 2881 nfs4_put_open_state(calldata->state); 2882 nfs_free_seqid(calldata->arg.seqid); 2883 nfs4_put_state_owner(sp); 2884 nfs_sb_deactive(sb); 2885 kfree(calldata); 2886 } 2887 2888 static void nfs4_close_done(struct rpc_task *task, void *data) 2889 { 2890 struct nfs4_closedata *calldata = data; 2891 struct nfs4_state *state = calldata->state; 2892 struct nfs_server *server = NFS_SERVER(calldata->inode); 2893 nfs4_stateid *res_stateid = NULL; 2894 2895 dprintk("%s: begin!\n", __func__); 2896 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2897 return; 2898 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 2899 /* hmm. we are done with the inode, and in the process of freeing 2900 * the state_owner. we keep this around to process errors 2901 */ 2902 switch (task->tk_status) { 2903 case 0: 2904 res_stateid = &calldata->res.stateid; 2905 if (calldata->roc) 2906 pnfs_roc_set_barrier(state->inode, 2907 calldata->roc_barrier); 2908 renew_lease(server, calldata->timestamp); 2909 break; 2910 case -NFS4ERR_ADMIN_REVOKED: 2911 case -NFS4ERR_STALE_STATEID: 2912 case -NFS4ERR_OLD_STATEID: 2913 case -NFS4ERR_BAD_STATEID: 2914 case -NFS4ERR_EXPIRED: 2915 if (!nfs4_stateid_match(&calldata->arg.stateid, 2916 &state->open_stateid)) { 2917 rpc_restart_call_prepare(task); 2918 goto out_release; 2919 } 2920 if (calldata->arg.fmode == 0) 2921 break; 2922 default: 2923 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) { 2924 rpc_restart_call_prepare(task); 2925 goto out_release; 2926 } 2927 } 2928 nfs_clear_open_stateid(state, &calldata->arg.stateid, 2929 res_stateid, calldata->arg.fmode); 2930 out_release: 2931 nfs_release_seqid(calldata->arg.seqid); 2932 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2933 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2934 } 2935 2936 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2937 { 2938 struct nfs4_closedata *calldata = data; 2939 struct nfs4_state *state = calldata->state; 2940 struct inode *inode = calldata->inode; 2941 bool is_rdonly, is_wronly, is_rdwr; 2942 int call_close = 0; 2943 2944 dprintk("%s: begin!\n", __func__); 2945 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2946 goto out_wait; 2947 2948 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2949 spin_lock(&state->owner->so_lock); 2950 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 2951 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 2952 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 2953 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid); 2954 /* Calculate the change in open mode */ 2955 calldata->arg.fmode = 0; 2956 if (state->n_rdwr == 0) { 
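		/*
		 * No read/write opens remain: work out whether the surviving
		 * read-only and write-only opens call for an OPEN_DOWNGRADE of
		 * the share mode or a full CLOSE. For example, if an O_RDWR
		 * opener has just gone away while read-only users remain,
		 * fmode ends up as FMODE_READ and the request stays an
		 * OPEN_DOWNGRADE rather than becoming a CLOSE.
		 */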
2957 if (state->n_rdonly == 0) 2958 call_close |= is_rdonly; 2959 else if (is_rdonly) 2960 calldata->arg.fmode |= FMODE_READ; 2961 if (state->n_wronly == 0) 2962 call_close |= is_wronly; 2963 else if (is_wronly) 2964 calldata->arg.fmode |= FMODE_WRITE; 2965 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE)) 2966 call_close |= is_rdwr; 2967 } else if (is_rdwr) 2968 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 2969 2970 if (!nfs4_valid_open_stateid(state)) 2971 call_close = 0; 2972 spin_unlock(&state->owner->so_lock); 2973 2974 if (!call_close) { 2975 /* Note: exit _without_ calling nfs4_close_done */ 2976 goto out_no_action; 2977 } 2978 2979 if (nfs4_wait_on_layoutreturn(inode, task)) { 2980 nfs_release_seqid(calldata->arg.seqid); 2981 goto out_wait; 2982 } 2983 2984 if (calldata->arg.fmode == 0) 2985 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2986 if (calldata->roc) 2987 pnfs_roc_get_barrier(inode, &calldata->roc_barrier); 2988 2989 calldata->arg.share_access = 2990 nfs4_map_atomic_open_share(NFS_SERVER(inode), 2991 calldata->arg.fmode, 0); 2992 2993 nfs_fattr_init(calldata->res.fattr); 2994 calldata->timestamp = jiffies; 2995 if (nfs4_setup_sequence(NFS_SERVER(inode), 2996 &calldata->arg.seq_args, 2997 &calldata->res.seq_res, 2998 task) != 0) 2999 nfs_release_seqid(calldata->arg.seqid); 3000 dprintk("%s: done!\n", __func__); 3001 return; 3002 out_no_action: 3003 task->tk_action = NULL; 3004 out_wait: 3005 nfs4_sequence_done(task, &calldata->res.seq_res); 3006 } 3007 3008 static const struct rpc_call_ops nfs4_close_ops = { 3009 .rpc_call_prepare = nfs4_close_prepare, 3010 .rpc_call_done = nfs4_close_done, 3011 .rpc_release = nfs4_free_closedata, 3012 }; 3013 3014 static bool nfs4_roc(struct inode *inode) 3015 { 3016 if (!nfs_have_layout(inode)) 3017 return false; 3018 return pnfs_roc(inode); 3019 } 3020 3021 /* 3022 * It is possible for data to be read/written from a mem-mapped file 3023 * after the sys_close call (which hits the vfs layer as a flush). 3024 * This means that we can't safely call nfsv4 close on a file until 3025 * the inode is cleared. This in turn means that we are not good 3026 * NFSv4 citizens - we do not indicate to the server to update the file's 3027 * share state even when we are done with one of the three share 3028 * stateid's in the inode. 3029 * 3030 * NOTE: Caller must be holding the sp->so_owner semaphore! 
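 *
 * The CLOSE is issued as an asynchronous RPC; callers that need to see
 * the result pass wait != 0 and we block on task completion below.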
3031 */ 3032 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 3033 { 3034 struct nfs_server *server = NFS_SERVER(state->inode); 3035 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 3036 struct nfs4_closedata *calldata; 3037 struct nfs4_state_owner *sp = state->owner; 3038 struct rpc_task *task; 3039 struct rpc_message msg = { 3040 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 3041 .rpc_cred = state->owner->so_cred, 3042 }; 3043 struct rpc_task_setup task_setup_data = { 3044 .rpc_client = server->client, 3045 .rpc_message = &msg, 3046 .callback_ops = &nfs4_close_ops, 3047 .workqueue = nfsiod_workqueue, 3048 .flags = RPC_TASK_ASYNC, 3049 }; 3050 int status = -ENOMEM; 3051 3052 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 3053 &task_setup_data.rpc_client, &msg); 3054 3055 calldata = kzalloc(sizeof(*calldata), gfp_mask); 3056 if (calldata == NULL) 3057 goto out; 3058 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 3059 calldata->inode = state->inode; 3060 calldata->state = state; 3061 calldata->arg.fh = NFS_FH(state->inode); 3062 /* Serialization for the sequence id */ 3063 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 3064 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 3065 if (IS_ERR(calldata->arg.seqid)) 3066 goto out_free_calldata; 3067 calldata->arg.fmode = 0; 3068 calldata->arg.bitmask = server->cache_consistency_bitmask; 3069 calldata->res.fattr = &calldata->fattr; 3070 calldata->res.seqid = calldata->arg.seqid; 3071 calldata->res.server = server; 3072 calldata->roc = nfs4_roc(state->inode); 3073 nfs_sb_active(calldata->inode->i_sb); 3074 3075 msg.rpc_argp = &calldata->arg; 3076 msg.rpc_resp = &calldata->res; 3077 task_setup_data.callback_data = calldata; 3078 task = rpc_run_task(&task_setup_data); 3079 if (IS_ERR(task)) 3080 return PTR_ERR(task); 3081 status = 0; 3082 if (wait) 3083 status = rpc_wait_for_completion_task(task); 3084 rpc_put_task(task); 3085 return status; 3086 out_free_calldata: 3087 kfree(calldata); 3088 out: 3089 nfs4_put_open_state(state); 3090 nfs4_put_state_owner(sp); 3091 return status; 3092 } 3093 3094 static struct inode * 3095 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 3096 int open_flags, struct iattr *attr, int *opened) 3097 { 3098 struct nfs4_state *state; 3099 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; 3100 3101 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 3102 3103 /* Protect against concurrent sillydeletes */ 3104 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 3105 3106 nfs4_label_release_security(label); 3107 3108 if (IS_ERR(state)) 3109 return ERR_CAST(state); 3110 return state->inode; 3111 } 3112 3113 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 3114 { 3115 if (ctx->state == NULL) 3116 return; 3117 if (is_sync) 3118 nfs4_close_sync(ctx->state, ctx->mode); 3119 else 3120 nfs4_close_state(ctx->state, ctx->mode); 3121 } 3122 3123 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 3124 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 3125 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL) 3126 3127 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3128 { 3129 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion; 3130 struct nfs4_server_caps_arg args = { 3131 .fhandle = fhandle, 3132 .bitmask = bitmask, 3133 }; 3134 
struct nfs4_server_caps_res res = {}; 3135 struct rpc_message msg = { 3136 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 3137 .rpc_argp = &args, 3138 .rpc_resp = &res, 3139 }; 3140 int status; 3141 3142 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 3143 FATTR4_WORD0_FH_EXPIRE_TYPE | 3144 FATTR4_WORD0_LINK_SUPPORT | 3145 FATTR4_WORD0_SYMLINK_SUPPORT | 3146 FATTR4_WORD0_ACLSUPPORT; 3147 if (minorversion) 3148 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 3149 3150 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3151 if (status == 0) { 3152 /* Sanity check the server answers */ 3153 switch (minorversion) { 3154 case 0: 3155 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 3156 res.attr_bitmask[2] = 0; 3157 break; 3158 case 1: 3159 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 3160 break; 3161 case 2: 3162 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 3163 } 3164 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 3165 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 3166 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 3167 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 3168 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 3169 NFS_CAP_CTIME|NFS_CAP_MTIME| 3170 NFS_CAP_SECURITY_LABEL); 3171 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 3172 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3173 server->caps |= NFS_CAP_ACLS; 3174 if (res.has_links != 0) 3175 server->caps |= NFS_CAP_HARDLINKS; 3176 if (res.has_symlinks != 0) 3177 server->caps |= NFS_CAP_SYMLINKS; 3178 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 3179 server->caps |= NFS_CAP_FILEID; 3180 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 3181 server->caps |= NFS_CAP_MODE; 3182 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 3183 server->caps |= NFS_CAP_NLINK; 3184 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 3185 server->caps |= NFS_CAP_OWNER; 3186 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 3187 server->caps |= NFS_CAP_OWNER_GROUP; 3188 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 3189 server->caps |= NFS_CAP_ATIME; 3190 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 3191 server->caps |= NFS_CAP_CTIME; 3192 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 3193 server->caps |= NFS_CAP_MTIME; 3194 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 3195 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 3196 server->caps |= NFS_CAP_SECURITY_LABEL; 3197 #endif 3198 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 3199 sizeof(server->attr_bitmask)); 3200 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 3201 3202 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 3203 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 3204 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 3205 server->cache_consistency_bitmask[2] = 0; 3206 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 3207 sizeof(server->exclcreat_bitmask)); 3208 server->acl_bitmask = res.acl_bitmask; 3209 server->fh_expire_type = res.fh_expire_type; 3210 } 3211 3212 return status; 3213 } 3214 3215 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 3216 { 3217 struct nfs4_exception exception = { }; 3218 int err; 3219 do { 3220 err = nfs4_handle_exception(server, 3221 _nfs4_server_capabilities(server, fhandle), 3222 &exception); 3223 } while (exception.retry); 3224 return err; 3225 } 3226 3227 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh 
*fhandle, 3228 struct nfs_fsinfo *info) 3229 { 3230 u32 bitmask[3]; 3231 struct nfs4_lookup_root_arg args = { 3232 .bitmask = bitmask, 3233 }; 3234 struct nfs4_lookup_res res = { 3235 .server = server, 3236 .fattr = info->fattr, 3237 .fh = fhandle, 3238 }; 3239 struct rpc_message msg = { 3240 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 3241 .rpc_argp = &args, 3242 .rpc_resp = &res, 3243 }; 3244 3245 bitmask[0] = nfs4_fattr_bitmap[0]; 3246 bitmask[1] = nfs4_fattr_bitmap[1]; 3247 /* 3248 * Process the label in the upcoming getfattr 3249 */ 3250 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 3251 3252 nfs_fattr_init(info->fattr); 3253 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3254 } 3255 3256 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 3257 struct nfs_fsinfo *info) 3258 { 3259 struct nfs4_exception exception = { }; 3260 int err; 3261 do { 3262 err = _nfs4_lookup_root(server, fhandle, info); 3263 trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 3264 switch (err) { 3265 case 0: 3266 case -NFS4ERR_WRONGSEC: 3267 goto out; 3268 default: 3269 err = nfs4_handle_exception(server, err, &exception); 3270 } 3271 } while (exception.retry); 3272 out: 3273 return err; 3274 } 3275 3276 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3277 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 3278 { 3279 struct rpc_auth_create_args auth_args = { 3280 .pseudoflavor = flavor, 3281 }; 3282 struct rpc_auth *auth; 3283 int ret; 3284 3285 auth = rpcauth_create(&auth_args, server->client); 3286 if (IS_ERR(auth)) { 3287 ret = -EACCES; 3288 goto out; 3289 } 3290 ret = nfs4_lookup_root(server, fhandle, info); 3291 out: 3292 return ret; 3293 } 3294 3295 /* 3296 * Retry pseudoroot lookup with various security flavors. We do this when: 3297 * 3298 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 3299 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 3300 * 3301 * Returns zero on success, or a negative NFS4ERR value, or a 3302 * negative errno value. 3303 */ 3304 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3305 struct nfs_fsinfo *info) 3306 { 3307 /* Per 3530bis 15.33.5 */ 3308 static const rpc_authflavor_t flav_array[] = { 3309 RPC_AUTH_GSS_KRB5P, 3310 RPC_AUTH_GSS_KRB5I, 3311 RPC_AUTH_GSS_KRB5, 3312 RPC_AUTH_UNIX, /* courtesy */ 3313 RPC_AUTH_NULL, 3314 }; 3315 int status = -EPERM; 3316 size_t i; 3317 3318 if (server->auth_info.flavor_len > 0) { 3319 /* try each flavor specified by user */ 3320 for (i = 0; i < server->auth_info.flavor_len; i++) { 3321 status = nfs4_lookup_root_sec(server, fhandle, info, 3322 server->auth_info.flavors[i]); 3323 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 3324 continue; 3325 break; 3326 } 3327 } else { 3328 /* no flavors specified by user, try default list */ 3329 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 3330 status = nfs4_lookup_root_sec(server, fhandle, info, 3331 flav_array[i]); 3332 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 3333 continue; 3334 break; 3335 } 3336 } 3337 3338 /* 3339 * -EACCESS could mean that the user doesn't have correct permissions 3340 * to access the mount. It could also mean that we tried to mount 3341 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 3342 * existing mount programs don't handle -EACCES very well so it should 3343 * be mapped to -EPERM instead. 
3344 */ 3345 if (status == -EACCES) 3346 status = -EPERM; 3347 return status; 3348 } 3349 3350 /** 3351 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 3352 * @server: initialized nfs_server handle 3353 * @fhandle: we fill in the pseudo-fs root file handle 3354 * @info: we fill in an FSINFO struct 3355 * @auth_probe: probe the auth flavours 3356 * 3357 * Returns zero on success, or a negative errno. 3358 */ 3359 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 3360 struct nfs_fsinfo *info, 3361 bool auth_probe) 3362 { 3363 int status = 0; 3364 3365 if (!auth_probe) 3366 status = nfs4_lookup_root(server, fhandle, info); 3367 3368 if (auth_probe || status == NFS4ERR_WRONGSEC) 3369 status = server->nfs_client->cl_mvops->find_root_sec(server, 3370 fhandle, info); 3371 3372 if (status == 0) 3373 status = nfs4_server_capabilities(server, fhandle); 3374 if (status == 0) 3375 status = nfs4_do_fsinfo(server, fhandle, info); 3376 3377 return nfs4_map_errors(status); 3378 } 3379 3380 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 3381 struct nfs_fsinfo *info) 3382 { 3383 int error; 3384 struct nfs_fattr *fattr = info->fattr; 3385 struct nfs4_label *label = NULL; 3386 3387 error = nfs4_server_capabilities(server, mntfh); 3388 if (error < 0) { 3389 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 3390 return error; 3391 } 3392 3393 label = nfs4_label_alloc(server, GFP_KERNEL); 3394 if (IS_ERR(label)) 3395 return PTR_ERR(label); 3396 3397 error = nfs4_proc_getattr(server, mntfh, fattr, label); 3398 if (error < 0) { 3399 dprintk("nfs4_get_root: getattr error = %d\n", -error); 3400 goto err_free_label; 3401 } 3402 3403 if (fattr->valid & NFS_ATTR_FATTR_FSID && 3404 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 3405 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 3406 3407 err_free_label: 3408 nfs4_label_free(label); 3409 3410 return error; 3411 } 3412 3413 /* 3414 * Get locations and (maybe) other attributes of a referral. 3415 * Note that we'll actually follow the referral later when 3416 * we detect fsid mismatch in inode revalidation 3417 */ 3418 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 3419 const struct qstr *name, struct nfs_fattr *fattr, 3420 struct nfs_fh *fhandle) 3421 { 3422 int status = -ENOMEM; 3423 struct page *page = NULL; 3424 struct nfs4_fs_locations *locations = NULL; 3425 3426 page = alloc_page(GFP_KERNEL); 3427 if (page == NULL) 3428 goto out; 3429 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 3430 if (locations == NULL) 3431 goto out; 3432 3433 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 3434 if (status != 0) 3435 goto out; 3436 3437 /* 3438 * If the fsid didn't change, this is a migration event, not a 3439 * referral. Cause us to drop into the exception handler, which 3440 * will kick off migration recovery. 
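 * Returning -NFS4ERR_MOVED here lets nfs4_proc_lookup_common() feed the
 * error back into nfs4_handle_exception(), which schedules that recovery.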
3441 */ 3442 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 3443 dprintk("%s: server did not return a different fsid for" 3444 " a referral at %s\n", __func__, name->name); 3445 status = -NFS4ERR_MOVED; 3446 goto out; 3447 } 3448 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 3449 nfs_fixup_referral_attributes(&locations->fattr); 3450 3451 /* replace the lookup nfs_fattr with the locations nfs_fattr */ 3452 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 3453 memset(fhandle, 0, sizeof(struct nfs_fh)); 3454 out: 3455 if (page) 3456 __free_page(page); 3457 kfree(locations); 3458 return status; 3459 } 3460 3461 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 3462 struct nfs_fattr *fattr, struct nfs4_label *label) 3463 { 3464 struct nfs4_getattr_arg args = { 3465 .fh = fhandle, 3466 .bitmask = server->attr_bitmask, 3467 }; 3468 struct nfs4_getattr_res res = { 3469 .fattr = fattr, 3470 .label = label, 3471 .server = server, 3472 }; 3473 struct rpc_message msg = { 3474 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 3475 .rpc_argp = &args, 3476 .rpc_resp = &res, 3477 }; 3478 3479 args.bitmask = nfs4_bitmask(server, label); 3480 3481 nfs_fattr_init(fattr); 3482 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3483 } 3484 3485 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 3486 struct nfs_fattr *fattr, struct nfs4_label *label) 3487 { 3488 struct nfs4_exception exception = { }; 3489 int err; 3490 do { 3491 err = _nfs4_proc_getattr(server, fhandle, fattr, label); 3492 trace_nfs4_getattr(server, fhandle, fattr, err); 3493 err = nfs4_handle_exception(server, err, 3494 &exception); 3495 } while (exception.retry); 3496 return err; 3497 } 3498 3499 /* 3500 * The file is not closed if it is opened due to the a request to change 3501 * the size of the file. The open call will not be needed once the 3502 * VFS layer lookup-intents are implemented. 3503 * 3504 * Close is called when the inode is destroyed. 3505 * If we haven't opened the file for O_WRONLY, we 3506 * need to in the size_change case to obtain a stateid. 3507 * 3508 * Got race? 3509 * Because OPEN is always done by name in nfsv4, it is 3510 * possible that we opened a different file by the same 3511 * name. We can recognize this race condition, but we 3512 * can't do anything about it besides returning an error. 3513 * 3514 * This will be fixed with VFS changes (lookup-intent). 
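 *
 * A size change coming from open(O_TRUNC) is marked with ATTR_OPEN in
 * sattr->ia_valid; in that case the mtime/ctime updates are left to the
 * server (see the O_TRUNC handling below).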
3515 */ 3516 static int 3517 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 3518 struct iattr *sattr) 3519 { 3520 struct inode *inode = d_inode(dentry); 3521 struct rpc_cred *cred = NULL; 3522 struct nfs4_state *state = NULL; 3523 struct nfs4_label *label = NULL; 3524 int status; 3525 3526 if (pnfs_ld_layoutret_on_setattr(inode) && 3527 sattr->ia_valid & ATTR_SIZE && 3528 sattr->ia_size < i_size_read(inode)) 3529 pnfs_commit_and_return_layout(inode); 3530 3531 nfs_fattr_init(fattr); 3532 3533 /* Deal with open(O_TRUNC) */ 3534 if (sattr->ia_valid & ATTR_OPEN) 3535 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 3536 3537 /* Optimization: if the end result is no change, don't RPC */ 3538 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 3539 return 0; 3540 3541 /* Search for an existing open(O_WRITE) file */ 3542 if (sattr->ia_valid & ATTR_FILE) { 3543 struct nfs_open_context *ctx; 3544 3545 ctx = nfs_file_open_context(sattr->ia_file); 3546 if (ctx) { 3547 cred = ctx->cred; 3548 state = ctx->state; 3549 } 3550 } 3551 3552 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 3553 if (IS_ERR(label)) 3554 return PTR_ERR(label); 3555 3556 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label); 3557 if (status == 0) { 3558 nfs_setattr_update_inode(inode, sattr, fattr); 3559 nfs_setsecurity(inode, fattr, label); 3560 } 3561 nfs4_label_free(label); 3562 return status; 3563 } 3564 3565 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 3566 const struct qstr *name, struct nfs_fh *fhandle, 3567 struct nfs_fattr *fattr, struct nfs4_label *label) 3568 { 3569 struct nfs_server *server = NFS_SERVER(dir); 3570 int status; 3571 struct nfs4_lookup_arg args = { 3572 .bitmask = server->attr_bitmask, 3573 .dir_fh = NFS_FH(dir), 3574 .name = name, 3575 }; 3576 struct nfs4_lookup_res res = { 3577 .server = server, 3578 .fattr = fattr, 3579 .label = label, 3580 .fh = fhandle, 3581 }; 3582 struct rpc_message msg = { 3583 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 3584 .rpc_argp = &args, 3585 .rpc_resp = &res, 3586 }; 3587 3588 args.bitmask = nfs4_bitmask(server, label); 3589 3590 nfs_fattr_init(fattr); 3591 3592 dprintk("NFS call lookup %s\n", name->name); 3593 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 3594 dprintk("NFS reply lookup: %d\n", status); 3595 return status; 3596 } 3597 3598 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 3599 { 3600 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 3601 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 3602 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 3603 fattr->nlink = 2; 3604 } 3605 3606 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 3607 const struct qstr *name, struct nfs_fh *fhandle, 3608 struct nfs_fattr *fattr, struct nfs4_label *label) 3609 { 3610 struct nfs4_exception exception = { }; 3611 struct rpc_clnt *client = *clnt; 3612 int err; 3613 do { 3614 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label); 3615 trace_nfs4_lookup(dir, name, err); 3616 switch (err) { 3617 case -NFS4ERR_BADNAME: 3618 err = -ENOENT; 3619 goto out; 3620 case -NFS4ERR_MOVED: 3621 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 3622 if (err == -NFS4ERR_MOVED) 3623 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3624 goto out; 3625 case -NFS4ERR_WRONGSEC: 3626 err = -EPERM; 3627 if (client != *clnt) 3628 goto out; 3629 client = nfs4_negotiate_security(client, dir, name); 3630 if 
(IS_ERR(client)) 3631 return PTR_ERR(client); 3632 3633 exception.retry = 1; 3634 break; 3635 default: 3636 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3637 } 3638 } while (exception.retry); 3639 3640 out: 3641 if (err == 0) 3642 *clnt = client; 3643 else if (client != *clnt) 3644 rpc_shutdown_client(client); 3645 3646 return err; 3647 } 3648 3649 static int nfs4_proc_lookup(struct inode *dir, const struct qstr *name, 3650 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 3651 struct nfs4_label *label) 3652 { 3653 int status; 3654 struct rpc_clnt *client = NFS_CLIENT(dir); 3655 3656 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label); 3657 if (client != NFS_CLIENT(dir)) { 3658 rpc_shutdown_client(client); 3659 nfs_fixup_secinfo_attributes(fattr); 3660 } 3661 return status; 3662 } 3663 3664 struct rpc_clnt * 3665 nfs4_proc_lookup_mountpoint(struct inode *dir, const struct qstr *name, 3666 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 3667 { 3668 struct rpc_clnt *client = NFS_CLIENT(dir); 3669 int status; 3670 3671 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL); 3672 if (status < 0) 3673 return ERR_PTR(status); 3674 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client; 3675 } 3676 3677 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3678 { 3679 struct nfs_server *server = NFS_SERVER(inode); 3680 struct nfs4_accessargs args = { 3681 .fh = NFS_FH(inode), 3682 .bitmask = server->cache_consistency_bitmask, 3683 }; 3684 struct nfs4_accessres res = { 3685 .server = server, 3686 }; 3687 struct rpc_message msg = { 3688 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 3689 .rpc_argp = &args, 3690 .rpc_resp = &res, 3691 .rpc_cred = entry->cred, 3692 }; 3693 int mode = entry->mask; 3694 int status = 0; 3695 3696 /* 3697 * Determine which access bits we want to ask for... 3698 */ 3699 if (mode & MAY_READ) 3700 args.access |= NFS4_ACCESS_READ; 3701 if (S_ISDIR(inode->i_mode)) { 3702 if (mode & MAY_WRITE) 3703 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 3704 if (mode & MAY_EXEC) 3705 args.access |= NFS4_ACCESS_LOOKUP; 3706 } else { 3707 if (mode & MAY_WRITE) 3708 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 3709 if (mode & MAY_EXEC) 3710 args.access |= NFS4_ACCESS_EXECUTE; 3711 } 3712 3713 res.fattr = nfs_alloc_fattr(); 3714 if (res.fattr == NULL) 3715 return -ENOMEM; 3716 3717 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3718 if (!status) { 3719 nfs_access_set_mask(entry, res.access); 3720 nfs_refresh_inode(inode, res.fattr); 3721 } 3722 nfs_free_fattr(res.fattr); 3723 return status; 3724 } 3725 3726 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3727 { 3728 struct nfs4_exception exception = { }; 3729 int err; 3730 do { 3731 err = _nfs4_proc_access(inode, entry); 3732 trace_nfs4_access(inode, err); 3733 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3734 &exception); 3735 } while (exception.retry); 3736 return err; 3737 } 3738 3739 /* 3740 * TODO: For the time being, we don't try to get any attributes 3741 * along with any of the zero-copy operations READ, READDIR, 3742 * READLINK, WRITE. 3743 * 3744 * In the case of the first three, we want to put the GETATTR 3745 * after the read-type operation -- this is because it is hard 3746 * to predict the length of a GETATTR response in v4, and thus 3747 * align the READ data correctly. 
This means that the GETATTR 3748 * may end up partially falling into the page cache, and we should 3749 * shift it into the 'tail' of the xdr_buf before processing. 3750 * To do this efficiently, we need to know the total length 3751 * of data received, which doesn't seem to be available outside 3752 * of the RPC layer. 3753 * 3754 * In the case of WRITE, we also want to put the GETATTR after 3755 * the operation -- in this case because we want to make sure 3756 * we get the post-operation mtime and size. 3757 * 3758 * Both of these changes to the XDR layer would in fact be quite 3759 * minor, but I decided to leave them for a subsequent patch. 3760 */ 3761 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 3762 unsigned int pgbase, unsigned int pglen) 3763 { 3764 struct nfs4_readlink args = { 3765 .fh = NFS_FH(inode), 3766 .pgbase = pgbase, 3767 .pglen = pglen, 3768 .pages = &page, 3769 }; 3770 struct nfs4_readlink_res res; 3771 struct rpc_message msg = { 3772 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 3773 .rpc_argp = &args, 3774 .rpc_resp = &res, 3775 }; 3776 3777 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 3778 } 3779 3780 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 3781 unsigned int pgbase, unsigned int pglen) 3782 { 3783 struct nfs4_exception exception = { }; 3784 int err; 3785 do { 3786 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 3787 trace_nfs4_readlink(inode, err); 3788 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3789 &exception); 3790 } while (exception.retry); 3791 return err; 3792 } 3793 3794 /* 3795 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 3796 */ 3797 static int 3798 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 3799 int flags) 3800 { 3801 struct nfs4_label l, *ilabel = NULL; 3802 struct nfs_open_context *ctx; 3803 struct nfs4_state *state; 3804 int status = 0; 3805 3806 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 3807 if (IS_ERR(ctx)) 3808 return PTR_ERR(ctx); 3809 3810 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 3811 3812 sattr->ia_mode &= ~current_umask(); 3813 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 3814 if (IS_ERR(state)) { 3815 status = PTR_ERR(state); 3816 goto out; 3817 } 3818 out: 3819 nfs4_label_release_security(ilabel); 3820 put_nfs_open_context(ctx); 3821 return status; 3822 } 3823 3824 static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name) 3825 { 3826 struct nfs_server *server = NFS_SERVER(dir); 3827 struct nfs_removeargs args = { 3828 .fh = NFS_FH(dir), 3829 .name = *name, 3830 }; 3831 struct nfs_removeres res = { 3832 .server = server, 3833 }; 3834 struct rpc_message msg = { 3835 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 3836 .rpc_argp = &args, 3837 .rpc_resp = &res, 3838 }; 3839 int status; 3840 3841 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 3842 if (status == 0) 3843 update_changeattr(dir, &res.cinfo); 3844 return status; 3845 } 3846 3847 static int nfs4_proc_remove(struct inode *dir, const struct qstr *name) 3848 { 3849 struct nfs4_exception exception = { }; 3850 int err; 3851 do { 3852 err = _nfs4_proc_remove(dir, name); 3853 trace_nfs4_remove(dir, name, err); 3854 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3855 &exception); 3856 } while (exception.retry); 3857 return err; 3858 } 3859 3860 static void nfs4_proc_unlink_setup(struct 
rpc_message *msg, struct inode *dir) 3861 { 3862 struct nfs_server *server = NFS_SERVER(dir); 3863 struct nfs_removeargs *args = msg->rpc_argp; 3864 struct nfs_removeres *res = msg->rpc_resp; 3865 3866 res->server = server; 3867 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 3868 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1); 3869 3870 nfs_fattr_init(res->dir_attr); 3871 } 3872 3873 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 3874 { 3875 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb), 3876 &data->args.seq_args, 3877 &data->res.seq_res, 3878 task); 3879 } 3880 3881 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 3882 { 3883 struct nfs_unlinkdata *data = task->tk_calldata; 3884 struct nfs_removeres *res = &data->res; 3885 3886 if (!nfs4_sequence_done(task, &res->seq_res)) 3887 return 0; 3888 if (nfs4_async_handle_error(task, res->server, NULL, 3889 &data->timeout) == -EAGAIN) 3890 return 0; 3891 update_changeattr(dir, &res->cinfo); 3892 return 1; 3893 } 3894 3895 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 3896 { 3897 struct nfs_server *server = NFS_SERVER(dir); 3898 struct nfs_renameargs *arg = msg->rpc_argp; 3899 struct nfs_renameres *res = msg->rpc_resp; 3900 3901 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 3902 res->server = server; 3903 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1); 3904 } 3905 3906 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 3907 { 3908 nfs4_setup_sequence(NFS_SERVER(data->old_dir), 3909 &data->args.seq_args, 3910 &data->res.seq_res, 3911 task); 3912 } 3913 3914 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 3915 struct inode *new_dir) 3916 { 3917 struct nfs_renamedata *data = task->tk_calldata; 3918 struct nfs_renameres *res = &data->res; 3919 3920 if (!nfs4_sequence_done(task, &res->seq_res)) 3921 return 0; 3922 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 3923 return 0; 3924 3925 update_changeattr(old_dir, &res->old_cinfo); 3926 update_changeattr(new_dir, &res->new_cinfo); 3927 return 1; 3928 } 3929 3930 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name) 3931 { 3932 struct nfs_server *server = NFS_SERVER(inode); 3933 struct nfs4_link_arg arg = { 3934 .fh = NFS_FH(inode), 3935 .dir_fh = NFS_FH(dir), 3936 .name = name, 3937 .bitmask = server->attr_bitmask, 3938 }; 3939 struct nfs4_link_res res = { 3940 .server = server, 3941 .label = NULL, 3942 }; 3943 struct rpc_message msg = { 3944 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3945 .rpc_argp = &arg, 3946 .rpc_resp = &res, 3947 }; 3948 int status = -ENOMEM; 3949 3950 res.fattr = nfs_alloc_fattr(); 3951 if (res.fattr == NULL) 3952 goto out; 3953 3954 res.label = nfs4_label_alloc(server, GFP_KERNEL); 3955 if (IS_ERR(res.label)) { 3956 status = PTR_ERR(res.label); 3957 goto out; 3958 } 3959 arg.bitmask = nfs4_bitmask(server, res.label); 3960 3961 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3962 if (!status) { 3963 update_changeattr(dir, &res.cinfo); 3964 status = nfs_post_op_update_inode(inode, res.fattr); 3965 if (!status) 3966 nfs_setsecurity(inode, res.fattr, res.label); 3967 } 3968 3969 3970 nfs4_label_free(res.label); 3971 3972 out: 3973 nfs_free_fattr(res.fattr); 3974 return status; 3975 } 3976 3977 static int nfs4_proc_link(struct inode *inode, struct inode *dir, 
const struct qstr *name) 3978 { 3979 struct nfs4_exception exception = { }; 3980 int err; 3981 do { 3982 err = nfs4_handle_exception(NFS_SERVER(inode), 3983 _nfs4_proc_link(inode, dir, name), 3984 &exception); 3985 } while (exception.retry); 3986 return err; 3987 } 3988 3989 struct nfs4_createdata { 3990 struct rpc_message msg; 3991 struct nfs4_create_arg arg; 3992 struct nfs4_create_res res; 3993 struct nfs_fh fh; 3994 struct nfs_fattr fattr; 3995 struct nfs4_label *label; 3996 }; 3997 3998 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3999 const struct qstr *name, struct iattr *sattr, u32 ftype) 4000 { 4001 struct nfs4_createdata *data; 4002 4003 data = kzalloc(sizeof(*data), GFP_KERNEL); 4004 if (data != NULL) { 4005 struct nfs_server *server = NFS_SERVER(dir); 4006 4007 data->label = nfs4_label_alloc(server, GFP_KERNEL); 4008 if (IS_ERR(data->label)) 4009 goto out_free; 4010 4011 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 4012 data->msg.rpc_argp = &data->arg; 4013 data->msg.rpc_resp = &data->res; 4014 data->arg.dir_fh = NFS_FH(dir); 4015 data->arg.server = server; 4016 data->arg.name = name; 4017 data->arg.attrs = sattr; 4018 data->arg.ftype = ftype; 4019 data->arg.bitmask = nfs4_bitmask(server, data->label); 4020 data->res.server = server; 4021 data->res.fh = &data->fh; 4022 data->res.fattr = &data->fattr; 4023 data->res.label = data->label; 4024 nfs_fattr_init(data->res.fattr); 4025 } 4026 return data; 4027 out_free: 4028 kfree(data); 4029 return NULL; 4030 } 4031 4032 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 4033 { 4034 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 4035 &data->arg.seq_args, &data->res.seq_res, 1); 4036 if (status == 0) { 4037 update_changeattr(dir, &data->res.dir_cinfo); 4038 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); 4039 } 4040 return status; 4041 } 4042 4043 static void nfs4_free_createdata(struct nfs4_createdata *data) 4044 { 4045 nfs4_label_free(data->label); 4046 kfree(data); 4047 } 4048 4049 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 4050 struct page *page, unsigned int len, struct iattr *sattr, 4051 struct nfs4_label *label) 4052 { 4053 struct nfs4_createdata *data; 4054 int status = -ENAMETOOLONG; 4055 4056 if (len > NFS4_MAXPATHLEN) 4057 goto out; 4058 4059 status = -ENOMEM; 4060 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 4061 if (data == NULL) 4062 goto out; 4063 4064 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 4065 data->arg.u.symlink.pages = &page; 4066 data->arg.u.symlink.len = len; 4067 data->arg.label = label; 4068 4069 status = nfs4_do_create(dir, dentry, data); 4070 4071 nfs4_free_createdata(data); 4072 out: 4073 return status; 4074 } 4075 4076 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 4077 struct page *page, unsigned int len, struct iattr *sattr) 4078 { 4079 struct nfs4_exception exception = { }; 4080 struct nfs4_label l, *label = NULL; 4081 int err; 4082 4083 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4084 4085 do { 4086 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label); 4087 trace_nfs4_symlink(dir, &dentry->d_name, err); 4088 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4089 &exception); 4090 } while (exception.retry); 4091 4092 nfs4_label_release_security(label); 4093 return err; 4094 } 4095 4096 static int _nfs4_proc_mkdir(struct inode 
*dir, struct dentry *dentry, 4097 struct iattr *sattr, struct nfs4_label *label) 4098 { 4099 struct nfs4_createdata *data; 4100 int status = -ENOMEM; 4101 4102 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 4103 if (data == NULL) 4104 goto out; 4105 4106 data->arg.label = label; 4107 status = nfs4_do_create(dir, dentry, data); 4108 4109 nfs4_free_createdata(data); 4110 out: 4111 return status; 4112 } 4113 4114 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 4115 struct iattr *sattr) 4116 { 4117 struct nfs4_exception exception = { }; 4118 struct nfs4_label l, *label = NULL; 4119 int err; 4120 4121 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4122 4123 sattr->ia_mode &= ~current_umask(); 4124 do { 4125 err = _nfs4_proc_mkdir(dir, dentry, sattr, label); 4126 trace_nfs4_mkdir(dir, &dentry->d_name, err); 4127 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4128 &exception); 4129 } while (exception.retry); 4130 nfs4_label_release_security(label); 4131 4132 return err; 4133 } 4134 4135 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 4136 u64 cookie, struct page **pages, unsigned int count, int plus) 4137 { 4138 struct inode *dir = d_inode(dentry); 4139 struct nfs4_readdir_arg args = { 4140 .fh = NFS_FH(dir), 4141 .pages = pages, 4142 .pgbase = 0, 4143 .count = count, 4144 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask, 4145 .plus = plus, 4146 }; 4147 struct nfs4_readdir_res res; 4148 struct rpc_message msg = { 4149 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 4150 .rpc_argp = &args, 4151 .rpc_resp = &res, 4152 .rpc_cred = cred, 4153 }; 4154 int status; 4155 4156 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__, 4157 dentry, 4158 (unsigned long long)cookie); 4159 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 4160 res.pgbase = args.pgbase; 4161 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 4162 if (status >= 0) { 4163 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 4164 status += args.pgbase; 4165 } 4166 4167 nfs_invalidate_atime(dir); 4168 4169 dprintk("%s: returns %d\n", __func__, status); 4170 return status; 4171 } 4172 4173 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 4174 u64 cookie, struct page **pages, unsigned int count, int plus) 4175 { 4176 struct nfs4_exception exception = { }; 4177 int err; 4178 do { 4179 err = _nfs4_proc_readdir(dentry, cred, cookie, 4180 pages, count, plus); 4181 trace_nfs4_readdir(d_inode(dentry), err); 4182 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err, 4183 &exception); 4184 } while (exception.retry); 4185 return err; 4186 } 4187 4188 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 4189 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 4190 { 4191 struct nfs4_createdata *data; 4192 int mode = sattr->ia_mode; 4193 int status = -ENOMEM; 4194 4195 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 4196 if (data == NULL) 4197 goto out; 4198 4199 if (S_ISFIFO(mode)) 4200 data->arg.ftype = NF4FIFO; 4201 else if (S_ISBLK(mode)) { 4202 data->arg.ftype = NF4BLK; 4203 data->arg.u.device.specdata1 = MAJOR(rdev); 4204 data->arg.u.device.specdata2 = MINOR(rdev); 4205 } 4206 else if (S_ISCHR(mode)) { 4207 data->arg.ftype = NF4CHR; 4208 data->arg.u.device.specdata1 = MAJOR(rdev); 4209 data->arg.u.device.specdata2 = MINOR(rdev); 4210 } else if (!S_ISSOCK(mode)) { 4211 status = -EINVAL; 
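			/* Only FIFO, block, character and socket nodes can be
			 * represented as NFSv4 file types (NF4FIFO, NF4BLK, NF4CHR,
			 * NF4SOCK); any other mode is invalid for mknod. */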
4212 goto out_free; 4213 } 4214 4215 data->arg.label = label; 4216 status = nfs4_do_create(dir, dentry, data); 4217 out_free: 4218 nfs4_free_createdata(data); 4219 out: 4220 return status; 4221 } 4222 4223 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 4224 struct iattr *sattr, dev_t rdev) 4225 { 4226 struct nfs4_exception exception = { }; 4227 struct nfs4_label l, *label = NULL; 4228 int err; 4229 4230 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4231 4232 sattr->ia_mode &= ~current_umask(); 4233 do { 4234 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 4235 trace_nfs4_mknod(dir, &dentry->d_name, err); 4236 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4237 &exception); 4238 } while (exception.retry); 4239 4240 nfs4_label_release_security(label); 4241 4242 return err; 4243 } 4244 4245 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 4246 struct nfs_fsstat *fsstat) 4247 { 4248 struct nfs4_statfs_arg args = { 4249 .fh = fhandle, 4250 .bitmask = server->attr_bitmask, 4251 }; 4252 struct nfs4_statfs_res res = { 4253 .fsstat = fsstat, 4254 }; 4255 struct rpc_message msg = { 4256 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 4257 .rpc_argp = &args, 4258 .rpc_resp = &res, 4259 }; 4260 4261 nfs_fattr_init(fsstat->fattr); 4262 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4263 } 4264 4265 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 4266 { 4267 struct nfs4_exception exception = { }; 4268 int err; 4269 do { 4270 err = nfs4_handle_exception(server, 4271 _nfs4_proc_statfs(server, fhandle, fsstat), 4272 &exception); 4273 } while (exception.retry); 4274 return err; 4275 } 4276 4277 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 4278 struct nfs_fsinfo *fsinfo) 4279 { 4280 struct nfs4_fsinfo_arg args = { 4281 .fh = fhandle, 4282 .bitmask = server->attr_bitmask, 4283 }; 4284 struct nfs4_fsinfo_res res = { 4285 .fsinfo = fsinfo, 4286 }; 4287 struct rpc_message msg = { 4288 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 4289 .rpc_argp = &args, 4290 .rpc_resp = &res, 4291 }; 4292 4293 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4294 } 4295 4296 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4297 { 4298 struct nfs4_exception exception = { }; 4299 unsigned long now = jiffies; 4300 int err; 4301 4302 do { 4303 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 4304 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 4305 if (err == 0) { 4306 nfs4_set_lease_period(server->nfs_client, 4307 fsinfo->lease_time * HZ, 4308 now); 4309 break; 4310 } 4311 err = nfs4_handle_exception(server, err, &exception); 4312 } while (exception.retry); 4313 return err; 4314 } 4315 4316 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4317 { 4318 int error; 4319 4320 nfs_fattr_init(fsinfo->fattr); 4321 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 4322 if (error == 0) { 4323 /* block layout checks this! 
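 * -- pnfs_blksize must be copied from fsinfo before set_pnfs_layoutdriver()
 * runs, since the blocklayout driver validates it when it is set up.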
*/ 4324 server->pnfs_blksize = fsinfo->blksize; 4325 set_pnfs_layoutdriver(server, fhandle, fsinfo); 4326 } 4327 4328 return error; 4329 } 4330 4331 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4332 struct nfs_pathconf *pathconf) 4333 { 4334 struct nfs4_pathconf_arg args = { 4335 .fh = fhandle, 4336 .bitmask = server->attr_bitmask, 4337 }; 4338 struct nfs4_pathconf_res res = { 4339 .pathconf = pathconf, 4340 }; 4341 struct rpc_message msg = { 4342 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 4343 .rpc_argp = &args, 4344 .rpc_resp = &res, 4345 }; 4346 4347 /* None of the pathconf attributes are mandatory to implement */ 4348 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 4349 memset(pathconf, 0, sizeof(*pathconf)); 4350 return 0; 4351 } 4352 4353 nfs_fattr_init(pathconf->fattr); 4354 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4355 } 4356 4357 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4358 struct nfs_pathconf *pathconf) 4359 { 4360 struct nfs4_exception exception = { }; 4361 int err; 4362 4363 do { 4364 err = nfs4_handle_exception(server, 4365 _nfs4_proc_pathconf(server, fhandle, pathconf), 4366 &exception); 4367 } while (exception.retry); 4368 return err; 4369 } 4370 4371 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 4372 const struct nfs_open_context *ctx, 4373 const struct nfs_lock_context *l_ctx, 4374 fmode_t fmode) 4375 { 4376 const struct nfs_lockowner *lockowner = NULL; 4377 4378 if (l_ctx != NULL) 4379 lockowner = &l_ctx->lockowner; 4380 return nfs4_select_rw_stateid(ctx->state, fmode, lockowner, stateid, NULL); 4381 } 4382 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 4383 4384 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 4385 const struct nfs_open_context *ctx, 4386 const struct nfs_lock_context *l_ctx, 4387 fmode_t fmode) 4388 { 4389 nfs4_stateid current_stateid; 4390 4391 /* If the current stateid represents a lost lock, then exit */ 4392 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO) 4393 return true; 4394 return nfs4_stateid_match(stateid, &current_stateid); 4395 } 4396 4397 static bool nfs4_error_stateid_expired(int err) 4398 { 4399 switch (err) { 4400 case -NFS4ERR_DELEG_REVOKED: 4401 case -NFS4ERR_ADMIN_REVOKED: 4402 case -NFS4ERR_BAD_STATEID: 4403 case -NFS4ERR_STALE_STATEID: 4404 case -NFS4ERR_OLD_STATEID: 4405 case -NFS4ERR_OPENMODE: 4406 case -NFS4ERR_EXPIRED: 4407 return true; 4408 } 4409 return false; 4410 } 4411 4412 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr) 4413 { 4414 nfs_invalidate_atime(hdr->inode); 4415 } 4416 4417 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 4418 { 4419 struct nfs_server *server = NFS_SERVER(hdr->inode); 4420 4421 trace_nfs4_read(hdr, task->tk_status); 4422 if (nfs4_async_handle_error(task, server, 4423 hdr->args.context->state, 4424 NULL) == -EAGAIN) { 4425 rpc_restart_call_prepare(task); 4426 return -EAGAIN; 4427 } 4428 4429 __nfs4_read_done_cb(hdr); 4430 if (task->tk_status > 0) 4431 renew_lease(server, hdr->timestamp); 4432 return 0; 4433 } 4434 4435 static bool nfs4_read_stateid_changed(struct rpc_task *task, 4436 struct nfs_pgio_args *args) 4437 { 4438 4439 if (!nfs4_error_stateid_expired(task->tk_status) || 4440 nfs4_stateid_is_current(&args->stateid, 4441 args->context, 4442 args->lock_context, 4443 FMODE_READ)) 4444 return false; 4445 rpc_restart_call_prepare(task); 4446 return true; 4447 } 4448 4449 static int nfs4_read_done(struct
rpc_task *task, struct nfs_pgio_header *hdr) 4450 { 4451 4452 dprintk("--> %s\n", __func__); 4453 4454 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4455 return -EAGAIN; 4456 if (nfs4_read_stateid_changed(task, &hdr->args)) 4457 return -EAGAIN; 4458 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 4459 nfs4_read_done_cb(task, hdr); 4460 } 4461 4462 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 4463 struct rpc_message *msg) 4464 { 4465 hdr->timestamp = jiffies; 4466 if (!hdr->pgio_done_cb) 4467 hdr->pgio_done_cb = nfs4_read_done_cb; 4468 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 4469 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0); 4470 } 4471 4472 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 4473 struct nfs_pgio_header *hdr) 4474 { 4475 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode), 4476 &hdr->args.seq_args, 4477 &hdr->res.seq_res, 4478 task)) 4479 return 0; 4480 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 4481 hdr->args.lock_context, 4482 hdr->rw_ops->rw_mode) == -EIO) 4483 return -EIO; 4484 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 4485 return -EIO; 4486 return 0; 4487 } 4488 4489 static int nfs4_write_done_cb(struct rpc_task *task, 4490 struct nfs_pgio_header *hdr) 4491 { 4492 struct inode *inode = hdr->inode; 4493 4494 trace_nfs4_write(hdr, task->tk_status); 4495 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4496 hdr->args.context->state, 4497 NULL) == -EAGAIN) { 4498 rpc_restart_call_prepare(task); 4499 return -EAGAIN; 4500 } 4501 if (task->tk_status >= 0) { 4502 renew_lease(NFS_SERVER(inode), hdr->timestamp); 4503 nfs_writeback_update_inode(hdr); 4504 } 4505 return 0; 4506 } 4507 4508 static bool nfs4_write_stateid_changed(struct rpc_task *task, 4509 struct nfs_pgio_args *args) 4510 { 4511 4512 if (!nfs4_error_stateid_expired(task->tk_status) || 4513 nfs4_stateid_is_current(&args->stateid, 4514 args->context, 4515 args->lock_context, 4516 FMODE_WRITE)) 4517 return false; 4518 rpc_restart_call_prepare(task); 4519 return true; 4520 } 4521 4522 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4523 { 4524 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4525 return -EAGAIN; 4526 if (nfs4_write_stateid_changed(task, &hdr->args)) 4527 return -EAGAIN; 4528 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 4529 nfs4_write_done_cb(task, hdr); 4530 } 4531 4532 static 4533 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 4534 { 4535 /* Don't request attributes for pNFS or O_DIRECT writes */ 4536 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 4537 return false; 4538 /* Otherwise, request attributes if and only if we don't hold 4539 * a delegation 4540 */ 4541 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 4542 } 4543 4544 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 4545 struct rpc_message *msg) 4546 { 4547 struct nfs_server *server = NFS_SERVER(hdr->inode); 4548 4549 if (!nfs4_write_need_cache_consistency_data(hdr)) { 4550 hdr->args.bitmask = NULL; 4551 hdr->res.fattr = NULL; 4552 } else 4553 hdr->args.bitmask = server->cache_consistency_bitmask; 4554 4555 if (!hdr->pgio_done_cb) 4556 hdr->pgio_done_cb = nfs4_write_done_cb; 4557 hdr->res.server = server; 4558 hdr->timestamp = jiffies; 4559 4560 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 4561 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1); 4562 } 4563 4564 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 4565 { 4566 nfs4_setup_sequence(NFS_SERVER(data->inode), 4567 &data->args.seq_args, 4568 &data->res.seq_res, 4569 task); 4570 } 4571 4572 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 4573 { 4574 struct inode *inode = data->inode; 4575 4576 trace_nfs4_commit(data, task->tk_status); 4577 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4578 NULL, NULL) == -EAGAIN) { 4579 rpc_restart_call_prepare(task); 4580 return -EAGAIN; 4581 } 4582 return 0; 4583 } 4584 4585 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 4586 { 4587 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4588 return -EAGAIN; 4589 return data->commit_done_cb(task, data); 4590 } 4591 4592 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 4593 { 4594 struct nfs_server *server = NFS_SERVER(data->inode); 4595 4596 if (data->commit_done_cb == NULL) 4597 data->commit_done_cb = nfs4_commit_done_cb; 4598 data->res.server = server; 4599 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 4600 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4601 } 4602 4603 struct nfs4_renewdata { 4604 struct nfs_client *client; 4605 unsigned long timestamp; 4606 }; 4607 4608 /* 4609 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 4610 * standalone procedure for queueing an asynchronous RENEW. 4611 */ 4612 static void nfs4_renew_release(void *calldata) 4613 { 4614 struct nfs4_renewdata *data = calldata; 4615 struct nfs_client *clp = data->client; 4616 4617 if (atomic_read(&clp->cl_count) > 1) 4618 nfs4_schedule_state_renewal(clp); 4619 nfs_put_client(clp); 4620 kfree(data); 4621 } 4622 4623 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 4624 { 4625 struct nfs4_renewdata *data = calldata; 4626 struct nfs_client *clp = data->client; 4627 unsigned long timestamp = data->timestamp; 4628 4629 trace_nfs4_renew_async(clp, task->tk_status); 4630 switch (task->tk_status) { 4631 case 0: 4632 break; 4633 case -NFS4ERR_LEASE_MOVED: 4634 nfs4_schedule_lease_moved_recovery(clp); 4635 break; 4636 default: 4637 /* Unless we're shutting down, schedule state recovery! 
*/
*/ 4638 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 4639 return; 4640 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 4641 nfs4_schedule_lease_recovery(clp); 4642 return; 4643 } 4644 nfs4_schedule_path_down_recovery(clp); 4645 } 4646 do_renew_lease(clp, timestamp); 4647 } 4648 4649 static const struct rpc_call_ops nfs4_renew_ops = { 4650 .rpc_call_done = nfs4_renew_done, 4651 .rpc_release = nfs4_renew_release, 4652 }; 4653 4654 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 4655 { 4656 struct rpc_message msg = { 4657 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4658 .rpc_argp = clp, 4659 .rpc_cred = cred, 4660 }; 4661 struct nfs4_renewdata *data; 4662 4663 if (renew_flags == 0) 4664 return 0; 4665 if (!atomic_inc_not_zero(&clp->cl_count)) 4666 return -EIO; 4667 data = kmalloc(sizeof(*data), GFP_NOFS); 4668 if (data == NULL) 4669 return -ENOMEM; 4670 data->client = clp; 4671 data->timestamp = jiffies; 4672 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 4673 &nfs4_renew_ops, data); 4674 } 4675 4676 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 4677 { 4678 struct rpc_message msg = { 4679 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4680 .rpc_argp = clp, 4681 .rpc_cred = cred, 4682 }; 4683 unsigned long now = jiffies; 4684 int status; 4685 4686 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4687 if (status < 0) 4688 return status; 4689 do_renew_lease(clp, now); 4690 return 0; 4691 } 4692 4693 static inline int nfs4_server_supports_acls(struct nfs_server *server) 4694 { 4695 return server->caps & NFS_CAP_ACLS; 4696 } 4697 4698 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 4699 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 4700 * the stack. 
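 * (With the usual 64 KiB XATTR_SIZE_MAX and 4 KiB pages that is a 16-entry
 * array of struct page pointers, i.e. 128 bytes of stack on a 64-bit build.)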
4701 */ 4702 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 4703 4704 static int buf_to_pages_noslab(const void *buf, size_t buflen, 4705 struct page **pages) 4706 { 4707 struct page *newpage, **spages; 4708 int rc = 0; 4709 size_t len; 4710 spages = pages; 4711 4712 do { 4713 len = min_t(size_t, PAGE_SIZE, buflen); 4714 newpage = alloc_page(GFP_KERNEL); 4715 4716 if (newpage == NULL) 4717 goto unwind; 4718 memcpy(page_address(newpage), buf, len); 4719 buf += len; 4720 buflen -= len; 4721 *pages++ = newpage; 4722 rc++; 4723 } while (buflen != 0); 4724 4725 return rc; 4726 4727 unwind: 4728 for(; rc > 0; rc--) 4729 __free_page(spages[rc-1]); 4730 return -ENOMEM; 4731 } 4732 4733 struct nfs4_cached_acl { 4734 int cached; 4735 size_t len; 4736 char data[0]; 4737 }; 4738 4739 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 4740 { 4741 struct nfs_inode *nfsi = NFS_I(inode); 4742 4743 spin_lock(&inode->i_lock); 4744 kfree(nfsi->nfs4_acl); 4745 nfsi->nfs4_acl = acl; 4746 spin_unlock(&inode->i_lock); 4747 } 4748 4749 static void nfs4_zap_acl_attr(struct inode *inode) 4750 { 4751 nfs4_set_cached_acl(inode, NULL); 4752 } 4753 4754 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 4755 { 4756 struct nfs_inode *nfsi = NFS_I(inode); 4757 struct nfs4_cached_acl *acl; 4758 int ret = -ENOENT; 4759 4760 spin_lock(&inode->i_lock); 4761 acl = nfsi->nfs4_acl; 4762 if (acl == NULL) 4763 goto out; 4764 if (buf == NULL) /* user is just asking for length */ 4765 goto out_len; 4766 if (acl->cached == 0) 4767 goto out; 4768 ret = -ERANGE; /* see getxattr(2) man page */ 4769 if (acl->len > buflen) 4770 goto out; 4771 memcpy(buf, acl->data, acl->len); 4772 out_len: 4773 ret = acl->len; 4774 out: 4775 spin_unlock(&inode->i_lock); 4776 return ret; 4777 } 4778 4779 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 4780 { 4781 struct nfs4_cached_acl *acl; 4782 size_t buflen = sizeof(*acl) + acl_len; 4783 4784 if (buflen <= PAGE_SIZE) { 4785 acl = kmalloc(buflen, GFP_KERNEL); 4786 if (acl == NULL) 4787 goto out; 4788 acl->cached = 1; 4789 _copy_from_pages(acl->data, pages, pgbase, acl_len); 4790 } else { 4791 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 4792 if (acl == NULL) 4793 goto out; 4794 acl->cached = 0; 4795 } 4796 acl->len = acl_len; 4797 out: 4798 nfs4_set_cached_acl(inode, acl); 4799 } 4800 4801 /* 4802 * The getxattr API returns the required buffer length when called with a 4803 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 4804 * the required buf. On a NULL buf, we send a page of data to the server 4805 * guessing that the ACL request can be serviced by a page. If so, we cache 4806 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 4807 * the cache. If not so, we throw away the page, and cache the required 4808 * length. The next getxattr call will then produce another round trip to 4809 * the server, this time with the input buf of the required size. 
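 *
 * Purely as an illustrative sketch (error handling omitted), userspace
 * typically issues two calls against the xattr name registered for
 * NFSv4 ACLs:
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 *
 * so the first call primes the cache described above and, whenever the
 * ACL fits in a single page, the second call is served from that cache.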
4810 */ 4811 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4812 { 4813 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 4814 struct nfs_getaclargs args = { 4815 .fh = NFS_FH(inode), 4816 .acl_pages = pages, 4817 .acl_len = buflen, 4818 }; 4819 struct nfs_getaclres res = { 4820 .acl_len = buflen, 4821 }; 4822 struct rpc_message msg = { 4823 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 4824 .rpc_argp = &args, 4825 .rpc_resp = &res, 4826 }; 4827 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4828 int ret = -ENOMEM, i; 4829 4830 /* As long as we're doing a round trip to the server anyway, 4831 * let's be prepared for a page of acl data. */ 4832 if (npages == 0) 4833 npages = 1; 4834 if (npages > ARRAY_SIZE(pages)) 4835 return -ERANGE; 4836 4837 for (i = 0; i < npages; i++) { 4838 pages[i] = alloc_page(GFP_KERNEL); 4839 if (!pages[i]) 4840 goto out_free; 4841 } 4842 4843 /* for decoding across pages */ 4844 res.acl_scratch = alloc_page(GFP_KERNEL); 4845 if (!res.acl_scratch) 4846 goto out_free; 4847 4848 args.acl_len = npages * PAGE_SIZE; 4849 4850 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 4851 __func__, buf, buflen, npages, args.acl_len); 4852 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 4853 &msg, &args.seq_args, &res.seq_res, 0); 4854 if (ret) 4855 goto out_free; 4856 4857 /* Handle the case where the passed-in buffer is too short */ 4858 if (res.acl_flags & NFS4_ACL_TRUNC) { 4859 /* Did the user only issue a request for the acl length? */ 4860 if (buf == NULL) 4861 goto out_ok; 4862 ret = -ERANGE; 4863 goto out_free; 4864 } 4865 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 4866 if (buf) { 4867 if (res.acl_len > buflen) { 4868 ret = -ERANGE; 4869 goto out_free; 4870 } 4871 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 4872 } 4873 out_ok: 4874 ret = res.acl_len; 4875 out_free: 4876 for (i = 0; i < npages; i++) 4877 if (pages[i]) 4878 __free_page(pages[i]); 4879 if (res.acl_scratch) 4880 __free_page(res.acl_scratch); 4881 return ret; 4882 } 4883 4884 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4885 { 4886 struct nfs4_exception exception = { }; 4887 ssize_t ret; 4888 do { 4889 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 4890 trace_nfs4_get_acl(inode, ret); 4891 if (ret >= 0) 4892 break; 4893 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 4894 } while (exception.retry); 4895 return ret; 4896 } 4897 4898 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 4899 { 4900 struct nfs_server *server = NFS_SERVER(inode); 4901 int ret; 4902 4903 if (!nfs4_server_supports_acls(server)) 4904 return -EOPNOTSUPP; 4905 ret = nfs_revalidate_inode(server, inode); 4906 if (ret < 0) 4907 return ret; 4908 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 4909 nfs_zap_acl_cache(inode); 4910 ret = nfs4_read_cached_acl(inode, buf, buflen); 4911 if (ret != -ENOENT) 4912 /* -ENOENT is returned if there is no ACL or if there is an ACL 4913 * but no cached acl data, just the acl length */ 4914 return ret; 4915 return nfs4_get_acl_uncached(inode, buf, buflen); 4916 } 4917 4918 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4919 { 4920 struct nfs_server *server = NFS_SERVER(inode); 4921 struct page *pages[NFS4ACL_MAXPAGES]; 4922 struct nfs_setaclargs arg = { 4923 .fh = NFS_FH(inode), 4924 .acl_pages = pages, 4925 .acl_len = buflen, 4926 }; 4927 struct 
nfs_setaclres res; 4928 struct rpc_message msg = { 4929 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 4930 .rpc_argp = &arg, 4931 .rpc_resp = &res, 4932 }; 4933 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4934 int ret, i; 4935 4936 if (!nfs4_server_supports_acls(server)) 4937 return -EOPNOTSUPP; 4938 if (npages > ARRAY_SIZE(pages)) 4939 return -ERANGE; 4940 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages); 4941 if (i < 0) 4942 return i; 4943 nfs4_inode_return_delegation(inode); 4944 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4945 4946 /* 4947 * Free each page after tx, so the only ref left is 4948 * held by the network stack 4949 */ 4950 for (; i > 0; i--) 4951 put_page(pages[i-1]); 4952 4953 /* 4954 * Acl update can result in inode attribute update. 4955 * so mark the attribute cache invalid. 4956 */ 4957 spin_lock(&inode->i_lock); 4958 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 4959 spin_unlock(&inode->i_lock); 4960 nfs_access_zap_cache(inode); 4961 nfs_zap_acl_cache(inode); 4962 return ret; 4963 } 4964 4965 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4966 { 4967 struct nfs4_exception exception = { }; 4968 int err; 4969 do { 4970 err = __nfs4_proc_set_acl(inode, buf, buflen); 4971 trace_nfs4_set_acl(inode, err); 4972 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4973 &exception); 4974 } while (exception.retry); 4975 return err; 4976 } 4977 4978 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 4979 static int _nfs4_get_security_label(struct inode *inode, void *buf, 4980 size_t buflen) 4981 { 4982 struct nfs_server *server = NFS_SERVER(inode); 4983 struct nfs_fattr fattr; 4984 struct nfs4_label label = {0, 0, buflen, buf}; 4985 4986 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4987 struct nfs4_getattr_arg arg = { 4988 .fh = NFS_FH(inode), 4989 .bitmask = bitmask, 4990 }; 4991 struct nfs4_getattr_res res = { 4992 .fattr = &fattr, 4993 .label = &label, 4994 .server = server, 4995 }; 4996 struct rpc_message msg = { 4997 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4998 .rpc_argp = &arg, 4999 .rpc_resp = &res, 5000 }; 5001 int ret; 5002 5003 nfs_fattr_init(&fattr); 5004 5005 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 5006 if (ret) 5007 return ret; 5008 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 5009 return -ENOENT; 5010 if (buflen < label.len) 5011 return -ERANGE; 5012 return 0; 5013 } 5014 5015 static int nfs4_get_security_label(struct inode *inode, void *buf, 5016 size_t buflen) 5017 { 5018 struct nfs4_exception exception = { }; 5019 int err; 5020 5021 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 5022 return -EOPNOTSUPP; 5023 5024 do { 5025 err = _nfs4_get_security_label(inode, buf, buflen); 5026 trace_nfs4_get_security_label(inode, err); 5027 err = nfs4_handle_exception(NFS_SERVER(inode), err, 5028 &exception); 5029 } while (exception.retry); 5030 return err; 5031 } 5032 5033 static int _nfs4_do_set_security_label(struct inode *inode, 5034 struct nfs4_label *ilabel, 5035 struct nfs_fattr *fattr, 5036 struct nfs4_label *olabel) 5037 { 5038 5039 struct iattr sattr = {0}; 5040 struct nfs_server *server = NFS_SERVER(inode); 5041 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 5042 struct nfs_setattrargs arg = { 5043 .fh = NFS_FH(inode), 5044 .iap = &sattr, 5045 .server = server, 5046 .bitmask = bitmask, 5047 .label = ilabel, 5048 }; 5049 struct nfs_setattrres res = { 5050 .fattr = fattr, 5051 
.label = olabel, 5052 .server = server, 5053 }; 5054 struct rpc_message msg = { 5055 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 5056 .rpc_argp = &arg, 5057 .rpc_resp = &res, 5058 }; 5059 int status; 5060 5061 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 5062 5063 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5064 if (status) 5065 dprintk("%s failed: %d\n", __func__, status); 5066 5067 return status; 5068 } 5069 5070 static int nfs4_do_set_security_label(struct inode *inode, 5071 struct nfs4_label *ilabel, 5072 struct nfs_fattr *fattr, 5073 struct nfs4_label *olabel) 5074 { 5075 struct nfs4_exception exception = { }; 5076 int err; 5077 5078 do { 5079 err = _nfs4_do_set_security_label(inode, ilabel, 5080 fattr, olabel); 5081 trace_nfs4_set_security_label(inode, err); 5082 err = nfs4_handle_exception(NFS_SERVER(inode), err, 5083 &exception); 5084 } while (exception.retry); 5085 return err; 5086 } 5087 5088 static int 5089 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) 5090 { 5091 struct nfs4_label ilabel, *olabel = NULL; 5092 struct nfs_fattr fattr; 5093 struct rpc_cred *cred; 5094 int status; 5095 5096 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 5097 return -EOPNOTSUPP; 5098 5099 nfs_fattr_init(&fattr); 5100 5101 ilabel.pi = 0; 5102 ilabel.lfs = 0; 5103 ilabel.label = (char *)buf; 5104 ilabel.len = buflen; 5105 5106 cred = rpc_lookup_cred(); 5107 if (IS_ERR(cred)) 5108 return PTR_ERR(cred); 5109 5110 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 5111 if (IS_ERR(olabel)) { 5112 status = PTR_ERR(olabel); 5113 goto out; 5114 } 5115 5116 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel); 5117 if (status == 0) 5118 nfs_setsecurity(inode, &fattr, olabel); 5119 5120 nfs4_label_free(olabel); 5121 out: 5122 put_rpccred(cred); 5123 return status; 5124 } 5125 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 5126 5127 5128 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 5129 nfs4_verifier *bootverf) 5130 { 5131 __be32 verf[2]; 5132 5133 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 5134 /* An impossible timestamp guarantees this value 5135 * will never match a generated boot time. */ 5136 verf[0] = 0; 5137 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1); 5138 } else { 5139 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 5140 verf[0] = cpu_to_be32(nn->boot_time.tv_sec); 5141 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec); 5142 } 5143 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 5144 } 5145 5146 static int 5147 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 5148 { 5149 size_t len; 5150 char *str; 5151 5152 if (clp->cl_owner_id != NULL) 5153 return 0; 5154 5155 rcu_read_lock(); 5156 len = 14 + strlen(clp->cl_ipaddr) + 1 + 5157 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 5158 1 + 5159 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) + 5160 1; 5161 rcu_read_unlock(); 5162 5163 if (len > NFS4_OPAQUE_LIMIT + 1) 5164 return -EINVAL; 5165 5166 /* 5167 * Since this string is allocated at mount time, and held until the 5168 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5169 * about a memory-reclaim deadlock.
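 *
 * The resulting (non-uniform) identifier has the form
 * "Linux NFSv4.0 <client-ip>/<server-addr> <proto>", e.g.
 * "Linux NFSv4.0 192.0.2.10/192.0.2.1 tcp" (addresses illustrative).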
5170 */ 5171 str = kmalloc(len, GFP_KERNEL); 5172 if (!str) 5173 return -ENOMEM; 5174 5175 rcu_read_lock(); 5176 scnprintf(str, len, "Linux NFSv4.0 %s/%s %s", 5177 clp->cl_ipaddr, 5178 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR), 5179 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)); 5180 rcu_read_unlock(); 5181 5182 clp->cl_owner_id = str; 5183 return 0; 5184 } 5185 5186 static int 5187 nfs4_init_uniquifier_client_string(struct nfs_client *clp) 5188 { 5189 size_t len; 5190 char *str; 5191 5192 len = 10 + 10 + 1 + 10 + 1 + 5193 strlen(nfs4_client_id_uniquifier) + 1 + 5194 strlen(clp->cl_rpcclient->cl_nodename) + 1; 5195 5196 if (len > NFS4_OPAQUE_LIMIT + 1) 5197 return -EINVAL; 5198 5199 /* 5200 * Since this string is allocated at mount time, and held until the 5201 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5202 * about a memory-reclaim deadlock. 5203 */ 5204 str = kmalloc(len, GFP_KERNEL); 5205 if (!str) 5206 return -ENOMEM; 5207 5208 scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 5209 clp->rpc_ops->version, clp->cl_minorversion, 5210 nfs4_client_id_uniquifier, 5211 clp->cl_rpcclient->cl_nodename); 5212 clp->cl_owner_id = str; 5213 return 0; 5214 } 5215 5216 static int 5217 nfs4_init_uniform_client_string(struct nfs_client *clp) 5218 { 5219 size_t len; 5220 char *str; 5221 5222 if (clp->cl_owner_id != NULL) 5223 return 0; 5224 5225 if (nfs4_client_id_uniquifier[0] != '\0') 5226 return nfs4_init_uniquifier_client_string(clp); 5227 5228 len = 10 + 10 + 1 + 10 + 1 + 5229 strlen(clp->cl_rpcclient->cl_nodename) + 1; 5230 5231 if (len > NFS4_OPAQUE_LIMIT + 1) 5232 return -EINVAL; 5233 5234 /* 5235 * Since this string is allocated at mount time, and held until the 5236 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5237 * about a memory-reclaim deadlock. 5238 */ 5239 str = kmalloc(len, GFP_KERNEL); 5240 if (!str) 5241 return -ENOMEM; 5242 5243 scnprintf(str, len, "Linux NFSv%u.%u %s", 5244 clp->rpc_ops->version, clp->cl_minorversion, 5245 clp->cl_rpcclient->cl_nodename); 5246 clp->cl_owner_id = str; 5247 return 0; 5248 } 5249 5250 /* 5251 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 5252 * services. Advertise one based on the address family of the 5253 * clientaddr. 5254 */ 5255 static unsigned int 5256 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 5257 { 5258 if (strchr(clp->cl_ipaddr, ':') != NULL) 5259 return scnprintf(buf, len, "tcp6"); 5260 else 5261 return scnprintf(buf, len, "tcp"); 5262 } 5263 5264 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 5265 { 5266 struct nfs4_setclientid *sc = calldata; 5267 5268 if (task->tk_status == 0) 5269 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 5270 } 5271 5272 static const struct rpc_call_ops nfs4_setclientid_ops = { 5273 .rpc_call_done = nfs4_setclientid_done, 5274 }; 5275 5276 /** 5277 * nfs4_proc_setclientid - Negotiate client ID 5278 * @clp: state data structure 5279 * @program: RPC program for NFSv4 callback service 5280 * @port: IP port number for NFS4 callback service 5281 * @cred: RPC credential to use for this call 5282 * @res: where to place the result 5283 * 5284 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
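 *
 * The callback service is advertised as a universal address of the form
 * <clientaddr>.<port-high>.<port-low>; e.g. port 2049 on 192.0.2.10 is
 * sent as "192.0.2.10.8.1", since 2049 == 8 * 256 + 1 (address illustrative).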
5285 */ 5286 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 5287 unsigned short port, struct rpc_cred *cred, 5288 struct nfs4_setclientid_res *res) 5289 { 5290 nfs4_verifier sc_verifier; 5291 struct nfs4_setclientid setclientid = { 5292 .sc_verifier = &sc_verifier, 5293 .sc_prog = program, 5294 .sc_clnt = clp, 5295 }; 5296 struct rpc_message msg = { 5297 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 5298 .rpc_argp = &setclientid, 5299 .rpc_resp = res, 5300 .rpc_cred = cred, 5301 }; 5302 struct rpc_task *task; 5303 struct rpc_task_setup task_setup_data = { 5304 .rpc_client = clp->cl_rpcclient, 5305 .rpc_message = &msg, 5306 .callback_ops = &nfs4_setclientid_ops, 5307 .callback_data = &setclientid, 5308 .flags = RPC_TASK_TIMEOUT, 5309 }; 5310 int status; 5311 5312 /* nfs_client_id4 */ 5313 nfs4_init_boot_verifier(clp, &sc_verifier); 5314 5315 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 5316 status = nfs4_init_uniform_client_string(clp); 5317 else 5318 status = nfs4_init_nonuniform_client_string(clp); 5319 5320 if (status) 5321 goto out; 5322 5323 /* cb_client4 */ 5324 setclientid.sc_netid_len = 5325 nfs4_init_callback_netid(clp, 5326 setclientid.sc_netid, 5327 sizeof(setclientid.sc_netid)); 5328 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 5329 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 5330 clp->cl_ipaddr, port >> 8, port & 255); 5331 5332 dprintk("NFS call setclientid auth=%s, '%s'\n", 5333 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5334 clp->cl_owner_id); 5335 task = rpc_run_task(&task_setup_data); 5336 if (IS_ERR(task)) { 5337 status = PTR_ERR(task); 5338 goto out; 5339 } 5340 status = task->tk_status; 5341 if (setclientid.sc_cred) { 5342 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 5343 put_rpccred(setclientid.sc_cred); 5344 } 5345 rpc_put_task(task); 5346 out: 5347 trace_nfs4_setclientid(clp, status); 5348 dprintk("NFS reply setclientid: %d\n", status); 5349 return status; 5350 } 5351 5352 /** 5353 * nfs4_proc_setclientid_confirm - Confirm client ID 5354 * @clp: state data structure 5355 * @res: result of a previous SETCLIENTID 5356 * @cred: RPC credential to use for this call 5357 * 5358 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
5359 */ 5360 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 5361 struct nfs4_setclientid_res *arg, 5362 struct rpc_cred *cred) 5363 { 5364 struct rpc_message msg = { 5365 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 5366 .rpc_argp = arg, 5367 .rpc_cred = cred, 5368 }; 5369 int status; 5370 5371 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 5372 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5373 clp->cl_clientid); 5374 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5375 trace_nfs4_setclientid_confirm(clp, status); 5376 dprintk("NFS reply setclientid_confirm: %d\n", status); 5377 return status; 5378 } 5379 5380 struct nfs4_delegreturndata { 5381 struct nfs4_delegreturnargs args; 5382 struct nfs4_delegreturnres res; 5383 struct nfs_fh fh; 5384 nfs4_stateid stateid; 5385 unsigned long timestamp; 5386 struct nfs_fattr fattr; 5387 int rpc_status; 5388 struct inode *inode; 5389 bool roc; 5390 u32 roc_barrier; 5391 }; 5392 5393 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 5394 { 5395 struct nfs4_delegreturndata *data = calldata; 5396 5397 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5398 return; 5399 5400 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 5401 switch (task->tk_status) { 5402 case 0: 5403 renew_lease(data->res.server, data->timestamp); 5404 case -NFS4ERR_ADMIN_REVOKED: 5405 case -NFS4ERR_DELEG_REVOKED: 5406 case -NFS4ERR_BAD_STATEID: 5407 case -NFS4ERR_OLD_STATEID: 5408 case -NFS4ERR_STALE_STATEID: 5409 case -NFS4ERR_EXPIRED: 5410 task->tk_status = 0; 5411 if (data->roc) 5412 pnfs_roc_set_barrier(data->inode, data->roc_barrier); 5413 break; 5414 default: 5415 if (nfs4_async_handle_error(task, data->res.server, 5416 NULL, NULL) == -EAGAIN) { 5417 rpc_restart_call_prepare(task); 5418 return; 5419 } 5420 } 5421 data->rpc_status = task->tk_status; 5422 } 5423 5424 static void nfs4_delegreturn_release(void *calldata) 5425 { 5426 struct nfs4_delegreturndata *data = calldata; 5427 struct inode *inode = data->inode; 5428 5429 if (inode) { 5430 if (data->roc) 5431 pnfs_roc_release(inode); 5432 nfs_iput_and_deactive(inode); 5433 } 5434 kfree(calldata); 5435 } 5436 5437 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 5438 { 5439 struct nfs4_delegreturndata *d_data; 5440 5441 d_data = (struct nfs4_delegreturndata *)data; 5442 5443 if (nfs4_wait_on_layoutreturn(d_data->inode, task)) 5444 return; 5445 5446 if (d_data->roc) 5447 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier); 5448 5449 nfs4_setup_sequence(d_data->res.server, 5450 &d_data->args.seq_args, 5451 &d_data->res.seq_res, 5452 task); 5453 } 5454 5455 static const struct rpc_call_ops nfs4_delegreturn_ops = { 5456 .rpc_call_prepare = nfs4_delegreturn_prepare, 5457 .rpc_call_done = nfs4_delegreturn_done, 5458 .rpc_release = nfs4_delegreturn_release, 5459 }; 5460 5461 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5462 { 5463 struct nfs4_delegreturndata *data; 5464 struct nfs_server *server = NFS_SERVER(inode); 5465 struct rpc_task *task; 5466 struct rpc_message msg = { 5467 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 5468 .rpc_cred = cred, 5469 }; 5470 struct rpc_task_setup task_setup_data = { 5471 .rpc_client = server->client, 5472 .rpc_message = &msg, 5473 .callback_ops = &nfs4_delegreturn_ops, 5474 .flags = RPC_TASK_ASYNC, 5475 }; 5476 int status = 0; 5477 5478 data = kzalloc(sizeof(*data), 
GFP_NOFS); 5479 if (data == NULL) 5480 return -ENOMEM; 5481 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 5482 5483 nfs4_state_protect(server->nfs_client, 5484 NFS_SP4_MACH_CRED_CLEANUP, 5485 &task_setup_data.rpc_client, &msg); 5486 5487 data->args.fhandle = &data->fh; 5488 data->args.stateid = &data->stateid; 5489 data->args.bitmask = server->cache_consistency_bitmask; 5490 nfs_copy_fh(&data->fh, NFS_FH(inode)); 5491 nfs4_stateid_copy(&data->stateid, stateid); 5492 data->res.fattr = &data->fattr; 5493 data->res.server = server; 5494 nfs_fattr_init(data->res.fattr); 5495 data->timestamp = jiffies; 5496 data->rpc_status = 0; 5497 data->inode = nfs_igrab_and_active(inode); 5498 if (data->inode) 5499 data->roc = nfs4_roc(inode); 5500 5501 task_setup_data.callback_data = data; 5502 msg.rpc_argp = &data->args; 5503 msg.rpc_resp = &data->res; 5504 task = rpc_run_task(&task_setup_data); 5505 if (IS_ERR(task)) 5506 return PTR_ERR(task); 5507 if (!issync) 5508 goto out; 5509 status = nfs4_wait_for_completion_rpc_task(task); 5510 if (status != 0) 5511 goto out; 5512 status = data->rpc_status; 5513 if (status == 0) 5514 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 5515 else 5516 nfs_refresh_inode(inode, &data->fattr); 5517 out: 5518 rpc_put_task(task); 5519 return status; 5520 } 5521 5522 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5523 { 5524 struct nfs_server *server = NFS_SERVER(inode); 5525 struct nfs4_exception exception = { }; 5526 int err; 5527 do { 5528 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 5529 trace_nfs4_delegreturn(inode, stateid, err); 5530 switch (err) { 5531 case -NFS4ERR_STALE_STATEID: 5532 case -NFS4ERR_EXPIRED: 5533 case 0: 5534 return 0; 5535 } 5536 err = nfs4_handle_exception(server, err, &exception); 5537 } while (exception.retry); 5538 return err; 5539 } 5540 5541 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5542 { 5543 struct inode *inode = state->inode; 5544 struct nfs_server *server = NFS_SERVER(inode); 5545 struct nfs_client *clp = server->nfs_client; 5546 struct nfs_lockt_args arg = { 5547 .fh = NFS_FH(inode), 5548 .fl = request, 5549 }; 5550 struct nfs_lockt_res res = { 5551 .denied = request, 5552 }; 5553 struct rpc_message msg = { 5554 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 5555 .rpc_argp = &arg, 5556 .rpc_resp = &res, 5557 .rpc_cred = state->owner->so_cred, 5558 }; 5559 struct nfs4_lock_state *lsp; 5560 int status; 5561 5562 arg.lock_owner.clientid = clp->cl_clientid; 5563 status = nfs4_set_lock_state(state, request); 5564 if (status != 0) 5565 goto out; 5566 lsp = request->fl_u.nfs4_fl.owner; 5567 arg.lock_owner.id = lsp->ls_seqid.owner_id; 5568 arg.lock_owner.s_dev = server->s_dev; 5569 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5570 switch (status) { 5571 case 0: 5572 request->fl_type = F_UNLCK; 5573 break; 5574 case -NFS4ERR_DENIED: 5575 status = 0; 5576 } 5577 request->fl_ops->fl_release_private(request); 5578 request->fl_ops = NULL; 5579 out: 5580 return status; 5581 } 5582 5583 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5584 { 5585 struct nfs4_exception exception = { }; 5586 int err; 5587 5588 do { 5589 err = _nfs4_proc_getlk(state, cmd, request); 5590 trace_nfs4_get_lock(request, state, cmd, err); 5591 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 5592 &exception); 5593 } while 
(exception.retry); 5594 return err; 5595 } 5596 5597 struct nfs4_unlockdata { 5598 struct nfs_locku_args arg; 5599 struct nfs_locku_res res; 5600 struct nfs4_lock_state *lsp; 5601 struct nfs_open_context *ctx; 5602 struct file_lock fl; 5603 struct nfs_server *server; 5604 unsigned long timestamp; 5605 }; 5606 5607 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 5608 struct nfs_open_context *ctx, 5609 struct nfs4_lock_state *lsp, 5610 struct nfs_seqid *seqid) 5611 { 5612 struct nfs4_unlockdata *p; 5613 struct inode *inode = lsp->ls_state->inode; 5614 5615 p = kzalloc(sizeof(*p), GFP_NOFS); 5616 if (p == NULL) 5617 return NULL; 5618 p->arg.fh = NFS_FH(inode); 5619 p->arg.fl = &p->fl; 5620 p->arg.seqid = seqid; 5621 p->res.seqid = seqid; 5622 p->lsp = lsp; 5623 atomic_inc(&lsp->ls_count); 5624 /* Ensure we don't close file until we're done freeing locks! */ 5625 p->ctx = get_nfs_open_context(ctx); 5626 memcpy(&p->fl, fl, sizeof(p->fl)); 5627 p->server = NFS_SERVER(inode); 5628 return p; 5629 } 5630 5631 static void nfs4_locku_release_calldata(void *data) 5632 { 5633 struct nfs4_unlockdata *calldata = data; 5634 nfs_free_seqid(calldata->arg.seqid); 5635 nfs4_put_lock_state(calldata->lsp); 5636 put_nfs_open_context(calldata->ctx); 5637 kfree(calldata); 5638 } 5639 5640 static void nfs4_locku_done(struct rpc_task *task, void *data) 5641 { 5642 struct nfs4_unlockdata *calldata = data; 5643 5644 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 5645 return; 5646 switch (task->tk_status) { 5647 case 0: 5648 renew_lease(calldata->server, calldata->timestamp); 5649 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl); 5650 if (nfs4_update_lock_stateid(calldata->lsp, 5651 &calldata->res.stateid)) 5652 break; 5653 case -NFS4ERR_BAD_STATEID: 5654 case -NFS4ERR_OLD_STATEID: 5655 case -NFS4ERR_STALE_STATEID: 5656 case -NFS4ERR_EXPIRED: 5657 if (!nfs4_stateid_match(&calldata->arg.stateid, 5658 &calldata->lsp->ls_stateid)) 5659 rpc_restart_call_prepare(task); 5660 break; 5661 default: 5662 if (nfs4_async_handle_error(task, calldata->server, 5663 NULL, NULL) == -EAGAIN) 5664 rpc_restart_call_prepare(task); 5665 } 5666 nfs_release_seqid(calldata->arg.seqid); 5667 } 5668 5669 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 5670 { 5671 struct nfs4_unlockdata *calldata = data; 5672 5673 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 5674 goto out_wait; 5675 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid); 5676 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 5677 /* Note: exit _without_ running nfs4_locku_done */ 5678 goto out_no_action; 5679 } 5680 calldata->timestamp = jiffies; 5681 if (nfs4_setup_sequence(calldata->server, 5682 &calldata->arg.seq_args, 5683 &calldata->res.seq_res, 5684 task) != 0) 5685 nfs_release_seqid(calldata->arg.seqid); 5686 return; 5687 out_no_action: 5688 task->tk_action = NULL; 5689 out_wait: 5690 nfs4_sequence_done(task, &calldata->res.seq_res); 5691 } 5692 5693 static const struct rpc_call_ops nfs4_locku_ops = { 5694 .rpc_call_prepare = nfs4_locku_prepare, 5695 .rpc_call_done = nfs4_locku_done, 5696 .rpc_release = nfs4_locku_release_calldata, 5697 }; 5698 5699 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 5700 struct nfs_open_context *ctx, 5701 struct nfs4_lock_state *lsp, 5702 struct nfs_seqid *seqid) 5703 { 5704 struct nfs4_unlockdata *data; 5705 struct rpc_message msg = { 5706 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 5707 .rpc_cred = 
ctx->cred, 5708 }; 5709 struct rpc_task_setup task_setup_data = { 5710 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 5711 .rpc_message = &msg, 5712 .callback_ops = &nfs4_locku_ops, 5713 .workqueue = nfsiod_workqueue, 5714 .flags = RPC_TASK_ASYNC, 5715 }; 5716 5717 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 5718 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 5719 5720 /* Ensure this is an unlock - when canceling a lock, the 5721 * canceled lock is passed in, and it won't be an unlock. 5722 */ 5723 fl->fl_type = F_UNLCK; 5724 5725 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 5726 if (data == NULL) { 5727 nfs_free_seqid(seqid); 5728 return ERR_PTR(-ENOMEM); 5729 } 5730 5731 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5732 msg.rpc_argp = &data->arg; 5733 msg.rpc_resp = &data->res; 5734 task_setup_data.callback_data = data; 5735 return rpc_run_task(&task_setup_data); 5736 } 5737 5738 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 5739 { 5740 struct inode *inode = state->inode; 5741 struct nfs4_state_owner *sp = state->owner; 5742 struct nfs_inode *nfsi = NFS_I(inode); 5743 struct nfs_seqid *seqid; 5744 struct nfs4_lock_state *lsp; 5745 struct rpc_task *task; 5746 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5747 int status = 0; 5748 unsigned char fl_flags = request->fl_flags; 5749 5750 status = nfs4_set_lock_state(state, request); 5751 /* Unlock _before_ we do the RPC call */ 5752 request->fl_flags |= FL_EXISTS; 5753 /* Exclude nfs_delegation_claim_locks() */ 5754 mutex_lock(&sp->so_delegreturn_mutex); 5755 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 5756 down_read(&nfsi->rwsem); 5757 if (locks_lock_inode_wait(inode, request) == -ENOENT) { 5758 up_read(&nfsi->rwsem); 5759 mutex_unlock(&sp->so_delegreturn_mutex); 5760 goto out; 5761 } 5762 up_read(&nfsi->rwsem); 5763 mutex_unlock(&sp->so_delegreturn_mutex); 5764 if (status != 0) 5765 goto out; 5766 /* Is this a delegated lock? 
*/ 5767 lsp = request->fl_u.nfs4_fl.owner; 5768 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 5769 goto out; 5770 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 5771 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 5772 status = -ENOMEM; 5773 if (IS_ERR(seqid)) 5774 goto out; 5775 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 5776 status = PTR_ERR(task); 5777 if (IS_ERR(task)) 5778 goto out; 5779 status = nfs4_wait_for_completion_rpc_task(task); 5780 rpc_put_task(task); 5781 out: 5782 request->fl_flags = fl_flags; 5783 trace_nfs4_unlock(request, state, F_SETLK, status); 5784 return status; 5785 } 5786 5787 struct nfs4_lockdata { 5788 struct nfs_lock_args arg; 5789 struct nfs_lock_res res; 5790 struct nfs4_lock_state *lsp; 5791 struct nfs_open_context *ctx; 5792 struct file_lock fl; 5793 unsigned long timestamp; 5794 int rpc_status; 5795 int cancelled; 5796 struct nfs_server *server; 5797 }; 5798 5799 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 5800 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 5801 gfp_t gfp_mask) 5802 { 5803 struct nfs4_lockdata *p; 5804 struct inode *inode = lsp->ls_state->inode; 5805 struct nfs_server *server = NFS_SERVER(inode); 5806 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5807 5808 p = kzalloc(sizeof(*p), gfp_mask); 5809 if (p == NULL) 5810 return NULL; 5811 5812 p->arg.fh = NFS_FH(inode); 5813 p->arg.fl = &p->fl; 5814 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 5815 if (IS_ERR(p->arg.open_seqid)) 5816 goto out_free; 5817 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 5818 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 5819 if (IS_ERR(p->arg.lock_seqid)) 5820 goto out_free_seqid; 5821 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 5822 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 5823 p->arg.lock_owner.s_dev = server->s_dev; 5824 p->res.lock_seqid = p->arg.lock_seqid; 5825 p->lsp = lsp; 5826 p->server = server; 5827 atomic_inc(&lsp->ls_count); 5828 p->ctx = get_nfs_open_context(ctx); 5829 get_file(fl->fl_file); 5830 memcpy(&p->fl, fl, sizeof(p->fl)); 5831 return p; 5832 out_free_seqid: 5833 nfs_free_seqid(p->arg.open_seqid); 5834 out_free: 5835 kfree(p); 5836 return NULL; 5837 } 5838 5839 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 5840 { 5841 struct nfs4_lockdata *data = calldata; 5842 struct nfs4_state *state = data->lsp->ls_state; 5843 5844 dprintk("%s: begin!\n", __func__); 5845 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 5846 goto out_wait; 5847 /* Do we need to do an open_to_lock_owner? 
*/ 5848 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 5849 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 5850 goto out_release_lock_seqid; 5851 } 5852 nfs4_stateid_copy(&data->arg.open_stateid, 5853 &state->open_stateid); 5854 data->arg.new_lock_owner = 1; 5855 data->res.open_seqid = data->arg.open_seqid; 5856 } else { 5857 data->arg.new_lock_owner = 0; 5858 nfs4_stateid_copy(&data->arg.lock_stateid, 5859 &data->lsp->ls_stateid); 5860 } 5861 if (!nfs4_valid_open_stateid(state)) { 5862 data->rpc_status = -EBADF; 5863 task->tk_action = NULL; 5864 goto out_release_open_seqid; 5865 } 5866 data->timestamp = jiffies; 5867 if (nfs4_setup_sequence(data->server, 5868 &data->arg.seq_args, 5869 &data->res.seq_res, 5870 task) == 0) 5871 return; 5872 out_release_open_seqid: 5873 nfs_release_seqid(data->arg.open_seqid); 5874 out_release_lock_seqid: 5875 nfs_release_seqid(data->arg.lock_seqid); 5876 out_wait: 5877 nfs4_sequence_done(task, &data->res.seq_res); 5878 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 5879 } 5880 5881 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 5882 { 5883 struct nfs4_lockdata *data = calldata; 5884 struct nfs4_lock_state *lsp = data->lsp; 5885 5886 dprintk("%s: begin!\n", __func__); 5887 5888 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5889 return; 5890 5891 data->rpc_status = task->tk_status; 5892 switch (task->tk_status) { 5893 case 0: 5894 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 5895 data->timestamp); 5896 if (data->arg.new_lock) { 5897 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 5898 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) { 5899 rpc_restart_call_prepare(task); 5900 break; 5901 } 5902 } 5903 if (data->arg.new_lock_owner != 0) { 5904 nfs_confirm_seqid(&lsp->ls_seqid, 0); 5905 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 5906 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 5907 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 5908 rpc_restart_call_prepare(task); 5909 break; 5910 case -NFS4ERR_BAD_STATEID: 5911 case -NFS4ERR_OLD_STATEID: 5912 case -NFS4ERR_STALE_STATEID: 5913 case -NFS4ERR_EXPIRED: 5914 if (data->arg.new_lock_owner != 0) { 5915 if (!nfs4_stateid_match(&data->arg.open_stateid, 5916 &lsp->ls_state->open_stateid)) 5917 rpc_restart_call_prepare(task); 5918 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 5919 &lsp->ls_stateid)) 5920 rpc_restart_call_prepare(task); 5921 } 5922 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 5923 } 5924 5925 static void nfs4_lock_release(void *calldata) 5926 { 5927 struct nfs4_lockdata *data = calldata; 5928 5929 dprintk("%s: begin!\n", __func__); 5930 nfs_free_seqid(data->arg.open_seqid); 5931 if (data->cancelled != 0) { 5932 struct rpc_task *task; 5933 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 5934 data->arg.lock_seqid); 5935 if (!IS_ERR(task)) 5936 rpc_put_task_async(task); 5937 dprintk("%s: cancelling lock!\n", __func__); 5938 } else 5939 nfs_free_seqid(data->arg.lock_seqid); 5940 nfs4_put_lock_state(data->lsp); 5941 put_nfs_open_context(data->ctx); 5942 fput(data->fl.fl_file); 5943 kfree(data); 5944 dprintk("%s: done!\n", __func__); 5945 } 5946 5947 static const struct rpc_call_ops nfs4_lock_ops = { 5948 .rpc_call_prepare = nfs4_lock_prepare, 5949 .rpc_call_done = nfs4_lock_done, 5950 .rpc_release = nfs4_lock_release, 5951 }; 5952 5953 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 5954 
{
	switch (error) {
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
		if (new_lock_owner != 0 ||
		    test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
			nfs4_schedule_stateid_recovery(server, lsp->ls_state);
		break;
	case -NFS4ERR_STALE_STATEID:
		lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
		/* Fall through */
	case -NFS4ERR_EXPIRED:
		nfs4_schedule_lease_recovery(server->nfs_client);
	}
}

static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
	struct nfs4_lockdata *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
		.rpc_cred = state->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_lock_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int ret;

	dprintk("%s: begin!\n", __func__);
	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
			fl->fl_u.nfs4_fl.owner,
			recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	if (IS_SETLKW(cmd))
		data->arg.block = 1;
	nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	if (recovery_type > NFS_LOCK_NEW) {
		if (recovery_type == NFS_LOCK_RECLAIM)
			data->arg.reclaim = NFS_LOCK_RECLAIM;
		nfs4_set_sequence_privileged(&data->arg.seq_args);
	} else
		data->arg.new_lock = 1;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	ret = nfs4_wait_for_completion_rpc_task(task);
	if (ret == 0) {
		ret = data->rpc_status;
		if (ret)
			nfs4_handle_setlk_error(data->server, data->lsp,
					data->arg.new_lock_owner, ret);
	} else
		data->cancelled = 1;
	rpc_put_task(task);
	dprintk("%s: done, ret = %d!\n", __func__, ret);
	trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
	return ret;
}

static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = {
		.inode = state->inode,
	};
	int err;

	do {
		/* Cache the lock if possible... */
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = {
		.inode = state->inode,
	};
	int err;

	err = nfs4_set_lock_state(state, request);
	if (err != 0)
		return err;
	if (!recover_lost_locks) {
		set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
		return 0;
	}
	do {
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
		switch (err) {
		default:
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			nfs4_handle_exception(server, err, &exception);
			err = 0;
		}
	} while (exception.retry);
out:
	return err;
}

#if defined(CONFIG_NFS_V4_1)
/**
 * nfs41_check_expired_locks - possibly free a lock stateid
 *
 * @state: NFSv4 state for an inode
 *
 * Returns NFS_OK if recovery for this stateid is now finished.
 * Otherwise a negative NFS4ERR value is returned.
 */
static int nfs41_check_expired_locks(struct nfs4_state *state)
{
	int status, ret = -NFS4ERR_BAD_STATEID;
	struct nfs4_lock_state *lsp;
	struct nfs_server *server = NFS_SERVER(state->inode);

	list_for_each_entry(lsp, &state->lock_states, ls_locks) {
		if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
			struct rpc_cred *cred = lsp->ls_state->owner->so_cred;

			status = nfs41_test_stateid(server,
					&lsp->ls_stateid,
					cred);
			trace_nfs4_test_lock_stateid(state, lsp, status);
			if (status != NFS_OK) {
				/* Free the stateid unless the server
				 * informs us the stateid is unrecognized. */
				if (status != -NFS4ERR_BAD_STATEID)
					nfs41_free_stateid(server,
							&lsp->ls_stateid,
							cred);
				clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
				ret = status;
			}
		}
	}

	return ret;
}

static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
	int status = NFS_OK;

	if (test_bit(LK_STATE_IN_USE, &state->flags))
		status = nfs41_check_expired_locks(state);
	if (status != NFS_OK)
		status = nfs4_lock_expired(state, request);
	return status;
}
#endif

static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs4_state_owner *sp = state->owner;
	unsigned char fl_flags = request->fl_flags;
	int status;

	request->fl_flags |= FL_ACCESS;
	status = locks_lock_inode_wait(state->inode, request);
	if (status < 0)
		goto out;
	mutex_lock(&sp->so_delegreturn_mutex);
	down_read(&nfsi->rwsem);
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		/* Yes: cache locks! */
		/* ...but avoid races with delegation recall...
*/ 6141 request->fl_flags = fl_flags & ~FL_SLEEP; 6142 status = locks_lock_inode_wait(state->inode, request); 6143 up_read(&nfsi->rwsem); 6144 mutex_unlock(&sp->so_delegreturn_mutex); 6145 goto out; 6146 } 6147 up_read(&nfsi->rwsem); 6148 mutex_unlock(&sp->so_delegreturn_mutex); 6149 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 6150 out: 6151 request->fl_flags = fl_flags; 6152 return status; 6153 } 6154 6155 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6156 { 6157 struct nfs4_exception exception = { 6158 .state = state, 6159 .inode = state->inode, 6160 }; 6161 int err; 6162 6163 do { 6164 err = _nfs4_proc_setlk(state, cmd, request); 6165 if (err == -NFS4ERR_DENIED) 6166 err = -EAGAIN; 6167 err = nfs4_handle_exception(NFS_SERVER(state->inode), 6168 err, &exception); 6169 } while (exception.retry); 6170 return err; 6171 } 6172 6173 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 6174 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 6175 6176 static int 6177 nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd, 6178 struct file_lock *request) 6179 { 6180 int status = -ERESTARTSYS; 6181 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 6182 6183 while(!signalled()) { 6184 status = nfs4_proc_setlk(state, cmd, request); 6185 if ((status != -EAGAIN) || IS_SETLK(cmd)) 6186 break; 6187 freezable_schedule_timeout_interruptible(timeout); 6188 timeout *= 2; 6189 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout); 6190 status = -ERESTARTSYS; 6191 } 6192 return status; 6193 } 6194 6195 #ifdef CONFIG_NFS_V4_1 6196 struct nfs4_lock_waiter { 6197 struct task_struct *task; 6198 struct inode *inode; 6199 struct nfs_lowner *owner; 6200 bool notified; 6201 }; 6202 6203 static int 6204 nfs4_wake_lock_waiter(wait_queue_t *wait, unsigned int mode, int flags, void *key) 6205 { 6206 int ret; 6207 struct cb_notify_lock_args *cbnl = key; 6208 struct nfs4_lock_waiter *waiter = wait->private; 6209 struct nfs_lowner *lowner = &cbnl->cbnl_owner, 6210 *wowner = waiter->owner; 6211 6212 /* Only wake if the callback was for the same owner */ 6213 if (lowner->clientid != wowner->clientid || 6214 lowner->id != wowner->id || 6215 lowner->s_dev != wowner->s_dev) 6216 return 0; 6217 6218 /* Make sure it's for the right inode */ 6219 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh)) 6220 return 0; 6221 6222 waiter->notified = true; 6223 6224 /* override "private" so we can use default_wake_function */ 6225 wait->private = waiter->task; 6226 ret = autoremove_wake_function(wait, mode, flags, key); 6227 wait->private = waiter; 6228 return ret; 6229 } 6230 6231 static int 6232 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6233 { 6234 int status = -ERESTARTSYS; 6235 unsigned long flags; 6236 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; 6237 struct nfs_server *server = NFS_SERVER(state->inode); 6238 struct nfs_client *clp = server->nfs_client; 6239 wait_queue_head_t *q = &clp->cl_lock_waitq; 6240 struct nfs_lowner owner = { .clientid = clp->cl_clientid, 6241 .id = lsp->ls_seqid.owner_id, 6242 .s_dev = server->s_dev }; 6243 struct nfs4_lock_waiter waiter = { .task = current, 6244 .inode = state->inode, 6245 .owner = &owner, 6246 .notified = false }; 6247 wait_queue_t wait; 6248 6249 /* Don't bother with waitqueue if we don't expect a callback */ 6250 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags)) 6251 return nfs4_retry_setlk_simple(state, cmd, request); 6252 6253 init_wait(&wait); 6254 wait.private = &waiter; 6255 
wait.func = nfs4_wake_lock_waiter; 6256 add_wait_queue(q, &wait); 6257 6258 while(!signalled()) { 6259 status = nfs4_proc_setlk(state, cmd, request); 6260 if ((status != -EAGAIN) || IS_SETLK(cmd)) 6261 break; 6262 6263 status = -ERESTARTSYS; 6264 spin_lock_irqsave(&q->lock, flags); 6265 if (waiter.notified) { 6266 spin_unlock_irqrestore(&q->lock, flags); 6267 continue; 6268 } 6269 set_current_state(TASK_INTERRUPTIBLE); 6270 spin_unlock_irqrestore(&q->lock, flags); 6271 6272 freezable_schedule_timeout_interruptible(NFS4_LOCK_MAXTIMEOUT); 6273 } 6274 6275 finish_wait(q, &wait); 6276 return status; 6277 } 6278 #else /* !CONFIG_NFS_V4_1 */ 6279 static inline int 6280 nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6281 { 6282 return nfs4_retry_setlk_simple(state, cmd, request); 6283 } 6284 #endif 6285 6286 static int 6287 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 6288 { 6289 struct nfs_open_context *ctx; 6290 struct nfs4_state *state; 6291 int status; 6292 6293 /* verify open state */ 6294 ctx = nfs_file_open_context(filp); 6295 state = ctx->state; 6296 6297 if (request->fl_start < 0 || request->fl_end < 0) 6298 return -EINVAL; 6299 6300 if (IS_GETLK(cmd)) { 6301 if (state != NULL) 6302 return nfs4_proc_getlk(state, F_GETLK, request); 6303 return 0; 6304 } 6305 6306 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 6307 return -EINVAL; 6308 6309 if (request->fl_type == F_UNLCK) { 6310 if (state != NULL) 6311 return nfs4_proc_unlck(state, cmd, request); 6312 return 0; 6313 } 6314 6315 if (state == NULL) 6316 return -ENOLCK; 6317 6318 if ((request->fl_flags & FL_POSIX) && 6319 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 6320 return -ENOLCK; 6321 6322 /* 6323 * Don't rely on the VFS having checked the file open mode, 6324 * since it won't do this for flock() locks. 
6325 */ 6326 switch (request->fl_type) { 6327 case F_RDLCK: 6328 if (!(filp->f_mode & FMODE_READ)) 6329 return -EBADF; 6330 break; 6331 case F_WRLCK: 6332 if (!(filp->f_mode & FMODE_WRITE)) 6333 return -EBADF; 6334 } 6335 6336 status = nfs4_set_lock_state(state, request); 6337 if (status != 0) 6338 return status; 6339 6340 return nfs4_retry_setlk(state, cmd, request); 6341 } 6342 6343 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 6344 { 6345 struct nfs_server *server = NFS_SERVER(state->inode); 6346 int err; 6347 6348 err = nfs4_set_lock_state(state, fl); 6349 if (err != 0) 6350 return err; 6351 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 6352 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 6353 } 6354 6355 struct nfs_release_lockowner_data { 6356 struct nfs4_lock_state *lsp; 6357 struct nfs_server *server; 6358 struct nfs_release_lockowner_args args; 6359 struct nfs_release_lockowner_res res; 6360 unsigned long timestamp; 6361 }; 6362 6363 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 6364 { 6365 struct nfs_release_lockowner_data *data = calldata; 6366 struct nfs_server *server = data->server; 6367 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 6368 &data->args.seq_args, &data->res.seq_res, task); 6369 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6370 data->timestamp = jiffies; 6371 } 6372 6373 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 6374 { 6375 struct nfs_release_lockowner_data *data = calldata; 6376 struct nfs_server *server = data->server; 6377 6378 nfs40_sequence_done(task, &data->res.seq_res); 6379 6380 switch (task->tk_status) { 6381 case 0: 6382 renew_lease(server, data->timestamp); 6383 break; 6384 case -NFS4ERR_STALE_CLIENTID: 6385 case -NFS4ERR_EXPIRED: 6386 nfs4_schedule_lease_recovery(server->nfs_client); 6387 break; 6388 case -NFS4ERR_LEASE_MOVED: 6389 case -NFS4ERR_DELAY: 6390 if (nfs4_async_handle_error(task, server, 6391 NULL, NULL) == -EAGAIN) 6392 rpc_restart_call_prepare(task); 6393 } 6394 } 6395 6396 static void nfs4_release_lockowner_release(void *calldata) 6397 { 6398 struct nfs_release_lockowner_data *data = calldata; 6399 nfs4_free_lock_state(data->server, data->lsp); 6400 kfree(calldata); 6401 } 6402 6403 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 6404 .rpc_call_prepare = nfs4_release_lockowner_prepare, 6405 .rpc_call_done = nfs4_release_lockowner_done, 6406 .rpc_release = nfs4_release_lockowner_release, 6407 }; 6408 6409 static void 6410 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 6411 { 6412 struct nfs_release_lockowner_data *data; 6413 struct rpc_message msg = { 6414 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 6415 }; 6416 6417 if (server->nfs_client->cl_mvops->minor_version != 0) 6418 return; 6419 6420 data = kmalloc(sizeof(*data), GFP_NOFS); 6421 if (!data) 6422 return; 6423 data->lsp = lsp; 6424 data->server = server; 6425 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6426 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 6427 data->args.lock_owner.s_dev = server->s_dev; 6428 6429 msg.rpc_argp = &data->args; 6430 msg.rpc_resp = &data->res; 6431 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 6432 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 6433 } 6434 6435 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 6436 6437 
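/*
 * The xattr handlers below expose the NFSv4 ACL as the "system.nfs4_acl"
 * extended attribute: the set/get callbacks are thin wrappers around
 * nfs4_proc_set_acl() and nfs4_proc_get_acl(), and the list callback only
 * advertises the attribute when the server actually supports ACLs.
 */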
static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, 6438 struct dentry *unused, struct inode *inode, 6439 const char *key, const void *buf, 6440 size_t buflen, int flags) 6441 { 6442 return nfs4_proc_set_acl(inode, buf, buflen); 6443 } 6444 6445 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, 6446 struct dentry *unused, struct inode *inode, 6447 const char *key, void *buf, size_t buflen) 6448 { 6449 return nfs4_proc_get_acl(inode, buf, buflen); 6450 } 6451 6452 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) 6453 { 6454 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry))); 6455 } 6456 6457 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6458 6459 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, 6460 struct dentry *unused, struct inode *inode, 6461 const char *key, const void *buf, 6462 size_t buflen, int flags) 6463 { 6464 if (security_ismaclabel(key)) 6465 return nfs4_set_security_label(inode, buf, buflen); 6466 6467 return -EOPNOTSUPP; 6468 } 6469 6470 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, 6471 struct dentry *unused, struct inode *inode, 6472 const char *key, void *buf, size_t buflen) 6473 { 6474 if (security_ismaclabel(key)) 6475 return nfs4_get_security_label(inode, buf, buflen); 6476 return -EOPNOTSUPP; 6477 } 6478 6479 static ssize_t 6480 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 6481 { 6482 int len = 0; 6483 6484 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) { 6485 len = security_inode_listsecurity(inode, list, list_len); 6486 if (list_len && len > list_len) 6487 return -ERANGE; 6488 } 6489 return len; 6490 } 6491 6492 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 6493 .prefix = XATTR_SECURITY_PREFIX, 6494 .get = nfs4_xattr_get_nfs4_label, 6495 .set = nfs4_xattr_set_nfs4_label, 6496 }; 6497 6498 #else 6499 6500 static ssize_t 6501 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) 6502 { 6503 return 0; 6504 } 6505 6506 #endif 6507 6508 /* 6509 * nfs_fhget will use either the mounted_on_fileid or the fileid 6510 */ 6511 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 6512 { 6513 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 6514 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 6515 (fattr->valid & NFS_ATTR_FATTR_FSID) && 6516 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 6517 return; 6518 6519 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 6520 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 6521 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 6522 fattr->nlink = 2; 6523 } 6524 6525 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6526 const struct qstr *name, 6527 struct nfs4_fs_locations *fs_locations, 6528 struct page *page) 6529 { 6530 struct nfs_server *server = NFS_SERVER(dir); 6531 u32 bitmask[3] = { 6532 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6533 }; 6534 struct nfs4_fs_locations_arg args = { 6535 .dir_fh = NFS_FH(dir), 6536 .name = name, 6537 .page = page, 6538 .bitmask = bitmask, 6539 }; 6540 struct nfs4_fs_locations_res res = { 6541 .fs_locations = fs_locations, 6542 }; 6543 struct rpc_message msg = { 6544 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6545 .rpc_argp = &args, 6546 .rpc_resp = &res, 6547 }; 6548 int status; 6549 6550 dprintk("%s: start\n", __func__); 6551 6552 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 6553 * is not supported */ 6554 if 
(NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 6555 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 6556 else 6557 bitmask[0] |= FATTR4_WORD0_FILEID; 6558 6559 nfs_fattr_init(&fs_locations->fattr); 6560 fs_locations->server = server; 6561 fs_locations->nlocations = 0; 6562 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 6563 dprintk("%s: returned status = %d\n", __func__, status); 6564 return status; 6565 } 6566 6567 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6568 const struct qstr *name, 6569 struct nfs4_fs_locations *fs_locations, 6570 struct page *page) 6571 { 6572 struct nfs4_exception exception = { }; 6573 int err; 6574 do { 6575 err = _nfs4_proc_fs_locations(client, dir, name, 6576 fs_locations, page); 6577 trace_nfs4_get_fs_locations(dir, name, err); 6578 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6579 &exception); 6580 } while (exception.retry); 6581 return err; 6582 } 6583 6584 /* 6585 * This operation also signals the server that this client is 6586 * performing migration recovery. The server can stop returning 6587 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 6588 * appended to this compound to identify the client ID which is 6589 * performing recovery. 6590 */ 6591 static int _nfs40_proc_get_locations(struct inode *inode, 6592 struct nfs4_fs_locations *locations, 6593 struct page *page, struct rpc_cred *cred) 6594 { 6595 struct nfs_server *server = NFS_SERVER(inode); 6596 struct rpc_clnt *clnt = server->client; 6597 u32 bitmask[2] = { 6598 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6599 }; 6600 struct nfs4_fs_locations_arg args = { 6601 .clientid = server->nfs_client->cl_clientid, 6602 .fh = NFS_FH(inode), 6603 .page = page, 6604 .bitmask = bitmask, 6605 .migration = 1, /* skip LOOKUP */ 6606 .renew = 1, /* append RENEW */ 6607 }; 6608 struct nfs4_fs_locations_res res = { 6609 .fs_locations = locations, 6610 .migration = 1, 6611 .renew = 1, 6612 }; 6613 struct rpc_message msg = { 6614 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6615 .rpc_argp = &args, 6616 .rpc_resp = &res, 6617 .rpc_cred = cred, 6618 }; 6619 unsigned long now = jiffies; 6620 int status; 6621 6622 nfs_fattr_init(&locations->fattr); 6623 locations->server = server; 6624 locations->nlocations = 0; 6625 6626 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6627 nfs4_set_sequence_privileged(&args.seq_args); 6628 status = nfs4_call_sync_sequence(clnt, server, &msg, 6629 &args.seq_args, &res.seq_res); 6630 if (status) 6631 return status; 6632 6633 renew_lease(server, now); 6634 return 0; 6635 } 6636 6637 #ifdef CONFIG_NFS_V4_1 6638 6639 /* 6640 * This operation also signals the server that this client is 6641 * performing migration recovery. The server can stop asserting 6642 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 6643 * performing this operation is identified in the SEQUENCE 6644 * operation in this compound. 6645 * 6646 * When the client supports GETATTR(fs_locations_info), it can 6647 * be plumbed in here. 
6648 */ 6649 static int _nfs41_proc_get_locations(struct inode *inode, 6650 struct nfs4_fs_locations *locations, 6651 struct page *page, struct rpc_cred *cred) 6652 { 6653 struct nfs_server *server = NFS_SERVER(inode); 6654 struct rpc_clnt *clnt = server->client; 6655 u32 bitmask[2] = { 6656 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6657 }; 6658 struct nfs4_fs_locations_arg args = { 6659 .fh = NFS_FH(inode), 6660 .page = page, 6661 .bitmask = bitmask, 6662 .migration = 1, /* skip LOOKUP */ 6663 }; 6664 struct nfs4_fs_locations_res res = { 6665 .fs_locations = locations, 6666 .migration = 1, 6667 }; 6668 struct rpc_message msg = { 6669 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6670 .rpc_argp = &args, 6671 .rpc_resp = &res, 6672 .rpc_cred = cred, 6673 }; 6674 int status; 6675 6676 nfs_fattr_init(&locations->fattr); 6677 locations->server = server; 6678 locations->nlocations = 0; 6679 6680 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6681 nfs4_set_sequence_privileged(&args.seq_args); 6682 status = nfs4_call_sync_sequence(clnt, server, &msg, 6683 &args.seq_args, &res.seq_res); 6684 if (status == NFS4_OK && 6685 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6686 status = -NFS4ERR_LEASE_MOVED; 6687 return status; 6688 } 6689 6690 #endif /* CONFIG_NFS_V4_1 */ 6691 6692 /** 6693 * nfs4_proc_get_locations - discover locations for a migrated FSID 6694 * @inode: inode on FSID that is migrating 6695 * @locations: result of query 6696 * @page: buffer 6697 * @cred: credential to use for this operation 6698 * 6699 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 6700 * operation failed, or a negative errno if a local error occurred. 6701 * 6702 * On success, "locations" is filled in, but if the server has 6703 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 6704 * asserted. 6705 * 6706 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 6707 * from this client that require migration recovery. 6708 */ 6709 int nfs4_proc_get_locations(struct inode *inode, 6710 struct nfs4_fs_locations *locations, 6711 struct page *page, struct rpc_cred *cred) 6712 { 6713 struct nfs_server *server = NFS_SERVER(inode); 6714 struct nfs_client *clp = server->nfs_client; 6715 const struct nfs4_mig_recovery_ops *ops = 6716 clp->cl_mvops->mig_recovery_ops; 6717 struct nfs4_exception exception = { }; 6718 int status; 6719 6720 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6721 (unsigned long long)server->fsid.major, 6722 (unsigned long long)server->fsid.minor, 6723 clp->cl_hostname); 6724 nfs_display_fhandle(NFS_FH(inode), __func__); 6725 6726 do { 6727 status = ops->get_locations(inode, locations, page, cred); 6728 if (status != -NFS4ERR_DELAY) 6729 break; 6730 nfs4_handle_exception(server, status, &exception); 6731 } while (exception.retry); 6732 return status; 6733 } 6734 6735 /* 6736 * This operation also signals the server that this client is 6737 * performing "lease moved" recovery. The server can stop 6738 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 6739 * is appended to this compound to identify the client ID which is 6740 * performing recovery. 
6741 */ 6742 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6743 { 6744 struct nfs_server *server = NFS_SERVER(inode); 6745 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 6746 struct rpc_clnt *clnt = server->client; 6747 struct nfs4_fsid_present_arg args = { 6748 .fh = NFS_FH(inode), 6749 .clientid = clp->cl_clientid, 6750 .renew = 1, /* append RENEW */ 6751 }; 6752 struct nfs4_fsid_present_res res = { 6753 .renew = 1, 6754 }; 6755 struct rpc_message msg = { 6756 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6757 .rpc_argp = &args, 6758 .rpc_resp = &res, 6759 .rpc_cred = cred, 6760 }; 6761 unsigned long now = jiffies; 6762 int status; 6763 6764 res.fh = nfs_alloc_fhandle(); 6765 if (res.fh == NULL) 6766 return -ENOMEM; 6767 6768 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6769 nfs4_set_sequence_privileged(&args.seq_args); 6770 status = nfs4_call_sync_sequence(clnt, server, &msg, 6771 &args.seq_args, &res.seq_res); 6772 nfs_free_fhandle(res.fh); 6773 if (status) 6774 return status; 6775 6776 do_renew_lease(clp, now); 6777 return 0; 6778 } 6779 6780 #ifdef CONFIG_NFS_V4_1 6781 6782 /* 6783 * This operation also signals the server that this client is 6784 * performing "lease moved" recovery. The server can stop asserting 6785 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 6786 * this operation is identified in the SEQUENCE operation in this 6787 * compound. 6788 */ 6789 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6790 { 6791 struct nfs_server *server = NFS_SERVER(inode); 6792 struct rpc_clnt *clnt = server->client; 6793 struct nfs4_fsid_present_arg args = { 6794 .fh = NFS_FH(inode), 6795 }; 6796 struct nfs4_fsid_present_res res = { 6797 }; 6798 struct rpc_message msg = { 6799 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6800 .rpc_argp = &args, 6801 .rpc_resp = &res, 6802 .rpc_cred = cred, 6803 }; 6804 int status; 6805 6806 res.fh = nfs_alloc_fhandle(); 6807 if (res.fh == NULL) 6808 return -ENOMEM; 6809 6810 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6811 nfs4_set_sequence_privileged(&args.seq_args); 6812 status = nfs4_call_sync_sequence(clnt, server, &msg, 6813 &args.seq_args, &res.seq_res); 6814 nfs_free_fhandle(res.fh); 6815 if (status == NFS4_OK && 6816 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6817 status = -NFS4ERR_LEASE_MOVED; 6818 return status; 6819 } 6820 6821 #endif /* CONFIG_NFS_V4_1 */ 6822 6823 /** 6824 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 6825 * @inode: inode on FSID to check 6826 * @cred: credential to use for this operation 6827 * 6828 * Server indicates whether the FSID is present, moved, or not 6829 * recognized. This operation is necessary to clear a LEASE_MOVED 6830 * condition for this client ID. 6831 * 6832 * Returns NFS4_OK if the FSID is present on this server, 6833 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 6834 * NFS4ERR code if some error occurred on the server, or a 6835 * negative errno if a local failure occurred. 
6836 */ 6837 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6838 { 6839 struct nfs_server *server = NFS_SERVER(inode); 6840 struct nfs_client *clp = server->nfs_client; 6841 const struct nfs4_mig_recovery_ops *ops = 6842 clp->cl_mvops->mig_recovery_ops; 6843 struct nfs4_exception exception = { }; 6844 int status; 6845 6846 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6847 (unsigned long long)server->fsid.major, 6848 (unsigned long long)server->fsid.minor, 6849 clp->cl_hostname); 6850 nfs_display_fhandle(NFS_FH(inode), __func__); 6851 6852 do { 6853 status = ops->fsid_present(inode, cred); 6854 if (status != -NFS4ERR_DELAY) 6855 break; 6856 nfs4_handle_exception(server, status, &exception); 6857 } while (exception.retry); 6858 return status; 6859 } 6860 6861 /** 6862 * If 'use_integrity' is true and the state managment nfs_client 6863 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 6864 * and the machine credential as per RFC3530bis and RFC5661 Security 6865 * Considerations sections. Otherwise, just use the user cred with the 6866 * filesystem's rpc_client. 6867 */ 6868 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 6869 { 6870 int status; 6871 struct nfs4_secinfo_arg args = { 6872 .dir_fh = NFS_FH(dir), 6873 .name = name, 6874 }; 6875 struct nfs4_secinfo_res res = { 6876 .flavors = flavors, 6877 }; 6878 struct rpc_message msg = { 6879 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 6880 .rpc_argp = &args, 6881 .rpc_resp = &res, 6882 }; 6883 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 6884 struct rpc_cred *cred = NULL; 6885 6886 if (use_integrity) { 6887 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient; 6888 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client); 6889 msg.rpc_cred = cred; 6890 } 6891 6892 dprintk("NFS call secinfo %s\n", name->name); 6893 6894 nfs4_state_protect(NFS_SERVER(dir)->nfs_client, 6895 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 6896 6897 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args, 6898 &res.seq_res, 0); 6899 dprintk("NFS reply secinfo: %d\n", status); 6900 6901 if (cred) 6902 put_rpccred(cred); 6903 6904 return status; 6905 } 6906 6907 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 6908 struct nfs4_secinfo_flavors *flavors) 6909 { 6910 struct nfs4_exception exception = { }; 6911 int err; 6912 do { 6913 err = -NFS4ERR_WRONGSEC; 6914 6915 /* try to use integrity protection with machine cred */ 6916 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 6917 err = _nfs4_proc_secinfo(dir, name, flavors, true); 6918 6919 /* 6920 * if unable to use integrity protection, or SECINFO with 6921 * integrity protection returns NFS4ERR_WRONGSEC (which is 6922 * disallowed by spec, but exists in deployed servers) use 6923 * the current filesystem's rpc_client and the user cred. 6924 */ 6925 if (err == -NFS4ERR_WRONGSEC) 6926 err = _nfs4_proc_secinfo(dir, name, flavors, false); 6927 6928 trace_nfs4_secinfo(dir, name, err); 6929 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6930 &exception); 6931 } while (exception.retry); 6932 return err; 6933 } 6934 6935 #ifdef CONFIG_NFS_V4_1 6936 /* 6937 * Check the exchange flags returned by the server for invalid flags, having 6938 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 6939 * DS flags set. 
6940 */ 6941 static int nfs4_check_cl_exchange_flags(u32 flags) 6942 { 6943 if (flags & ~EXCHGID4_FLAG_MASK_R) 6944 goto out_inval; 6945 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 6946 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 6947 goto out_inval; 6948 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 6949 goto out_inval; 6950 return NFS_OK; 6951 out_inval: 6952 return -NFS4ERR_INVAL; 6953 } 6954 6955 static bool 6956 nfs41_same_server_scope(struct nfs41_server_scope *a, 6957 struct nfs41_server_scope *b) 6958 { 6959 if (a->server_scope_sz == b->server_scope_sz && 6960 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 6961 return true; 6962 6963 return false; 6964 } 6965 6966 static void 6967 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) 6968 { 6969 } 6970 6971 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { 6972 .rpc_call_done = &nfs4_bind_one_conn_to_session_done, 6973 }; 6974 6975 /* 6976 * nfs4_proc_bind_one_conn_to_session() 6977 * 6978 * The 4.1 client currently uses the same TCP connection for the 6979 * fore and backchannel. 6980 */ 6981 static 6982 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt, 6983 struct rpc_xprt *xprt, 6984 struct nfs_client *clp, 6985 struct rpc_cred *cred) 6986 { 6987 int status; 6988 struct nfs41_bind_conn_to_session_args args = { 6989 .client = clp, 6990 .dir = NFS4_CDFC4_FORE_OR_BOTH, 6991 }; 6992 struct nfs41_bind_conn_to_session_res res; 6993 struct rpc_message msg = { 6994 .rpc_proc = 6995 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 6996 .rpc_argp = &args, 6997 .rpc_resp = &res, 6998 .rpc_cred = cred, 6999 }; 7000 struct rpc_task_setup task_setup_data = { 7001 .rpc_client = clnt, 7002 .rpc_xprt = xprt, 7003 .callback_ops = &nfs4_bind_one_conn_to_session_ops, 7004 .rpc_message = &msg, 7005 .flags = RPC_TASK_TIMEOUT, 7006 }; 7007 struct rpc_task *task; 7008 7009 dprintk("--> %s\n", __func__); 7010 7011 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 7012 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 7013 args.dir = NFS4_CDFC4_FORE; 7014 7015 /* Do not set the backchannel flag unless this is clnt->cl_xprt */ 7016 if (xprt != rcu_access_pointer(clnt->cl_xprt)) 7017 args.dir = NFS4_CDFC4_FORE; 7018 7019 task = rpc_run_task(&task_setup_data); 7020 if (!IS_ERR(task)) { 7021 status = task->tk_status; 7022 rpc_put_task(task); 7023 } else 7024 status = PTR_ERR(task); 7025 trace_nfs4_bind_conn_to_session(clp, status); 7026 if (status == 0) { 7027 if (memcmp(res.sessionid.data, 7028 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 7029 dprintk("NFS: %s: Session ID mismatch\n", __func__); 7030 status = -EIO; 7031 goto out; 7032 } 7033 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 7034 dprintk("NFS: %s: Unexpected direction from server\n", 7035 __func__); 7036 status = -EIO; 7037 goto out; 7038 } 7039 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 7040 dprintk("NFS: %s: Server returned RDMA mode = true\n", 7041 __func__); 7042 status = -EIO; 7043 goto out; 7044 } 7045 } 7046 out: 7047 dprintk("<-- %s status= %d\n", __func__, status); 7048 return status; 7049 } 7050 7051 struct rpc_bind_conn_calldata { 7052 struct nfs_client *clp; 7053 struct rpc_cred *cred; 7054 }; 7055 7056 static int 7057 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt, 7058 struct rpc_xprt *xprt, 7059 void *calldata) 7060 { 7061 struct rpc_bind_conn_calldata *p = calldata; 7062 7063 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, 
p->cred); 7064 } 7065 7066 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 7067 { 7068 struct rpc_bind_conn_calldata data = { 7069 .clp = clp, 7070 .cred = cred, 7071 }; 7072 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient, 7073 nfs4_proc_bind_conn_to_session_callback, &data); 7074 } 7075 7076 /* 7077 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 7078 * and operations we'd like to see to enable certain features in the allow map 7079 */ 7080 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 7081 .how = SP4_MACH_CRED, 7082 .enforce.u.words = { 7083 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 7084 1 << (OP_EXCHANGE_ID - 32) | 7085 1 << (OP_CREATE_SESSION - 32) | 7086 1 << (OP_DESTROY_SESSION - 32) | 7087 1 << (OP_DESTROY_CLIENTID - 32) 7088 }, 7089 .allow.u.words = { 7090 [0] = 1 << (OP_CLOSE) | 7091 1 << (OP_OPEN_DOWNGRADE) | 7092 1 << (OP_LOCKU) | 7093 1 << (OP_DELEGRETURN) | 7094 1 << (OP_COMMIT), 7095 [1] = 1 << (OP_SECINFO - 32) | 7096 1 << (OP_SECINFO_NO_NAME - 32) | 7097 1 << (OP_LAYOUTRETURN - 32) | 7098 1 << (OP_TEST_STATEID - 32) | 7099 1 << (OP_FREE_STATEID - 32) | 7100 1 << (OP_WRITE - 32) 7101 } 7102 }; 7103 7104 /* 7105 * Select the state protection mode for client `clp' given the server results 7106 * from exchange_id in `sp'. 7107 * 7108 * Returns 0 on success, negative errno otherwise. 7109 */ 7110 static int nfs4_sp4_select_mode(struct nfs_client *clp, 7111 struct nfs41_state_protection *sp) 7112 { 7113 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 7114 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 7115 1 << (OP_EXCHANGE_ID - 32) | 7116 1 << (OP_CREATE_SESSION - 32) | 7117 1 << (OP_DESTROY_SESSION - 32) | 7118 1 << (OP_DESTROY_CLIENTID - 32) 7119 }; 7120 unsigned int i; 7121 7122 if (sp->how == SP4_MACH_CRED) { 7123 /* Print state protect result */ 7124 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 7125 for (i = 0; i <= LAST_NFS4_OP; i++) { 7126 if (test_bit(i, sp->enforce.u.longs)) 7127 dfprintk(MOUNT, " enforce op %d\n", i); 7128 if (test_bit(i, sp->allow.u.longs)) 7129 dfprintk(MOUNT, " allow op %d\n", i); 7130 } 7131 7132 /* make sure nothing is on enforce list that isn't supported */ 7133 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 7134 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 7135 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 7136 return -EINVAL; 7137 } 7138 } 7139 7140 /* 7141 * Minimal mode - state operations are allowed to use machine 7142 * credential. Note this already happens by default, so the 7143 * client doesn't have to do anything more than the negotiation. 7144 * 7145 * NOTE: we don't care if EXCHANGE_ID is in the list - 7146 * we're already using the machine cred for exchange_id 7147 * and will never use a different cred. 
7148 */ 7149 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 7150 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 7151 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 7152 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 7153 dfprintk(MOUNT, "sp4_mach_cred:\n"); 7154 dfprintk(MOUNT, " minimal mode enabled\n"); 7155 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags); 7156 } else { 7157 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 7158 return -EINVAL; 7159 } 7160 7161 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 7162 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) && 7163 test_bit(OP_DELEGRETURN, sp->allow.u.longs) && 7164 test_bit(OP_LOCKU, sp->allow.u.longs)) { 7165 dfprintk(MOUNT, " cleanup mode enabled\n"); 7166 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags); 7167 } 7168 7169 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) { 7170 dfprintk(MOUNT, " pnfs cleanup mode enabled\n"); 7171 set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, 7172 &clp->cl_sp4_flags); 7173 } 7174 7175 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 7176 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 7177 dfprintk(MOUNT, " secinfo mode enabled\n"); 7178 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags); 7179 } 7180 7181 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 7182 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 7183 dfprintk(MOUNT, " stateid mode enabled\n"); 7184 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags); 7185 } 7186 7187 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 7188 dfprintk(MOUNT, " write mode enabled\n"); 7189 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags); 7190 } 7191 7192 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 7193 dfprintk(MOUNT, " commit mode enabled\n"); 7194 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags); 7195 } 7196 } 7197 7198 return 0; 7199 } 7200 7201 struct nfs41_exchange_id_data { 7202 struct nfs41_exchange_id_res res; 7203 struct nfs41_exchange_id_args args; 7204 struct rpc_xprt *xprt; 7205 int rpc_status; 7206 }; 7207 7208 static void nfs4_exchange_id_done(struct rpc_task *task, void *data) 7209 { 7210 struct nfs41_exchange_id_data *cdata = 7211 (struct nfs41_exchange_id_data *)data; 7212 struct nfs_client *clp = cdata->args.client; 7213 int status = task->tk_status; 7214 7215 trace_nfs4_exchange_id(clp, status); 7216 7217 if (status == 0) 7218 status = nfs4_check_cl_exchange_flags(cdata->res.flags); 7219 7220 if (cdata->xprt && status == 0) { 7221 status = nfs4_detect_session_trunking(clp, &cdata->res, 7222 cdata->xprt); 7223 goto out; 7224 } 7225 7226 if (status == 0) 7227 status = nfs4_sp4_select_mode(clp, &cdata->res.state_protect); 7228 7229 if (status == 0) { 7230 clp->cl_clientid = cdata->res.clientid; 7231 clp->cl_exchange_flags = cdata->res.flags; 7232 /* Client ID is not confirmed */ 7233 if (!(cdata->res.flags & EXCHGID4_FLAG_CONFIRMED_R)) { 7234 clear_bit(NFS4_SESSION_ESTABLISHED, 7235 &clp->cl_session->session_state); 7236 clp->cl_seqid = cdata->res.seqid; 7237 } 7238 7239 kfree(clp->cl_serverowner); 7240 clp->cl_serverowner = cdata->res.server_owner; 7241 cdata->res.server_owner = NULL; 7242 7243 /* use the most recent implementation id */ 7244 kfree(clp->cl_implid); 7245 clp->cl_implid = cdata->res.impl_id; 7246 cdata->res.impl_id = NULL; 7247 7248 if (clp->cl_serverscope != NULL && 7249 !nfs41_same_server_scope(clp->cl_serverscope, 7250 cdata->res.server_scope)) { 7251 dprintk("%s: server_scope mismatch detected\n", 7252 __func__); 7253 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 
7254 kfree(clp->cl_serverscope); 7255 clp->cl_serverscope = NULL; 7256 } 7257 7258 if (clp->cl_serverscope == NULL) { 7259 clp->cl_serverscope = cdata->res.server_scope; 7260 cdata->res.server_scope = NULL; 7261 } 7262 /* Save the EXCHANGE_ID verifier session trunk tests */ 7263 memcpy(clp->cl_confirm.data, cdata->args.verifier->data, 7264 sizeof(clp->cl_confirm.data)); 7265 } 7266 out: 7267 cdata->rpc_status = status; 7268 return; 7269 } 7270 7271 static void nfs4_exchange_id_release(void *data) 7272 { 7273 struct nfs41_exchange_id_data *cdata = 7274 (struct nfs41_exchange_id_data *)data; 7275 7276 nfs_put_client(cdata->args.client); 7277 if (cdata->xprt) { 7278 xprt_put(cdata->xprt); 7279 rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient); 7280 } 7281 kfree(cdata->res.impl_id); 7282 kfree(cdata->res.server_scope); 7283 kfree(cdata->res.server_owner); 7284 kfree(cdata); 7285 } 7286 7287 static const struct rpc_call_ops nfs4_exchange_id_call_ops = { 7288 .rpc_call_done = nfs4_exchange_id_done, 7289 .rpc_release = nfs4_exchange_id_release, 7290 }; 7291 7292 /* 7293 * _nfs4_proc_exchange_id() 7294 * 7295 * Wrapper for EXCHANGE_ID operation. 7296 */ 7297 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, 7298 u32 sp4_how, struct rpc_xprt *xprt) 7299 { 7300 nfs4_verifier verifier; 7301 struct rpc_message msg = { 7302 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 7303 .rpc_cred = cred, 7304 }; 7305 struct rpc_task_setup task_setup_data = { 7306 .rpc_client = clp->cl_rpcclient, 7307 .callback_ops = &nfs4_exchange_id_call_ops, 7308 .rpc_message = &msg, 7309 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 7310 }; 7311 struct nfs41_exchange_id_data *calldata; 7312 struct rpc_task *task; 7313 int status = -EIO; 7314 7315 if (!atomic_inc_not_zero(&clp->cl_count)) 7316 goto out; 7317 7318 status = -ENOMEM; 7319 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7320 if (!calldata) 7321 goto out; 7322 7323 if (!xprt) 7324 nfs4_init_boot_verifier(clp, &verifier); 7325 7326 status = nfs4_init_uniform_client_string(clp); 7327 if (status) 7328 goto out_calldata; 7329 7330 dprintk("NFS call exchange_id auth=%s, '%s'\n", 7331 clp->cl_rpcclient->cl_auth->au_ops->au_name, 7332 clp->cl_owner_id); 7333 7334 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 7335 GFP_NOFS); 7336 status = -ENOMEM; 7337 if (unlikely(calldata->res.server_owner == NULL)) 7338 goto out_calldata; 7339 7340 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 7341 GFP_NOFS); 7342 if (unlikely(calldata->res.server_scope == NULL)) 7343 goto out_server_owner; 7344 7345 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 7346 if (unlikely(calldata->res.impl_id == NULL)) 7347 goto out_server_scope; 7348 7349 switch (sp4_how) { 7350 case SP4_NONE: 7351 calldata->args.state_protect.how = SP4_NONE; 7352 break; 7353 7354 case SP4_MACH_CRED: 7355 calldata->args.state_protect = nfs4_sp4_mach_cred_request; 7356 break; 7357 7358 default: 7359 /* unsupported! 
*/ 7360 WARN_ON_ONCE(1); 7361 status = -EINVAL; 7362 goto out_impl_id; 7363 } 7364 if (xprt) { 7365 calldata->xprt = xprt; 7366 task_setup_data.rpc_xprt = xprt; 7367 task_setup_data.flags = 7368 RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC; 7369 calldata->args.verifier = &clp->cl_confirm; 7370 } else { 7371 calldata->args.verifier = &verifier; 7372 } 7373 calldata->args.client = clp; 7374 #ifdef CONFIG_NFS_V4_1_MIGRATION 7375 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 7376 EXCHGID4_FLAG_BIND_PRINC_STATEID | 7377 EXCHGID4_FLAG_SUPP_MOVED_MIGR, 7378 #else 7379 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 7380 EXCHGID4_FLAG_BIND_PRINC_STATEID, 7381 #endif 7382 msg.rpc_argp = &calldata->args; 7383 msg.rpc_resp = &calldata->res; 7384 task_setup_data.callback_data = calldata; 7385 7386 task = rpc_run_task(&task_setup_data); 7387 if (IS_ERR(task)) { 7388 status = PTR_ERR(task); 7389 goto out_impl_id; 7390 } 7391 7392 if (!xprt) { 7393 status = rpc_wait_for_completion_task(task); 7394 if (!status) 7395 status = calldata->rpc_status; 7396 } else /* session trunking test */ 7397 status = calldata->rpc_status; 7398 7399 rpc_put_task(task); 7400 out: 7401 if (clp->cl_implid != NULL) 7402 dprintk("NFS reply exchange_id: Server Implementation ID: " 7403 "domain: %s, name: %s, date: %llu,%u\n", 7404 clp->cl_implid->domain, clp->cl_implid->name, 7405 clp->cl_implid->date.seconds, 7406 clp->cl_implid->date.nseconds); 7407 dprintk("NFS reply exchange_id: %d\n", status); 7408 return status; 7409 7410 out_impl_id: 7411 kfree(calldata->res.impl_id); 7412 out_server_scope: 7413 kfree(calldata->res.server_scope); 7414 out_server_owner: 7415 kfree(calldata->res.server_owner); 7416 out_calldata: 7417 kfree(calldata); 7418 goto out; 7419 } 7420 7421 /* 7422 * nfs4_proc_exchange_id() 7423 * 7424 * Returns zero, a negative errno, or a negative NFS4ERR status code. 7425 * 7426 * Since the clientid has expired, all compounds using sessions 7427 * associated with the stale clientid will be returning 7428 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 7429 * be in some phase of session reset. 7430 * 7431 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 7432 */ 7433 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 7434 { 7435 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 7436 int status; 7437 7438 /* try SP4_MACH_CRED if krb5i/p */ 7439 if (authflavor == RPC_AUTH_GSS_KRB5I || 7440 authflavor == RPC_AUTH_GSS_KRB5P) { 7441 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED, NULL); 7442 if (!status) 7443 return 0; 7444 } 7445 7446 /* try SP4_NONE */ 7447 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE, NULL); 7448 } 7449 7450 /** 7451 * nfs4_test_session_trunk 7452 * 7453 * This is an add_xprt_test() test function called from 7454 * rpc_clnt_setup_test_and_add_xprt. 7455 * 7456 * The rpc_xprt_switch is referrenced by rpc_clnt_setup_test_and_add_xprt 7457 * and is dereferrenced in nfs4_exchange_id_release 7458 * 7459 * Upon success, add the new transport to the rpc_clnt 7460 * 7461 * @clnt: struct rpc_clnt to get new transport 7462 * @xprt: the rpc_xprt to test 7463 * @data: call data for _nfs4_proc_exchange_id. 
7464 */ 7465 int nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt, 7466 void *data) 7467 { 7468 struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data; 7469 u32 sp4_how; 7470 7471 dprintk("--> %s try %s\n", __func__, 7472 xprt->address_strings[RPC_DISPLAY_ADDR]); 7473 7474 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED); 7475 7476 /* Test connection for session trunking. Async exchange_id call */ 7477 return _nfs4_proc_exchange_id(adata->clp, adata->cred, sp4_how, xprt); 7478 } 7479 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk); 7480 7481 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 7482 struct rpc_cred *cred) 7483 { 7484 struct rpc_message msg = { 7485 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 7486 .rpc_argp = clp, 7487 .rpc_cred = cred, 7488 }; 7489 int status; 7490 7491 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7492 trace_nfs4_destroy_clientid(clp, status); 7493 if (status) 7494 dprintk("NFS: Got error %d from the server %s on " 7495 "DESTROY_CLIENTID.", status, clp->cl_hostname); 7496 return status; 7497 } 7498 7499 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 7500 struct rpc_cred *cred) 7501 { 7502 unsigned int loop; 7503 int ret; 7504 7505 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 7506 ret = _nfs4_proc_destroy_clientid(clp, cred); 7507 switch (ret) { 7508 case -NFS4ERR_DELAY: 7509 case -NFS4ERR_CLIENTID_BUSY: 7510 ssleep(1); 7511 break; 7512 default: 7513 return ret; 7514 } 7515 } 7516 return 0; 7517 } 7518 7519 int nfs4_destroy_clientid(struct nfs_client *clp) 7520 { 7521 struct rpc_cred *cred; 7522 int ret = 0; 7523 7524 if (clp->cl_mvops->minor_version < 1) 7525 goto out; 7526 if (clp->cl_exchange_flags == 0) 7527 goto out; 7528 if (clp->cl_preserve_clid) 7529 goto out; 7530 cred = nfs4_get_clid_cred(clp); 7531 ret = nfs4_proc_destroy_clientid(clp, cred); 7532 if (cred) 7533 put_rpccred(cred); 7534 switch (ret) { 7535 case 0: 7536 case -NFS4ERR_STALE_CLIENTID: 7537 clp->cl_exchange_flags = 0; 7538 } 7539 out: 7540 return ret; 7541 } 7542 7543 struct nfs4_get_lease_time_data { 7544 struct nfs4_get_lease_time_args *args; 7545 struct nfs4_get_lease_time_res *res; 7546 struct nfs_client *clp; 7547 }; 7548 7549 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 7550 void *calldata) 7551 { 7552 struct nfs4_get_lease_time_data *data = 7553 (struct nfs4_get_lease_time_data *)calldata; 7554 7555 dprintk("--> %s\n", __func__); 7556 /* just setup sequence, do not trigger session recovery 7557 since we're invoked within one */ 7558 nfs41_setup_sequence(data->clp->cl_session, 7559 &data->args->la_seq_args, 7560 &data->res->lr_seq_res, 7561 task); 7562 dprintk("<-- %s\n", __func__); 7563 } 7564 7565 /* 7566 * Called from nfs4_state_manager thread for session setup, so don't recover 7567 * from sequence operation or clientid errors. 
7568 */ 7569 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 7570 { 7571 struct nfs4_get_lease_time_data *data = 7572 (struct nfs4_get_lease_time_data *)calldata; 7573 7574 dprintk("--> %s\n", __func__); 7575 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 7576 return; 7577 switch (task->tk_status) { 7578 case -NFS4ERR_DELAY: 7579 case -NFS4ERR_GRACE: 7580 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 7581 rpc_delay(task, NFS4_POLL_RETRY_MIN); 7582 task->tk_status = 0; 7583 /* fall through */ 7584 case -NFS4ERR_RETRY_UNCACHED_REP: 7585 rpc_restart_call_prepare(task); 7586 return; 7587 } 7588 dprintk("<-- %s\n", __func__); 7589 } 7590 7591 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 7592 .rpc_call_prepare = nfs4_get_lease_time_prepare, 7593 .rpc_call_done = nfs4_get_lease_time_done, 7594 }; 7595 7596 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 7597 { 7598 struct rpc_task *task; 7599 struct nfs4_get_lease_time_args args; 7600 struct nfs4_get_lease_time_res res = { 7601 .lr_fsinfo = fsinfo, 7602 }; 7603 struct nfs4_get_lease_time_data data = { 7604 .args = &args, 7605 .res = &res, 7606 .clp = clp, 7607 }; 7608 struct rpc_message msg = { 7609 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 7610 .rpc_argp = &args, 7611 .rpc_resp = &res, 7612 }; 7613 struct rpc_task_setup task_setup = { 7614 .rpc_client = clp->cl_rpcclient, 7615 .rpc_message = &msg, 7616 .callback_ops = &nfs4_get_lease_time_ops, 7617 .callback_data = &data, 7618 .flags = RPC_TASK_TIMEOUT, 7619 }; 7620 int status; 7621 7622 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 7623 nfs4_set_sequence_privileged(&args.la_seq_args); 7624 dprintk("--> %s\n", __func__); 7625 task = rpc_run_task(&task_setup); 7626 7627 if (IS_ERR(task)) 7628 status = PTR_ERR(task); 7629 else { 7630 status = task->tk_status; 7631 rpc_put_task(task); 7632 } 7633 dprintk("<-- %s return %d\n", __func__, status); 7634 7635 return status; 7636 } 7637 7638 /* 7639 * Initialize the values to be used by the client in CREATE_SESSION 7640 * If nfs4_init_session set the fore channel request and response sizes, 7641 * use them. 7642 * 7643 * Set the back channel max_resp_sz_cached to zero to force the client to 7644 * always set csa_cachethis to FALSE because the current implementation 7645 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
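 *
 * As a rough worked example (assuming the usual 1MB NFS_MAX_FILE_IO_SIZE):
 * the fore channel request/response sizes come out to 1MB plus the encoded
 * WRITE/READ compound overhead, while both back channel sizes are capped by
 * rpc_max_bc_payload() for the transport in use.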
 */
static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
                struct rpc_clnt *clnt)
{
        unsigned int max_rqst_sz, max_resp_sz;
        unsigned int max_bc_payload = rpc_max_bc_payload(clnt);

        max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
        max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;

        /* Fore channel attributes */
        args->fc_attrs.max_rqst_sz = max_rqst_sz;
        args->fc_attrs.max_resp_sz = max_resp_sz;
        args->fc_attrs.max_ops = NFS4_MAX_OPS;
        args->fc_attrs.max_reqs = max_session_slots;

        dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
                "max_ops=%u max_reqs=%u\n",
                __func__,
                args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
                args->fc_attrs.max_ops, args->fc_attrs.max_reqs);

        /* Back channel attributes */
        args->bc_attrs.max_rqst_sz = max_bc_payload;
        args->bc_attrs.max_resp_sz = max_bc_payload;
        args->bc_attrs.max_resp_sz_cached = 0;
        args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
        /* Advertise the full callback slot table, but never fewer than one slot */
        args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);

        dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
                "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
                __func__,
                args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
                args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
                args->bc_attrs.max_reqs);
}

static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
                struct nfs41_create_session_res *res)
{
        struct nfs4_channel_attrs *sent = &args->fc_attrs;
        struct nfs4_channel_attrs *rcvd = &res->fc_attrs;

        if (rcvd->max_resp_sz > sent->max_resp_sz)
                return -EINVAL;
        /*
         * Our requested max_ops is the minimum we need; we're not
         * prepared to break up compounds into smaller pieces than that.
7694 * So, no point even trying to continue if the server won't 7695 * cooperate: 7696 */ 7697 if (rcvd->max_ops < sent->max_ops) 7698 return -EINVAL; 7699 if (rcvd->max_reqs == 0) 7700 return -EINVAL; 7701 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 7702 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 7703 return 0; 7704 } 7705 7706 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 7707 struct nfs41_create_session_res *res) 7708 { 7709 struct nfs4_channel_attrs *sent = &args->bc_attrs; 7710 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 7711 7712 if (!(res->flags & SESSION4_BACK_CHAN)) 7713 goto out; 7714 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 7715 return -EINVAL; 7716 if (rcvd->max_resp_sz < sent->max_resp_sz) 7717 return -EINVAL; 7718 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 7719 return -EINVAL; 7720 if (rcvd->max_ops > sent->max_ops) 7721 return -EINVAL; 7722 if (rcvd->max_reqs > sent->max_reqs) 7723 return -EINVAL; 7724 out: 7725 return 0; 7726 } 7727 7728 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 7729 struct nfs41_create_session_res *res) 7730 { 7731 int ret; 7732 7733 ret = nfs4_verify_fore_channel_attrs(args, res); 7734 if (ret) 7735 return ret; 7736 return nfs4_verify_back_channel_attrs(args, res); 7737 } 7738 7739 static void nfs4_update_session(struct nfs4_session *session, 7740 struct nfs41_create_session_res *res) 7741 { 7742 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 7743 /* Mark client id and session as being confirmed */ 7744 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 7745 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 7746 session->flags = res->flags; 7747 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 7748 if (res->flags & SESSION4_BACK_CHAN) 7749 memcpy(&session->bc_attrs, &res->bc_attrs, 7750 sizeof(session->bc_attrs)); 7751 } 7752 7753 static int _nfs4_proc_create_session(struct nfs_client *clp, 7754 struct rpc_cred *cred) 7755 { 7756 struct nfs4_session *session = clp->cl_session; 7757 struct nfs41_create_session_args args = { 7758 .client = clp, 7759 .clientid = clp->cl_clientid, 7760 .seqid = clp->cl_seqid, 7761 .cb_program = NFS4_CALLBACK, 7762 }; 7763 struct nfs41_create_session_res res; 7764 7765 struct rpc_message msg = { 7766 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 7767 .rpc_argp = &args, 7768 .rpc_resp = &res, 7769 .rpc_cred = cred, 7770 }; 7771 int status; 7772 7773 nfs4_init_channel_attrs(&args, clp->cl_rpcclient); 7774 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 7775 7776 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7777 trace_nfs4_create_session(clp, status); 7778 7779 switch (status) { 7780 case -NFS4ERR_STALE_CLIENTID: 7781 case -NFS4ERR_DELAY: 7782 case -ETIMEDOUT: 7783 case -EACCES: 7784 case -EAGAIN: 7785 goto out; 7786 }; 7787 7788 clp->cl_seqid++; 7789 if (!status) { 7790 /* Verify the session's negotiated channel_attrs values */ 7791 status = nfs4_verify_channel_attrs(&args, &res); 7792 /* Increment the clientid slot sequence id */ 7793 if (status) 7794 goto out; 7795 nfs4_update_session(session, &res); 7796 } 7797 out: 7798 return status; 7799 } 7800 7801 /* 7802 * Issues a CREATE_SESSION operation to the server. 7803 * It is the responsibility of the caller to verify the session is 7804 * expired before calling this routine. 
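 * On success the server's session ID and negotiated channel attributes are
 * recorded in clp->cl_session by nfs4_update_session(), the clientid is
 * marked as confirmed, and the session slot tables are then (re)initialized
 * below.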
7805 */ 7806 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred) 7807 { 7808 int status; 7809 unsigned *ptr; 7810 struct nfs4_session *session = clp->cl_session; 7811 7812 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 7813 7814 status = _nfs4_proc_create_session(clp, cred); 7815 if (status) 7816 goto out; 7817 7818 /* Init or reset the session slot tables */ 7819 status = nfs4_setup_session_slot_tables(session); 7820 dprintk("slot table setup returned %d\n", status); 7821 if (status) 7822 goto out; 7823 7824 ptr = (unsigned *)&session->sess_id.data[0]; 7825 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 7826 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 7827 out: 7828 dprintk("<-- %s\n", __func__); 7829 return status; 7830 } 7831 7832 /* 7833 * Issue the over-the-wire RPC DESTROY_SESSION. 7834 * The caller must serialize access to this routine. 7835 */ 7836 int nfs4_proc_destroy_session(struct nfs4_session *session, 7837 struct rpc_cred *cred) 7838 { 7839 struct rpc_message msg = { 7840 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 7841 .rpc_argp = session, 7842 .rpc_cred = cred, 7843 }; 7844 int status = 0; 7845 7846 dprintk("--> nfs4_proc_destroy_session\n"); 7847 7848 /* session is still being setup */ 7849 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 7850 return 0; 7851 7852 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7853 trace_nfs4_destroy_session(session->clp, status); 7854 7855 if (status) 7856 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 7857 "Session has been destroyed regardless...\n", status); 7858 7859 dprintk("<-- nfs4_proc_destroy_session\n"); 7860 return status; 7861 } 7862 7863 /* 7864 * Renew the cl_session lease. 
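 * On NFSv4.1 a bare SEQUENCE operation is enough to renew the lease.
 * nfs41_proc_async_sequence() is the asynchronous variant driven by the
 * renewal timer, while nfs4_proc_sequence() is the synchronous, privileged
 * variant used when checking the lease (see nfs41_state_renewal_ops below).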
7865 */ 7866 struct nfs4_sequence_data { 7867 struct nfs_client *clp; 7868 struct nfs4_sequence_args args; 7869 struct nfs4_sequence_res res; 7870 }; 7871 7872 static void nfs41_sequence_release(void *data) 7873 { 7874 struct nfs4_sequence_data *calldata = data; 7875 struct nfs_client *clp = calldata->clp; 7876 7877 if (atomic_read(&clp->cl_count) > 1) 7878 nfs4_schedule_state_renewal(clp); 7879 nfs_put_client(clp); 7880 kfree(calldata); 7881 } 7882 7883 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7884 { 7885 switch(task->tk_status) { 7886 case -NFS4ERR_DELAY: 7887 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7888 return -EAGAIN; 7889 default: 7890 nfs4_schedule_lease_recovery(clp); 7891 } 7892 return 0; 7893 } 7894 7895 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 7896 { 7897 struct nfs4_sequence_data *calldata = data; 7898 struct nfs_client *clp = calldata->clp; 7899 7900 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 7901 return; 7902 7903 trace_nfs4_sequence(clp, task->tk_status); 7904 if (task->tk_status < 0) { 7905 dprintk("%s ERROR %d\n", __func__, task->tk_status); 7906 if (atomic_read(&clp->cl_count) == 1) 7907 goto out; 7908 7909 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 7910 rpc_restart_call_prepare(task); 7911 return; 7912 } 7913 } 7914 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 7915 out: 7916 dprintk("<-- %s\n", __func__); 7917 } 7918 7919 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 7920 { 7921 struct nfs4_sequence_data *calldata = data; 7922 struct nfs_client *clp = calldata->clp; 7923 struct nfs4_sequence_args *args; 7924 struct nfs4_sequence_res *res; 7925 7926 args = task->tk_msg.rpc_argp; 7927 res = task->tk_msg.rpc_resp; 7928 7929 nfs41_setup_sequence(clp->cl_session, args, res, task); 7930 } 7931 7932 static const struct rpc_call_ops nfs41_sequence_ops = { 7933 .rpc_call_done = nfs41_sequence_call_done, 7934 .rpc_call_prepare = nfs41_sequence_prepare, 7935 .rpc_release = nfs41_sequence_release, 7936 }; 7937 7938 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 7939 struct rpc_cred *cred, 7940 bool is_privileged) 7941 { 7942 struct nfs4_sequence_data *calldata; 7943 struct rpc_message msg = { 7944 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 7945 .rpc_cred = cred, 7946 }; 7947 struct rpc_task_setup task_setup_data = { 7948 .rpc_client = clp->cl_rpcclient, 7949 .rpc_message = &msg, 7950 .callback_ops = &nfs41_sequence_ops, 7951 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 7952 }; 7953 7954 if (!atomic_inc_not_zero(&clp->cl_count)) 7955 return ERR_PTR(-EIO); 7956 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7957 if (calldata == NULL) { 7958 nfs_put_client(clp); 7959 return ERR_PTR(-ENOMEM); 7960 } 7961 nfs4_init_sequence(&calldata->args, &calldata->res, 0); 7962 if (is_privileged) 7963 nfs4_set_sequence_privileged(&calldata->args); 7964 msg.rpc_argp = &calldata->args; 7965 msg.rpc_resp = &calldata->res; 7966 calldata->clp = clp; 7967 task_setup_data.callback_data = calldata; 7968 7969 return rpc_run_task(&task_setup_data); 7970 } 7971 7972 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 7973 { 7974 struct rpc_task *task; 7975 int ret = 0; 7976 7977 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 7978 return -EAGAIN; 7979 task = _nfs41_proc_sequence(clp, cred, false); 7980 if (IS_ERR(task)) 7981 ret = PTR_ERR(task); 7982 else 7983 rpc_put_task_async(task); 
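        /*
         * The SEQUENCE task completes asynchronously; nfs41_sequence_release()
         * schedules the next renewal and drops the nfs_client reference taken
         * in _nfs41_proc_sequence().
         */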
7984 dprintk("<-- %s status=%d\n", __func__, ret); 7985 return ret; 7986 } 7987 7988 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 7989 { 7990 struct rpc_task *task; 7991 int ret; 7992 7993 task = _nfs41_proc_sequence(clp, cred, true); 7994 if (IS_ERR(task)) { 7995 ret = PTR_ERR(task); 7996 goto out; 7997 } 7998 ret = rpc_wait_for_completion_task(task); 7999 if (!ret) 8000 ret = task->tk_status; 8001 rpc_put_task(task); 8002 out: 8003 dprintk("<-- %s status=%d\n", __func__, ret); 8004 return ret; 8005 } 8006 8007 struct nfs4_reclaim_complete_data { 8008 struct nfs_client *clp; 8009 struct nfs41_reclaim_complete_args arg; 8010 struct nfs41_reclaim_complete_res res; 8011 }; 8012 8013 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 8014 { 8015 struct nfs4_reclaim_complete_data *calldata = data; 8016 8017 nfs41_setup_sequence(calldata->clp->cl_session, 8018 &calldata->arg.seq_args, 8019 &calldata->res.seq_res, 8020 task); 8021 } 8022 8023 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 8024 { 8025 switch(task->tk_status) { 8026 case 0: 8027 case -NFS4ERR_COMPLETE_ALREADY: 8028 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 8029 break; 8030 case -NFS4ERR_DELAY: 8031 rpc_delay(task, NFS4_POLL_RETRY_MAX); 8032 /* fall through */ 8033 case -NFS4ERR_RETRY_UNCACHED_REP: 8034 return -EAGAIN; 8035 default: 8036 nfs4_schedule_lease_recovery(clp); 8037 } 8038 return 0; 8039 } 8040 8041 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 8042 { 8043 struct nfs4_reclaim_complete_data *calldata = data; 8044 struct nfs_client *clp = calldata->clp; 8045 struct nfs4_sequence_res *res = &calldata->res.seq_res; 8046 8047 dprintk("--> %s\n", __func__); 8048 if (!nfs41_sequence_done(task, res)) 8049 return; 8050 8051 trace_nfs4_reclaim_complete(clp, task->tk_status); 8052 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 8053 rpc_restart_call_prepare(task); 8054 return; 8055 } 8056 dprintk("<-- %s\n", __func__); 8057 } 8058 8059 static void nfs4_free_reclaim_complete_data(void *data) 8060 { 8061 struct nfs4_reclaim_complete_data *calldata = data; 8062 8063 kfree(calldata); 8064 } 8065 8066 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 8067 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 8068 .rpc_call_done = nfs4_reclaim_complete_done, 8069 .rpc_release = nfs4_free_reclaim_complete_data, 8070 }; 8071 8072 /* 8073 * Issue a global reclaim complete. 
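 * arg.one_fs is left at zero (rca_one_fs = FALSE), so this tells the server
 * that the client has finished reclaiming all of the state it held before
 * the server restart, not just the state for one filesystem.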
8074 */ 8075 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 8076 struct rpc_cred *cred) 8077 { 8078 struct nfs4_reclaim_complete_data *calldata; 8079 struct rpc_task *task; 8080 struct rpc_message msg = { 8081 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 8082 .rpc_cred = cred, 8083 }; 8084 struct rpc_task_setup task_setup_data = { 8085 .rpc_client = clp->cl_rpcclient, 8086 .rpc_message = &msg, 8087 .callback_ops = &nfs4_reclaim_complete_call_ops, 8088 .flags = RPC_TASK_ASYNC, 8089 }; 8090 int status = -ENOMEM; 8091 8092 dprintk("--> %s\n", __func__); 8093 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 8094 if (calldata == NULL) 8095 goto out; 8096 calldata->clp = clp; 8097 calldata->arg.one_fs = 0; 8098 8099 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0); 8100 nfs4_set_sequence_privileged(&calldata->arg.seq_args); 8101 msg.rpc_argp = &calldata->arg; 8102 msg.rpc_resp = &calldata->res; 8103 task_setup_data.callback_data = calldata; 8104 task = rpc_run_task(&task_setup_data); 8105 if (IS_ERR(task)) { 8106 status = PTR_ERR(task); 8107 goto out; 8108 } 8109 status = nfs4_wait_for_completion_rpc_task(task); 8110 if (status == 0) 8111 status = task->tk_status; 8112 rpc_put_task(task); 8113 return 0; 8114 out: 8115 dprintk("<-- %s status=%d\n", __func__, status); 8116 return status; 8117 } 8118 8119 static void 8120 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 8121 { 8122 struct nfs4_layoutget *lgp = calldata; 8123 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 8124 struct nfs4_session *session = nfs4_get_session(server); 8125 8126 dprintk("--> %s\n", __func__); 8127 nfs41_setup_sequence(session, &lgp->args.seq_args, 8128 &lgp->res.seq_res, task); 8129 dprintk("<-- %s\n", __func__); 8130 } 8131 8132 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 8133 { 8134 struct nfs4_layoutget *lgp = calldata; 8135 8136 dprintk("--> %s\n", __func__); 8137 nfs41_sequence_process(task, &lgp->res.seq_res); 8138 dprintk("<-- %s\n", __func__); 8139 } 8140 8141 static int 8142 nfs4_layoutget_handle_exception(struct rpc_task *task, 8143 struct nfs4_layoutget *lgp, struct nfs4_exception *exception) 8144 { 8145 struct inode *inode = lgp->args.inode; 8146 struct nfs_server *server = NFS_SERVER(inode); 8147 struct pnfs_layout_hdr *lo; 8148 int nfs4err = task->tk_status; 8149 int err, status = 0; 8150 LIST_HEAD(head); 8151 8152 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 8153 8154 switch (nfs4err) { 8155 case 0: 8156 goto out; 8157 8158 /* 8159 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs 8160 * on the file. set tk_status to -ENODATA to tell upper layer to 8161 * retry go inband. 8162 */ 8163 case -NFS4ERR_LAYOUTUNAVAILABLE: 8164 status = -ENODATA; 8165 goto out; 8166 /* 8167 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 8168 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 8169 */ 8170 case -NFS4ERR_BADLAYOUT: 8171 status = -EOVERFLOW; 8172 goto out; 8173 /* 8174 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 8175 * (or clients) writing to the same RAID stripe except when 8176 * the minlength argument is 0 (see RFC5661 section 18.43.3). 8177 * 8178 * Treat it like we would RECALLCONFLICT -- we retry for a little 8179 * while, and then eventually give up. 
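 * Either way the translated error (-EBUSY or -ERECALLCONFLICT below) is
 * passed back to the LAYOUTGET caller, which decides how long to keep
 * retrying.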
8180 */ 8181 case -NFS4ERR_LAYOUTTRYLATER: 8182 if (lgp->args.minlength == 0) { 8183 status = -EOVERFLOW; 8184 goto out; 8185 } 8186 status = -EBUSY; 8187 break; 8188 case -NFS4ERR_RECALLCONFLICT: 8189 status = -ERECALLCONFLICT; 8190 break; 8191 case -NFS4ERR_EXPIRED: 8192 case -NFS4ERR_BAD_STATEID: 8193 exception->timeout = 0; 8194 spin_lock(&inode->i_lock); 8195 lo = NFS_I(inode)->layout; 8196 /* If the open stateid was bad, then recover it. */ 8197 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || 8198 nfs4_stateid_match_other(&lgp->args.stateid, 8199 &lgp->args.ctx->state->stateid)) { 8200 spin_unlock(&inode->i_lock); 8201 exception->state = lgp->args.ctx->state; 8202 break; 8203 } 8204 8205 /* 8206 * Mark the bad layout state as invalid, then retry 8207 */ 8208 pnfs_mark_layout_stateid_invalid(lo, &head); 8209 spin_unlock(&inode->i_lock); 8210 pnfs_free_lseg_list(&head); 8211 status = -EAGAIN; 8212 goto out; 8213 } 8214 8215 err = nfs4_handle_exception(server, nfs4err, exception); 8216 if (!status) { 8217 if (exception->retry) 8218 status = -EAGAIN; 8219 else 8220 status = err; 8221 } 8222 out: 8223 dprintk("<-- %s\n", __func__); 8224 return status; 8225 } 8226 8227 static size_t max_response_pages(struct nfs_server *server) 8228 { 8229 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 8230 return nfs_page_array_len(0, max_resp_sz); 8231 } 8232 8233 static void nfs4_free_pages(struct page **pages, size_t size) 8234 { 8235 int i; 8236 8237 if (!pages) 8238 return; 8239 8240 for (i = 0; i < size; i++) { 8241 if (!pages[i]) 8242 break; 8243 __free_page(pages[i]); 8244 } 8245 kfree(pages); 8246 } 8247 8248 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) 8249 { 8250 struct page **pages; 8251 int i; 8252 8253 pages = kcalloc(size, sizeof(struct page *), gfp_flags); 8254 if (!pages) { 8255 dprintk("%s: can't alloc array of %zu pages\n", __func__, size); 8256 return NULL; 8257 } 8258 8259 for (i = 0; i < size; i++) { 8260 pages[i] = alloc_page(gfp_flags); 8261 if (!pages[i]) { 8262 dprintk("%s: failed to allocate page\n", __func__); 8263 nfs4_free_pages(pages, size); 8264 return NULL; 8265 } 8266 } 8267 8268 return pages; 8269 } 8270 8271 static void nfs4_layoutget_release(void *calldata) 8272 { 8273 struct nfs4_layoutget *lgp = calldata; 8274 struct inode *inode = lgp->args.inode; 8275 struct nfs_server *server = NFS_SERVER(inode); 8276 size_t max_pages = max_response_pages(server); 8277 8278 dprintk("--> %s\n", __func__); 8279 nfs4_free_pages(lgp->args.layout.pages, max_pages); 8280 pnfs_put_layout_hdr(NFS_I(inode)->layout); 8281 put_nfs_open_context(lgp->args.ctx); 8282 kfree(calldata); 8283 dprintk("<-- %s\n", __func__); 8284 } 8285 8286 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 8287 .rpc_call_prepare = nfs4_layoutget_prepare, 8288 .rpc_call_done = nfs4_layoutget_done, 8289 .rpc_release = nfs4_layoutget_release, 8290 }; 8291 8292 struct pnfs_layout_segment * 8293 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags) 8294 { 8295 struct inode *inode = lgp->args.inode; 8296 struct nfs_server *server = NFS_SERVER(inode); 8297 size_t max_pages = max_response_pages(server); 8298 struct rpc_task *task; 8299 struct rpc_message msg = { 8300 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 8301 .rpc_argp = &lgp->args, 8302 .rpc_resp = &lgp->res, 8303 .rpc_cred = lgp->cred, 8304 }; 8305 struct rpc_task_setup task_setup_data = { 8306 .rpc_client = server->client, 8307 .rpc_message = &msg, 8308 
.callback_ops = &nfs4_layoutget_call_ops, 8309 .callback_data = lgp, 8310 .flags = RPC_TASK_ASYNC, 8311 }; 8312 struct pnfs_layout_segment *lseg = NULL; 8313 struct nfs4_exception exception = { 8314 .inode = inode, 8315 .timeout = *timeout, 8316 }; 8317 int status = 0; 8318 8319 dprintk("--> %s\n", __func__); 8320 8321 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ 8322 pnfs_get_layout_hdr(NFS_I(inode)->layout); 8323 8324 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); 8325 if (!lgp->args.layout.pages) { 8326 nfs4_layoutget_release(lgp); 8327 return ERR_PTR(-ENOMEM); 8328 } 8329 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 8330 8331 lgp->res.layoutp = &lgp->args.layout; 8332 lgp->res.seq_res.sr_slot = NULL; 8333 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 8334 8335 task = rpc_run_task(&task_setup_data); 8336 if (IS_ERR(task)) 8337 return ERR_CAST(task); 8338 status = nfs4_wait_for_completion_rpc_task(task); 8339 if (status == 0) { 8340 status = nfs4_layoutget_handle_exception(task, lgp, &exception); 8341 *timeout = exception.timeout; 8342 } 8343 8344 trace_nfs4_layoutget(lgp->args.ctx, 8345 &lgp->args.range, 8346 &lgp->res.range, 8347 &lgp->res.stateid, 8348 status); 8349 8350 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ 8351 if (status == 0 && lgp->res.layoutp->len) 8352 lseg = pnfs_layout_process(lgp); 8353 nfs4_sequence_free_slot(&lgp->res.seq_res); 8354 rpc_put_task(task); 8355 dprintk("<-- %s status=%d\n", __func__, status); 8356 if (status) 8357 return ERR_PTR(status); 8358 return lseg; 8359 } 8360 8361 static void 8362 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 8363 { 8364 struct nfs4_layoutreturn *lrp = calldata; 8365 8366 dprintk("--> %s\n", __func__); 8367 nfs41_setup_sequence(lrp->clp->cl_session, 8368 &lrp->args.seq_args, 8369 &lrp->res.seq_res, 8370 task); 8371 } 8372 8373 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 8374 { 8375 struct nfs4_layoutreturn *lrp = calldata; 8376 struct nfs_server *server; 8377 8378 dprintk("--> %s\n", __func__); 8379 8380 if (!nfs41_sequence_process(task, &lrp->res.seq_res)) 8381 return; 8382 8383 server = NFS_SERVER(lrp->args.inode); 8384 switch (task->tk_status) { 8385 default: 8386 task->tk_status = 0; 8387 case 0: 8388 break; 8389 case -NFS4ERR_DELAY: 8390 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) 8391 break; 8392 nfs4_sequence_free_slot(&lrp->res.seq_res); 8393 rpc_restart_call_prepare(task); 8394 return; 8395 } 8396 dprintk("<-- %s\n", __func__); 8397 } 8398 8399 static void nfs4_layoutreturn_release(void *calldata) 8400 { 8401 struct nfs4_layoutreturn *lrp = calldata; 8402 struct pnfs_layout_hdr *lo = lrp->args.layout; 8403 LIST_HEAD(freeme); 8404 8405 dprintk("--> %s\n", __func__); 8406 spin_lock(&lo->plh_inode->i_lock); 8407 if (lrp->res.lrs_present) { 8408 pnfs_mark_matching_lsegs_invalid(lo, &freeme, 8409 &lrp->args.range, 8410 be32_to_cpu(lrp->args.stateid.seqid)); 8411 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 8412 } else 8413 pnfs_mark_layout_stateid_invalid(lo, &freeme); 8414 pnfs_clear_layoutreturn_waitbit(lo); 8415 spin_unlock(&lo->plh_inode->i_lock); 8416 nfs4_sequence_free_slot(&lrp->res.seq_res); 8417 pnfs_free_lseg_list(&freeme); 8418 pnfs_put_layout_hdr(lrp->args.layout); 8419 nfs_iput_and_deactive(lrp->inode); 8420 kfree(calldata); 8421 dprintk("<-- %s\n", __func__); 8422 } 8423 8424 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 8425 .rpc_call_prepare = 
nfs4_layoutreturn_prepare, 8426 .rpc_call_done = nfs4_layoutreturn_done, 8427 .rpc_release = nfs4_layoutreturn_release, 8428 }; 8429 8430 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync) 8431 { 8432 struct rpc_task *task; 8433 struct rpc_message msg = { 8434 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 8435 .rpc_argp = &lrp->args, 8436 .rpc_resp = &lrp->res, 8437 .rpc_cred = lrp->cred, 8438 }; 8439 struct rpc_task_setup task_setup_data = { 8440 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 8441 .rpc_message = &msg, 8442 .callback_ops = &nfs4_layoutreturn_call_ops, 8443 .callback_data = lrp, 8444 }; 8445 int status = 0; 8446 8447 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client, 8448 NFS_SP4_MACH_CRED_PNFS_CLEANUP, 8449 &task_setup_data.rpc_client, &msg); 8450 8451 dprintk("--> %s\n", __func__); 8452 if (!sync) { 8453 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 8454 if (!lrp->inode) { 8455 nfs4_layoutreturn_release(lrp); 8456 return -EAGAIN; 8457 } 8458 task_setup_data.flags |= RPC_TASK_ASYNC; 8459 } 8460 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); 8461 task = rpc_run_task(&task_setup_data); 8462 if (IS_ERR(task)) 8463 return PTR_ERR(task); 8464 if (sync) 8465 status = task->tk_status; 8466 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status); 8467 dprintk("<-- %s status=%d\n", __func__, status); 8468 rpc_put_task(task); 8469 return status; 8470 } 8471 8472 static int 8473 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 8474 struct pnfs_device *pdev, 8475 struct rpc_cred *cred) 8476 { 8477 struct nfs4_getdeviceinfo_args args = { 8478 .pdev = pdev, 8479 .notify_types = NOTIFY_DEVICEID4_CHANGE | 8480 NOTIFY_DEVICEID4_DELETE, 8481 }; 8482 struct nfs4_getdeviceinfo_res res = { 8483 .pdev = pdev, 8484 }; 8485 struct rpc_message msg = { 8486 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 8487 .rpc_argp = &args, 8488 .rpc_resp = &res, 8489 .rpc_cred = cred, 8490 }; 8491 int status; 8492 8493 dprintk("--> %s\n", __func__); 8494 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 8495 if (res.notification & ~args.notify_types) 8496 dprintk("%s: unsupported notification\n", __func__); 8497 if (res.notification != args.notify_types) 8498 pdev->nocache = 1; 8499 8500 dprintk("<-- %s status=%d\n", __func__, status); 8501 8502 return status; 8503 } 8504 8505 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 8506 struct pnfs_device *pdev, 8507 struct rpc_cred *cred) 8508 { 8509 struct nfs4_exception exception = { }; 8510 int err; 8511 8512 do { 8513 err = nfs4_handle_exception(server, 8514 _nfs4_proc_getdeviceinfo(server, pdev, cred), 8515 &exception); 8516 } while (exception.retry); 8517 return err; 8518 } 8519 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 8520 8521 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 8522 { 8523 struct nfs4_layoutcommit_data *data = calldata; 8524 struct nfs_server *server = NFS_SERVER(data->args.inode); 8525 struct nfs4_session *session = nfs4_get_session(server); 8526 8527 nfs41_setup_sequence(session, 8528 &data->args.seq_args, 8529 &data->res.seq_res, 8530 task); 8531 } 8532 8533 static void 8534 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 8535 { 8536 struct nfs4_layoutcommit_data *data = calldata; 8537 struct nfs_server *server = NFS_SERVER(data->args.inode); 8538 8539 if (!nfs41_sequence_done(task, &data->res.seq_res)) 8540 return; 8541 8542 switch (task->tk_status) { /* Just 
ignore these failures */ 8543 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 8544 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 8545 case -NFS4ERR_BADLAYOUT: /* no layout */ 8546 case -NFS4ERR_GRACE: /* loca_recalim always false */ 8547 task->tk_status = 0; 8548 case 0: 8549 break; 8550 default: 8551 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 8552 rpc_restart_call_prepare(task); 8553 return; 8554 } 8555 } 8556 } 8557 8558 static void nfs4_layoutcommit_release(void *calldata) 8559 { 8560 struct nfs4_layoutcommit_data *data = calldata; 8561 8562 pnfs_cleanup_layoutcommit(data); 8563 nfs_post_op_update_inode_force_wcc(data->args.inode, 8564 data->res.fattr); 8565 put_rpccred(data->cred); 8566 nfs_iput_and_deactive(data->inode); 8567 kfree(data); 8568 } 8569 8570 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 8571 .rpc_call_prepare = nfs4_layoutcommit_prepare, 8572 .rpc_call_done = nfs4_layoutcommit_done, 8573 .rpc_release = nfs4_layoutcommit_release, 8574 }; 8575 8576 int 8577 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 8578 { 8579 struct rpc_message msg = { 8580 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 8581 .rpc_argp = &data->args, 8582 .rpc_resp = &data->res, 8583 .rpc_cred = data->cred, 8584 }; 8585 struct rpc_task_setup task_setup_data = { 8586 .task = &data->task, 8587 .rpc_client = NFS_CLIENT(data->args.inode), 8588 .rpc_message = &msg, 8589 .callback_ops = &nfs4_layoutcommit_ops, 8590 .callback_data = data, 8591 }; 8592 struct rpc_task *task; 8593 int status = 0; 8594 8595 dprintk("NFS: initiating layoutcommit call. sync %d " 8596 "lbw: %llu inode %lu\n", sync, 8597 data->args.lastbytewritten, 8598 data->args.inode->i_ino); 8599 8600 if (!sync) { 8601 data->inode = nfs_igrab_and_active(data->args.inode); 8602 if (data->inode == NULL) { 8603 nfs4_layoutcommit_release(data); 8604 return -EAGAIN; 8605 } 8606 task_setup_data.flags = RPC_TASK_ASYNC; 8607 } 8608 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 8609 task = rpc_run_task(&task_setup_data); 8610 if (IS_ERR(task)) 8611 return PTR_ERR(task); 8612 if (sync) 8613 status = task->tk_status; 8614 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status); 8615 dprintk("%s: status %d\n", __func__, status); 8616 rpc_put_task(task); 8617 return status; 8618 } 8619 8620 /** 8621 * Use the state managment nfs_client cl_rpcclient, which uses krb5i (if 8622 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 8623 */ 8624 static int 8625 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 8626 struct nfs_fsinfo *info, 8627 struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8628 { 8629 struct nfs41_secinfo_no_name_args args = { 8630 .style = SECINFO_STYLE_CURRENT_FH, 8631 }; 8632 struct nfs4_secinfo_res res = { 8633 .flavors = flavors, 8634 }; 8635 struct rpc_message msg = { 8636 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 8637 .rpc_argp = &args, 8638 .rpc_resp = &res, 8639 }; 8640 struct rpc_clnt *clnt = server->client; 8641 struct rpc_cred *cred = NULL; 8642 int status; 8643 8644 if (use_integrity) { 8645 clnt = server->nfs_client->cl_rpcclient; 8646 cred = nfs4_get_clid_cred(server->nfs_client); 8647 msg.rpc_cred = cred; 8648 } 8649 8650 dprintk("--> %s\n", __func__); 8651 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 8652 &res.seq_res, 0); 8653 dprintk("<-- %s status=%d\n", __func__, status); 8654 8655 if (cred) 8656 
put_rpccred(cred); 8657 8658 return status; 8659 } 8660 8661 static int 8662 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 8663 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 8664 { 8665 struct nfs4_exception exception = { }; 8666 int err; 8667 do { 8668 /* first try using integrity protection */ 8669 err = -NFS4ERR_WRONGSEC; 8670 8671 /* try to use integrity protection with machine cred */ 8672 if (_nfs4_is_integrity_protected(server->nfs_client)) 8673 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8674 flavors, true); 8675 8676 /* 8677 * if unable to use integrity protection, or SECINFO with 8678 * integrity protection returns NFS4ERR_WRONGSEC (which is 8679 * disallowed by spec, but exists in deployed servers) use 8680 * the current filesystem's rpc_client and the user cred. 8681 */ 8682 if (err == -NFS4ERR_WRONGSEC) 8683 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8684 flavors, false); 8685 8686 switch (err) { 8687 case 0: 8688 case -NFS4ERR_WRONGSEC: 8689 case -ENOTSUPP: 8690 goto out; 8691 default: 8692 err = nfs4_handle_exception(server, err, &exception); 8693 } 8694 } while (exception.retry); 8695 out: 8696 return err; 8697 } 8698 8699 static int 8700 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 8701 struct nfs_fsinfo *info) 8702 { 8703 int err; 8704 struct page *page; 8705 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 8706 struct nfs4_secinfo_flavors *flavors; 8707 struct nfs4_secinfo4 *secinfo; 8708 int i; 8709 8710 page = alloc_page(GFP_KERNEL); 8711 if (!page) { 8712 err = -ENOMEM; 8713 goto out; 8714 } 8715 8716 flavors = page_address(page); 8717 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 8718 8719 /* 8720 * Fall back on "guess and check" method if 8721 * the server doesn't support SECINFO_NO_NAME 8722 */ 8723 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 8724 err = nfs4_find_root_sec(server, fhandle, info); 8725 goto out_freepage; 8726 } 8727 if (err) 8728 goto out_freepage; 8729 8730 for (i = 0; i < flavors->num_flavors; i++) { 8731 secinfo = &flavors->flavors[i]; 8732 8733 switch (secinfo->flavor) { 8734 case RPC_AUTH_NULL: 8735 case RPC_AUTH_UNIX: 8736 case RPC_AUTH_GSS: 8737 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 8738 &secinfo->flavor_info); 8739 break; 8740 default: 8741 flavor = RPC_AUTH_MAXFLAVOR; 8742 break; 8743 } 8744 8745 if (!nfs_auth_info_match(&server->auth_info, flavor)) 8746 flavor = RPC_AUTH_MAXFLAVOR; 8747 8748 if (flavor != RPC_AUTH_MAXFLAVOR) { 8749 err = nfs4_lookup_root_sec(server, fhandle, 8750 info, flavor); 8751 if (!err) 8752 break; 8753 } 8754 } 8755 8756 if (flavor == RPC_AUTH_MAXFLAVOR) 8757 err = -EPERM; 8758 8759 out_freepage: 8760 put_page(page); 8761 if (err == -EACCES) 8762 return -EPERM; 8763 out: 8764 return err; 8765 } 8766 8767 static int _nfs41_test_stateid(struct nfs_server *server, 8768 nfs4_stateid *stateid, 8769 struct rpc_cred *cred) 8770 { 8771 int status; 8772 struct nfs41_test_stateid_args args = { 8773 .stateid = stateid, 8774 }; 8775 struct nfs41_test_stateid_res res; 8776 struct rpc_message msg = { 8777 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 8778 .rpc_argp = &args, 8779 .rpc_resp = &res, 8780 .rpc_cred = cred, 8781 }; 8782 struct rpc_clnt *rpc_client = server->client; 8783 8784 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8785 &rpc_client, &msg); 8786 8787 dprintk("NFS call test_stateid %p\n", stateid); 8788 nfs4_init_sequence(&args.seq_args, 
&res.seq_res, 0); 8789 nfs4_set_sequence_privileged(&args.seq_args); 8790 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 8791 &args.seq_args, &res.seq_res); 8792 if (status != NFS_OK) { 8793 dprintk("NFS reply test_stateid: failed, %d\n", status); 8794 return status; 8795 } 8796 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 8797 return -res.status; 8798 } 8799 8800 static void nfs4_handle_delay_or_session_error(struct nfs_server *server, 8801 int err, struct nfs4_exception *exception) 8802 { 8803 exception->retry = 0; 8804 switch(err) { 8805 case -NFS4ERR_DELAY: 8806 nfs4_handle_exception(server, err, exception); 8807 break; 8808 case -NFS4ERR_BADSESSION: 8809 case -NFS4ERR_BADSLOT: 8810 case -NFS4ERR_BAD_HIGH_SLOT: 8811 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 8812 case -NFS4ERR_DEADSESSION: 8813 nfs4_do_handle_exception(server, err, exception); 8814 } 8815 } 8816 8817 /** 8818 * nfs41_test_stateid - perform a TEST_STATEID operation 8819 * 8820 * @server: server / transport on which to perform the operation 8821 * @stateid: state ID to test 8822 * @cred: credential 8823 * 8824 * Returns NFS_OK if the server recognizes that "stateid" is valid. 8825 * Otherwise a negative NFS4ERR value is returned if the operation 8826 * failed or the state ID is not currently valid. 8827 */ 8828 static int nfs41_test_stateid(struct nfs_server *server, 8829 nfs4_stateid *stateid, 8830 struct rpc_cred *cred) 8831 { 8832 struct nfs4_exception exception = { }; 8833 int err; 8834 do { 8835 err = _nfs41_test_stateid(server, stateid, cred); 8836 nfs4_handle_delay_or_session_error(server, err, &exception); 8837 } while (exception.retry); 8838 return err; 8839 } 8840 8841 struct nfs_free_stateid_data { 8842 struct nfs_server *server; 8843 struct nfs41_free_stateid_args args; 8844 struct nfs41_free_stateid_res res; 8845 }; 8846 8847 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 8848 { 8849 struct nfs_free_stateid_data *data = calldata; 8850 nfs41_setup_sequence(nfs4_get_session(data->server), 8851 &data->args.seq_args, 8852 &data->res.seq_res, 8853 task); 8854 } 8855 8856 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 8857 { 8858 struct nfs_free_stateid_data *data = calldata; 8859 8860 nfs41_sequence_done(task, &data->res.seq_res); 8861 8862 switch (task->tk_status) { 8863 case -NFS4ERR_DELAY: 8864 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 8865 rpc_restart_call_prepare(task); 8866 } 8867 } 8868 8869 static void nfs41_free_stateid_release(void *calldata) 8870 { 8871 kfree(calldata); 8872 } 8873 8874 static const struct rpc_call_ops nfs41_free_stateid_ops = { 8875 .rpc_call_prepare = nfs41_free_stateid_prepare, 8876 .rpc_call_done = nfs41_free_stateid_done, 8877 .rpc_release = nfs41_free_stateid_release, 8878 }; 8879 8880 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server, 8881 nfs4_stateid *stateid, 8882 struct rpc_cred *cred, 8883 bool privileged) 8884 { 8885 struct rpc_message msg = { 8886 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 8887 .rpc_cred = cred, 8888 }; 8889 struct rpc_task_setup task_setup = { 8890 .rpc_client = server->client, 8891 .rpc_message = &msg, 8892 .callback_ops = &nfs41_free_stateid_ops, 8893 .flags = RPC_TASK_ASYNC, 8894 }; 8895 struct nfs_free_stateid_data *data; 8896 8897 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8898 &task_setup.rpc_client, &msg); 8899 8900 dprintk("NFS call free_stateid %p\n", stateid); 8901 data 
= kmalloc(sizeof(*data), GFP_NOFS); 8902 if (!data) 8903 return ERR_PTR(-ENOMEM); 8904 data->server = server; 8905 nfs4_stateid_copy(&data->args.stateid, stateid); 8906 8907 task_setup.callback_data = data; 8908 8909 msg.rpc_argp = &data->args; 8910 msg.rpc_resp = &data->res; 8911 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 8912 if (privileged) 8913 nfs4_set_sequence_privileged(&data->args.seq_args); 8914 8915 return rpc_run_task(&task_setup); 8916 } 8917 8918 /** 8919 * nfs41_free_stateid - perform a FREE_STATEID operation 8920 * 8921 * @server: server / transport on which to perform the operation 8922 * @stateid: state ID to release 8923 * @cred: credential 8924 * 8925 * Returns NFS_OK if the server freed "stateid". Otherwise a 8926 * negative NFS4ERR value is returned. 8927 */ 8928 static int nfs41_free_stateid(struct nfs_server *server, 8929 nfs4_stateid *stateid, 8930 struct rpc_cred *cred) 8931 { 8932 struct rpc_task *task; 8933 int ret; 8934 8935 task = _nfs41_free_stateid(server, stateid, cred, true); 8936 if (IS_ERR(task)) 8937 return PTR_ERR(task); 8938 ret = rpc_wait_for_completion_task(task); 8939 if (!ret) 8940 ret = task->tk_status; 8941 rpc_put_task(task); 8942 return ret; 8943 } 8944 8945 static void 8946 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 8947 { 8948 struct rpc_task *task; 8949 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 8950 8951 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 8952 nfs4_free_lock_state(server, lsp); 8953 if (IS_ERR(task)) 8954 return; 8955 rpc_put_task(task); 8956 } 8957 8958 static bool nfs41_match_stateid(const nfs4_stateid *s1, 8959 const nfs4_stateid *s2) 8960 { 8961 if (s1->type != s2->type) 8962 return false; 8963 8964 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 8965 return false; 8966 8967 if (s1->seqid == s2->seqid) 8968 return true; 8969 if (s1->seqid == 0 || s2->seqid == 0) 8970 return true; 8971 8972 return false; 8973 } 8974 8975 #endif /* CONFIG_NFS_V4_1 */ 8976 8977 static bool nfs4_match_stateid(const nfs4_stateid *s1, 8978 const nfs4_stateid *s2) 8979 { 8980 return nfs4_stateid_match(s1, s2); 8981 } 8982 8983 8984 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 8985 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8986 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8987 .recover_open = nfs4_open_reclaim, 8988 .recover_lock = nfs4_lock_reclaim, 8989 .establish_clid = nfs4_init_clientid, 8990 .detect_trunking = nfs40_discover_server_trunking, 8991 }; 8992 8993 #if defined(CONFIG_NFS_V4_1) 8994 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 8995 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8996 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8997 .recover_open = nfs4_open_reclaim, 8998 .recover_lock = nfs4_lock_reclaim, 8999 .establish_clid = nfs41_init_clientid, 9000 .reclaim_complete = nfs41_proc_reclaim_complete, 9001 .detect_trunking = nfs41_discover_server_trunking, 9002 }; 9003 #endif /* CONFIG_NFS_V4_1 */ 9004 9005 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 9006 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 9007 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 9008 .recover_open = nfs40_open_expired, 9009 .recover_lock = nfs4_lock_expired, 9010 .establish_clid = nfs4_init_clientid, 9011 }; 9012 9013 #if defined(CONFIG_NFS_V4_1) 9014 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 9015 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 9016 
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 9017 .recover_open = nfs41_open_expired, 9018 .recover_lock = nfs41_lock_expired, 9019 .establish_clid = nfs41_init_clientid, 9020 }; 9021 #endif /* CONFIG_NFS_V4_1 */ 9022 9023 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 9024 .sched_state_renewal = nfs4_proc_async_renew, 9025 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, 9026 .renew_lease = nfs4_proc_renew, 9027 }; 9028 9029 #if defined(CONFIG_NFS_V4_1) 9030 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 9031 .sched_state_renewal = nfs41_proc_async_sequence, 9032 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, 9033 .renew_lease = nfs4_proc_sequence, 9034 }; 9035 #endif 9036 9037 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 9038 .get_locations = _nfs40_proc_get_locations, 9039 .fsid_present = _nfs40_proc_fsid_present, 9040 }; 9041 9042 #if defined(CONFIG_NFS_V4_1) 9043 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 9044 .get_locations = _nfs41_proc_get_locations, 9045 .fsid_present = _nfs41_proc_fsid_present, 9046 }; 9047 #endif /* CONFIG_NFS_V4_1 */ 9048 9049 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 9050 .minor_version = 0, 9051 .init_caps = NFS_CAP_READDIRPLUS 9052 | NFS_CAP_ATOMIC_OPEN 9053 | NFS_CAP_POSIX_LOCK, 9054 .init_client = nfs40_init_client, 9055 .shutdown_client = nfs40_shutdown_client, 9056 .match_stateid = nfs4_match_stateid, 9057 .find_root_sec = nfs4_find_root_sec, 9058 .free_lock_state = nfs4_release_lockowner, 9059 .alloc_seqid = nfs_alloc_seqid, 9060 .call_sync_ops = &nfs40_call_sync_ops, 9061 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 9062 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 9063 .state_renewal_ops = &nfs40_state_renewal_ops, 9064 .mig_recovery_ops = &nfs40_mig_recovery_ops, 9065 }; 9066 9067 #if defined(CONFIG_NFS_V4_1) 9068 static struct nfs_seqid * 9069 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 9070 { 9071 return NULL; 9072 } 9073 9074 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 9075 .minor_version = 1, 9076 .init_caps = NFS_CAP_READDIRPLUS 9077 | NFS_CAP_ATOMIC_OPEN 9078 | NFS_CAP_POSIX_LOCK 9079 | NFS_CAP_STATEID_NFSV41 9080 | NFS_CAP_ATOMIC_OPEN_V1, 9081 .init_client = nfs41_init_client, 9082 .shutdown_client = nfs41_shutdown_client, 9083 .match_stateid = nfs41_match_stateid, 9084 .find_root_sec = nfs41_find_root_sec, 9085 .free_lock_state = nfs41_free_lock_state, 9086 .alloc_seqid = nfs_alloc_no_seqid, 9087 .session_trunk = nfs4_test_session_trunk, 9088 .call_sync_ops = &nfs41_call_sync_ops, 9089 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 9090 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 9091 .state_renewal_ops = &nfs41_state_renewal_ops, 9092 .mig_recovery_ops = &nfs41_mig_recovery_ops, 9093 }; 9094 #endif 9095 9096 #if defined(CONFIG_NFS_V4_2) 9097 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 9098 .minor_version = 2, 9099 .init_caps = NFS_CAP_READDIRPLUS 9100 | NFS_CAP_ATOMIC_OPEN 9101 | NFS_CAP_POSIX_LOCK 9102 | NFS_CAP_STATEID_NFSV41 9103 | NFS_CAP_ATOMIC_OPEN_V1 9104 | NFS_CAP_ALLOCATE 9105 | NFS_CAP_COPY 9106 | NFS_CAP_DEALLOCATE 9107 | NFS_CAP_SEEK 9108 | NFS_CAP_LAYOUTSTATS 9109 | NFS_CAP_CLONE, 9110 .init_client = nfs41_init_client, 9111 .shutdown_client = nfs41_shutdown_client, 9112 .match_stateid = nfs41_match_stateid, 9113 .find_root_sec = nfs41_find_root_sec, 9114 .free_lock_state = 
nfs41_free_lock_state, 9115 .call_sync_ops = &nfs41_call_sync_ops, 9116 .alloc_seqid = nfs_alloc_no_seqid, 9117 .session_trunk = nfs4_test_session_trunk, 9118 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 9119 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 9120 .state_renewal_ops = &nfs41_state_renewal_ops, 9121 .mig_recovery_ops = &nfs41_mig_recovery_ops, 9122 }; 9123 #endif 9124 9125 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 9126 [0] = &nfs_v4_0_minor_ops, 9127 #if defined(CONFIG_NFS_V4_1) 9128 [1] = &nfs_v4_1_minor_ops, 9129 #endif 9130 #if defined(CONFIG_NFS_V4_2) 9131 [2] = &nfs_v4_2_minor_ops, 9132 #endif 9133 }; 9134 9135 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) 9136 { 9137 ssize_t error, error2; 9138 9139 error = generic_listxattr(dentry, list, size); 9140 if (error < 0) 9141 return error; 9142 if (list) { 9143 list += error; 9144 size -= error; 9145 } 9146 9147 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size); 9148 if (error2 < 0) 9149 return error2; 9150 return error + error2; 9151 } 9152 9153 static const struct inode_operations nfs4_dir_inode_operations = { 9154 .create = nfs_create, 9155 .lookup = nfs_lookup, 9156 .atomic_open = nfs_atomic_open, 9157 .link = nfs_link, 9158 .unlink = nfs_unlink, 9159 .symlink = nfs_symlink, 9160 .mkdir = nfs_mkdir, 9161 .rmdir = nfs_rmdir, 9162 .mknod = nfs_mknod, 9163 .rename = nfs_rename, 9164 .permission = nfs_permission, 9165 .getattr = nfs_getattr, 9166 .setattr = nfs_setattr, 9167 .getxattr = generic_getxattr, 9168 .setxattr = generic_setxattr, 9169 .listxattr = nfs4_listxattr, 9170 .removexattr = generic_removexattr, 9171 }; 9172 9173 static const struct inode_operations nfs4_file_inode_operations = { 9174 .permission = nfs_permission, 9175 .getattr = nfs_getattr, 9176 .setattr = nfs_setattr, 9177 .getxattr = generic_getxattr, 9178 .setxattr = generic_setxattr, 9179 .listxattr = nfs4_listxattr, 9180 .removexattr = generic_removexattr, 9181 }; 9182 9183 const struct nfs_rpc_ops nfs_v4_clientops = { 9184 .version = 4, /* protocol version */ 9185 .dentry_ops = &nfs4_dentry_operations, 9186 .dir_inode_ops = &nfs4_dir_inode_operations, 9187 .file_inode_ops = &nfs4_file_inode_operations, 9188 .file_ops = &nfs4_file_operations, 9189 .getroot = nfs4_proc_get_root, 9190 .submount = nfs4_submount, 9191 .try_mount = nfs4_try_mount, 9192 .getattr = nfs4_proc_getattr, 9193 .setattr = nfs4_proc_setattr, 9194 .lookup = nfs4_proc_lookup, 9195 .access = nfs4_proc_access, 9196 .readlink = nfs4_proc_readlink, 9197 .create = nfs4_proc_create, 9198 .remove = nfs4_proc_remove, 9199 .unlink_setup = nfs4_proc_unlink_setup, 9200 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 9201 .unlink_done = nfs4_proc_unlink_done, 9202 .rename_setup = nfs4_proc_rename_setup, 9203 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 9204 .rename_done = nfs4_proc_rename_done, 9205 .link = nfs4_proc_link, 9206 .symlink = nfs4_proc_symlink, 9207 .mkdir = nfs4_proc_mkdir, 9208 .rmdir = nfs4_proc_remove, 9209 .readdir = nfs4_proc_readdir, 9210 .mknod = nfs4_proc_mknod, 9211 .statfs = nfs4_proc_statfs, 9212 .fsinfo = nfs4_proc_fsinfo, 9213 .pathconf = nfs4_proc_pathconf, 9214 .set_capabilities = nfs4_server_capabilities, 9215 .decode_dirent = nfs4_decode_dirent, 9216 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 9217 .read_setup = nfs4_proc_read_setup, 9218 .read_done = nfs4_read_done, 9219 .write_setup = nfs4_proc_write_setup, 9220 .write_done = nfs4_write_done, 9221 .commit_setup = 
nfs4_proc_commit_setup,
        .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
        .commit_done    = nfs4_commit_done,
        .lock           = nfs4_proc_lock,
        .clear_acl_cache = nfs4_zap_acl_attr,
        .close_context  = nfs4_close_context,
        .open_context   = nfs4_atomic_open,
        .have_delegation = nfs4_have_delegation,
        .return_delegation = nfs4_inode_return_delegation,
        .alloc_client   = nfs4_alloc_client,
        .init_client    = nfs4_init_client,
        .free_client    = nfs4_free_client,
        .create_server  = nfs4_create_server,
        .clone_server   = nfs_clone_server,
};

static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
        .name   = XATTR_NAME_NFSV4_ACL,
        .list   = nfs4_xattr_list_nfs4_acl,
        .get    = nfs4_xattr_get_nfs4_acl,
        .set    = nfs4_xattr_set_nfs4_acl,
};

const struct xattr_handler *nfs4_xattr_handlers[] = {
        &nfs4_xattr_nfs4_acl_handler,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
        &nfs4_xattr_nfs4_label_handler,
#endif
        NULL
};

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */
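
/*
 * Illustrative sketch only (not part of this file): how a mount's minor
 * version is mapped onto one of the nfs_v4_minor_ops tables above. The
 * "minorversion" variable and the error handling are assumptions for the
 * example; the real lookup is done in the NFSv4 client setup code.
 *
 *	if (minorversion >= ARRAY_SIZE(nfs_v4_minor_ops) ||
 *	    nfs_v4_minor_ops[minorversion] == NULL)
 *		return -EPROTONOSUPPORT;
 *	clp->cl_mvops = nfs_v4_minor_ops[minorversion];
 */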