/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/nfs_idmap.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4session.h"
#include "fscache.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *, long *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state, struct nfs4_label *ilabel,
			    struct nfs4_label *olabel);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
		struct rpc_cred *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
		struct rpc_cred *);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, (void **)&label->label, &label->len);
	if (err == 0)
		return label;

	return NULL;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	if (label)
		security_release_secctx(label->label, label->len);
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}
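/*
 * Worked example (illustrative): -NFS4ERR_BADOWNER is -10039, which is
 * below the -1000 cut-off, so it reaches the switch above and is reported
 * to userland as -EINVAL; an unrecognised NFSv4 error falls through to the
 * default case and becomes -EIO.
 */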
/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
		*p++ = htonl(8);			/* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);		/* bitmap */
	*p++ = htonl(8);				/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	int res = 0;

	might_sleep();

	freezable_schedule_timeout_killable_unsafe(
		nfs4_update_delay(timeout));
	if (fatal_signal_pending(current))
		res = -ERESTARTSYS;
	return res;
}
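/*
 * Illustrative timing, assuming the defaults above: successive nfs4_delay()
 * calls double *timeout from NFS4_POLL_RETRY_MIN (HZ/10) up to the
 * NFS4_POLL_RETRY_MAX (15*HZ) ceiling, i.e. sleeps of roughly 0.1s, 0.2s,
 * 0.4s, ... capped at 15s between retries.
 */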
/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->retry = 0;
	switch(errorcode) {
		case 0:
			return 0;
		case -NFS4ERR_OPENMODE:
			if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
				nfs4_inode_return_delegation(inode);
				exception->retry = 1;
				return 0;
			}
			if (state == NULL)
				break;
			ret = nfs4_schedule_stateid_recovery(server, state);
			if (ret < 0)
				break;
			goto wait_on_recovery;
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
			if (state == NULL)
				break;
			ret = nfs4_schedule_stateid_recovery(server, state);
			if (ret < 0)
				break;
			goto wait_on_recovery;
		case -NFS4ERR_EXPIRED:
			if (state != NULL) {
				ret = nfs4_schedule_stateid_recovery(server, state);
				if (ret < 0)
					break;
			}
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_lease_recovery(clp);
			goto wait_on_recovery;
		case -NFS4ERR_MOVED:
			ret = nfs4_schedule_migration_recovery(server);
			if (ret < 0)
				break;
			goto wait_on_recovery;
		case -NFS4ERR_LEASE_MOVED:
			nfs4_schedule_lease_moved_recovery(clp);
			goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			dprintk("%s ERROR: %d Reset session\n", __func__,
				errorcode);
			nfs4_schedule_session_recovery(clp->cl_session, errorcode);
			goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
		case -NFS4ERR_FILE_OPEN:
			if (exception->timeout > HZ) {
				/* We have retried a decent amount, time to
				 * fail
				 */
				ret = -EBUSY;
				break;
			}
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			ret = nfs4_delay(server->client, &exception->timeout);
			if (ret != 0)
				break;
		case -NFS4ERR_RETRY_UNCACHED_REP:
		case -NFS4ERR_OLD_STATEID:
			exception->retry = 1;
			break;
		case -NFS4ERR_BADOWNER:
			/* The following works around a Linux server bug! */
		case -NFS4ERR_BADNAME:
			if (server->caps & NFS_CAP_UIDGID_NOMAP) {
				server->caps &= ~NFS_CAP_UIDGID_NOMAP;
				exception->retry = 1;
				printk(KERN_WARNING "NFS: v4 server %s "
						"does not accept raw "
						"uid/gids. "
						"Reenabling the idmapper.\n",
						server->nfs_client->cl_hostname);
			}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	ret = nfs4_wait_clnt_recover(clp);
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		return -EIO;
	if (ret == 0)
		exception->retry = 1;
	return ret;
}
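/*
 * Typical caller pattern (sketch; _nfs4_proc_foo is a placeholder, see
 * nfs4_do_open_reclaim() below for a real instance):
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = nfs4_handle_exception(server,
 *				_nfs4_proc_foo(server, ...),
 *				&exception);
 *	} while (exception.retry);
 */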
/*
 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
 * or 'false' otherwise.
 */
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;

	if (flavor == RPC_AUTH_GSS_KRB5I ||
	    flavor == RPC_AUTH_GSS_KRB5P)
		return true;

	return false;
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal,timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	do_renew_lease(server->nfs_client, timestamp);
}

struct nfs4_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

static void nfs4_init_sequence(struct nfs4_sequence_args *args,
			       struct nfs4_sequence_res *res, int cache_reply)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = 0;

	res->sr_slot = NULL;
}

static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
{
	args->sa_privileged = 1;
}

int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
			 struct nfs4_sequence_args *args,
			 struct nfs4_sequence_res *res,
			 struct rpc_task *task)
{
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	spin_lock(&tbl->slot_tbl_lock);
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			task->tk_timeout = HZ >> 2;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	args->sa_slot = slot;
	res->sr_slot = slot;

out_start:
	rpc_call_start(task);
	return 0;

out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				NULL, RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs40_setup_sequence);

static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	if (slot == NULL)
		goto out;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);

	res->sr_slot = NULL;
out:
	return 1;
}
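/*
 * Note on the NFSv4.0 slot lifecycle: nfs40_setup_sequence() pins a slot
 * from the client-wide table before the RPC is transmitted, and
 * nfs40_sequence_done() either hands that slot straight to a waiting task
 * or frees it once the reply has been processed.
 */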
#if defined(CONFIG_NFS_V4_1)

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot = res->sr_slot;
	bool send_new_highest_used_slotid = false;

	tbl = slot->table;
	session = tbl->session;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_user_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
	if (send_new_highest_used_slotid)
		nfs41_server_notify_highest_slotid_update(session->clp);
}

int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	bool interrupted = false;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	session = slot->table->session;

	if (slot->interrupted) {
		slot->interrupted = 0;
		interrupted = true;
	}

	trace_nfs4_sequence_done(session, res);
	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++slot->seq_nr;
		clp = session->clp;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		if (res->sr_status_flags != 0)
			nfs4_schedule_lease_recovery(clp);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation..
		 * Mark the slot as having hosted an interrupted RPC call.
		 */
		slot->interrupted = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		/*
		 * Was the last operation on this sequence interrupted?
		 * If so, retry after bumping the sequence number.
		 */
		if (interrupted) {
			++slot->seq_nr;
			goto retry_nowait;
		}
		/*
		 * Could this slot have been previously retired?
		 * If so, then the server may be expecting seq_nr = 1!
		 */
		if (slot->seq_nr != 1) {
			slot->seq_nr = 1;
			goto retry_nowait;
		}
		break;
	case -NFS4ERR_SEQ_FALSE_RETRY:
		++slot->seq_nr;
		goto retry_nowait;
	default:
		/* Just update the slot sequence no. */
		++slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
out_noaction:
	return ret;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (!res->sr_slot->table->session)
		return nfs40_sequence_done(task, res);
	return nfs41_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);
int nfs41_setup_sequence(struct nfs4_session *session,
			 struct nfs4_sequence_args *args,
			 struct nfs4_sequence_res *res,
			 struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_success;

	tbl = &session->fc_slot_table;

	task->tk_timeout = 0;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
	    !args->sa_privileged) {
		/* The state manager will wait until the slot table is empty */
		dprintk("%s session is draining\n", __func__);
		goto out_sleep;
	}

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		/* If out of memory, try again in 1/4 second */
		if (slot == ERR_PTR(-ENOMEM))
			task->tk_timeout = HZ >> 2;
		dprintk("<-- %s: no free slots\n", __func__);
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	args->sa_slot = slot;

	dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
			slot->slot_nr, slot->seq_nr);

	res->sr_slot = slot;
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
	trace_nfs4_setup_sequence(session, args);
out_success:
	rpc_call_start(task);
	return 0;
out_sleep:
	/* Privileged tasks are queued with top priority */
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				NULL, RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);

static int nfs4_setup_sequence(const struct nfs_server *server,
			       struct nfs4_sequence_args *args,
			       struct nfs4_sequence_res *res,
			       struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(server);
	int ret = 0;

	if (!session)
		return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
					    args, res, task);

	dprintk("--> %s clp %p session %p sr_slot %u\n",
		__func__, session->clp, session, res->sr_slot ?
			res->sr_slot->slot_nr : NFS4_NO_SLOT);

	ret = nfs41_setup_sequence(session, args, res, task);

	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	struct nfs4_session *session = nfs4_get_session(data->seq_server);

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

#else	/* !CONFIG_NFS_V4_1 */

static int nfs4_setup_sequence(const struct nfs_server *server,
			       struct nfs4_sequence_args *args,
			       struct nfs4_sequence_res *res,
			       struct rpc_task *task)
{
	return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
				    args, res, task);
}

int nfs4_sequence_done(struct rpc_task *task,
		       struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

#endif	/* !CONFIG_NFS_V4_1 */
static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server,
				data->seq_args, data->seq_res, task);
}

static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	int ret;
	struct rpc_task *task;
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data
	};

	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else {
		ret = task->tk_status;
		rpc_put_task(task);
	}
	return ret;
}

int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	nfs_fscache_invalidate(dir);
	spin_unlock(&dir->i_lock);
}

struct nfs4_opendata {
	struct kref kref;
	struct nfs_openargs o_arg;
	struct nfs_openres o_res;
	struct nfs_open_confirmargs c_arg;
	struct nfs_open_confirmres c_res;
	struct nfs4_string owner_name;
	struct nfs4_string group_name;
	struct nfs_fattr f_attr;
	struct nfs4_label *f_label;
	struct dentry *dir;
	struct dentry *dentry;
	struct nfs4_state_owner *owner;
	struct nfs4_state *state;
	struct iattr attrs;
	unsigned long timestamp;
	unsigned int rpc_done : 1;
	unsigned int file_created : 1;
	unsigned int is_recover : 1;
	int rpc_status;
	int cancelled;
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT)
		res |= NFS4_SHARE_WANT_NO_DELEG;
out:
	return res;
}
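/*
 * Example (illustrative): an open for FMODE_READ|FMODE_WRITE maps to
 * NFS4_SHARE_ACCESS_BOTH; if the server supports NFSv4.1-style opens
 * (NFS_CAP_ATOMIC_OPEN_V1) and O_DIRECT is set, NFS4_SHARE_WANT_NO_DELEG
 * is OR'd in so that no delegation is handed out for direct I/O.
 */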
static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.f_label = p->f_label;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		struct nfs4_label *label,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = parent->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_label))
		goto err_free_p;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
			fmode, flags);
	/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
	 * will return permission denied for all bits until close */
	if (!(flags & O_EXCL)) {
		/* ask server to check for all possible rights as results
		 * are cached */
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	p->o_arg.label = label;
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(dentry->d_inode);
	}
	if (attrs != NULL && attrs->ia_valid != 0) {
		__u32 verf[2];

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));

		verf[0] = jiffies;
		verf[1] = current->pid;
		memcpy(p->o_arg.u.verifier.data, verf,
				sizeof(p->o_arg.u.verifier.data));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->f_label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}
static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);

	nfs4_label_free(p->f_label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	int ret;

	ret = rpc_wait_for_completion_task(task);
	return ret;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
		case FMODE_READ:
			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
				&& state->n_rdonly != 0;
			break;
		case FMODE_WRITE:
			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
				&& state->n_wronly != 0;
			break;
		case FMODE_READ|FMODE_WRITE:
			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
				&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		return 0;
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
		case FMODE_WRITE:
			state->n_wronly++;
			break;
		case FMODE_READ:
			state->n_rdonly++;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

static bool nfs_need_update_open_stateid(struct nfs4_state *state,
		nfs4_stateid *stateid)
{
	if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
		return true;
	if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs_test_and_clear_all_open_stateid(state);
		return true;
	}
	if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
		return true;
	return false;
}
static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
}

static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle races with OPEN */
	if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
		nfs_resync_open_stateid_locked(state);
		return;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
}

static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_clear_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	switch (fmode) {
		case FMODE_READ:
			set_bit(NFS_O_RDONLY_STATE, &state->flags);
			break;
		case FMODE_WRITE:
			set_bit(NFS_O_WRONLY_STATE, &state->flags);
			break;
		case FMODE_READ|FMODE_WRITE:
			set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	if (!nfs_need_update_open_stateid(state, stateid))
		return;
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
}

static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		nfs4_stateid_copy(&state->stateid, deleg_stateid);
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	spin_lock(&state->owner->so_lock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
}

static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	if (rcu_dereference(nfsi->delegation) != deleg_cur ||
	    test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	rcu_read_unlock();

	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);

	return ret;
}
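/*
 * Summary: update_open_stateid() prefers to record the stateid of a
 * delegation that is still held and compatible with the open mode;
 * otherwise it falls back to the open stateid returned by the server,
 * and it kicks the state manager if NFS_STATE_RECLAIM_NOGRACE was set
 * in the meantime.
 */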
static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
		const nfs4_stateid *stateid)
{
	struct nfs4_state *state = lsp->ls_state;
	bool ret = false;

	spin_lock(&state->state_lock);
	if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	nfs4_stateid_copy(&lsp->ls_stateid, stateid);
	ret = true;
out_noupdate:
	spin_unlock(&state->state_lock);
	return ret;
}

static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	nfs4_inode_return_delegation(inode);
}

static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags;
	fmode_t fmode = opendata->o_arg.fmode;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		spin_lock(&state->owner->so_lock);
		if (can_open_cached(state, fmode, open_mode)) {
			update_open_stateflags(state, fmode);
			spin_unlock(&state->owner->so_lock);
			goto out_return_state;
		}
		spin_unlock(&state->owner->so_lock);
		rcu_read_lock();
		delegation = rcu_dereference(nfsi->delegation);
		if (!can_open_delegated(delegation, fmode)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		rcu_read_unlock();
		nfs_release_seqid(opendata->o_arg.seqid);
		if (!opendata->is_recover) {
			ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
			if (ret != 0)
				goto out;
		}
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	atomic_inc(&state->count);
	return state;
}

static void
nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
{
	struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
	struct nfs_delegation *delegation;
	int delegation_flags = 0;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation)
		delegation_flags = delegation->flags;
	rcu_read_unlock();
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
		pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
				   "returning a delegation for "
				   "OPEN(CLAIM_DELEGATE_CUR)\n",
				   clp->cl_hostname);
	} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
		nfs_inode_set_delegation(state->inode,
					 data->owner->so_cred,
					 &data->o_res);
	else
		nfs_inode_reclaim_delegation(state->inode,
					     data->owner->so_cred,
					     &data->o_res);
}
/*
 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
 * and update the nfs4_state.
 */
static struct nfs4_state *
_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode = data->state->inode;
	struct nfs4_state *state = data->state;
	int ret;

	if (!data->rpc_done) {
		if (data->rpc_status) {
			ret = data->rpc_status;
			goto err;
		}
		/* cached opens have already been processed */
		goto update;
	}

	ret = nfs_refresh_inode(inode, &data->f_attr);
	if (ret)
		goto err;

	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
update:
	update_open_stateid(state, &data->o_res.stateid, NULL,
			    data->o_arg.fmode);
	atomic_inc(&state->count);

	return state;
err:
	return ERR_PTR(ret);

}

static struct nfs4_state *
_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode;
	struct nfs4_state *state = NULL;
	int ret;

	if (!data->rpc_done) {
		state = nfs4_try_open_cached(data);
		goto out;
	}

	ret = -EAGAIN;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
		goto err;
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
	ret = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto err;
	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err_put_inode;
	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
	update_open_stateid(state, &data->o_res.stateid, NULL,
			    data->o_arg.fmode);
	iput(inode);
out:
	nfs_release_seqid(data->o_arg.seqid);
	return state;
err_put_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static struct nfs4_state *
nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
		return _nfs4_opendata_reclaim_to_nfs4_state(data);
	return _nfs4_opendata_to_nfs4_state(data);
}

static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_open_context *ctx;

	spin_lock(&state->inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&state->inode->i_lock);
		return ctx;
	}
	spin_unlock(&state->inode->i_lock);
	return ERR_PTR(-ENOENT);
}

static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
		struct nfs4_state *state, enum open_claim_type4 claim)
{
	struct nfs4_opendata *opendata;

	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
			NULL, NULL, claim, GFP_NOFS);
	if (opendata == NULL)
		return ERR_PTR(-ENOMEM);
	opendata->state = state;
	atomic_inc(&state->count);
	return opendata;
}
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
{
	struct nfs4_state *newstate;
	int ret;

	opendata->o_arg.open_flags = 0;
	opendata->o_arg.fmode = fmode;
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	if (ret != 0)
		return ret;
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	nfs4_close_state(newstate, fmode);
	*res = newstate;
	return 0;
}

static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	struct nfs4_state *newstate;
	int ret;

	/* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
	smp_rmb();
	if (state->n_rdwr != 0) {
		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_wronly != 0) {
		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_rdonly != 0) {
		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
	}
	return 0;
}
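/*
 * Note: the recovery above re-opens each share mode (read/write, write-only,
 * read-only) that still has active openers, so a single nfs4_state may be
 * re-established with up to three OPEN calls.
 */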
/*
 * OPEN_RECLAIM:
 * 	reclaim state on the server after a reboot.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_PREVIOUS);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}

static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_do_open_reclaim(ctx, state);
		trace_nfs4_open_reclaim(ctx, 0, err);
		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
			continue;
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return -EAGAIN;
	ret = nfs4_do_open_reclaim(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}

static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
{
	switch (err) {
		default:
			printk(KERN_ERR "NFS: %s: unhandled error "
					"%d.\n", __func__, err);
		case 0:
		case -ENOENT:
		case -ESTALE:
			break;
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
			set_bit(NFS_DELEGATED_STATE, &state->flags);
			nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
			return -EAGAIN;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
			set_bit(NFS_DELEGATED_STATE, &state->flags);
		case -NFS4ERR_EXPIRED:
			/* Don't recall a delegation if it was lost */
			nfs4_schedule_lease_recovery(server->nfs_client);
			return -EAGAIN;
		case -NFS4ERR_MOVED:
			nfs4_schedule_migration_recovery(server);
			return -EAGAIN;
		case -NFS4ERR_LEASE_MOVED:
			nfs4_schedule_lease_moved_recovery(server->nfs_client);
			return -EAGAIN;
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_OPENMODE:
			nfs_inode_find_state_and_recover(state->inode,
					stateid);
			nfs4_schedule_stateid_recovery(server, state);
			return -EAGAIN;
		case -NFS4ERR_DELAY:
		case -NFS4ERR_GRACE:
			set_bit(NFS_DELEGATED_STATE, &state->flags);
			ssleep(1);
			return -EAGAIN;
		case -ENOMEM:
		case -NFS4ERR_DENIED:
			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			return 0;
	}
	return err;
}
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_opendata *opendata;
	int err;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_DELEG_CUR_FH);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
	err = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
}

static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
			     &data->c_arg.seq_args, &data->c_res.seq_res, task);
}

static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	nfs40_sequence_done(task, &data->c_res.seq_res);

	data->rpc_status = task->tk_status;
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
		nfs_confirm_seqid(&data->owner->so_seqid, 0);
		renew_lease(data->o_res.server, data->timestamp);
		data->rpc_done = 1;
	}
}

static void nfs4_open_confirm_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
		goto out_free;
	/* In case of error, no cleanup! */
	if (!data->rpc_done)
		goto out_free;
	state = nfs4_opendata_to_nfs4_state(data);
	if (!IS_ERR(state))
		nfs4_close_state(state, data->o_arg.fmode);
out_free:
	nfs4_opendata_put(data);
}

static const struct rpc_call_ops nfs4_open_confirm_ops = {
	.rpc_call_prepare = nfs4_open_confirm_prepare,
	.rpc_call_done = nfs4_open_confirm_done,
	.rpc_release = nfs4_open_confirm_release,
};

/*
 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
 */
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
		.rpc_argp = &data->c_arg,
		.rpc_resp = &data->c_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_confirm_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->timestamp = jiffies;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		data->cancelled = 1;
		smp_wmb();
	} else
		status = data->rpc_status;
	rpc_put_task(task);
	return status;
}
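/*
 * Note: OPEN_CONFIRM is only used by NFSv4.0, which is why the confirm
 * callbacks above go through the client-wide cl_slot_tbl via
 * nfs40_setup_sequence()/nfs40_sequence_done() rather than the sessions
 * code.
 */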
static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state_owner *sp = data->owner;
	struct nfs_client *clp = sp->so_server->nfs_client;

	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
		goto out_wait;
	/*
	 * Check if we still need to send an OPEN call, or if we can use
	 * a delegation instead.
	 */
	if (data->state != NULL) {
		struct nfs_delegation *delegation;

		if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
			goto out_no_action;
		rcu_read_lock();
		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
		if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
		    data->o_arg.claim != NFS4_OPEN_CLAIM_DELEG_CUR_FH &&
		    can_open_delegated(delegation, data->o_arg.fmode))
			goto unlock_no_action;
		rcu_read_unlock();
	}
	/* Update client id. */
	data->o_arg.clientid = clp->cl_clientid;
	switch (data->o_arg.claim) {
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
	case NFS4_OPEN_CLAIM_FH:
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
	}
	data->timestamp = jiffies;
	if (nfs4_setup_sequence(data->o_arg.server,
				&data->o_arg.seq_args,
				&data->o_res.seq_res,
				task) != 0)
		nfs_release_seqid(data->o_arg.seqid);

	/* Set the create mode (note dependency on the session type) */
	data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
	if (data->o_arg.open_flags & O_EXCL) {
		data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
		if (nfs4_has_persistent_session(clp))
			data->o_arg.createmode = NFS4_CREATE_GUARDED;
		else if (clp->cl_mvops->minor_version > 0)
			data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
	}
	return;
unlock_no_action:
	rcu_read_unlock();
out_no_action:
	task->tk_action = NULL;
out_wait:
	nfs4_sequence_done(task, &data->o_res.seq_res);
}

static void nfs4_open_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;

	if (!nfs4_sequence_done(task, &data->o_res.seq_res))
		return;

	if (task->tk_status == 0) {
		if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
			switch (data->o_res.f_attr->mode & S_IFMT) {
			case S_IFREG:
				break;
			case S_IFLNK:
				data->rpc_status = -ELOOP;
				break;
			case S_IFDIR:
				data->rpc_status = -EISDIR;
				break;
			default:
				data->rpc_status = -ENOTDIR;
			}
		}
		renew_lease(data->o_res.server, data->timestamp);
		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
			nfs_confirm_seqid(&data->owner->so_seqid, 0);
	}
	data->rpc_done = 1;
}
*/ 1933 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 1934 goto out_free; 1935 state = nfs4_opendata_to_nfs4_state(data); 1936 if (!IS_ERR(state)) 1937 nfs4_close_state(state, data->o_arg.fmode); 1938 out_free: 1939 nfs4_opendata_put(data); 1940 } 1941 1942 static const struct rpc_call_ops nfs4_open_ops = { 1943 .rpc_call_prepare = nfs4_open_prepare, 1944 .rpc_call_done = nfs4_open_done, 1945 .rpc_release = nfs4_open_release, 1946 }; 1947 1948 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) 1949 { 1950 struct inode *dir = data->dir->d_inode; 1951 struct nfs_server *server = NFS_SERVER(dir); 1952 struct nfs_openargs *o_arg = &data->o_arg; 1953 struct nfs_openres *o_res = &data->o_res; 1954 struct rpc_task *task; 1955 struct rpc_message msg = { 1956 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 1957 .rpc_argp = o_arg, 1958 .rpc_resp = o_res, 1959 .rpc_cred = data->owner->so_cred, 1960 }; 1961 struct rpc_task_setup task_setup_data = { 1962 .rpc_client = server->client, 1963 .rpc_message = &msg, 1964 .callback_ops = &nfs4_open_ops, 1965 .callback_data = data, 1966 .workqueue = nfsiod_workqueue, 1967 .flags = RPC_TASK_ASYNC, 1968 }; 1969 int status; 1970 1971 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); 1972 kref_get(&data->kref); 1973 data->rpc_done = 0; 1974 data->rpc_status = 0; 1975 data->cancelled = 0; 1976 data->is_recover = 0; 1977 if (isrecover) { 1978 nfs4_set_sequence_privileged(&o_arg->seq_args); 1979 data->is_recover = 1; 1980 } 1981 task = rpc_run_task(&task_setup_data); 1982 if (IS_ERR(task)) 1983 return PTR_ERR(task); 1984 status = nfs4_wait_for_completion_rpc_task(task); 1985 if (status != 0) { 1986 data->cancelled = 1; 1987 smp_wmb(); 1988 } else 1989 status = data->rpc_status; 1990 rpc_put_task(task); 1991 1992 return status; 1993 } 1994 1995 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 1996 { 1997 struct inode *dir = data->dir->d_inode; 1998 struct nfs_openres *o_res = &data->o_res; 1999 int status; 2000 2001 status = nfs4_run_open_task(data, 1); 2002 if (status != 0 || !data->rpc_done) 2003 return status; 2004 2005 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2006 2007 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2008 status = _nfs4_proc_open_confirm(data); 2009 if (status != 0) 2010 return status; 2011 } 2012 2013 return status; 2014 } 2015 2016 /* 2017 * Additional permission checks in order to distinguish between an 2018 * open for read, and an open for execute. This works around the 2019 * fact that NFSv4 OPEN treats read and execute permissions as being 2020 * the same. 2021 * Note that in the non-execute case, we want to turn off permission 2022 * checking if we just created a new file (POSIX open() semantics). 2023 */ 2024 static int nfs4_opendata_access(struct rpc_cred *cred, 2025 struct nfs4_opendata *opendata, 2026 struct nfs4_state *state, fmode_t fmode, 2027 int openflags) 2028 { 2029 struct nfs_access_entry cache; 2030 u32 mask; 2031 2032 /* access call failed or for some reason the server doesn't 2033 * support any access modes -- defer access call until later */ 2034 if (opendata->o_res.access_supported == 0) 2035 return 0; 2036 2037 mask = 0; 2038 /* 2039 * Use openflags to check for exec, because fmode won't 2040 * always have FMODE_EXEC set when file open for exec. 
2041 */ 2042 if (openflags & __FMODE_EXEC) { 2043 /* ONLY check for exec rights */ 2044 mask = MAY_EXEC; 2045 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2046 mask = MAY_READ; 2047 2048 cache.cred = cred; 2049 cache.jiffies = jiffies; 2050 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2051 nfs_access_add_cache(state->inode, &cache); 2052 2053 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 2054 return 0; 2055 2056 /* even though OPEN succeeded, access is denied. Close the file */ 2057 nfs4_close_state(state, fmode); 2058 return -EACCES; 2059 } 2060 2061 /* 2062 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2063 */ 2064 static int _nfs4_proc_open(struct nfs4_opendata *data) 2065 { 2066 struct inode *dir = data->dir->d_inode; 2067 struct nfs_server *server = NFS_SERVER(dir); 2068 struct nfs_openargs *o_arg = &data->o_arg; 2069 struct nfs_openres *o_res = &data->o_res; 2070 int status; 2071 2072 status = nfs4_run_open_task(data, 0); 2073 if (!data->rpc_done) 2074 return status; 2075 if (status != 0) { 2076 if (status == -NFS4ERR_BADNAME && 2077 !(o_arg->open_flags & O_CREAT)) 2078 return -ENOENT; 2079 return status; 2080 } 2081 2082 nfs_fattr_map_and_free_names(server, &data->f_attr); 2083 2084 if (o_arg->open_flags & O_CREAT) { 2085 update_changeattr(dir, &o_res->cinfo); 2086 if (o_arg->open_flags & O_EXCL) 2087 data->file_created = 1; 2088 else if (o_res->cinfo.before != o_res->cinfo.after) 2089 data->file_created = 1; 2090 } 2091 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2092 server->caps &= ~NFS_CAP_POSIX_LOCK; 2093 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2094 status = _nfs4_proc_open_confirm(data); 2095 if (status != 0) 2096 return status; 2097 } 2098 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 2099 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label); 2100 return 0; 2101 } 2102 2103 static int nfs4_recover_expired_lease(struct nfs_server *server) 2104 { 2105 return nfs4_client_recover_expired_lease(server->nfs_client); 2106 } 2107 2108 /* 2109 * OPEN_EXPIRED: 2110 * reclaim state on the server after a network partition. 
2111 * Assumes caller holds the appropriate lock 2112 */ 2113 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2114 { 2115 struct nfs4_opendata *opendata; 2116 int ret; 2117 2118 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2119 NFS4_OPEN_CLAIM_FH); 2120 if (IS_ERR(opendata)) 2121 return PTR_ERR(opendata); 2122 ret = nfs4_open_recover(opendata, state); 2123 if (ret == -ESTALE) 2124 d_drop(ctx->dentry); 2125 nfs4_opendata_put(opendata); 2126 return ret; 2127 } 2128 2129 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2130 { 2131 struct nfs_server *server = NFS_SERVER(state->inode); 2132 struct nfs4_exception exception = { }; 2133 int err; 2134 2135 do { 2136 err = _nfs4_open_expired(ctx, state); 2137 trace_nfs4_open_expired(ctx, 0, err); 2138 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2139 continue; 2140 switch (err) { 2141 default: 2142 goto out; 2143 case -NFS4ERR_GRACE: 2144 case -NFS4ERR_DELAY: 2145 nfs4_handle_exception(server, err, &exception); 2146 err = 0; 2147 } 2148 } while (exception.retry); 2149 out: 2150 return err; 2151 } 2152 2153 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2154 { 2155 struct nfs_open_context *ctx; 2156 int ret; 2157 2158 ctx = nfs4_state_find_open_context(state); 2159 if (IS_ERR(ctx)) 2160 return -EAGAIN; 2161 ret = nfs4_do_open_expired(ctx, state); 2162 put_nfs_open_context(ctx); 2163 return ret; 2164 } 2165 2166 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state) 2167 { 2168 nfs_remove_bad_delegation(state->inode); 2169 write_seqlock(&state->seqlock); 2170 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2171 write_sequnlock(&state->seqlock); 2172 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2173 } 2174 2175 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2176 { 2177 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2178 nfs_finish_clear_delegation_stateid(state); 2179 } 2180 2181 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2182 { 2183 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2184 nfs40_clear_delegation_stateid(state); 2185 return nfs4_open_expired(sp, state); 2186 } 2187 2188 #if defined(CONFIG_NFS_V4_1) 2189 static void nfs41_check_delegation_stateid(struct nfs4_state *state) 2190 { 2191 struct nfs_server *server = NFS_SERVER(state->inode); 2192 nfs4_stateid stateid; 2193 struct nfs_delegation *delegation; 2194 struct rpc_cred *cred; 2195 int status; 2196 2197 /* Get the delegation credential for use by test/free_stateid */ 2198 rcu_read_lock(); 2199 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2200 if (delegation == NULL) { 2201 rcu_read_unlock(); 2202 return; 2203 } 2204 2205 nfs4_stateid_copy(&stateid, &delegation->stateid); 2206 cred = get_rpccred(delegation->cred); 2207 rcu_read_unlock(); 2208 status = nfs41_test_stateid(server, &stateid, cred); 2209 trace_nfs4_test_delegation_stateid(state, NULL, status); 2210 2211 if (status != NFS_OK) { 2212 /* Free the stateid unless the server explicitly 2213 * informs us the stateid is unrecognized. 
*/ 2214 if (status != -NFS4ERR_BAD_STATEID) 2215 nfs41_free_stateid(server, &stateid, cred); 2216 nfs_finish_clear_delegation_stateid(state); 2217 } 2218 2219 put_rpccred(cred); 2220 } 2221 2222 /** 2223 * nfs41_check_open_stateid - possibly free an open stateid 2224 * 2225 * @state: NFSv4 state for an inode 2226 * 2227 * Returns NFS_OK if recovery for this stateid is now finished. 2228 * Otherwise a negative NFS4ERR value is returned. 2229 */ 2230 static int nfs41_check_open_stateid(struct nfs4_state *state) 2231 { 2232 struct nfs_server *server = NFS_SERVER(state->inode); 2233 nfs4_stateid *stateid = &state->open_stateid; 2234 struct rpc_cred *cred = state->owner->so_cred; 2235 int status; 2236 2237 /* If a state reset has been done, test_stateid is unneeded */ 2238 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) && 2239 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) && 2240 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0)) 2241 return -NFS4ERR_BAD_STATEID; 2242 2243 status = nfs41_test_stateid(server, stateid, cred); 2244 trace_nfs4_test_open_stateid(state, NULL, status); 2245 if (status != NFS_OK) { 2246 /* Free the stateid unless the server explicitly 2247 * informs us the stateid is unrecognized. */ 2248 if (status != -NFS4ERR_BAD_STATEID) 2249 nfs41_free_stateid(server, stateid, cred); 2250 2251 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2252 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2253 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2254 clear_bit(NFS_OPEN_STATE, &state->flags); 2255 } 2256 return status; 2257 } 2258 2259 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2260 { 2261 int status; 2262 2263 nfs41_check_delegation_stateid(state); 2264 status = nfs41_check_open_stateid(state); 2265 if (status != NFS_OK) 2266 status = nfs4_open_expired(sp, state); 2267 return status; 2268 } 2269 #endif 2270 2271 /* 2272 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 2273 * fields corresponding to attributes that were used to store the verifier. 2274 * Make sure we clobber those fields in the later setattr call 2275 */ 2276 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) 2277 { 2278 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 2279 !(sattr->ia_valid & ATTR_ATIME_SET)) 2280 sattr->ia_valid |= ATTR_ATIME; 2281 2282 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 2283 !(sattr->ia_valid & ATTR_MTIME_SET)) 2284 sattr->ia_valid |= ATTR_MTIME; 2285 } 2286 2287 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 2288 fmode_t fmode, 2289 int flags, 2290 struct nfs_open_context *ctx) 2291 { 2292 struct nfs4_state_owner *sp = opendata->owner; 2293 struct nfs_server *server = sp->so_server; 2294 struct dentry *dentry; 2295 struct nfs4_state *state; 2296 unsigned int seq; 2297 int ret; 2298 2299 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 2300 2301 ret = _nfs4_proc_open(opendata); 2302 if (ret != 0) 2303 goto out; 2304 2305 state = nfs4_opendata_to_nfs4_state(opendata); 2306 ret = PTR_ERR(state); 2307 if (IS_ERR(state)) 2308 goto out; 2309 if (server->caps & NFS_CAP_POSIX_LOCK) 2310 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 2311 2312 dentry = opendata->dentry; 2313 if (dentry->d_inode == NULL) { 2314 /* FIXME: Is this d_drop() ever needed? 
*/ 2315 d_drop(dentry); 2316 dentry = d_add_unique(dentry, igrab(state->inode)); 2317 if (dentry == NULL) { 2318 dentry = opendata->dentry; 2319 } else if (dentry != ctx->dentry) { 2320 dput(ctx->dentry); 2321 ctx->dentry = dget(dentry); 2322 } 2323 nfs_set_verifier(dentry, 2324 nfs_save_change_attribute(opendata->dir->d_inode)); 2325 } 2326 2327 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags); 2328 if (ret != 0) 2329 goto out; 2330 2331 ctx->state = state; 2332 if (dentry->d_inode == state->inode) { 2333 nfs_inode_attach_open_context(ctx); 2334 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 2335 nfs4_schedule_stateid_recovery(server, state); 2336 } 2337 out: 2338 return ret; 2339 } 2340 2341 /* 2342 * Returns a referenced nfs4_state 2343 */ 2344 static int _nfs4_do_open(struct inode *dir, 2345 struct nfs_open_context *ctx, 2346 int flags, 2347 struct iattr *sattr, 2348 struct nfs4_label *label, 2349 int *opened) 2350 { 2351 struct nfs4_state_owner *sp; 2352 struct nfs4_state *state = NULL; 2353 struct nfs_server *server = NFS_SERVER(dir); 2354 struct nfs4_opendata *opendata; 2355 struct dentry *dentry = ctx->dentry; 2356 struct rpc_cred *cred = ctx->cred; 2357 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 2358 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC); 2359 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 2360 struct nfs4_label *olabel = NULL; 2361 int status; 2362 2363 /* Protect against reboot recovery conflicts */ 2364 status = -ENOMEM; 2365 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 2366 if (sp == NULL) { 2367 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 2368 goto out_err; 2369 } 2370 status = nfs4_recover_expired_lease(server); 2371 if (status != 0) 2372 goto err_put_state_owner; 2373 if (dentry->d_inode != NULL) 2374 nfs4_return_incompatible_delegation(dentry->d_inode, fmode); 2375 status = -ENOMEM; 2376 if (dentry->d_inode) 2377 claim = NFS4_OPEN_CLAIM_FH; 2378 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, 2379 label, claim, GFP_KERNEL); 2380 if (opendata == NULL) 2381 goto err_put_state_owner; 2382 2383 if (label) { 2384 olabel = nfs4_label_alloc(server, GFP_KERNEL); 2385 if (IS_ERR(olabel)) { 2386 status = PTR_ERR(olabel); 2387 goto err_opendata_put; 2388 } 2389 } 2390 2391 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 2392 if (!opendata->f_attr.mdsthreshold) { 2393 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 2394 if (!opendata->f_attr.mdsthreshold) 2395 goto err_free_label; 2396 } 2397 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 2398 } 2399 if (dentry->d_inode != NULL) 2400 opendata->state = nfs4_get_open_state(dentry->d_inode, sp); 2401 2402 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx); 2403 if (status != 0) 2404 goto err_free_label; 2405 state = ctx->state; 2406 2407 if ((opendata->o_arg.open_flags & O_EXCL) && 2408 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 2409 nfs4_exclusive_attrset(opendata, sattr); 2410 2411 nfs_fattr_init(opendata->o_res.f_attr); 2412 status = nfs4_do_setattr(state->inode, cred, 2413 opendata->o_res.f_attr, sattr, 2414 state, label, olabel); 2415 if (status == 0) { 2416 nfs_setattr_update_inode(state->inode, sattr); 2417 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr); 2418 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2419 } 2420 } 2421 if (opendata->file_created) 2422 *opened |= FILE_CREATED; 2423 2424 if (pnfs_use_threshold(ctx_th, 
opendata->f_attr.mdsthreshold, server)) { 2425 *ctx_th = opendata->f_attr.mdsthreshold; 2426 opendata->f_attr.mdsthreshold = NULL; 2427 } 2428 2429 nfs4_label_free(olabel); 2430 2431 nfs4_opendata_put(opendata); 2432 nfs4_put_state_owner(sp); 2433 return 0; 2434 err_free_label: 2435 nfs4_label_free(olabel); 2436 err_opendata_put: 2437 nfs4_opendata_put(opendata); 2438 err_put_state_owner: 2439 nfs4_put_state_owner(sp); 2440 out_err: 2441 return status; 2442 } 2443 2444 2445 static struct nfs4_state *nfs4_do_open(struct inode *dir, 2446 struct nfs_open_context *ctx, 2447 int flags, 2448 struct iattr *sattr, 2449 struct nfs4_label *label, 2450 int *opened) 2451 { 2452 struct nfs_server *server = NFS_SERVER(dir); 2453 struct nfs4_exception exception = { }; 2454 struct nfs4_state *res; 2455 int status; 2456 2457 do { 2458 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened); 2459 res = ctx->state; 2460 trace_nfs4_open_file(ctx, flags, status); 2461 if (status == 0) 2462 break; 2463 /* NOTE: BAD_SEQID means the server and client disagree about the 2464 * book-keeping w.r.t. state-changing operations 2465 * (OPEN/CLOSE/LOCK/LOCKU...) 2466 * It is actually a sign of a bug on the client or on the server. 2467 * 2468 * If we receive a BAD_SEQID error in the particular case of 2469 * doing an OPEN, we assume that nfs_increment_open_seqid() will 2470 * have unhashed the old state_owner for us, and that we can 2471 * therefore safely retry using a new one. We should still warn 2472 * the user though... 2473 */ 2474 if (status == -NFS4ERR_BAD_SEQID) { 2475 pr_warn_ratelimited("NFS: v4 server %s " 2476 " returned a bad sequence-id error!\n", 2477 NFS_SERVER(dir)->nfs_client->cl_hostname); 2478 exception.retry = 1; 2479 continue; 2480 } 2481 /* 2482 * BAD_STATEID on OPEN means that the server cancelled our 2483 * state before it received the OPEN_CONFIRM. 2484 * Recover by retrying the request as per the discussion 2485 * on Page 181 of RFC3530. 2486 */ 2487 if (status == -NFS4ERR_BAD_STATEID) { 2488 exception.retry = 1; 2489 continue; 2490 } 2491 if (status == -EAGAIN) { 2492 /* We must have found a delegation */ 2493 exception.retry = 1; 2494 continue; 2495 } 2496 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 2497 continue; 2498 res = ERR_PTR(nfs4_handle_exception(server, 2499 status, &exception)); 2500 } while (exception.retry); 2501 return res; 2502 } 2503 2504 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2505 struct nfs_fattr *fattr, struct iattr *sattr, 2506 struct nfs4_state *state, struct nfs4_label *ilabel, 2507 struct nfs4_label *olabel) 2508 { 2509 struct nfs_server *server = NFS_SERVER(inode); 2510 struct nfs_setattrargs arg = { 2511 .fh = NFS_FH(inode), 2512 .iap = sattr, 2513 .server = server, 2514 .bitmask = server->attr_bitmask, 2515 .label = ilabel, 2516 }; 2517 struct nfs_setattrres res = { 2518 .fattr = fattr, 2519 .label = olabel, 2520 .server = server, 2521 }; 2522 struct rpc_message msg = { 2523 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2524 .rpc_argp = &arg, 2525 .rpc_resp = &res, 2526 .rpc_cred = cred, 2527 }; 2528 unsigned long timestamp = jiffies; 2529 fmode_t fmode; 2530 bool truncate; 2531 int status; 2532 2533 arg.bitmask = nfs4_bitmask(server, ilabel); 2534 if (ilabel) 2535 arg.bitmask = nfs4_bitmask(server, olabel); 2536 2537 nfs_fattr_init(fattr); 2538 2539 /* Servers should only apply open mode checks for file size changes */ 2540 truncate = (sattr->ia_valid & ATTR_SIZE) ? 
true : false; 2541 fmode = truncate ? FMODE_WRITE : FMODE_READ; 2542 2543 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) { 2544 /* Use that stateid */ 2545 } else if (truncate && state != NULL) { 2546 struct nfs_lockowner lockowner = { 2547 .l_owner = current->files, 2548 .l_pid = current->tgid, 2549 }; 2550 if (!nfs4_valid_open_stateid(state)) 2551 return -EBADF; 2552 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2553 &lockowner) == -EIO) 2554 return -EBADF; 2555 } else 2556 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2557 2558 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2559 if (status == 0 && state != NULL) 2560 renew_lease(server, timestamp); 2561 return status; 2562 } 2563 2564 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2565 struct nfs_fattr *fattr, struct iattr *sattr, 2566 struct nfs4_state *state, struct nfs4_label *ilabel, 2567 struct nfs4_label *olabel) 2568 { 2569 struct nfs_server *server = NFS_SERVER(inode); 2570 struct nfs4_exception exception = { 2571 .state = state, 2572 .inode = inode, 2573 }; 2574 int err; 2575 do { 2576 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel); 2577 trace_nfs4_setattr(inode, err); 2578 switch (err) { 2579 case -NFS4ERR_OPENMODE: 2580 if (!(sattr->ia_valid & ATTR_SIZE)) { 2581 pr_warn_once("NFSv4: server %s is incorrectly " 2582 "applying open mode checks to " 2583 "a SETATTR that is not " 2584 "changing file size.\n", 2585 server->nfs_client->cl_hostname); 2586 } 2587 if (state && !(state->state & FMODE_WRITE)) { 2588 err = -EBADF; 2589 if (sattr->ia_valid & ATTR_OPEN) 2590 err = -EACCES; 2591 goto out; 2592 } 2593 } 2594 err = nfs4_handle_exception(server, err, &exception); 2595 } while (exception.retry); 2596 out: 2597 return err; 2598 } 2599 2600 struct nfs4_closedata { 2601 struct inode *inode; 2602 struct nfs4_state *state; 2603 struct nfs_closeargs arg; 2604 struct nfs_closeres res; 2605 struct nfs_fattr fattr; 2606 unsigned long timestamp; 2607 bool roc; 2608 u32 roc_barrier; 2609 }; 2610 2611 static void nfs4_free_closedata(void *data) 2612 { 2613 struct nfs4_closedata *calldata = data; 2614 struct nfs4_state_owner *sp = calldata->state->owner; 2615 struct super_block *sb = calldata->state->inode->i_sb; 2616 2617 if (calldata->roc) 2618 pnfs_roc_release(calldata->state->inode); 2619 nfs4_put_open_state(calldata->state); 2620 nfs_free_seqid(calldata->arg.seqid); 2621 nfs4_put_state_owner(sp); 2622 nfs_sb_deactive(sb); 2623 kfree(calldata); 2624 } 2625 2626 static void nfs4_close_done(struct rpc_task *task, void *data) 2627 { 2628 struct nfs4_closedata *calldata = data; 2629 struct nfs4_state *state = calldata->state; 2630 struct nfs_server *server = NFS_SERVER(calldata->inode); 2631 nfs4_stateid *res_stateid = NULL; 2632 2633 dprintk("%s: begin!\n", __func__); 2634 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2635 return; 2636 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 2637 /* hmm. we are done with the inode, and in the process of freeing 2638 * the state_owner. 
we keep this around to process errors 2639 */ 2640 switch (task->tk_status) { 2641 case 0: 2642 res_stateid = &calldata->res.stateid; 2643 if (calldata->arg.fmode == 0 && calldata->roc) 2644 pnfs_roc_set_barrier(state->inode, 2645 calldata->roc_barrier); 2646 renew_lease(server, calldata->timestamp); 2647 break; 2648 case -NFS4ERR_ADMIN_REVOKED: 2649 case -NFS4ERR_STALE_STATEID: 2650 case -NFS4ERR_OLD_STATEID: 2651 case -NFS4ERR_BAD_STATEID: 2652 case -NFS4ERR_EXPIRED: 2653 if (!nfs4_stateid_match(&calldata->arg.stateid, 2654 &state->stateid)) { 2655 rpc_restart_call_prepare(task); 2656 goto out_release; 2657 } 2658 if (calldata->arg.fmode == 0) 2659 break; 2660 default: 2661 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) { 2662 rpc_restart_call_prepare(task); 2663 goto out_release; 2664 } 2665 } 2666 nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode); 2667 out_release: 2668 nfs_release_seqid(calldata->arg.seqid); 2669 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2670 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2671 } 2672 2673 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2674 { 2675 struct nfs4_closedata *calldata = data; 2676 struct nfs4_state *state = calldata->state; 2677 struct inode *inode = calldata->inode; 2678 bool is_rdonly, is_wronly, is_rdwr; 2679 int call_close = 0; 2680 2681 dprintk("%s: begin!\n", __func__); 2682 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2683 goto out_wait; 2684 2685 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2686 spin_lock(&state->owner->so_lock); 2687 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 2688 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 2689 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 2690 nfs4_stateid_copy(&calldata->arg.stateid, &state->stateid); 2691 /* Calculate the change in open mode */ 2692 calldata->arg.fmode = 0; 2693 if (state->n_rdwr == 0) { 2694 if (state->n_rdonly == 0) 2695 call_close |= is_rdonly; 2696 else if (is_rdonly) 2697 calldata->arg.fmode |= FMODE_READ; 2698 if (state->n_wronly == 0) 2699 call_close |= is_wronly; 2700 else if (is_wronly) 2701 calldata->arg.fmode |= FMODE_WRITE; 2702 } else if (is_rdwr) 2703 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 2704 2705 if (calldata->arg.fmode == 0) 2706 call_close |= is_rdwr; 2707 2708 if (!nfs4_valid_open_stateid(state)) 2709 call_close = 0; 2710 spin_unlock(&state->owner->so_lock); 2711 2712 if (!call_close) { 2713 /* Note: exit _without_ calling nfs4_close_done */ 2714 goto out_no_action; 2715 } 2716 2717 if (calldata->arg.fmode == 0) { 2718 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2719 if (calldata->roc && 2720 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) { 2721 nfs_release_seqid(calldata->arg.seqid); 2722 goto out_wait; 2723 } 2724 } 2725 calldata->arg.share_access = 2726 nfs4_map_atomic_open_share(NFS_SERVER(inode), 2727 calldata->arg.fmode, 0); 2728 2729 nfs_fattr_init(calldata->res.fattr); 2730 calldata->timestamp = jiffies; 2731 if (nfs4_setup_sequence(NFS_SERVER(inode), 2732 &calldata->arg.seq_args, 2733 &calldata->res.seq_res, 2734 task) != 0) 2735 nfs_release_seqid(calldata->arg.seqid); 2736 dprintk("%s: done!\n", __func__); 2737 return; 2738 out_no_action: 2739 task->tk_action = NULL; 2740 out_wait: 2741 nfs4_sequence_done(task, &calldata->res.seq_res); 2742 } 2743 2744 static const struct rpc_call_ops nfs4_close_ops = { 2745 .rpc_call_prepare = nfs4_close_prepare, 2746 
.rpc_call_done = nfs4_close_done, 2747 .rpc_release = nfs4_free_closedata, 2748 }; 2749 2750 static bool nfs4_roc(struct inode *inode) 2751 { 2752 if (!nfs_have_layout(inode)) 2753 return false; 2754 return pnfs_roc(inode); 2755 } 2756 2757 /* 2758 * It is possible for data to be read/written from a mem-mapped file 2759 * after the sys_close call (which hits the vfs layer as a flush). 2760 * This means that we can't safely call nfsv4 close on a file until 2761 * the inode is cleared. This in turn means that we are not good 2762 * NFSv4 citizens - we do not indicate to the server to update the file's 2763 * share state even when we are done with one of the three share 2764 * stateid's in the inode. 2765 * 2766 * NOTE: Caller must be holding the sp->so_owner semaphore! 2767 */ 2768 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 2769 { 2770 struct nfs_server *server = NFS_SERVER(state->inode); 2771 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 2772 struct nfs4_closedata *calldata; 2773 struct nfs4_state_owner *sp = state->owner; 2774 struct rpc_task *task; 2775 struct rpc_message msg = { 2776 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2777 .rpc_cred = state->owner->so_cred, 2778 }; 2779 struct rpc_task_setup task_setup_data = { 2780 .rpc_client = server->client, 2781 .rpc_message = &msg, 2782 .callback_ops = &nfs4_close_ops, 2783 .workqueue = nfsiod_workqueue, 2784 .flags = RPC_TASK_ASYNC, 2785 }; 2786 int status = -ENOMEM; 2787 2788 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 2789 &task_setup_data.rpc_client, &msg); 2790 2791 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2792 if (calldata == NULL) 2793 goto out; 2794 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2795 calldata->inode = state->inode; 2796 calldata->state = state; 2797 calldata->arg.fh = NFS_FH(state->inode); 2798 /* Serialization for the sequence id */ 2799 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 2800 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 2801 if (IS_ERR(calldata->arg.seqid)) 2802 goto out_free_calldata; 2803 calldata->arg.fmode = 0; 2804 calldata->arg.bitmask = server->cache_consistency_bitmask; 2805 calldata->res.fattr = &calldata->fattr; 2806 calldata->res.seqid = calldata->arg.seqid; 2807 calldata->res.server = server; 2808 calldata->roc = nfs4_roc(state->inode); 2809 nfs_sb_active(calldata->inode->i_sb); 2810 2811 msg.rpc_argp = &calldata->arg; 2812 msg.rpc_resp = &calldata->res; 2813 task_setup_data.callback_data = calldata; 2814 task = rpc_run_task(&task_setup_data); 2815 if (IS_ERR(task)) 2816 return PTR_ERR(task); 2817 status = 0; 2818 if (wait) 2819 status = rpc_wait_for_completion_task(task); 2820 rpc_put_task(task); 2821 return status; 2822 out_free_calldata: 2823 kfree(calldata); 2824 out: 2825 nfs4_put_open_state(state); 2826 nfs4_put_state_owner(sp); 2827 return status; 2828 } 2829 2830 static struct inode * 2831 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 2832 int open_flags, struct iattr *attr, int *opened) 2833 { 2834 struct nfs4_state *state; 2835 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; 2836 2837 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 2838 2839 /* Protect against concurrent sillydeletes */ 2840 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 2841 2842 nfs4_label_release_security(label); 2843 2844 if (IS_ERR(state)) 2845 return ERR_CAST(state); 2846 return state->inode; 2847 } 2848 
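/*
 * Release any NFSv4 open state attached to an open context.  This is a
 * no-op if no state was ever attached; otherwise the state is closed
 * either synchronously or asynchronously depending on @is_sync.
 */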
2849 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 2850 { 2851 if (ctx->state == NULL) 2852 return; 2853 if (is_sync) 2854 nfs4_close_sync(ctx->state, ctx->mode); 2855 else 2856 nfs4_close_state(ctx->state, ctx->mode); 2857 } 2858 2859 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 2860 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 2861 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL) 2862 2863 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2864 { 2865 struct nfs4_server_caps_arg args = { 2866 .fhandle = fhandle, 2867 }; 2868 struct nfs4_server_caps_res res = {}; 2869 struct rpc_message msg = { 2870 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 2871 .rpc_argp = &args, 2872 .rpc_resp = &res, 2873 }; 2874 int status; 2875 2876 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2877 if (status == 0) { 2878 /* Sanity check the server answers */ 2879 switch (server->nfs_client->cl_minorversion) { 2880 case 0: 2881 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 2882 res.attr_bitmask[2] = 0; 2883 break; 2884 case 1: 2885 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 2886 break; 2887 case 2: 2888 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 2889 } 2890 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 2891 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 2892 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 2893 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 2894 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 2895 NFS_CAP_CTIME|NFS_CAP_MTIME| 2896 NFS_CAP_SECURITY_LABEL); 2897 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 2898 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 2899 server->caps |= NFS_CAP_ACLS; 2900 if (res.has_links != 0) 2901 server->caps |= NFS_CAP_HARDLINKS; 2902 if (res.has_symlinks != 0) 2903 server->caps |= NFS_CAP_SYMLINKS; 2904 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 2905 server->caps |= NFS_CAP_FILEID; 2906 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 2907 server->caps |= NFS_CAP_MODE; 2908 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 2909 server->caps |= NFS_CAP_NLINK; 2910 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 2911 server->caps |= NFS_CAP_OWNER; 2912 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 2913 server->caps |= NFS_CAP_OWNER_GROUP; 2914 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 2915 server->caps |= NFS_CAP_ATIME; 2916 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 2917 server->caps |= NFS_CAP_CTIME; 2918 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 2919 server->caps |= NFS_CAP_MTIME; 2920 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 2921 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 2922 server->caps |= NFS_CAP_SECURITY_LABEL; 2923 #endif 2924 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 2925 sizeof(server->attr_bitmask)); 2926 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 2927 2928 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 2929 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 2930 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 2931 server->cache_consistency_bitmask[2] = 0; 2932 server->acl_bitmask = res.acl_bitmask; 2933 server->fh_expire_type = res.fh_expire_type; 2934 } 2935 2936 return status; 2937 } 2938 2939 int nfs4_server_capabilities(struct nfs_server *server, struct 
nfs_fh *fhandle) 2940 { 2941 struct nfs4_exception exception = { }; 2942 int err; 2943 do { 2944 err = nfs4_handle_exception(server, 2945 _nfs4_server_capabilities(server, fhandle), 2946 &exception); 2947 } while (exception.retry); 2948 return err; 2949 } 2950 2951 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2952 struct nfs_fsinfo *info) 2953 { 2954 u32 bitmask[3]; 2955 struct nfs4_lookup_root_arg args = { 2956 .bitmask = bitmask, 2957 }; 2958 struct nfs4_lookup_res res = { 2959 .server = server, 2960 .fattr = info->fattr, 2961 .fh = fhandle, 2962 }; 2963 struct rpc_message msg = { 2964 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 2965 .rpc_argp = &args, 2966 .rpc_resp = &res, 2967 }; 2968 2969 bitmask[0] = nfs4_fattr_bitmap[0]; 2970 bitmask[1] = nfs4_fattr_bitmap[1]; 2971 /* 2972 * Process the label in the upcoming getfattr 2973 */ 2974 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 2975 2976 nfs_fattr_init(info->fattr); 2977 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2978 } 2979 2980 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2981 struct nfs_fsinfo *info) 2982 { 2983 struct nfs4_exception exception = { }; 2984 int err; 2985 do { 2986 err = _nfs4_lookup_root(server, fhandle, info); 2987 trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 2988 switch (err) { 2989 case 0: 2990 case -NFS4ERR_WRONGSEC: 2991 goto out; 2992 default: 2993 err = nfs4_handle_exception(server, err, &exception); 2994 } 2995 } while (exception.retry); 2996 out: 2997 return err; 2998 } 2999 3000 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3001 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 3002 { 3003 struct rpc_auth_create_args auth_args = { 3004 .pseudoflavor = flavor, 3005 }; 3006 struct rpc_auth *auth; 3007 int ret; 3008 3009 auth = rpcauth_create(&auth_args, server->client); 3010 if (IS_ERR(auth)) { 3011 ret = -EACCES; 3012 goto out; 3013 } 3014 ret = nfs4_lookup_root(server, fhandle, info); 3015 out: 3016 return ret; 3017 } 3018 3019 /* 3020 * Retry pseudoroot lookup with various security flavors. We do this when: 3021 * 3022 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 3023 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 3024 * 3025 * Returns zero on success, or a negative NFS4ERR value, or a 3026 * negative errno value. 
 */
static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fsinfo *info)
{
	/* Per 3530bis 15.33.5 */
	static const rpc_authflavor_t flav_array[] = {
		RPC_AUTH_GSS_KRB5P,
		RPC_AUTH_GSS_KRB5I,
		RPC_AUTH_GSS_KRB5,
		RPC_AUTH_UNIX,	/* courtesy */
		RPC_AUTH_NULL,
	};
	int status = -EPERM;
	size_t i;

	if (server->auth_info.flavor_len > 0) {
		/* try each flavor specified by user */
		for (i = 0; i < server->auth_info.flavor_len; i++) {
			status = nfs4_lookup_root_sec(server, fhandle, info,
						server->auth_info.flavors[i]);
			if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
				continue;
			break;
		}
	} else {
		/* no flavors specified by user, try default list */
		for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
			status = nfs4_lookup_root_sec(server, fhandle, info,
						      flav_array[i]);
			if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
				continue;
			break;
		}
	}

	/*
	 * -EACCES could mean that the user doesn't have correct permissions
	 * to access the mount. It could also mean that we tried to mount
	 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
	 * existing mount programs don't handle -EACCES very well so it should
	 * be mapped to -EPERM instead.
	 */
	if (status == -EACCES)
		status = -EPERM;
	return status;
}

static int nfs4_do_find_root_sec(struct nfs_server *server,
				 struct nfs_fh *fhandle, struct nfs_fsinfo *info)
{
	int mv = server->nfs_client->cl_minorversion;
	return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info);
}

/**
 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
 * @server: initialized nfs_server handle
 * @fhandle: we fill in the pseudo-fs root file handle
 * @info: we fill in an FSINFO struct
 * @auth_probe: probe the auth flavours
 *
 * Returns zero on success, or a negative errno.
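 *
 * When @auth_probe is false, the pseudoroot lookup is first attempted with
 * the currently configured auth flavor and only falls back to the security
 * flavor probe in nfs4_do_find_root_sec() if that returns NFS4ERR_WRONGSEC;
 * when @auth_probe is true the probe is done unconditionally.  On success
 * the server capabilities and FSINFO are fetched as well.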
3089 */ 3090 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 3091 struct nfs_fsinfo *info, 3092 bool auth_probe) 3093 { 3094 int status; 3095 3096 switch (auth_probe) { 3097 case false: 3098 status = nfs4_lookup_root(server, fhandle, info); 3099 if (status != -NFS4ERR_WRONGSEC) 3100 break; 3101 default: 3102 status = nfs4_do_find_root_sec(server, fhandle, info); 3103 } 3104 3105 if (status == 0) 3106 status = nfs4_server_capabilities(server, fhandle); 3107 if (status == 0) 3108 status = nfs4_do_fsinfo(server, fhandle, info); 3109 3110 return nfs4_map_errors(status); 3111 } 3112 3113 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 3114 struct nfs_fsinfo *info) 3115 { 3116 int error; 3117 struct nfs_fattr *fattr = info->fattr; 3118 struct nfs4_label *label = NULL; 3119 3120 error = nfs4_server_capabilities(server, mntfh); 3121 if (error < 0) { 3122 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 3123 return error; 3124 } 3125 3126 label = nfs4_label_alloc(server, GFP_KERNEL); 3127 if (IS_ERR(label)) 3128 return PTR_ERR(label); 3129 3130 error = nfs4_proc_getattr(server, mntfh, fattr, label); 3131 if (error < 0) { 3132 dprintk("nfs4_get_root: getattr error = %d\n", -error); 3133 goto err_free_label; 3134 } 3135 3136 if (fattr->valid & NFS_ATTR_FATTR_FSID && 3137 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 3138 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 3139 3140 err_free_label: 3141 nfs4_label_free(label); 3142 3143 return error; 3144 } 3145 3146 /* 3147 * Get locations and (maybe) other attributes of a referral. 3148 * Note that we'll actually follow the referral later when 3149 * we detect fsid mismatch in inode revalidation 3150 */ 3151 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 3152 const struct qstr *name, struct nfs_fattr *fattr, 3153 struct nfs_fh *fhandle) 3154 { 3155 int status = -ENOMEM; 3156 struct page *page = NULL; 3157 struct nfs4_fs_locations *locations = NULL; 3158 3159 page = alloc_page(GFP_KERNEL); 3160 if (page == NULL) 3161 goto out; 3162 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 3163 if (locations == NULL) 3164 goto out; 3165 3166 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 3167 if (status != 0) 3168 goto out; 3169 3170 /* 3171 * If the fsid didn't change, this is a migration event, not a 3172 * referral. Cause us to drop into the exception handler, which 3173 * will kick off migration recovery. 
 */
	if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
		dprintk("%s: server did not return a different fsid for"
			" a referral at %s\n", __func__, name->name);
		status = -NFS4ERR_MOVED;
		goto out;
	}
	/* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
	nfs_fixup_referral_attributes(&locations->fattr);

	/* replace the lookup nfs_fattr with the locations nfs_fattr */
	memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
	memset(fhandle, 0, sizeof(struct nfs_fh));
out:
	if (page)
		__free_page(page);
	kfree(locations);
	return status;
}

static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			      struct nfs_fattr *fattr, struct nfs4_label *label)
{
	struct nfs4_getattr_arg args = {
		.fh = fhandle,
		.bitmask = server->attr_bitmask,
	};
	struct nfs4_getattr_res res = {
		.fattr = fattr,
		.label = label,
		.server = server,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	args.bitmask = nfs4_bitmask(server, label);

	nfs_fattr_init(fattr);
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
			     struct nfs_fattr *fattr, struct nfs4_label *label)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_proc_getattr(server, fhandle, fattr, label);
		trace_nfs4_getattr(server, fhandle, fattr, err);
		err = nfs4_handle_exception(server, err,
				&exception);
	} while (exception.retry);
	return err;
}

/*
 * The file is not closed if it is opened due to a request to change
 * the size of the file. The open call will not be needed once the
 * VFS layer lookup-intents are implemented.
 *
 * Close is called when the inode is destroyed.
 * If we haven't opened the file for O_WRONLY, we
 * need to do so in the size_change case in order to obtain a stateid.
 *
 * Got race?
 * Because OPEN is always done by name in nfsv4, it is
 * possible that we opened a different file by the same
 * name. We can recognize this race condition, but we
 * can't do anything about it besides returning an error.
 *
 * This will be fixed with VFS changes (lookup-intent).
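 *
 * Note: the stateid used for the SETATTR itself is chosen in
 * _nfs4_do_setattr() above: a delegation stateid if one is held, otherwise
 * an open or lock stateid when the size is being changed and open state is
 * available, and the zero stateid in all other cases.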
3248 */ 3249 static int 3250 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 3251 struct iattr *sattr) 3252 { 3253 struct inode *inode = dentry->d_inode; 3254 struct rpc_cred *cred = NULL; 3255 struct nfs4_state *state = NULL; 3256 struct nfs4_label *label = NULL; 3257 int status; 3258 3259 if (pnfs_ld_layoutret_on_setattr(inode) && 3260 sattr->ia_valid & ATTR_SIZE && 3261 sattr->ia_size < i_size_read(inode)) 3262 pnfs_commit_and_return_layout(inode); 3263 3264 nfs_fattr_init(fattr); 3265 3266 /* Deal with open(O_TRUNC) */ 3267 if (sattr->ia_valid & ATTR_OPEN) 3268 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 3269 3270 /* Optimization: if the end result is no change, don't RPC */ 3271 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 3272 return 0; 3273 3274 /* Search for an existing open(O_WRITE) file */ 3275 if (sattr->ia_valid & ATTR_FILE) { 3276 struct nfs_open_context *ctx; 3277 3278 ctx = nfs_file_open_context(sattr->ia_file); 3279 if (ctx) { 3280 cred = ctx->cred; 3281 state = ctx->state; 3282 } 3283 } 3284 3285 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 3286 if (IS_ERR(label)) 3287 return PTR_ERR(label); 3288 3289 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label); 3290 if (status == 0) { 3291 nfs_setattr_update_inode(inode, sattr); 3292 nfs_setsecurity(inode, fattr, label); 3293 } 3294 nfs4_label_free(label); 3295 return status; 3296 } 3297 3298 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 3299 const struct qstr *name, struct nfs_fh *fhandle, 3300 struct nfs_fattr *fattr, struct nfs4_label *label) 3301 { 3302 struct nfs_server *server = NFS_SERVER(dir); 3303 int status; 3304 struct nfs4_lookup_arg args = { 3305 .bitmask = server->attr_bitmask, 3306 .dir_fh = NFS_FH(dir), 3307 .name = name, 3308 }; 3309 struct nfs4_lookup_res res = { 3310 .server = server, 3311 .fattr = fattr, 3312 .label = label, 3313 .fh = fhandle, 3314 }; 3315 struct rpc_message msg = { 3316 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 3317 .rpc_argp = &args, 3318 .rpc_resp = &res, 3319 }; 3320 3321 args.bitmask = nfs4_bitmask(server, label); 3322 3323 nfs_fattr_init(fattr); 3324 3325 dprintk("NFS call lookup %s\n", name->name); 3326 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 3327 dprintk("NFS reply lookup: %d\n", status); 3328 return status; 3329 } 3330 3331 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 3332 { 3333 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 3334 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 3335 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 3336 fattr->nlink = 2; 3337 } 3338 3339 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 3340 struct qstr *name, struct nfs_fh *fhandle, 3341 struct nfs_fattr *fattr, struct nfs4_label *label) 3342 { 3343 struct nfs4_exception exception = { }; 3344 struct rpc_clnt *client = *clnt; 3345 int err; 3346 do { 3347 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label); 3348 trace_nfs4_lookup(dir, name, err); 3349 switch (err) { 3350 case -NFS4ERR_BADNAME: 3351 err = -ENOENT; 3352 goto out; 3353 case -NFS4ERR_MOVED: 3354 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 3355 goto out; 3356 case -NFS4ERR_WRONGSEC: 3357 err = -EPERM; 3358 if (client != *clnt) 3359 goto out; 3360 client = nfs4_negotiate_security(client, dir, name); 3361 if (IS_ERR(client)) 3362 return PTR_ERR(client); 3363 3364 exception.retry = 1; 3365 break; 3366 default: 3367 err = 
nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3368 } 3369 } while (exception.retry); 3370 3371 out: 3372 if (err == 0) 3373 *clnt = client; 3374 else if (client != *clnt) 3375 rpc_shutdown_client(client); 3376 3377 return err; 3378 } 3379 3380 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 3381 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 3382 struct nfs4_label *label) 3383 { 3384 int status; 3385 struct rpc_clnt *client = NFS_CLIENT(dir); 3386 3387 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label); 3388 if (client != NFS_CLIENT(dir)) { 3389 rpc_shutdown_client(client); 3390 nfs_fixup_secinfo_attributes(fattr); 3391 } 3392 return status; 3393 } 3394 3395 struct rpc_clnt * 3396 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 3397 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 3398 { 3399 struct rpc_clnt *client = NFS_CLIENT(dir); 3400 int status; 3401 3402 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL); 3403 if (status < 0) 3404 return ERR_PTR(status); 3405 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client; 3406 } 3407 3408 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3409 { 3410 struct nfs_server *server = NFS_SERVER(inode); 3411 struct nfs4_accessargs args = { 3412 .fh = NFS_FH(inode), 3413 .bitmask = server->cache_consistency_bitmask, 3414 }; 3415 struct nfs4_accessres res = { 3416 .server = server, 3417 }; 3418 struct rpc_message msg = { 3419 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 3420 .rpc_argp = &args, 3421 .rpc_resp = &res, 3422 .rpc_cred = entry->cred, 3423 }; 3424 int mode = entry->mask; 3425 int status = 0; 3426 3427 /* 3428 * Determine which access bits we want to ask for... 3429 */ 3430 if (mode & MAY_READ) 3431 args.access |= NFS4_ACCESS_READ; 3432 if (S_ISDIR(inode->i_mode)) { 3433 if (mode & MAY_WRITE) 3434 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 3435 if (mode & MAY_EXEC) 3436 args.access |= NFS4_ACCESS_LOOKUP; 3437 } else { 3438 if (mode & MAY_WRITE) 3439 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 3440 if (mode & MAY_EXEC) 3441 args.access |= NFS4_ACCESS_EXECUTE; 3442 } 3443 3444 res.fattr = nfs_alloc_fattr(); 3445 if (res.fattr == NULL) 3446 return -ENOMEM; 3447 3448 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3449 if (!status) { 3450 nfs_access_set_mask(entry, res.access); 3451 nfs_refresh_inode(inode, res.fattr); 3452 } 3453 nfs_free_fattr(res.fattr); 3454 return status; 3455 } 3456 3457 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3458 { 3459 struct nfs4_exception exception = { }; 3460 int err; 3461 do { 3462 err = _nfs4_proc_access(inode, entry); 3463 trace_nfs4_access(inode, err); 3464 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3465 &exception); 3466 } while (exception.retry); 3467 return err; 3468 } 3469 3470 /* 3471 * TODO: For the time being, we don't try to get any attributes 3472 * along with any of the zero-copy operations READ, READDIR, 3473 * READLINK, WRITE. 3474 * 3475 * In the case of the first three, we want to put the GETATTR 3476 * after the read-type operation -- this is because it is hard 3477 * to predict the length of a GETATTR response in v4, and thus 3478 * align the READ data correctly. 
This means that the GETATTR 3479 * may end up partially falling into the page cache, and we should 3480 * shift it into the 'tail' of the xdr_buf before processing. 3481 * To do this efficiently, we need to know the total length 3482 * of data received, which doesn't seem to be available outside 3483 * of the RPC layer. 3484 * 3485 * In the case of WRITE, we also want to put the GETATTR after 3486 * the operation -- in this case because we want to make sure 3487 * we get the post-operation mtime and size. 3488 * 3489 * Both of these changes to the XDR layer would in fact be quite 3490 * minor, but I decided to leave them for a subsequent patch. 3491 */ 3492 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 3493 unsigned int pgbase, unsigned int pglen) 3494 { 3495 struct nfs4_readlink args = { 3496 .fh = NFS_FH(inode), 3497 .pgbase = pgbase, 3498 .pglen = pglen, 3499 .pages = &page, 3500 }; 3501 struct nfs4_readlink_res res; 3502 struct rpc_message msg = { 3503 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 3504 .rpc_argp = &args, 3505 .rpc_resp = &res, 3506 }; 3507 3508 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 3509 } 3510 3511 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 3512 unsigned int pgbase, unsigned int pglen) 3513 { 3514 struct nfs4_exception exception = { }; 3515 int err; 3516 do { 3517 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 3518 trace_nfs4_readlink(inode, err); 3519 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3520 &exception); 3521 } while (exception.retry); 3522 return err; 3523 } 3524 3525 /* 3526 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 3527 */ 3528 static int 3529 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 3530 int flags) 3531 { 3532 struct nfs4_label l, *ilabel = NULL; 3533 struct nfs_open_context *ctx; 3534 struct nfs4_state *state; 3535 int opened = 0; 3536 int status = 0; 3537 3538 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 3539 if (IS_ERR(ctx)) 3540 return PTR_ERR(ctx); 3541 3542 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 3543 3544 sattr->ia_mode &= ~current_umask(); 3545 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, &opened); 3546 if (IS_ERR(state)) { 3547 status = PTR_ERR(state); 3548 goto out; 3549 } 3550 out: 3551 nfs4_label_release_security(ilabel); 3552 put_nfs_open_context(ctx); 3553 return status; 3554 } 3555 3556 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 3557 { 3558 struct nfs_server *server = NFS_SERVER(dir); 3559 struct nfs_removeargs args = { 3560 .fh = NFS_FH(dir), 3561 .name = *name, 3562 }; 3563 struct nfs_removeres res = { 3564 .server = server, 3565 }; 3566 struct rpc_message msg = { 3567 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 3568 .rpc_argp = &args, 3569 .rpc_resp = &res, 3570 }; 3571 int status; 3572 3573 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 3574 if (status == 0) 3575 update_changeattr(dir, &res.cinfo); 3576 return status; 3577 } 3578 3579 static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 3580 { 3581 struct nfs4_exception exception = { }; 3582 int err; 3583 do { 3584 err = _nfs4_proc_remove(dir, name); 3585 trace_nfs4_remove(dir, name, err); 3586 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3587 &exception); 3588 } while (exception.retry); 3589 return err; 3590 } 3591 3592 static void 
nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) 3593 { 3594 struct nfs_server *server = NFS_SERVER(dir); 3595 struct nfs_removeargs *args = msg->rpc_argp; 3596 struct nfs_removeres *res = msg->rpc_resp; 3597 3598 res->server = server; 3599 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 3600 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1); 3601 3602 nfs_fattr_init(res->dir_attr); 3603 } 3604 3605 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 3606 { 3607 nfs4_setup_sequence(NFS_SERVER(data->dir), 3608 &data->args.seq_args, 3609 &data->res.seq_res, 3610 task); 3611 } 3612 3613 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 3614 { 3615 struct nfs_unlinkdata *data = task->tk_calldata; 3616 struct nfs_removeres *res = &data->res; 3617 3618 if (!nfs4_sequence_done(task, &res->seq_res)) 3619 return 0; 3620 if (nfs4_async_handle_error(task, res->server, NULL, 3621 &data->timeout) == -EAGAIN) 3622 return 0; 3623 update_changeattr(dir, &res->cinfo); 3624 return 1; 3625 } 3626 3627 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 3628 { 3629 struct nfs_server *server = NFS_SERVER(dir); 3630 struct nfs_renameargs *arg = msg->rpc_argp; 3631 struct nfs_renameres *res = msg->rpc_resp; 3632 3633 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 3634 res->server = server; 3635 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1); 3636 } 3637 3638 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 3639 { 3640 nfs4_setup_sequence(NFS_SERVER(data->old_dir), 3641 &data->args.seq_args, 3642 &data->res.seq_res, 3643 task); 3644 } 3645 3646 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 3647 struct inode *new_dir) 3648 { 3649 struct nfs_renamedata *data = task->tk_calldata; 3650 struct nfs_renameres *res = &data->res; 3651 3652 if (!nfs4_sequence_done(task, &res->seq_res)) 3653 return 0; 3654 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 3655 return 0; 3656 3657 update_changeattr(old_dir, &res->old_cinfo); 3658 update_changeattr(new_dir, &res->new_cinfo); 3659 return 1; 3660 } 3661 3662 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3663 { 3664 struct nfs_server *server = NFS_SERVER(inode); 3665 struct nfs4_link_arg arg = { 3666 .fh = NFS_FH(inode), 3667 .dir_fh = NFS_FH(dir), 3668 .name = name, 3669 .bitmask = server->attr_bitmask, 3670 }; 3671 struct nfs4_link_res res = { 3672 .server = server, 3673 .label = NULL, 3674 }; 3675 struct rpc_message msg = { 3676 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3677 .rpc_argp = &arg, 3678 .rpc_resp = &res, 3679 }; 3680 int status = -ENOMEM; 3681 3682 res.fattr = nfs_alloc_fattr(); 3683 if (res.fattr == NULL) 3684 goto out; 3685 3686 res.label = nfs4_label_alloc(server, GFP_KERNEL); 3687 if (IS_ERR(res.label)) { 3688 status = PTR_ERR(res.label); 3689 goto out; 3690 } 3691 arg.bitmask = nfs4_bitmask(server, res.label); 3692 3693 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3694 if (!status) { 3695 update_changeattr(dir, &res.cinfo); 3696 status = nfs_post_op_update_inode(inode, res.fattr); 3697 if (!status) 3698 nfs_setsecurity(inode, res.fattr, res.label); 3699 } 3700 3701 3702 nfs4_label_free(res.label); 3703 3704 out: 3705 nfs_free_fattr(res.fattr); 3706 return status; 3707 } 3708 3709 static int nfs4_proc_link(struct inode *inode, 
struct inode *dir, struct qstr *name) 3710 { 3711 struct nfs4_exception exception = { }; 3712 int err; 3713 do { 3714 err = nfs4_handle_exception(NFS_SERVER(inode), 3715 _nfs4_proc_link(inode, dir, name), 3716 &exception); 3717 } while (exception.retry); 3718 return err; 3719 } 3720 3721 struct nfs4_createdata { 3722 struct rpc_message msg; 3723 struct nfs4_create_arg arg; 3724 struct nfs4_create_res res; 3725 struct nfs_fh fh; 3726 struct nfs_fattr fattr; 3727 struct nfs4_label *label; 3728 }; 3729 3730 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3731 struct qstr *name, struct iattr *sattr, u32 ftype) 3732 { 3733 struct nfs4_createdata *data; 3734 3735 data = kzalloc(sizeof(*data), GFP_KERNEL); 3736 if (data != NULL) { 3737 struct nfs_server *server = NFS_SERVER(dir); 3738 3739 data->label = nfs4_label_alloc(server, GFP_KERNEL); 3740 if (IS_ERR(data->label)) 3741 goto out_free; 3742 3743 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3744 data->msg.rpc_argp = &data->arg; 3745 data->msg.rpc_resp = &data->res; 3746 data->arg.dir_fh = NFS_FH(dir); 3747 data->arg.server = server; 3748 data->arg.name = name; 3749 data->arg.attrs = sattr; 3750 data->arg.ftype = ftype; 3751 data->arg.bitmask = nfs4_bitmask(server, data->label); 3752 data->res.server = server; 3753 data->res.fh = &data->fh; 3754 data->res.fattr = &data->fattr; 3755 data->res.label = data->label; 3756 nfs_fattr_init(data->res.fattr); 3757 } 3758 return data; 3759 out_free: 3760 kfree(data); 3761 return NULL; 3762 } 3763 3764 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3765 { 3766 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3767 &data->arg.seq_args, &data->res.seq_res, 1); 3768 if (status == 0) { 3769 update_changeattr(dir, &data->res.dir_cinfo); 3770 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); 3771 } 3772 return status; 3773 } 3774 3775 static void nfs4_free_createdata(struct nfs4_createdata *data) 3776 { 3777 nfs4_label_free(data->label); 3778 kfree(data); 3779 } 3780 3781 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3782 struct page *page, unsigned int len, struct iattr *sattr, 3783 struct nfs4_label *label) 3784 { 3785 struct nfs4_createdata *data; 3786 int status = -ENAMETOOLONG; 3787 3788 if (len > NFS4_MAXPATHLEN) 3789 goto out; 3790 3791 status = -ENOMEM; 3792 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3793 if (data == NULL) 3794 goto out; 3795 3796 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3797 data->arg.u.symlink.pages = &page; 3798 data->arg.u.symlink.len = len; 3799 data->arg.label = label; 3800 3801 status = nfs4_do_create(dir, dentry, data); 3802 3803 nfs4_free_createdata(data); 3804 out: 3805 return status; 3806 } 3807 3808 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3809 struct page *page, unsigned int len, struct iattr *sattr) 3810 { 3811 struct nfs4_exception exception = { }; 3812 struct nfs4_label l, *label = NULL; 3813 int err; 3814 3815 label = nfs4_label_init_security(dir, dentry, sattr, &l); 3816 3817 do { 3818 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label); 3819 trace_nfs4_symlink(dir, &dentry->d_name, err); 3820 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3821 &exception); 3822 } while (exception.retry); 3823 3824 nfs4_label_release_security(label); 3825 return err; 3826 } 3827 3828 static int _nfs4_proc_mkdir(struct 
inode *dir, struct dentry *dentry, 3829 struct iattr *sattr, struct nfs4_label *label) 3830 { 3831 struct nfs4_createdata *data; 3832 int status = -ENOMEM; 3833 3834 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 3835 if (data == NULL) 3836 goto out; 3837 3838 data->arg.label = label; 3839 status = nfs4_do_create(dir, dentry, data); 3840 3841 nfs4_free_createdata(data); 3842 out: 3843 return status; 3844 } 3845 3846 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3847 struct iattr *sattr) 3848 { 3849 struct nfs4_exception exception = { }; 3850 struct nfs4_label l, *label = NULL; 3851 int err; 3852 3853 label = nfs4_label_init_security(dir, dentry, sattr, &l); 3854 3855 sattr->ia_mode &= ~current_umask(); 3856 do { 3857 err = _nfs4_proc_mkdir(dir, dentry, sattr, label); 3858 trace_nfs4_mkdir(dir, &dentry->d_name, err); 3859 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3860 &exception); 3861 } while (exception.retry); 3862 nfs4_label_release_security(label); 3863 3864 return err; 3865 } 3866 3867 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3868 u64 cookie, struct page **pages, unsigned int count, int plus) 3869 { 3870 struct inode *dir = dentry->d_inode; 3871 struct nfs4_readdir_arg args = { 3872 .fh = NFS_FH(dir), 3873 .pages = pages, 3874 .pgbase = 0, 3875 .count = count, 3876 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask, 3877 .plus = plus, 3878 }; 3879 struct nfs4_readdir_res res; 3880 struct rpc_message msg = { 3881 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 3882 .rpc_argp = &args, 3883 .rpc_resp = &res, 3884 .rpc_cred = cred, 3885 }; 3886 int status; 3887 3888 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__, 3889 dentry, 3890 (unsigned long long)cookie); 3891 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 3892 res.pgbase = args.pgbase; 3893 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 3894 if (status >= 0) { 3895 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 3896 status += args.pgbase; 3897 } 3898 3899 nfs_invalidate_atime(dir); 3900 3901 dprintk("%s: returns %d\n", __func__, status); 3902 return status; 3903 } 3904 3905 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3906 u64 cookie, struct page **pages, unsigned int count, int plus) 3907 { 3908 struct nfs4_exception exception = { }; 3909 int err; 3910 do { 3911 err = _nfs4_proc_readdir(dentry, cred, cookie, 3912 pages, count, plus); 3913 trace_nfs4_readdir(dentry->d_inode, err); 3914 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), err, 3915 &exception); 3916 } while (exception.retry); 3917 return err; 3918 } 3919 3920 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3921 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 3922 { 3923 struct nfs4_createdata *data; 3924 int mode = sattr->ia_mode; 3925 int status = -ENOMEM; 3926 3927 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 3928 if (data == NULL) 3929 goto out; 3930 3931 if (S_ISFIFO(mode)) 3932 data->arg.ftype = NF4FIFO; 3933 else if (S_ISBLK(mode)) { 3934 data->arg.ftype = NF4BLK; 3935 data->arg.u.device.specdata1 = MAJOR(rdev); 3936 data->arg.u.device.specdata2 = MINOR(rdev); 3937 } 3938 else if (S_ISCHR(mode)) { 3939 data->arg.ftype = NF4CHR; 3940 data->arg.u.device.specdata1 = MAJOR(rdev); 3941 data->arg.u.device.specdata2 = MINOR(rdev); 3942 } else if (!S_ISSOCK(mode)) { 3943 status = 
-EINVAL; 3944 goto out_free; 3945 } 3946 3947 data->arg.label = label; 3948 status = nfs4_do_create(dir, dentry, data); 3949 out_free: 3950 nfs4_free_createdata(data); 3951 out: 3952 return status; 3953 } 3954 3955 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3956 struct iattr *sattr, dev_t rdev) 3957 { 3958 struct nfs4_exception exception = { }; 3959 struct nfs4_label l, *label = NULL; 3960 int err; 3961 3962 label = nfs4_label_init_security(dir, dentry, sattr, &l); 3963 3964 sattr->ia_mode &= ~current_umask(); 3965 do { 3966 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 3967 trace_nfs4_mknod(dir, &dentry->d_name, err); 3968 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3969 &exception); 3970 } while (exception.retry); 3971 3972 nfs4_label_release_security(label); 3973 3974 return err; 3975 } 3976 3977 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 3978 struct nfs_fsstat *fsstat) 3979 { 3980 struct nfs4_statfs_arg args = { 3981 .fh = fhandle, 3982 .bitmask = server->attr_bitmask, 3983 }; 3984 struct nfs4_statfs_res res = { 3985 .fsstat = fsstat, 3986 }; 3987 struct rpc_message msg = { 3988 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 3989 .rpc_argp = &args, 3990 .rpc_resp = &res, 3991 }; 3992 3993 nfs_fattr_init(fsstat->fattr); 3994 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3995 } 3996 3997 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 3998 { 3999 struct nfs4_exception exception = { }; 4000 int err; 4001 do { 4002 err = nfs4_handle_exception(server, 4003 _nfs4_proc_statfs(server, fhandle, fsstat), 4004 &exception); 4005 } while (exception.retry); 4006 return err; 4007 } 4008 4009 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 4010 struct nfs_fsinfo *fsinfo) 4011 { 4012 struct nfs4_fsinfo_arg args = { 4013 .fh = fhandle, 4014 .bitmask = server->attr_bitmask, 4015 }; 4016 struct nfs4_fsinfo_res res = { 4017 .fsinfo = fsinfo, 4018 }; 4019 struct rpc_message msg = { 4020 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 4021 .rpc_argp = &args, 4022 .rpc_resp = &res, 4023 }; 4024 4025 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4026 } 4027 4028 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4029 { 4030 struct nfs4_exception exception = { }; 4031 unsigned long now = jiffies; 4032 int err; 4033 4034 do { 4035 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 4036 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 4037 if (err == 0) { 4038 struct nfs_client *clp = server->nfs_client; 4039 4040 spin_lock(&clp->cl_lock); 4041 clp->cl_lease_time = fsinfo->lease_time * HZ; 4042 clp->cl_last_renewal = now; 4043 spin_unlock(&clp->cl_lock); 4044 break; 4045 } 4046 err = nfs4_handle_exception(server, err, &exception); 4047 } while (exception.retry); 4048 return err; 4049 } 4050 4051 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4052 { 4053 int error; 4054 4055 nfs_fattr_init(fsinfo->fattr); 4056 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 4057 if (error == 0) { 4058 /* block layout checks this! 
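 * (a clarifying note, based on the code ordering below: the pNFS block
 * layout driver expects server->pnfs_blksize to already be filled in by
 * the time set_pnfs_layoutdriver() runs, so it is saved here first)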
*/ 4059 server->pnfs_blksize = fsinfo->blksize; 4060 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 4061 } 4062 4063 return error; 4064 } 4065 4066 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4067 struct nfs_pathconf *pathconf) 4068 { 4069 struct nfs4_pathconf_arg args = { 4070 .fh = fhandle, 4071 .bitmask = server->attr_bitmask, 4072 }; 4073 struct nfs4_pathconf_res res = { 4074 .pathconf = pathconf, 4075 }; 4076 struct rpc_message msg = { 4077 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 4078 .rpc_argp = &args, 4079 .rpc_resp = &res, 4080 }; 4081 4082 /* None of the pathconf attributes are mandatory to implement */ 4083 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 4084 memset(pathconf, 0, sizeof(*pathconf)); 4085 return 0; 4086 } 4087 4088 nfs_fattr_init(pathconf->fattr); 4089 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4090 } 4091 4092 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4093 struct nfs_pathconf *pathconf) 4094 { 4095 struct nfs4_exception exception = { }; 4096 int err; 4097 4098 do { 4099 err = nfs4_handle_exception(server, 4100 _nfs4_proc_pathconf(server, fhandle, pathconf), 4101 &exception); 4102 } while (exception.retry); 4103 return err; 4104 } 4105 4106 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 4107 const struct nfs_open_context *ctx, 4108 const struct nfs_lock_context *l_ctx, 4109 fmode_t fmode) 4110 { 4111 const struct nfs_lockowner *lockowner = NULL; 4112 4113 if (l_ctx != NULL) 4114 lockowner = &l_ctx->lockowner; 4115 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner); 4116 } 4117 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 4118 4119 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 4120 const struct nfs_open_context *ctx, 4121 const struct nfs_lock_context *l_ctx, 4122 fmode_t fmode) 4123 { 4124 nfs4_stateid current_stateid; 4125 4126 /* If the current stateid represents a lost lock, then exit */ 4127 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO) 4128 return true; 4129 return nfs4_stateid_match(stateid, &current_stateid); 4130 } 4131 4132 static bool nfs4_error_stateid_expired(int err) 4133 { 4134 switch (err) { 4135 case -NFS4ERR_DELEG_REVOKED: 4136 case -NFS4ERR_ADMIN_REVOKED: 4137 case -NFS4ERR_BAD_STATEID: 4138 case -NFS4ERR_STALE_STATEID: 4139 case -NFS4ERR_OLD_STATEID: 4140 case -NFS4ERR_OPENMODE: 4141 case -NFS4ERR_EXPIRED: 4142 return true; 4143 } 4144 return false; 4145 } 4146 4147 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr) 4148 { 4149 nfs_invalidate_atime(hdr->inode); 4150 } 4151 4152 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 4153 { 4154 struct nfs_server *server = NFS_SERVER(hdr->inode); 4155 4156 trace_nfs4_read(hdr, task->tk_status); 4157 if (nfs4_async_handle_error(task, server, 4158 hdr->args.context->state, 4159 NULL) == -EAGAIN) { 4160 rpc_restart_call_prepare(task); 4161 return -EAGAIN; 4162 } 4163 4164 __nfs4_read_done_cb(hdr); 4165 if (task->tk_status > 0) 4166 renew_lease(server, hdr->timestamp); 4167 return 0; 4168 } 4169 4170 static bool nfs4_read_stateid_changed(struct rpc_task *task, 4171 struct nfs_pgio_args *args) 4172 { 4173 4174 if (!nfs4_error_stateid_expired(task->tk_status) || 4175 nfs4_stateid_is_current(&args->stateid, 4176 args->context, 4177 args->lock_context, 4178 FMODE_READ)) 4179 return false; 4180 rpc_restart_call_prepare(task); 4181 return true; 4182 } 4183 4184 static int
nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4185 { 4186 4187 dprintk("--> %s\n", __func__); 4188 4189 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4190 return -EAGAIN; 4191 if (nfs4_read_stateid_changed(task, &hdr->args)) 4192 return -EAGAIN; 4193 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 4194 nfs4_read_done_cb(task, hdr); 4195 } 4196 4197 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 4198 struct rpc_message *msg) 4199 { 4200 hdr->timestamp = jiffies; 4201 hdr->pgio_done_cb = nfs4_read_done_cb; 4202 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 4203 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0); 4204 } 4205 4206 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 4207 struct nfs_pgio_header *hdr) 4208 { 4209 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode), 4210 &hdr->args.seq_args, 4211 &hdr->res.seq_res, 4212 task)) 4213 return 0; 4214 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 4215 hdr->args.lock_context, 4216 hdr->rw_ops->rw_mode) == -EIO) 4217 return -EIO; 4218 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 4219 return -EIO; 4220 return 0; 4221 } 4222 4223 static int nfs4_write_done_cb(struct rpc_task *task, 4224 struct nfs_pgio_header *hdr) 4225 { 4226 struct inode *inode = hdr->inode; 4227 4228 trace_nfs4_write(hdr, task->tk_status); 4229 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4230 hdr->args.context->state, 4231 NULL) == -EAGAIN) { 4232 rpc_restart_call_prepare(task); 4233 return -EAGAIN; 4234 } 4235 if (task->tk_status >= 0) { 4236 renew_lease(NFS_SERVER(inode), hdr->timestamp); 4237 nfs_post_op_update_inode_force_wcc(inode, &hdr->fattr); 4238 } 4239 return 0; 4240 } 4241 4242 static bool nfs4_write_stateid_changed(struct rpc_task *task, 4243 struct nfs_pgio_args *args) 4244 { 4245 4246 if (!nfs4_error_stateid_expired(task->tk_status) || 4247 nfs4_stateid_is_current(&args->stateid, 4248 args->context, 4249 args->lock_context, 4250 FMODE_WRITE)) 4251 return false; 4252 rpc_restart_call_prepare(task); 4253 return true; 4254 } 4255 4256 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4257 { 4258 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4259 return -EAGAIN; 4260 if (nfs4_write_stateid_changed(task, &hdr->args)) 4261 return -EAGAIN; 4262 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 4263 nfs4_write_done_cb(task, hdr); 4264 } 4265 4266 static 4267 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 4268 { 4269 /* Don't request attributes for pNFS or O_DIRECT writes */ 4270 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 4271 return false; 4272 /* Otherwise, request attributes if and only if we don't hold 4273 * a delegation 4274 */ 4275 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 4276 } 4277 4278 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 4279 struct rpc_message *msg) 4280 { 4281 struct nfs_server *server = NFS_SERVER(hdr->inode); 4282 4283 if (!nfs4_write_need_cache_consistency_data(hdr)) { 4284 hdr->args.bitmask = NULL; 4285 hdr->res.fattr = NULL; 4286 } else 4287 hdr->args.bitmask = server->cache_consistency_bitmask; 4288 4289 if (!hdr->pgio_done_cb) 4290 hdr->pgio_done_cb = nfs4_write_done_cb; 4291 hdr->res.server = server; 4292 hdr->timestamp = jiffies; 4293 4294 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 4295 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1); 4296 } 4297 4298 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 4299 { 4300 nfs4_setup_sequence(NFS_SERVER(data->inode), 4301 &data->args.seq_args, 4302 &data->res.seq_res, 4303 task); 4304 } 4305 4306 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 4307 { 4308 struct inode *inode = data->inode; 4309 4310 trace_nfs4_commit(data, task->tk_status); 4311 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4312 NULL, NULL) == -EAGAIN) { 4313 rpc_restart_call_prepare(task); 4314 return -EAGAIN; 4315 } 4316 return 0; 4317 } 4318 4319 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 4320 { 4321 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4322 return -EAGAIN; 4323 return data->commit_done_cb(task, data); 4324 } 4325 4326 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 4327 { 4328 struct nfs_server *server = NFS_SERVER(data->inode); 4329 4330 if (data->commit_done_cb == NULL) 4331 data->commit_done_cb = nfs4_commit_done_cb; 4332 data->res.server = server; 4333 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 4334 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4335 } 4336 4337 struct nfs4_renewdata { 4338 struct nfs_client *client; 4339 unsigned long timestamp; 4340 }; 4341 4342 /* 4343 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 4344 * standalone procedure for queueing an asynchronous RENEW. 4345 */ 4346 static void nfs4_renew_release(void *calldata) 4347 { 4348 struct nfs4_renewdata *data = calldata; 4349 struct nfs_client *clp = data->client; 4350 4351 if (atomic_read(&clp->cl_count) > 1) 4352 nfs4_schedule_state_renewal(clp); 4353 nfs_put_client(clp); 4354 kfree(data); 4355 } 4356 4357 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 4358 { 4359 struct nfs4_renewdata *data = calldata; 4360 struct nfs_client *clp = data->client; 4361 unsigned long timestamp = data->timestamp; 4362 4363 trace_nfs4_renew_async(clp, task->tk_status); 4364 switch (task->tk_status) { 4365 case 0: 4366 break; 4367 case -NFS4ERR_LEASE_MOVED: 4368 nfs4_schedule_lease_moved_recovery(clp); 4369 break; 4370 default: 4371 /* Unless we're shutting down, schedule state recovery! 
*/ 4372 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 4373 return; 4374 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 4375 nfs4_schedule_lease_recovery(clp); 4376 return; 4377 } 4378 nfs4_schedule_path_down_recovery(clp); 4379 } 4380 do_renew_lease(clp, timestamp); 4381 } 4382 4383 static const struct rpc_call_ops nfs4_renew_ops = { 4384 .rpc_call_done = nfs4_renew_done, 4385 .rpc_release = nfs4_renew_release, 4386 }; 4387 4388 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 4389 { 4390 struct rpc_message msg = { 4391 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4392 .rpc_argp = clp, 4393 .rpc_cred = cred, 4394 }; 4395 struct nfs4_renewdata *data; 4396 4397 if (renew_flags == 0) 4398 return 0; 4399 if (!atomic_inc_not_zero(&clp->cl_count)) 4400 return -EIO; 4401 data = kmalloc(sizeof(*data), GFP_NOFS); 4402 if (data == NULL) 4403 return -ENOMEM; 4404 data->client = clp; 4405 data->timestamp = jiffies; 4406 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 4407 &nfs4_renew_ops, data); 4408 } 4409 4410 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 4411 { 4412 struct rpc_message msg = { 4413 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4414 .rpc_argp = clp, 4415 .rpc_cred = cred, 4416 }; 4417 unsigned long now = jiffies; 4418 int status; 4419 4420 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4421 if (status < 0) 4422 return status; 4423 do_renew_lease(clp, now); 4424 return 0; 4425 } 4426 4427 static inline int nfs4_server_supports_acls(struct nfs_server *server) 4428 { 4429 return server->caps & NFS_CAP_ACLS; 4430 } 4431 4432 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 4433 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 4434 * the stack. 
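 *
 * As a rough worked example (assuming the usual XATTR_SIZE_MAX of 65536
 * and 4 KiB pages), that is 65536 / 4096 = 16 page pointers, i.e. on the
 * order of 128 bytes of stack for the pages[] arrays used below.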
4435 */ 4436 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 4437 4438 static int buf_to_pages_noslab(const void *buf, size_t buflen, 4439 struct page **pages, unsigned int *pgbase) 4440 { 4441 struct page *newpage, **spages; 4442 int rc = 0; 4443 size_t len; 4444 spages = pages; 4445 4446 do { 4447 len = min_t(size_t, PAGE_SIZE, buflen); 4448 newpage = alloc_page(GFP_KERNEL); 4449 4450 if (newpage == NULL) 4451 goto unwind; 4452 memcpy(page_address(newpage), buf, len); 4453 buf += len; 4454 buflen -= len; 4455 *pages++ = newpage; 4456 rc++; 4457 } while (buflen != 0); 4458 4459 return rc; 4460 4461 unwind: 4462 for(; rc > 0; rc--) 4463 __free_page(spages[rc-1]); 4464 return -ENOMEM; 4465 } 4466 4467 struct nfs4_cached_acl { 4468 int cached; 4469 size_t len; 4470 char data[0]; 4471 }; 4472 4473 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 4474 { 4475 struct nfs_inode *nfsi = NFS_I(inode); 4476 4477 spin_lock(&inode->i_lock); 4478 kfree(nfsi->nfs4_acl); 4479 nfsi->nfs4_acl = acl; 4480 spin_unlock(&inode->i_lock); 4481 } 4482 4483 static void nfs4_zap_acl_attr(struct inode *inode) 4484 { 4485 nfs4_set_cached_acl(inode, NULL); 4486 } 4487 4488 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 4489 { 4490 struct nfs_inode *nfsi = NFS_I(inode); 4491 struct nfs4_cached_acl *acl; 4492 int ret = -ENOENT; 4493 4494 spin_lock(&inode->i_lock); 4495 acl = nfsi->nfs4_acl; 4496 if (acl == NULL) 4497 goto out; 4498 if (buf == NULL) /* user is just asking for length */ 4499 goto out_len; 4500 if (acl->cached == 0) 4501 goto out; 4502 ret = -ERANGE; /* see getxattr(2) man page */ 4503 if (acl->len > buflen) 4504 goto out; 4505 memcpy(buf, acl->data, acl->len); 4506 out_len: 4507 ret = acl->len; 4508 out: 4509 spin_unlock(&inode->i_lock); 4510 return ret; 4511 } 4512 4513 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 4514 { 4515 struct nfs4_cached_acl *acl; 4516 size_t buflen = sizeof(*acl) + acl_len; 4517 4518 if (buflen <= PAGE_SIZE) { 4519 acl = kmalloc(buflen, GFP_KERNEL); 4520 if (acl == NULL) 4521 goto out; 4522 acl->cached = 1; 4523 _copy_from_pages(acl->data, pages, pgbase, acl_len); 4524 } else { 4525 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 4526 if (acl == NULL) 4527 goto out; 4528 acl->cached = 0; 4529 } 4530 acl->len = acl_len; 4531 out: 4532 nfs4_set_cached_acl(inode, acl); 4533 } 4534 4535 /* 4536 * The getxattr API returns the required buffer length when called with a 4537 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 4538 * the required buf. On a NULL buf, we send a page of data to the server 4539 * guessing that the ACL request can be serviced by a page. If so, we cache 4540 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 4541 * the cache. If not so, we throw away the page, and cache the required 4542 * length. The next getxattr call will then produce another round trip to 4543 * the server, this time with the input buf of the required size. 
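 *
 * A hypothetical userspace sequence (not part of this file) that drives
 * this logic might look like:
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);   (probe length)
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);        (fetch data)
 *
 * If the ACL fit in the page sent with the first call, the second call
 * is answered out of the cache filled by nfs4_write_cached_acl().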
4544 */ 4545 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4546 { 4547 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 4548 struct nfs_getaclargs args = { 4549 .fh = NFS_FH(inode), 4550 .acl_pages = pages, 4551 .acl_len = buflen, 4552 }; 4553 struct nfs_getaclres res = { 4554 .acl_len = buflen, 4555 }; 4556 struct rpc_message msg = { 4557 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 4558 .rpc_argp = &args, 4559 .rpc_resp = &res, 4560 }; 4561 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4562 int ret = -ENOMEM, i; 4563 4564 /* As long as we're doing a round trip to the server anyway, 4565 * let's be prepared for a page of acl data. */ 4566 if (npages == 0) 4567 npages = 1; 4568 if (npages > ARRAY_SIZE(pages)) 4569 return -ERANGE; 4570 4571 for (i = 0; i < npages; i++) { 4572 pages[i] = alloc_page(GFP_KERNEL); 4573 if (!pages[i]) 4574 goto out_free; 4575 } 4576 4577 /* for decoding across pages */ 4578 res.acl_scratch = alloc_page(GFP_KERNEL); 4579 if (!res.acl_scratch) 4580 goto out_free; 4581 4582 args.acl_len = npages * PAGE_SIZE; 4583 args.acl_pgbase = 0; 4584 4585 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 4586 __func__, buf, buflen, npages, args.acl_len); 4587 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 4588 &msg, &args.seq_args, &res.seq_res, 0); 4589 if (ret) 4590 goto out_free; 4591 4592 /* Handle the case where the passed-in buffer is too short */ 4593 if (res.acl_flags & NFS4_ACL_TRUNC) { 4594 /* Did the user only issue a request for the acl length? */ 4595 if (buf == NULL) 4596 goto out_ok; 4597 ret = -ERANGE; 4598 goto out_free; 4599 } 4600 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 4601 if (buf) { 4602 if (res.acl_len > buflen) { 4603 ret = -ERANGE; 4604 goto out_free; 4605 } 4606 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 4607 } 4608 out_ok: 4609 ret = res.acl_len; 4610 out_free: 4611 for (i = 0; i < npages; i++) 4612 if (pages[i]) 4613 __free_page(pages[i]); 4614 if (res.acl_scratch) 4615 __free_page(res.acl_scratch); 4616 return ret; 4617 } 4618 4619 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4620 { 4621 struct nfs4_exception exception = { }; 4622 ssize_t ret; 4623 do { 4624 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 4625 trace_nfs4_get_acl(inode, ret); 4626 if (ret >= 0) 4627 break; 4628 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 4629 } while (exception.retry); 4630 return ret; 4631 } 4632 4633 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 4634 { 4635 struct nfs_server *server = NFS_SERVER(inode); 4636 int ret; 4637 4638 if (!nfs4_server_supports_acls(server)) 4639 return -EOPNOTSUPP; 4640 ret = nfs_revalidate_inode(server, inode); 4641 if (ret < 0) 4642 return ret; 4643 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 4644 nfs_zap_acl_cache(inode); 4645 ret = nfs4_read_cached_acl(inode, buf, buflen); 4646 if (ret != -ENOENT) 4647 /* -ENOENT is returned if there is no ACL or if there is an ACL 4648 * but no cached acl data, just the acl length */ 4649 return ret; 4650 return nfs4_get_acl_uncached(inode, buf, buflen); 4651 } 4652 4653 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4654 { 4655 struct nfs_server *server = NFS_SERVER(inode); 4656 struct page *pages[NFS4ACL_MAXPAGES]; 4657 struct nfs_setaclargs arg = { 4658 .fh = NFS_FH(inode), 4659 .acl_pages = pages, 4660 .acl_len = 
buflen, 4661 }; 4662 struct nfs_setaclres res; 4663 struct rpc_message msg = { 4664 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 4665 .rpc_argp = &arg, 4666 .rpc_resp = &res, 4667 }; 4668 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4669 int ret, i; 4670 4671 if (!nfs4_server_supports_acls(server)) 4672 return -EOPNOTSUPP; 4673 if (npages > ARRAY_SIZE(pages)) 4674 return -ERANGE; 4675 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 4676 if (i < 0) 4677 return i; 4678 nfs4_inode_return_delegation(inode); 4679 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4680 4681 /* 4682 * Free each page after tx, so the only ref left is 4683 * held by the network stack 4684 */ 4685 for (; i > 0; i--) 4686 put_page(pages[i-1]); 4687 4688 /* 4689 * Acl update can result in inode attribute update. 4690 * so mark the attribute cache invalid. 4691 */ 4692 spin_lock(&inode->i_lock); 4693 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 4694 spin_unlock(&inode->i_lock); 4695 nfs_access_zap_cache(inode); 4696 nfs_zap_acl_cache(inode); 4697 return ret; 4698 } 4699 4700 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4701 { 4702 struct nfs4_exception exception = { }; 4703 int err; 4704 do { 4705 err = __nfs4_proc_set_acl(inode, buf, buflen); 4706 trace_nfs4_set_acl(inode, err); 4707 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4708 &exception); 4709 } while (exception.retry); 4710 return err; 4711 } 4712 4713 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 4714 static int _nfs4_get_security_label(struct inode *inode, void *buf, 4715 size_t buflen) 4716 { 4717 struct nfs_server *server = NFS_SERVER(inode); 4718 struct nfs_fattr fattr; 4719 struct nfs4_label label = {0, 0, buflen, buf}; 4720 4721 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4722 struct nfs4_getattr_arg arg = { 4723 .fh = NFS_FH(inode), 4724 .bitmask = bitmask, 4725 }; 4726 struct nfs4_getattr_res res = { 4727 .fattr = &fattr, 4728 .label = &label, 4729 .server = server, 4730 }; 4731 struct rpc_message msg = { 4732 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4733 .rpc_argp = &arg, 4734 .rpc_resp = &res, 4735 }; 4736 int ret; 4737 4738 nfs_fattr_init(&fattr); 4739 4740 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 4741 if (ret) 4742 return ret; 4743 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 4744 return -ENOENT; 4745 if (buflen < label.len) 4746 return -ERANGE; 4747 return 0; 4748 } 4749 4750 static int nfs4_get_security_label(struct inode *inode, void *buf, 4751 size_t buflen) 4752 { 4753 struct nfs4_exception exception = { }; 4754 int err; 4755 4756 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 4757 return -EOPNOTSUPP; 4758 4759 do { 4760 err = _nfs4_get_security_label(inode, buf, buflen); 4761 trace_nfs4_get_security_label(inode, err); 4762 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4763 &exception); 4764 } while (exception.retry); 4765 return err; 4766 } 4767 4768 static int _nfs4_do_set_security_label(struct inode *inode, 4769 struct nfs4_label *ilabel, 4770 struct nfs_fattr *fattr, 4771 struct nfs4_label *olabel) 4772 { 4773 4774 struct iattr sattr = {0}; 4775 struct nfs_server *server = NFS_SERVER(inode); 4776 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4777 struct nfs_setattrargs arg = { 4778 .fh = NFS_FH(inode), 4779 .iap = &sattr, 4780 .server = server, 4781 .bitmask = bitmask, 4782 .label = ilabel, 4783 }; 4784 struct 
nfs_setattrres res = { 4785 .fattr = fattr, 4786 .label = olabel, 4787 .server = server, 4788 }; 4789 struct rpc_message msg = { 4790 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 4791 .rpc_argp = &arg, 4792 .rpc_resp = &res, 4793 }; 4794 int status; 4795 4796 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 4797 4798 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4799 if (status) 4800 dprintk("%s failed: %d\n", __func__, status); 4801 4802 return status; 4803 } 4804 4805 static int nfs4_do_set_security_label(struct inode *inode, 4806 struct nfs4_label *ilabel, 4807 struct nfs_fattr *fattr, 4808 struct nfs4_label *olabel) 4809 { 4810 struct nfs4_exception exception = { }; 4811 int err; 4812 4813 do { 4814 err = _nfs4_do_set_security_label(inode, ilabel, 4815 fattr, olabel); 4816 trace_nfs4_set_security_label(inode, err); 4817 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4818 &exception); 4819 } while (exception.retry); 4820 return err; 4821 } 4822 4823 static int 4824 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen) 4825 { 4826 struct nfs4_label ilabel, *olabel = NULL; 4827 struct nfs_fattr fattr; 4828 struct rpc_cred *cred; 4829 struct inode *inode = dentry->d_inode; 4830 int status; 4831 4832 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 4833 return -EOPNOTSUPP; 4834 4835 nfs_fattr_init(&fattr); 4836 4837 ilabel.pi = 0; 4838 ilabel.lfs = 0; 4839 ilabel.label = (char *)buf; 4840 ilabel.len = buflen; 4841 4842 cred = rpc_lookup_cred(); 4843 if (IS_ERR(cred)) 4844 return PTR_ERR(cred); 4845 4846 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 4847 if (IS_ERR(olabel)) { 4848 status = -PTR_ERR(olabel); 4849 goto out; 4850 } 4851 4852 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel); 4853 if (status == 0) 4854 nfs_setsecurity(inode, &fattr, olabel); 4855 4856 nfs4_label_free(olabel); 4857 out: 4858 put_rpccred(cred); 4859 return status; 4860 } 4861 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 4862 4863 4864 static int 4865 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, 4866 struct nfs4_state *state, long *timeout) 4867 { 4868 struct nfs_client *clp = server->nfs_client; 4869 4870 if (task->tk_status >= 0) 4871 return 0; 4872 switch(task->tk_status) { 4873 case -NFS4ERR_DELEG_REVOKED: 4874 case -NFS4ERR_ADMIN_REVOKED: 4875 case -NFS4ERR_BAD_STATEID: 4876 case -NFS4ERR_OPENMODE: 4877 if (state == NULL) 4878 break; 4879 if (nfs4_schedule_stateid_recovery(server, state) < 0) 4880 goto recovery_failed; 4881 goto wait_on_recovery; 4882 case -NFS4ERR_EXPIRED: 4883 if (state != NULL) { 4884 if (nfs4_schedule_stateid_recovery(server, state) < 0) 4885 goto recovery_failed; 4886 } 4887 case -NFS4ERR_STALE_STATEID: 4888 case -NFS4ERR_STALE_CLIENTID: 4889 nfs4_schedule_lease_recovery(clp); 4890 goto wait_on_recovery; 4891 case -NFS4ERR_MOVED: 4892 if (nfs4_schedule_migration_recovery(server) < 0) 4893 goto recovery_failed; 4894 goto wait_on_recovery; 4895 case -NFS4ERR_LEASE_MOVED: 4896 nfs4_schedule_lease_moved_recovery(clp); 4897 goto wait_on_recovery; 4898 #if defined(CONFIG_NFS_V4_1) 4899 case -NFS4ERR_BADSESSION: 4900 case -NFS4ERR_BADSLOT: 4901 case -NFS4ERR_BAD_HIGH_SLOT: 4902 case -NFS4ERR_DEADSESSION: 4903 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 4904 case -NFS4ERR_SEQ_FALSE_RETRY: 4905 case -NFS4ERR_SEQ_MISORDERED: 4906 dprintk("%s ERROR %d, Reset session\n", __func__, 4907 task->tk_status); 4908 nfs4_schedule_session_recovery(clp->cl_session, 
task->tk_status); 4909 goto wait_on_recovery; 4910 #endif /* CONFIG_NFS_V4_1 */ 4911 case -NFS4ERR_DELAY: 4912 nfs_inc_server_stats(server, NFSIOS_DELAY); 4913 rpc_delay(task, nfs4_update_delay(timeout)); 4914 goto restart_call; 4915 case -NFS4ERR_GRACE: 4916 rpc_delay(task, NFS4_POLL_RETRY_MAX); 4917 case -NFS4ERR_RETRY_UNCACHED_REP: 4918 case -NFS4ERR_OLD_STATEID: 4919 goto restart_call; 4920 } 4921 task->tk_status = nfs4_map_errors(task->tk_status); 4922 return 0; 4923 recovery_failed: 4924 task->tk_status = -EIO; 4925 return 0; 4926 wait_on_recovery: 4927 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 4928 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 4929 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 4930 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 4931 goto recovery_failed; 4932 restart_call: 4933 task->tk_status = 0; 4934 return -EAGAIN; 4935 } 4936 4937 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 4938 nfs4_verifier *bootverf) 4939 { 4940 __be32 verf[2]; 4941 4942 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 4943 /* An impossible timestamp guarantees this value 4944 * will never match a generated boot time. */ 4945 verf[0] = 0; 4946 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1); 4947 } else { 4948 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 4949 verf[0] = cpu_to_be32(nn->boot_time.tv_sec); 4950 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec); 4951 } 4952 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 4953 } 4954 4955 static unsigned int 4956 nfs4_init_nonuniform_client_string(struct nfs_client *clp, 4957 char *buf, size_t len) 4958 { 4959 unsigned int result; 4960 4961 if (clp->cl_owner_id != NULL) 4962 return strlcpy(buf, clp->cl_owner_id, len); 4963 4964 rcu_read_lock(); 4965 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s", 4966 clp->cl_ipaddr, 4967 rpc_peeraddr2str(clp->cl_rpcclient, 4968 RPC_DISPLAY_ADDR), 4969 rpc_peeraddr2str(clp->cl_rpcclient, 4970 RPC_DISPLAY_PROTO)); 4971 rcu_read_unlock(); 4972 clp->cl_owner_id = kstrdup(buf, GFP_KERNEL); 4973 return result; 4974 } 4975 4976 static unsigned int 4977 nfs4_init_uniform_client_string(struct nfs_client *clp, 4978 char *buf, size_t len) 4979 { 4980 const char *nodename = clp->cl_rpcclient->cl_nodename; 4981 unsigned int result; 4982 4983 if (clp->cl_owner_id != NULL) 4984 return strlcpy(buf, clp->cl_owner_id, len); 4985 4986 if (nfs4_client_id_uniquifier[0] != '\0') 4987 result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s", 4988 clp->rpc_ops->version, 4989 clp->cl_minorversion, 4990 nfs4_client_id_uniquifier, 4991 nodename); 4992 else 4993 result = scnprintf(buf, len, "Linux NFSv%u.%u %s", 4994 clp->rpc_ops->version, clp->cl_minorversion, 4995 nodename); 4996 clp->cl_owner_id = kstrdup(buf, GFP_KERNEL); 4997 return result; 4998 } 4999 5000 /* 5001 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 5002 * services. Advertise one based on the address family of the 5003 * clientaddr. 
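 *
 * For example (documentation addresses only): a clientaddr of 192.0.2.1
 * yields the netid "tcp", while 2001:db8::1 yields "tcp6".  The matching
 * callback universal address is built further below as the clientaddr
 * followed by the high and low bytes of the callback port ("%s.%u.%u"),
 * e.g. "192.0.2.1.3.1" for callback port 769.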
5004 */ 5005 static unsigned int 5006 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 5007 { 5008 if (strchr(clp->cl_ipaddr, ':') != NULL) 5009 return scnprintf(buf, len, "tcp6"); 5010 else 5011 return scnprintf(buf, len, "tcp"); 5012 } 5013 5014 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 5015 { 5016 struct nfs4_setclientid *sc = calldata; 5017 5018 if (task->tk_status == 0) 5019 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 5020 } 5021 5022 static const struct rpc_call_ops nfs4_setclientid_ops = { 5023 .rpc_call_done = nfs4_setclientid_done, 5024 }; 5025 5026 /** 5027 * nfs4_proc_setclientid - Negotiate client ID 5028 * @clp: state data structure 5029 * @program: RPC program for NFSv4 callback service 5030 * @port: IP port number for NFS4 callback service 5031 * @cred: RPC credential to use for this call 5032 * @res: where to place the result 5033 * 5034 * Returns zero, a negative errno, or a negative NFS4ERR status code. 5035 */ 5036 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 5037 unsigned short port, struct rpc_cred *cred, 5038 struct nfs4_setclientid_res *res) 5039 { 5040 nfs4_verifier sc_verifier; 5041 struct nfs4_setclientid setclientid = { 5042 .sc_verifier = &sc_verifier, 5043 .sc_prog = program, 5044 .sc_cb_ident = clp->cl_cb_ident, 5045 }; 5046 struct rpc_message msg = { 5047 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 5048 .rpc_argp = &setclientid, 5049 .rpc_resp = res, 5050 .rpc_cred = cred, 5051 }; 5052 struct rpc_task *task; 5053 struct rpc_task_setup task_setup_data = { 5054 .rpc_client = clp->cl_rpcclient, 5055 .rpc_message = &msg, 5056 .callback_ops = &nfs4_setclientid_ops, 5057 .callback_data = &setclientid, 5058 .flags = RPC_TASK_TIMEOUT, 5059 }; 5060 int status; 5061 5062 /* nfs_client_id4 */ 5063 nfs4_init_boot_verifier(clp, &sc_verifier); 5064 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 5065 setclientid.sc_name_len = 5066 nfs4_init_uniform_client_string(clp, 5067 setclientid.sc_name, 5068 sizeof(setclientid.sc_name)); 5069 else 5070 setclientid.sc_name_len = 5071 nfs4_init_nonuniform_client_string(clp, 5072 setclientid.sc_name, 5073 sizeof(setclientid.sc_name)); 5074 /* cb_client4 */ 5075 setclientid.sc_netid_len = 5076 nfs4_init_callback_netid(clp, 5077 setclientid.sc_netid, 5078 sizeof(setclientid.sc_netid)); 5079 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 5080 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 5081 clp->cl_ipaddr, port >> 8, port & 255); 5082 5083 dprintk("NFS call setclientid auth=%s, '%.*s'\n", 5084 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5085 setclientid.sc_name_len, setclientid.sc_name); 5086 task = rpc_run_task(&task_setup_data); 5087 if (IS_ERR(task)) { 5088 status = PTR_ERR(task); 5089 goto out; 5090 } 5091 status = task->tk_status; 5092 if (setclientid.sc_cred) { 5093 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 5094 put_rpccred(setclientid.sc_cred); 5095 } 5096 rpc_put_task(task); 5097 out: 5098 trace_nfs4_setclientid(clp, status); 5099 dprintk("NFS reply setclientid: %d\n", status); 5100 return status; 5101 } 5102 5103 /** 5104 * nfs4_proc_setclientid_confirm - Confirm client ID 5105 * @clp: state data structure 5106 * @res: result of a previous SETCLIENTID 5107 * @cred: RPC credential to use for this call 5108 * 5109 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
5110 */ 5111 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 5112 struct nfs4_setclientid_res *arg, 5113 struct rpc_cred *cred) 5114 { 5115 struct rpc_message msg = { 5116 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 5117 .rpc_argp = arg, 5118 .rpc_cred = cred, 5119 }; 5120 int status; 5121 5122 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 5123 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5124 clp->cl_clientid); 5125 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5126 trace_nfs4_setclientid_confirm(clp, status); 5127 dprintk("NFS reply setclientid_confirm: %d\n", status); 5128 return status; 5129 } 5130 5131 struct nfs4_delegreturndata { 5132 struct nfs4_delegreturnargs args; 5133 struct nfs4_delegreturnres res; 5134 struct nfs_fh fh; 5135 nfs4_stateid stateid; 5136 unsigned long timestamp; 5137 struct nfs_fattr fattr; 5138 int rpc_status; 5139 struct inode *inode; 5140 bool roc; 5141 u32 roc_barrier; 5142 }; 5143 5144 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 5145 { 5146 struct nfs4_delegreturndata *data = calldata; 5147 5148 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5149 return; 5150 5151 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 5152 switch (task->tk_status) { 5153 case 0: 5154 renew_lease(data->res.server, data->timestamp); 5155 case -NFS4ERR_ADMIN_REVOKED: 5156 case -NFS4ERR_DELEG_REVOKED: 5157 case -NFS4ERR_BAD_STATEID: 5158 case -NFS4ERR_OLD_STATEID: 5159 case -NFS4ERR_STALE_STATEID: 5160 case -NFS4ERR_EXPIRED: 5161 task->tk_status = 0; 5162 if (data->roc) 5163 pnfs_roc_set_barrier(data->inode, data->roc_barrier); 5164 break; 5165 default: 5166 if (nfs4_async_handle_error(task, data->res.server, 5167 NULL, NULL) == -EAGAIN) { 5168 rpc_restart_call_prepare(task); 5169 return; 5170 } 5171 } 5172 data->rpc_status = task->tk_status; 5173 } 5174 5175 static void nfs4_delegreturn_release(void *calldata) 5176 { 5177 struct nfs4_delegreturndata *data = calldata; 5178 struct inode *inode = data->inode; 5179 5180 if (inode) { 5181 if (data->roc) 5182 pnfs_roc_release(inode); 5183 nfs_iput_and_deactive(inode); 5184 } 5185 kfree(calldata); 5186 } 5187 5188 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 5189 { 5190 struct nfs4_delegreturndata *d_data; 5191 5192 d_data = (struct nfs4_delegreturndata *)data; 5193 5194 if (d_data->roc && 5195 pnfs_roc_drain(d_data->inode, &d_data->roc_barrier, task)) 5196 return; 5197 5198 nfs4_setup_sequence(d_data->res.server, 5199 &d_data->args.seq_args, 5200 &d_data->res.seq_res, 5201 task); 5202 } 5203 5204 static const struct rpc_call_ops nfs4_delegreturn_ops = { 5205 .rpc_call_prepare = nfs4_delegreturn_prepare, 5206 .rpc_call_done = nfs4_delegreturn_done, 5207 .rpc_release = nfs4_delegreturn_release, 5208 }; 5209 5210 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5211 { 5212 struct nfs4_delegreturndata *data; 5213 struct nfs_server *server = NFS_SERVER(inode); 5214 struct rpc_task *task; 5215 struct rpc_message msg = { 5216 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 5217 .rpc_cred = cred, 5218 }; 5219 struct rpc_task_setup task_setup_data = { 5220 .rpc_client = server->client, 5221 .rpc_message = &msg, 5222 .callback_ops = &nfs4_delegreturn_ops, 5223 .flags = RPC_TASK_ASYNC, 5224 }; 5225 int status = 0; 5226 5227 data = kzalloc(sizeof(*data), GFP_NOFS); 5228 if (data == NULL) 5229 return -ENOMEM; 5230 
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 5231 data->args.fhandle = &data->fh; 5232 data->args.stateid = &data->stateid; 5233 data->args.bitmask = server->cache_consistency_bitmask; 5234 nfs_copy_fh(&data->fh, NFS_FH(inode)); 5235 nfs4_stateid_copy(&data->stateid, stateid); 5236 data->res.fattr = &data->fattr; 5237 data->res.server = server; 5238 nfs_fattr_init(data->res.fattr); 5239 data->timestamp = jiffies; 5240 data->rpc_status = 0; 5241 data->inode = nfs_igrab_and_active(inode); 5242 if (data->inode) 5243 data->roc = nfs4_roc(inode); 5244 5245 task_setup_data.callback_data = data; 5246 msg.rpc_argp = &data->args; 5247 msg.rpc_resp = &data->res; 5248 task = rpc_run_task(&task_setup_data); 5249 if (IS_ERR(task)) 5250 return PTR_ERR(task); 5251 if (!issync) 5252 goto out; 5253 status = nfs4_wait_for_completion_rpc_task(task); 5254 if (status != 0) 5255 goto out; 5256 status = data->rpc_status; 5257 if (status == 0) 5258 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 5259 else 5260 nfs_refresh_inode(inode, &data->fattr); 5261 out: 5262 rpc_put_task(task); 5263 return status; 5264 } 5265 5266 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5267 { 5268 struct nfs_server *server = NFS_SERVER(inode); 5269 struct nfs4_exception exception = { }; 5270 int err; 5271 do { 5272 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 5273 trace_nfs4_delegreturn(inode, err); 5274 switch (err) { 5275 case -NFS4ERR_STALE_STATEID: 5276 case -NFS4ERR_EXPIRED: 5277 case 0: 5278 return 0; 5279 } 5280 err = nfs4_handle_exception(server, err, &exception); 5281 } while (exception.retry); 5282 return err; 5283 } 5284 5285 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 5286 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 5287 5288 /* 5289 * sleep, with exponential backoff, and retry the LOCK operation. 
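 *
 * The wait roughly doubles on each pass, starting from
 * NFS4_LOCK_MINTIMEOUT: 1 s, 2 s, 4 s, 8 s, 16 s, and is then capped at
 * NFS4_LOCK_MAXTIMEOUT (30 s) for all subsequent retries.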
5290 */ 5291 static unsigned long 5292 nfs4_set_lock_task_retry(unsigned long timeout) 5293 { 5294 freezable_schedule_timeout_killable_unsafe(timeout); 5295 timeout <<= 1; 5296 if (timeout > NFS4_LOCK_MAXTIMEOUT) 5297 return NFS4_LOCK_MAXTIMEOUT; 5298 return timeout; 5299 } 5300 5301 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5302 { 5303 struct inode *inode = state->inode; 5304 struct nfs_server *server = NFS_SERVER(inode); 5305 struct nfs_client *clp = server->nfs_client; 5306 struct nfs_lockt_args arg = { 5307 .fh = NFS_FH(inode), 5308 .fl = request, 5309 }; 5310 struct nfs_lockt_res res = { 5311 .denied = request, 5312 }; 5313 struct rpc_message msg = { 5314 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 5315 .rpc_argp = &arg, 5316 .rpc_resp = &res, 5317 .rpc_cred = state->owner->so_cred, 5318 }; 5319 struct nfs4_lock_state *lsp; 5320 int status; 5321 5322 arg.lock_owner.clientid = clp->cl_clientid; 5323 status = nfs4_set_lock_state(state, request); 5324 if (status != 0) 5325 goto out; 5326 lsp = request->fl_u.nfs4_fl.owner; 5327 arg.lock_owner.id = lsp->ls_seqid.owner_id; 5328 arg.lock_owner.s_dev = server->s_dev; 5329 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5330 switch (status) { 5331 case 0: 5332 request->fl_type = F_UNLCK; 5333 break; 5334 case -NFS4ERR_DENIED: 5335 status = 0; 5336 } 5337 request->fl_ops->fl_release_private(request); 5338 request->fl_ops = NULL; 5339 out: 5340 return status; 5341 } 5342 5343 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5344 { 5345 struct nfs4_exception exception = { }; 5346 int err; 5347 5348 do { 5349 err = _nfs4_proc_getlk(state, cmd, request); 5350 trace_nfs4_get_lock(request, state, cmd, err); 5351 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 5352 &exception); 5353 } while (exception.retry); 5354 return err; 5355 } 5356 5357 static int do_vfs_lock(struct file *file, struct file_lock *fl) 5358 { 5359 int res = 0; 5360 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 5361 case FL_POSIX: 5362 res = posix_lock_file_wait(file, fl); 5363 break; 5364 case FL_FLOCK: 5365 res = flock_lock_file_wait(file, fl); 5366 break; 5367 default: 5368 BUG(); 5369 } 5370 return res; 5371 } 5372 5373 struct nfs4_unlockdata { 5374 struct nfs_locku_args arg; 5375 struct nfs_locku_res res; 5376 struct nfs4_lock_state *lsp; 5377 struct nfs_open_context *ctx; 5378 struct file_lock fl; 5379 const struct nfs_server *server; 5380 unsigned long timestamp; 5381 }; 5382 5383 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 5384 struct nfs_open_context *ctx, 5385 struct nfs4_lock_state *lsp, 5386 struct nfs_seqid *seqid) 5387 { 5388 struct nfs4_unlockdata *p; 5389 struct inode *inode = lsp->ls_state->inode; 5390 5391 p = kzalloc(sizeof(*p), GFP_NOFS); 5392 if (p == NULL) 5393 return NULL; 5394 p->arg.fh = NFS_FH(inode); 5395 p->arg.fl = &p->fl; 5396 p->arg.seqid = seqid; 5397 p->res.seqid = seqid; 5398 p->lsp = lsp; 5399 atomic_inc(&lsp->ls_count); 5400 /* Ensure we don't close file until we're done freeing locks! 
*/ 5401 p->ctx = get_nfs_open_context(ctx); 5402 memcpy(&p->fl, fl, sizeof(p->fl)); 5403 p->server = NFS_SERVER(inode); 5404 return p; 5405 } 5406 5407 static void nfs4_locku_release_calldata(void *data) 5408 { 5409 struct nfs4_unlockdata *calldata = data; 5410 nfs_free_seqid(calldata->arg.seqid); 5411 nfs4_put_lock_state(calldata->lsp); 5412 put_nfs_open_context(calldata->ctx); 5413 kfree(calldata); 5414 } 5415 5416 static void nfs4_locku_done(struct rpc_task *task, void *data) 5417 { 5418 struct nfs4_unlockdata *calldata = data; 5419 5420 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 5421 return; 5422 switch (task->tk_status) { 5423 case 0: 5424 renew_lease(calldata->server, calldata->timestamp); 5425 do_vfs_lock(calldata->fl.fl_file, &calldata->fl); 5426 if (nfs4_update_lock_stateid(calldata->lsp, 5427 &calldata->res.stateid)) 5428 break; 5429 case -NFS4ERR_BAD_STATEID: 5430 case -NFS4ERR_OLD_STATEID: 5431 case -NFS4ERR_STALE_STATEID: 5432 case -NFS4ERR_EXPIRED: 5433 if (!nfs4_stateid_match(&calldata->arg.stateid, 5434 &calldata->lsp->ls_stateid)) 5435 rpc_restart_call_prepare(task); 5436 break; 5437 default: 5438 if (nfs4_async_handle_error(task, calldata->server, 5439 NULL, NULL) == -EAGAIN) 5440 rpc_restart_call_prepare(task); 5441 } 5442 nfs_release_seqid(calldata->arg.seqid); 5443 } 5444 5445 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 5446 { 5447 struct nfs4_unlockdata *calldata = data; 5448 5449 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 5450 goto out_wait; 5451 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid); 5452 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 5453 /* Note: exit _without_ running nfs4_locku_done */ 5454 goto out_no_action; 5455 } 5456 calldata->timestamp = jiffies; 5457 if (nfs4_setup_sequence(calldata->server, 5458 &calldata->arg.seq_args, 5459 &calldata->res.seq_res, 5460 task) != 0) 5461 nfs_release_seqid(calldata->arg.seqid); 5462 return; 5463 out_no_action: 5464 task->tk_action = NULL; 5465 out_wait: 5466 nfs4_sequence_done(task, &calldata->res.seq_res); 5467 } 5468 5469 static const struct rpc_call_ops nfs4_locku_ops = { 5470 .rpc_call_prepare = nfs4_locku_prepare, 5471 .rpc_call_done = nfs4_locku_done, 5472 .rpc_release = nfs4_locku_release_calldata, 5473 }; 5474 5475 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 5476 struct nfs_open_context *ctx, 5477 struct nfs4_lock_state *lsp, 5478 struct nfs_seqid *seqid) 5479 { 5480 struct nfs4_unlockdata *data; 5481 struct rpc_message msg = { 5482 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 5483 .rpc_cred = ctx->cred, 5484 }; 5485 struct rpc_task_setup task_setup_data = { 5486 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 5487 .rpc_message = &msg, 5488 .callback_ops = &nfs4_locku_ops, 5489 .workqueue = nfsiod_workqueue, 5490 .flags = RPC_TASK_ASYNC, 5491 }; 5492 5493 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 5494 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 5495 5496 /* Ensure this is an unlock - when canceling a lock, the 5497 * canceled lock is passed in, and it won't be an unlock. 
5498 */ 5499 fl->fl_type = F_UNLCK; 5500 5501 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 5502 if (data == NULL) { 5503 nfs_free_seqid(seqid); 5504 return ERR_PTR(-ENOMEM); 5505 } 5506 5507 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5508 msg.rpc_argp = &data->arg; 5509 msg.rpc_resp = &data->res; 5510 task_setup_data.callback_data = data; 5511 return rpc_run_task(&task_setup_data); 5512 } 5513 5514 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 5515 { 5516 struct inode *inode = state->inode; 5517 struct nfs4_state_owner *sp = state->owner; 5518 struct nfs_inode *nfsi = NFS_I(inode); 5519 struct nfs_seqid *seqid; 5520 struct nfs4_lock_state *lsp; 5521 struct rpc_task *task; 5522 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5523 int status = 0; 5524 unsigned char fl_flags = request->fl_flags; 5525 5526 status = nfs4_set_lock_state(state, request); 5527 /* Unlock _before_ we do the RPC call */ 5528 request->fl_flags |= FL_EXISTS; 5529 /* Exclude nfs_delegation_claim_locks() */ 5530 mutex_lock(&sp->so_delegreturn_mutex); 5531 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 5532 down_read(&nfsi->rwsem); 5533 if (do_vfs_lock(request->fl_file, request) == -ENOENT) { 5534 up_read(&nfsi->rwsem); 5535 mutex_unlock(&sp->so_delegreturn_mutex); 5536 goto out; 5537 } 5538 up_read(&nfsi->rwsem); 5539 mutex_unlock(&sp->so_delegreturn_mutex); 5540 if (status != 0) 5541 goto out; 5542 /* Is this a delegated lock? */ 5543 lsp = request->fl_u.nfs4_fl.owner; 5544 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 5545 goto out; 5546 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 5547 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 5548 status = -ENOMEM; 5549 if (IS_ERR(seqid)) 5550 goto out; 5551 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 5552 status = PTR_ERR(task); 5553 if (IS_ERR(task)) 5554 goto out; 5555 status = nfs4_wait_for_completion_rpc_task(task); 5556 rpc_put_task(task); 5557 out: 5558 request->fl_flags = fl_flags; 5559 trace_nfs4_unlock(request, state, F_SETLK, status); 5560 return status; 5561 } 5562 5563 struct nfs4_lockdata { 5564 struct nfs_lock_args arg; 5565 struct nfs_lock_res res; 5566 struct nfs4_lock_state *lsp; 5567 struct nfs_open_context *ctx; 5568 struct file_lock fl; 5569 unsigned long timestamp; 5570 int rpc_status; 5571 int cancelled; 5572 struct nfs_server *server; 5573 }; 5574 5575 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 5576 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 5577 gfp_t gfp_mask) 5578 { 5579 struct nfs4_lockdata *p; 5580 struct inode *inode = lsp->ls_state->inode; 5581 struct nfs_server *server = NFS_SERVER(inode); 5582 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5583 5584 p = kzalloc(sizeof(*p), gfp_mask); 5585 if (p == NULL) 5586 return NULL; 5587 5588 p->arg.fh = NFS_FH(inode); 5589 p->arg.fl = &p->fl; 5590 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 5591 if (IS_ERR(p->arg.open_seqid)) 5592 goto out_free; 5593 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 5594 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 5595 if (IS_ERR(p->arg.lock_seqid)) 5596 goto out_free_seqid; 5597 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 5598 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 5599 p->arg.lock_owner.s_dev = server->s_dev; 5600 p->res.lock_seqid = 
p->arg.lock_seqid; 5601 p->lsp = lsp; 5602 p->server = server; 5603 atomic_inc(&lsp->ls_count); 5604 p->ctx = get_nfs_open_context(ctx); 5605 memcpy(&p->fl, fl, sizeof(p->fl)); 5606 return p; 5607 out_free_seqid: 5608 nfs_free_seqid(p->arg.open_seqid); 5609 out_free: 5610 kfree(p); 5611 return NULL; 5612 } 5613 5614 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 5615 { 5616 struct nfs4_lockdata *data = calldata; 5617 struct nfs4_state *state = data->lsp->ls_state; 5618 5619 dprintk("%s: begin!\n", __func__); 5620 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 5621 goto out_wait; 5622 /* Do we need to do an open_to_lock_owner? */ 5623 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 5624 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 5625 goto out_release_lock_seqid; 5626 } 5627 nfs4_stateid_copy(&data->arg.open_stateid, 5628 &state->open_stateid); 5629 data->arg.new_lock_owner = 1; 5630 data->res.open_seqid = data->arg.open_seqid; 5631 } else { 5632 data->arg.new_lock_owner = 0; 5633 nfs4_stateid_copy(&data->arg.lock_stateid, 5634 &data->lsp->ls_stateid); 5635 } 5636 if (!nfs4_valid_open_stateid(state)) { 5637 data->rpc_status = -EBADF; 5638 task->tk_action = NULL; 5639 goto out_release_open_seqid; 5640 } 5641 data->timestamp = jiffies; 5642 if (nfs4_setup_sequence(data->server, 5643 &data->arg.seq_args, 5644 &data->res.seq_res, 5645 task) == 0) 5646 return; 5647 out_release_open_seqid: 5648 nfs_release_seqid(data->arg.open_seqid); 5649 out_release_lock_seqid: 5650 nfs_release_seqid(data->arg.lock_seqid); 5651 out_wait: 5652 nfs4_sequence_done(task, &data->res.seq_res); 5653 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 5654 } 5655 5656 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 5657 { 5658 struct nfs4_lockdata *data = calldata; 5659 struct nfs4_lock_state *lsp = data->lsp; 5660 5661 dprintk("%s: begin!\n", __func__); 5662 5663 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5664 return; 5665 5666 data->rpc_status = task->tk_status; 5667 switch (task->tk_status) { 5668 case 0: 5669 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), 5670 data->timestamp); 5671 if (data->arg.new_lock) { 5672 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 5673 if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) { 5674 rpc_restart_call_prepare(task); 5675 break; 5676 } 5677 } 5678 if (data->arg.new_lock_owner != 0) { 5679 nfs_confirm_seqid(&lsp->ls_seqid, 0); 5680 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 5681 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 5682 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 5683 rpc_restart_call_prepare(task); 5684 break; 5685 case -NFS4ERR_BAD_STATEID: 5686 case -NFS4ERR_OLD_STATEID: 5687 case -NFS4ERR_STALE_STATEID: 5688 case -NFS4ERR_EXPIRED: 5689 if (data->arg.new_lock_owner != 0) { 5690 if (!nfs4_stateid_match(&data->arg.open_stateid, 5691 &lsp->ls_state->open_stateid)) 5692 rpc_restart_call_prepare(task); 5693 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 5694 &lsp->ls_stateid)) 5695 rpc_restart_call_prepare(task); 5696 } 5697 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 5698 } 5699 5700 static void nfs4_lock_release(void *calldata) 5701 { 5702 struct nfs4_lockdata *data = calldata; 5703 5704 dprintk("%s: begin!\n", __func__); 5705 nfs_free_seqid(data->arg.open_seqid); 5706 if (data->cancelled != 0) { 5707 struct rpc_task *task; 5708 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 5709 data->arg.lock_seqid); 
5710 if (!IS_ERR(task)) 5711 rpc_put_task_async(task); 5712 dprintk("%s: cancelling lock!\n", __func__); 5713 } else 5714 nfs_free_seqid(data->arg.lock_seqid); 5715 nfs4_put_lock_state(data->lsp); 5716 put_nfs_open_context(data->ctx); 5717 kfree(data); 5718 dprintk("%s: done!\n", __func__); 5719 } 5720 5721 static const struct rpc_call_ops nfs4_lock_ops = { 5722 .rpc_call_prepare = nfs4_lock_prepare, 5723 .rpc_call_done = nfs4_lock_done, 5724 .rpc_release = nfs4_lock_release, 5725 }; 5726 5727 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 5728 { 5729 switch (error) { 5730 case -NFS4ERR_ADMIN_REVOKED: 5731 case -NFS4ERR_BAD_STATEID: 5732 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5733 if (new_lock_owner != 0 || 5734 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 5735 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 5736 break; 5737 case -NFS4ERR_STALE_STATEID: 5738 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5739 case -NFS4ERR_EXPIRED: 5740 nfs4_schedule_lease_recovery(server->nfs_client); 5741 }; 5742 } 5743 5744 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 5745 { 5746 struct nfs4_lockdata *data; 5747 struct rpc_task *task; 5748 struct rpc_message msg = { 5749 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 5750 .rpc_cred = state->owner->so_cred, 5751 }; 5752 struct rpc_task_setup task_setup_data = { 5753 .rpc_client = NFS_CLIENT(state->inode), 5754 .rpc_message = &msg, 5755 .callback_ops = &nfs4_lock_ops, 5756 .workqueue = nfsiod_workqueue, 5757 .flags = RPC_TASK_ASYNC, 5758 }; 5759 int ret; 5760 5761 dprintk("%s: begin!\n", __func__); 5762 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 5763 fl->fl_u.nfs4_fl.owner, 5764 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 5765 if (data == NULL) 5766 return -ENOMEM; 5767 if (IS_SETLKW(cmd)) 5768 data->arg.block = 1; 5769 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5770 msg.rpc_argp = &data->arg; 5771 msg.rpc_resp = &data->res; 5772 task_setup_data.callback_data = data; 5773 if (recovery_type > NFS_LOCK_NEW) { 5774 if (recovery_type == NFS_LOCK_RECLAIM) 5775 data->arg.reclaim = NFS_LOCK_RECLAIM; 5776 nfs4_set_sequence_privileged(&data->arg.seq_args); 5777 } else 5778 data->arg.new_lock = 1; 5779 task = rpc_run_task(&task_setup_data); 5780 if (IS_ERR(task)) 5781 return PTR_ERR(task); 5782 ret = nfs4_wait_for_completion_rpc_task(task); 5783 if (ret == 0) { 5784 ret = data->rpc_status; 5785 if (ret) 5786 nfs4_handle_setlk_error(data->server, data->lsp, 5787 data->arg.new_lock_owner, ret); 5788 } else 5789 data->cancelled = 1; 5790 rpc_put_task(task); 5791 dprintk("%s: done, ret = %d!\n", __func__, ret); 5792 return ret; 5793 } 5794 5795 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 5796 { 5797 struct nfs_server *server = NFS_SERVER(state->inode); 5798 struct nfs4_exception exception = { 5799 .inode = state->inode, 5800 }; 5801 int err; 5802 5803 do { 5804 /* Cache the lock if possible... 
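		 * While we still hold a delegation for this file the lock is
		 * purely local state, so there is nothing to reclaim on the
		 * wire and the check below simply reports success.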
*/ 5805 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5806 return 0; 5807 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 5808 trace_nfs4_lock_reclaim(request, state, F_SETLK, err); 5809 if (err != -NFS4ERR_DELAY) 5810 break; 5811 nfs4_handle_exception(server, err, &exception); 5812 } while (exception.retry); 5813 return err; 5814 } 5815 5816 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 5817 { 5818 struct nfs_server *server = NFS_SERVER(state->inode); 5819 struct nfs4_exception exception = { 5820 .inode = state->inode, 5821 }; 5822 int err; 5823 5824 err = nfs4_set_lock_state(state, request); 5825 if (err != 0) 5826 return err; 5827 if (!recover_lost_locks) { 5828 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 5829 return 0; 5830 } 5831 do { 5832 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5833 return 0; 5834 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 5835 trace_nfs4_lock_expired(request, state, F_SETLK, err); 5836 switch (err) { 5837 default: 5838 goto out; 5839 case -NFS4ERR_GRACE: 5840 case -NFS4ERR_DELAY: 5841 nfs4_handle_exception(server, err, &exception); 5842 err = 0; 5843 } 5844 } while (exception.retry); 5845 out: 5846 return err; 5847 } 5848 5849 #if defined(CONFIG_NFS_V4_1) 5850 /** 5851 * nfs41_check_expired_locks - possibly free a lock stateid 5852 * 5853 * @state: NFSv4 state for an inode 5854 * 5855 * Returns NFS_OK if recovery for this stateid is now finished. 5856 * Otherwise a negative NFS4ERR value is returned. 5857 */ 5858 static int nfs41_check_expired_locks(struct nfs4_state *state) 5859 { 5860 int status, ret = -NFS4ERR_BAD_STATEID; 5861 struct nfs4_lock_state *lsp; 5862 struct nfs_server *server = NFS_SERVER(state->inode); 5863 5864 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 5865 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 5866 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 5867 5868 status = nfs41_test_stateid(server, 5869 &lsp->ls_stateid, 5870 cred); 5871 trace_nfs4_test_lock_stateid(state, lsp, status); 5872 if (status != NFS_OK) { 5873 /* Free the stateid unless the server 5874 * informs us the stateid is unrecognized. */ 5875 if (status != -NFS4ERR_BAD_STATEID) 5876 nfs41_free_stateid(server, 5877 &lsp->ls_stateid, 5878 cred); 5879 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 5880 ret = status; 5881 } 5882 } 5883 }; 5884 5885 return ret; 5886 } 5887 5888 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 5889 { 5890 int status = NFS_OK; 5891 5892 if (test_bit(LK_STATE_IN_USE, &state->flags)) 5893 status = nfs41_check_expired_locks(state); 5894 if (status != NFS_OK) 5895 status = nfs4_lock_expired(state, request); 5896 return status; 5897 } 5898 #endif 5899 5900 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5901 { 5902 struct nfs_inode *nfsi = NFS_I(state->inode); 5903 unsigned char fl_flags = request->fl_flags; 5904 int status = -ENOLCK; 5905 5906 if ((fl_flags & FL_POSIX) && 5907 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 5908 goto out; 5909 /* Is this a delegated open? */ 5910 status = nfs4_set_lock_state(state, request); 5911 if (status != 0) 5912 goto out; 5913 request->fl_flags |= FL_ACCESS; 5914 status = do_vfs_lock(request->fl_file, request); 5915 if (status < 0) 5916 goto out; 5917 down_read(&nfsi->rwsem); 5918 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 5919 /* Yes: cache locks! 
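		 * While the delegation remains outstanding the server cannot
		 * have granted a conflicting lock to another client, so the
		 * lock only needs to be recorded locally by do_vfs_lock();
		 * no LOCK operation goes on the wire.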
*/ 5920 /* ...but avoid races with delegation recall... */ 5921 request->fl_flags = fl_flags & ~FL_SLEEP; 5922 status = do_vfs_lock(request->fl_file, request); 5923 up_read(&nfsi->rwsem); 5924 goto out; 5925 } 5926 up_read(&nfsi->rwsem); 5927 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 5928 out: 5929 request->fl_flags = fl_flags; 5930 return status; 5931 } 5932 5933 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5934 { 5935 struct nfs4_exception exception = { 5936 .state = state, 5937 .inode = state->inode, 5938 }; 5939 int err; 5940 5941 do { 5942 err = _nfs4_proc_setlk(state, cmd, request); 5943 trace_nfs4_set_lock(request, state, cmd, err); 5944 if (err == -NFS4ERR_DENIED) 5945 err = -EAGAIN; 5946 err = nfs4_handle_exception(NFS_SERVER(state->inode), 5947 err, &exception); 5948 } while (exception.retry); 5949 return err; 5950 } 5951 5952 static int 5953 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 5954 { 5955 struct nfs_open_context *ctx; 5956 struct nfs4_state *state; 5957 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 5958 int status; 5959 5960 /* verify open state */ 5961 ctx = nfs_file_open_context(filp); 5962 state = ctx->state; 5963 5964 if (request->fl_start < 0 || request->fl_end < 0) 5965 return -EINVAL; 5966 5967 if (IS_GETLK(cmd)) { 5968 if (state != NULL) 5969 return nfs4_proc_getlk(state, F_GETLK, request); 5970 return 0; 5971 } 5972 5973 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 5974 return -EINVAL; 5975 5976 if (request->fl_type == F_UNLCK) { 5977 if (state != NULL) 5978 return nfs4_proc_unlck(state, cmd, request); 5979 return 0; 5980 } 5981 5982 if (state == NULL) 5983 return -ENOLCK; 5984 /* 5985 * Don't rely on the VFS having checked the file open mode, 5986 * since it won't do this for flock() locks. 
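	 *
	 * For example, an flock(LOCK_SH) request on a descriptor opened
	 * O_WRONLY arrives here as F_RDLCK with FMODE_READ clear, and must
	 * be rejected with -EBADF by the checks below.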
5987 */ 5988 switch (request->fl_type) { 5989 case F_RDLCK: 5990 if (!(filp->f_mode & FMODE_READ)) 5991 return -EBADF; 5992 break; 5993 case F_WRLCK: 5994 if (!(filp->f_mode & FMODE_WRITE)) 5995 return -EBADF; 5996 } 5997 5998 do { 5999 status = nfs4_proc_setlk(state, cmd, request); 6000 if ((status != -EAGAIN) || IS_SETLK(cmd)) 6001 break; 6002 timeout = nfs4_set_lock_task_retry(timeout); 6003 status = -ERESTARTSYS; 6004 if (signalled()) 6005 break; 6006 } while(status < 0); 6007 return status; 6008 } 6009 6010 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 6011 { 6012 struct nfs_server *server = NFS_SERVER(state->inode); 6013 int err; 6014 6015 err = nfs4_set_lock_state(state, fl); 6016 if (err != 0) 6017 return err; 6018 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 6019 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 6020 } 6021 6022 struct nfs_release_lockowner_data { 6023 struct nfs4_lock_state *lsp; 6024 struct nfs_server *server; 6025 struct nfs_release_lockowner_args args; 6026 struct nfs_release_lockowner_res res; 6027 unsigned long timestamp; 6028 }; 6029 6030 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 6031 { 6032 struct nfs_release_lockowner_data *data = calldata; 6033 struct nfs_server *server = data->server; 6034 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 6035 &data->args.seq_args, &data->res.seq_res, task); 6036 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6037 data->timestamp = jiffies; 6038 } 6039 6040 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 6041 { 6042 struct nfs_release_lockowner_data *data = calldata; 6043 struct nfs_server *server = data->server; 6044 6045 nfs40_sequence_done(task, &data->res.seq_res); 6046 6047 switch (task->tk_status) { 6048 case 0: 6049 renew_lease(server, data->timestamp); 6050 break; 6051 case -NFS4ERR_STALE_CLIENTID: 6052 case -NFS4ERR_EXPIRED: 6053 nfs4_schedule_lease_recovery(server->nfs_client); 6054 break; 6055 case -NFS4ERR_LEASE_MOVED: 6056 case -NFS4ERR_DELAY: 6057 if (nfs4_async_handle_error(task, server, 6058 NULL, NULL) == -EAGAIN) 6059 rpc_restart_call_prepare(task); 6060 } 6061 } 6062 6063 static void nfs4_release_lockowner_release(void *calldata) 6064 { 6065 struct nfs_release_lockowner_data *data = calldata; 6066 nfs4_free_lock_state(data->server, data->lsp); 6067 kfree(calldata); 6068 } 6069 6070 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 6071 .rpc_call_prepare = nfs4_release_lockowner_prepare, 6072 .rpc_call_done = nfs4_release_lockowner_done, 6073 .rpc_release = nfs4_release_lockowner_release, 6074 }; 6075 6076 static void 6077 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 6078 { 6079 struct nfs_release_lockowner_data *data; 6080 struct rpc_message msg = { 6081 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 6082 }; 6083 6084 if (server->nfs_client->cl_mvops->minor_version != 0) 6085 return; 6086 6087 data = kmalloc(sizeof(*data), GFP_NOFS); 6088 if (!data) 6089 return; 6090 data->lsp = lsp; 6091 data->server = server; 6092 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6093 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 6094 data->args.lock_owner.s_dev = server->s_dev; 6095 6096 msg.rpc_argp = &data->args; 6097 msg.rpc_resp = &data->res; 6098 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 6099 
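	/*
	 * Fire and forget: the reply is handled entirely by
	 * nfs4_release_lockowner_ops, whose rpc_release callback frees the
	 * lock state, so there is nothing to wait for here.
	 */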
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 6100 } 6101 6102 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 6103 6104 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, 6105 const void *buf, size_t buflen, 6106 int flags, int type) 6107 { 6108 if (strcmp(key, "") != 0) 6109 return -EINVAL; 6110 6111 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen); 6112 } 6113 6114 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, 6115 void *buf, size_t buflen, int type) 6116 { 6117 if (strcmp(key, "") != 0) 6118 return -EINVAL; 6119 6120 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen); 6121 } 6122 6123 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, 6124 size_t list_len, const char *name, 6125 size_t name_len, int type) 6126 { 6127 size_t len = sizeof(XATTR_NAME_NFSV4_ACL); 6128 6129 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) 6130 return 0; 6131 6132 if (list && len <= list_len) 6133 memcpy(list, XATTR_NAME_NFSV4_ACL, len); 6134 return len; 6135 } 6136 6137 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6138 static inline int nfs4_server_supports_labels(struct nfs_server *server) 6139 { 6140 return server->caps & NFS_CAP_SECURITY_LABEL; 6141 } 6142 6143 static int nfs4_xattr_set_nfs4_label(struct dentry *dentry, const char *key, 6144 const void *buf, size_t buflen, 6145 int flags, int type) 6146 { 6147 if (security_ismaclabel(key)) 6148 return nfs4_set_security_label(dentry, buf, buflen); 6149 6150 return -EOPNOTSUPP; 6151 } 6152 6153 static int nfs4_xattr_get_nfs4_label(struct dentry *dentry, const char *key, 6154 void *buf, size_t buflen, int type) 6155 { 6156 if (security_ismaclabel(key)) 6157 return nfs4_get_security_label(dentry->d_inode, buf, buflen); 6158 return -EOPNOTSUPP; 6159 } 6160 6161 static size_t nfs4_xattr_list_nfs4_label(struct dentry *dentry, char *list, 6162 size_t list_len, const char *name, 6163 size_t name_len, int type) 6164 { 6165 size_t len = 0; 6166 6167 if (nfs_server_capable(dentry->d_inode, NFS_CAP_SECURITY_LABEL)) { 6168 len = security_inode_listsecurity(dentry->d_inode, NULL, 0); 6169 if (list && len <= list_len) 6170 security_inode_listsecurity(dentry->d_inode, list, len); 6171 } 6172 return len; 6173 } 6174 6175 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 6176 .prefix = XATTR_SECURITY_PREFIX, 6177 .list = nfs4_xattr_list_nfs4_label, 6178 .get = nfs4_xattr_get_nfs4_label, 6179 .set = nfs4_xattr_set_nfs4_label, 6180 }; 6181 #endif 6182 6183 6184 /* 6185 * nfs_fhget will use either the mounted_on_fileid or the fileid 6186 */ 6187 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 6188 { 6189 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 6190 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 6191 (fattr->valid & NFS_ATTR_FATTR_FSID) && 6192 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 6193 return; 6194 6195 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 6196 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 6197 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 6198 fattr->nlink = 2; 6199 } 6200 6201 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6202 const struct qstr *name, 6203 struct nfs4_fs_locations *fs_locations, 6204 struct page *page) 6205 { 6206 struct nfs_server *server = NFS_SERVER(dir); 6207 u32 bitmask[3] = { 6208 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6209 }; 6210 struct nfs4_fs_locations_arg args = { 6211 .dir_fh = NFS_FH(dir), 6212 
.name = name, 6213 .page = page, 6214 .bitmask = bitmask, 6215 }; 6216 struct nfs4_fs_locations_res res = { 6217 .fs_locations = fs_locations, 6218 }; 6219 struct rpc_message msg = { 6220 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6221 .rpc_argp = &args, 6222 .rpc_resp = &res, 6223 }; 6224 int status; 6225 6226 dprintk("%s: start\n", __func__); 6227 6228 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 6229 * is not supported */ 6230 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 6231 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 6232 else 6233 bitmask[0] |= FATTR4_WORD0_FILEID; 6234 6235 nfs_fattr_init(&fs_locations->fattr); 6236 fs_locations->server = server; 6237 fs_locations->nlocations = 0; 6238 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 6239 dprintk("%s: returned status = %d\n", __func__, status); 6240 return status; 6241 } 6242 6243 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6244 const struct qstr *name, 6245 struct nfs4_fs_locations *fs_locations, 6246 struct page *page) 6247 { 6248 struct nfs4_exception exception = { }; 6249 int err; 6250 do { 6251 err = _nfs4_proc_fs_locations(client, dir, name, 6252 fs_locations, page); 6253 trace_nfs4_get_fs_locations(dir, name, err); 6254 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6255 &exception); 6256 } while (exception.retry); 6257 return err; 6258 } 6259 6260 /* 6261 * This operation also signals the server that this client is 6262 * performing migration recovery. The server can stop returning 6263 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 6264 * appended to this compound to identify the client ID which is 6265 * performing recovery. 6266 */ 6267 static int _nfs40_proc_get_locations(struct inode *inode, 6268 struct nfs4_fs_locations *locations, 6269 struct page *page, struct rpc_cred *cred) 6270 { 6271 struct nfs_server *server = NFS_SERVER(inode); 6272 struct rpc_clnt *clnt = server->client; 6273 u32 bitmask[2] = { 6274 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6275 }; 6276 struct nfs4_fs_locations_arg args = { 6277 .clientid = server->nfs_client->cl_clientid, 6278 .fh = NFS_FH(inode), 6279 .page = page, 6280 .bitmask = bitmask, 6281 .migration = 1, /* skip LOOKUP */ 6282 .renew = 1, /* append RENEW */ 6283 }; 6284 struct nfs4_fs_locations_res res = { 6285 .fs_locations = locations, 6286 .migration = 1, 6287 .renew = 1, 6288 }; 6289 struct rpc_message msg = { 6290 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6291 .rpc_argp = &args, 6292 .rpc_resp = &res, 6293 .rpc_cred = cred, 6294 }; 6295 unsigned long now = jiffies; 6296 int status; 6297 6298 nfs_fattr_init(&locations->fattr); 6299 locations->server = server; 6300 locations->nlocations = 0; 6301 6302 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6303 nfs4_set_sequence_privileged(&args.seq_args); 6304 status = nfs4_call_sync_sequence(clnt, server, &msg, 6305 &args.seq_args, &res.seq_res); 6306 if (status) 6307 return status; 6308 6309 renew_lease(server, now); 6310 return 0; 6311 } 6312 6313 #ifdef CONFIG_NFS_V4_1 6314 6315 /* 6316 * This operation also signals the server that this client is 6317 * performing migration recovery. The server can stop asserting 6318 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 6319 * performing this operation is identified in the SEQUENCE 6320 * operation in this compound. 
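 *
 * Unlike the NFSv4.0 variant above, no RENEW is appended here: the
 * SEQUENCE operation at the head of the compound both identifies the
 * client and implicitly renews its lease.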
6321 * 6322 * When the client supports GETATTR(fs_locations_info), it can 6323 * be plumbed in here. 6324 */ 6325 static int _nfs41_proc_get_locations(struct inode *inode, 6326 struct nfs4_fs_locations *locations, 6327 struct page *page, struct rpc_cred *cred) 6328 { 6329 struct nfs_server *server = NFS_SERVER(inode); 6330 struct rpc_clnt *clnt = server->client; 6331 u32 bitmask[2] = { 6332 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6333 }; 6334 struct nfs4_fs_locations_arg args = { 6335 .fh = NFS_FH(inode), 6336 .page = page, 6337 .bitmask = bitmask, 6338 .migration = 1, /* skip LOOKUP */ 6339 }; 6340 struct nfs4_fs_locations_res res = { 6341 .fs_locations = locations, 6342 .migration = 1, 6343 }; 6344 struct rpc_message msg = { 6345 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6346 .rpc_argp = &args, 6347 .rpc_resp = &res, 6348 .rpc_cred = cred, 6349 }; 6350 int status; 6351 6352 nfs_fattr_init(&locations->fattr); 6353 locations->server = server; 6354 locations->nlocations = 0; 6355 6356 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6357 nfs4_set_sequence_privileged(&args.seq_args); 6358 status = nfs4_call_sync_sequence(clnt, server, &msg, 6359 &args.seq_args, &res.seq_res); 6360 if (status == NFS4_OK && 6361 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6362 status = -NFS4ERR_LEASE_MOVED; 6363 return status; 6364 } 6365 6366 #endif /* CONFIG_NFS_V4_1 */ 6367 6368 /** 6369 * nfs4_proc_get_locations - discover locations for a migrated FSID 6370 * @inode: inode on FSID that is migrating 6371 * @locations: result of query 6372 * @page: buffer 6373 * @cred: credential to use for this operation 6374 * 6375 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 6376 * operation failed, or a negative errno if a local error occurred. 6377 * 6378 * On success, "locations" is filled in, but if the server has 6379 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 6380 * asserted. 6381 * 6382 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 6383 * from this client that require migration recovery. 6384 */ 6385 int nfs4_proc_get_locations(struct inode *inode, 6386 struct nfs4_fs_locations *locations, 6387 struct page *page, struct rpc_cred *cred) 6388 { 6389 struct nfs_server *server = NFS_SERVER(inode); 6390 struct nfs_client *clp = server->nfs_client; 6391 const struct nfs4_mig_recovery_ops *ops = 6392 clp->cl_mvops->mig_recovery_ops; 6393 struct nfs4_exception exception = { }; 6394 int status; 6395 6396 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6397 (unsigned long long)server->fsid.major, 6398 (unsigned long long)server->fsid.minor, 6399 clp->cl_hostname); 6400 nfs_display_fhandle(NFS_FH(inode), __func__); 6401 6402 do { 6403 status = ops->get_locations(inode, locations, page, cred); 6404 if (status != -NFS4ERR_DELAY) 6405 break; 6406 nfs4_handle_exception(server, status, &exception); 6407 } while (exception.retry); 6408 return status; 6409 } 6410 6411 /* 6412 * This operation also signals the server that this client is 6413 * performing "lease moved" recovery. The server can stop 6414 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 6415 * is appended to this compound to identify the client ID which is 6416 * performing recovery. 
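 *
 * The server shows that the FSID is still present simply by being able
 * to return a filehandle for it; the res.fh buffer allocated below is
 * used only for that round trip and is freed immediately after the call.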
6417 */ 6418 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6419 { 6420 struct nfs_server *server = NFS_SERVER(inode); 6421 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 6422 struct rpc_clnt *clnt = server->client; 6423 struct nfs4_fsid_present_arg args = { 6424 .fh = NFS_FH(inode), 6425 .clientid = clp->cl_clientid, 6426 .renew = 1, /* append RENEW */ 6427 }; 6428 struct nfs4_fsid_present_res res = { 6429 .renew = 1, 6430 }; 6431 struct rpc_message msg = { 6432 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6433 .rpc_argp = &args, 6434 .rpc_resp = &res, 6435 .rpc_cred = cred, 6436 }; 6437 unsigned long now = jiffies; 6438 int status; 6439 6440 res.fh = nfs_alloc_fhandle(); 6441 if (res.fh == NULL) 6442 return -ENOMEM; 6443 6444 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6445 nfs4_set_sequence_privileged(&args.seq_args); 6446 status = nfs4_call_sync_sequence(clnt, server, &msg, 6447 &args.seq_args, &res.seq_res); 6448 nfs_free_fhandle(res.fh); 6449 if (status) 6450 return status; 6451 6452 do_renew_lease(clp, now); 6453 return 0; 6454 } 6455 6456 #ifdef CONFIG_NFS_V4_1 6457 6458 /* 6459 * This operation also signals the server that this client is 6460 * performing "lease moved" recovery. The server can stop asserting 6461 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 6462 * this operation is identified in the SEQUENCE operation in this 6463 * compound. 6464 */ 6465 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6466 { 6467 struct nfs_server *server = NFS_SERVER(inode); 6468 struct rpc_clnt *clnt = server->client; 6469 struct nfs4_fsid_present_arg args = { 6470 .fh = NFS_FH(inode), 6471 }; 6472 struct nfs4_fsid_present_res res = { 6473 }; 6474 struct rpc_message msg = { 6475 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6476 .rpc_argp = &args, 6477 .rpc_resp = &res, 6478 .rpc_cred = cred, 6479 }; 6480 int status; 6481 6482 res.fh = nfs_alloc_fhandle(); 6483 if (res.fh == NULL) 6484 return -ENOMEM; 6485 6486 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6487 nfs4_set_sequence_privileged(&args.seq_args); 6488 status = nfs4_call_sync_sequence(clnt, server, &msg, 6489 &args.seq_args, &res.seq_res); 6490 nfs_free_fhandle(res.fh); 6491 if (status == NFS4_OK && 6492 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6493 status = -NFS4ERR_LEASE_MOVED; 6494 return status; 6495 } 6496 6497 #endif /* CONFIG_NFS_V4_1 */ 6498 6499 /** 6500 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 6501 * @inode: inode on FSID to check 6502 * @cred: credential to use for this operation 6503 * 6504 * Server indicates whether the FSID is present, moved, or not 6505 * recognized. This operation is necessary to clear a LEASE_MOVED 6506 * condition for this client ID. 6507 * 6508 * Returns NFS4_OK if the FSID is present on this server, 6509 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 6510 * NFS4ERR code if some error occurred on the server, or a 6511 * negative errno if a local failure occurred. 
6512 */ 6513 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6514 { 6515 struct nfs_server *server = NFS_SERVER(inode); 6516 struct nfs_client *clp = server->nfs_client; 6517 const struct nfs4_mig_recovery_ops *ops = 6518 clp->cl_mvops->mig_recovery_ops; 6519 struct nfs4_exception exception = { }; 6520 int status; 6521 6522 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6523 (unsigned long long)server->fsid.major, 6524 (unsigned long long)server->fsid.minor, 6525 clp->cl_hostname); 6526 nfs_display_fhandle(NFS_FH(inode), __func__); 6527 6528 do { 6529 status = ops->fsid_present(inode, cred); 6530 if (status != -NFS4ERR_DELAY) 6531 break; 6532 nfs4_handle_exception(server, status, &exception); 6533 } while (exception.retry); 6534 return status; 6535 } 6536 6537 /** 6538 * If 'use_integrity' is true and the state managment nfs_client 6539 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 6540 * and the machine credential as per RFC3530bis and RFC5661 Security 6541 * Considerations sections. Otherwise, just use the user cred with the 6542 * filesystem's rpc_client. 6543 */ 6544 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 6545 { 6546 int status; 6547 struct nfs4_secinfo_arg args = { 6548 .dir_fh = NFS_FH(dir), 6549 .name = name, 6550 }; 6551 struct nfs4_secinfo_res res = { 6552 .flavors = flavors, 6553 }; 6554 struct rpc_message msg = { 6555 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 6556 .rpc_argp = &args, 6557 .rpc_resp = &res, 6558 }; 6559 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 6560 struct rpc_cred *cred = NULL; 6561 6562 if (use_integrity) { 6563 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient; 6564 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client); 6565 msg.rpc_cred = cred; 6566 } 6567 6568 dprintk("NFS call secinfo %s\n", name->name); 6569 6570 nfs4_state_protect(NFS_SERVER(dir)->nfs_client, 6571 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 6572 6573 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args, 6574 &res.seq_res, 0); 6575 dprintk("NFS reply secinfo: %d\n", status); 6576 6577 if (cred) 6578 put_rpccred(cred); 6579 6580 return status; 6581 } 6582 6583 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 6584 struct nfs4_secinfo_flavors *flavors) 6585 { 6586 struct nfs4_exception exception = { }; 6587 int err; 6588 do { 6589 err = -NFS4ERR_WRONGSEC; 6590 6591 /* try to use integrity protection with machine cred */ 6592 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 6593 err = _nfs4_proc_secinfo(dir, name, flavors, true); 6594 6595 /* 6596 * if unable to use integrity protection, or SECINFO with 6597 * integrity protection returns NFS4ERR_WRONGSEC (which is 6598 * disallowed by spec, but exists in deployed servers) use 6599 * the current filesystem's rpc_client and the user cred. 6600 */ 6601 if (err == -NFS4ERR_WRONGSEC) 6602 err = _nfs4_proc_secinfo(dir, name, flavors, false); 6603 6604 trace_nfs4_secinfo(dir, name, err); 6605 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6606 &exception); 6607 } while (exception.retry); 6608 return err; 6609 } 6610 6611 #ifdef CONFIG_NFS_V4_1 6612 /* 6613 * Check the exchange flags returned by the server for invalid flags, having 6614 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 6615 * DS flags set. 
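 *
 * For example, a reply carrying only EXCHGID4_FLAG_USE_PNFS_MDS is
 * accepted, while one that sets both USE_PNFS_MDS and USE_NON_PNFS, or
 * one that sets none of USE_NON_PNFS, USE_PNFS_MDS and USE_PNFS_DS, is
 * rejected with -NFS4ERR_INVAL.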
6616 */ 6617 static int nfs4_check_cl_exchange_flags(u32 flags) 6618 { 6619 if (flags & ~EXCHGID4_FLAG_MASK_R) 6620 goto out_inval; 6621 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 6622 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 6623 goto out_inval; 6624 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 6625 goto out_inval; 6626 return NFS_OK; 6627 out_inval: 6628 return -NFS4ERR_INVAL; 6629 } 6630 6631 static bool 6632 nfs41_same_server_scope(struct nfs41_server_scope *a, 6633 struct nfs41_server_scope *b) 6634 { 6635 if (a->server_scope_sz == b->server_scope_sz && 6636 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 6637 return true; 6638 6639 return false; 6640 } 6641 6642 /* 6643 * nfs4_proc_bind_conn_to_session() 6644 * 6645 * The 4.1 client currently uses the same TCP connection for the 6646 * fore and backchannel. 6647 */ 6648 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 6649 { 6650 int status; 6651 struct nfs41_bind_conn_to_session_args args = { 6652 .client = clp, 6653 .dir = NFS4_CDFC4_FORE_OR_BOTH, 6654 }; 6655 struct nfs41_bind_conn_to_session_res res; 6656 struct rpc_message msg = { 6657 .rpc_proc = 6658 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 6659 .rpc_argp = &args, 6660 .rpc_resp = &res, 6661 .rpc_cred = cred, 6662 }; 6663 6664 dprintk("--> %s\n", __func__); 6665 6666 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 6667 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 6668 args.dir = NFS4_CDFC4_FORE; 6669 6670 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 6671 trace_nfs4_bind_conn_to_session(clp, status); 6672 if (status == 0) { 6673 if (memcmp(res.sessionid.data, 6674 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 6675 dprintk("NFS: %s: Session ID mismatch\n", __func__); 6676 status = -EIO; 6677 goto out; 6678 } 6679 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 6680 dprintk("NFS: %s: Unexpected direction from server\n", 6681 __func__); 6682 status = -EIO; 6683 goto out; 6684 } 6685 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 6686 dprintk("NFS: %s: Server returned RDMA mode = true\n", 6687 __func__); 6688 status = -EIO; 6689 goto out; 6690 } 6691 } 6692 out: 6693 dprintk("<-- %s status= %d\n", __func__, status); 6694 return status; 6695 } 6696 6697 /* 6698 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 6699 * and operations we'd like to see to enable certain features in the allow map 6700 */ 6701 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 6702 .how = SP4_MACH_CRED, 6703 .enforce.u.words = { 6704 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6705 1 << (OP_EXCHANGE_ID - 32) | 6706 1 << (OP_CREATE_SESSION - 32) | 6707 1 << (OP_DESTROY_SESSION - 32) | 6708 1 << (OP_DESTROY_CLIENTID - 32) 6709 }, 6710 .allow.u.words = { 6711 [0] = 1 << (OP_CLOSE) | 6712 1 << (OP_LOCKU) | 6713 1 << (OP_COMMIT), 6714 [1] = 1 << (OP_SECINFO - 32) | 6715 1 << (OP_SECINFO_NO_NAME - 32) | 6716 1 << (OP_TEST_STATEID - 32) | 6717 1 << (OP_FREE_STATEID - 32) | 6718 1 << (OP_WRITE - 32) 6719 } 6720 }; 6721 6722 /* 6723 * Select the state protection mode for client `clp' given the server results 6724 * from exchange_id in `sp'. 6725 * 6726 * Returns 0 on success, negative errno otherwise. 
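 *
 * For example, a server whose enforce map lists exactly
 * BIND_CONN_TO_SESSION, EXCHANGE_ID, CREATE_SESSION, DESTROY_SESSION
 * and DESTROY_CLIENTID, and whose allow map adds CLOSE and LOCKU, ends
 * up with NFS_SP4_MACH_CRED_MINIMAL and NFS_SP4_MACH_CRED_CLEANUP set
 * in clp->cl_sp4_flags.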
6727 */ 6728 static int nfs4_sp4_select_mode(struct nfs_client *clp, 6729 struct nfs41_state_protection *sp) 6730 { 6731 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 6732 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6733 1 << (OP_EXCHANGE_ID - 32) | 6734 1 << (OP_CREATE_SESSION - 32) | 6735 1 << (OP_DESTROY_SESSION - 32) | 6736 1 << (OP_DESTROY_CLIENTID - 32) 6737 }; 6738 unsigned int i; 6739 6740 if (sp->how == SP4_MACH_CRED) { 6741 /* Print state protect result */ 6742 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 6743 for (i = 0; i <= LAST_NFS4_OP; i++) { 6744 if (test_bit(i, sp->enforce.u.longs)) 6745 dfprintk(MOUNT, " enforce op %d\n", i); 6746 if (test_bit(i, sp->allow.u.longs)) 6747 dfprintk(MOUNT, " allow op %d\n", i); 6748 } 6749 6750 /* make sure nothing is on enforce list that isn't supported */ 6751 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 6752 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 6753 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 6754 return -EINVAL; 6755 } 6756 } 6757 6758 /* 6759 * Minimal mode - state operations are allowed to use machine 6760 * credential. Note this already happens by default, so the 6761 * client doesn't have to do anything more than the negotiation. 6762 * 6763 * NOTE: we don't care if EXCHANGE_ID is in the list - 6764 * we're already using the machine cred for exchange_id 6765 * and will never use a different cred. 6766 */ 6767 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 6768 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 6769 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 6770 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 6771 dfprintk(MOUNT, "sp4_mach_cred:\n"); 6772 dfprintk(MOUNT, " minimal mode enabled\n"); 6773 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags); 6774 } else { 6775 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 6776 return -EINVAL; 6777 } 6778 6779 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 6780 test_bit(OP_LOCKU, sp->allow.u.longs)) { 6781 dfprintk(MOUNT, " cleanup mode enabled\n"); 6782 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags); 6783 } 6784 6785 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 6786 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 6787 dfprintk(MOUNT, " secinfo mode enabled\n"); 6788 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags); 6789 } 6790 6791 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 6792 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 6793 dfprintk(MOUNT, " stateid mode enabled\n"); 6794 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags); 6795 } 6796 6797 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 6798 dfprintk(MOUNT, " write mode enabled\n"); 6799 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags); 6800 } 6801 6802 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 6803 dfprintk(MOUNT, " commit mode enabled\n"); 6804 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags); 6805 } 6806 } 6807 6808 return 0; 6809 } 6810 6811 /* 6812 * _nfs4_proc_exchange_id() 6813 * 6814 * Wrapper for EXCHANGE_ID operation. 
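 *
 * The sp4_how argument selects the state protection mode requested of
 * the server: SP4_NONE, or SP4_MACH_CRED built from
 * nfs4_sp4_mach_cred_request above. Any other value is rejected.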
6815 */ 6816 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, 6817 u32 sp4_how) 6818 { 6819 nfs4_verifier verifier; 6820 struct nfs41_exchange_id_args args = { 6821 .verifier = &verifier, 6822 .client = clp, 6823 #ifdef CONFIG_NFS_V4_1_MIGRATION 6824 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 6825 EXCHGID4_FLAG_BIND_PRINC_STATEID | 6826 EXCHGID4_FLAG_SUPP_MOVED_MIGR, 6827 #else 6828 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 6829 EXCHGID4_FLAG_BIND_PRINC_STATEID, 6830 #endif 6831 }; 6832 struct nfs41_exchange_id_res res = { 6833 0 6834 }; 6835 int status; 6836 struct rpc_message msg = { 6837 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 6838 .rpc_argp = &args, 6839 .rpc_resp = &res, 6840 .rpc_cred = cred, 6841 }; 6842 6843 nfs4_init_boot_verifier(clp, &verifier); 6844 args.id_len = nfs4_init_uniform_client_string(clp, args.id, 6845 sizeof(args.id)); 6846 dprintk("NFS call exchange_id auth=%s, '%.*s'\n", 6847 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6848 args.id_len, args.id); 6849 6850 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 6851 GFP_NOFS); 6852 if (unlikely(res.server_owner == NULL)) { 6853 status = -ENOMEM; 6854 goto out; 6855 } 6856 6857 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 6858 GFP_NOFS); 6859 if (unlikely(res.server_scope == NULL)) { 6860 status = -ENOMEM; 6861 goto out_server_owner; 6862 } 6863 6864 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 6865 if (unlikely(res.impl_id == NULL)) { 6866 status = -ENOMEM; 6867 goto out_server_scope; 6868 } 6869 6870 switch (sp4_how) { 6871 case SP4_NONE: 6872 args.state_protect.how = SP4_NONE; 6873 break; 6874 6875 case SP4_MACH_CRED: 6876 args.state_protect = nfs4_sp4_mach_cred_request; 6877 break; 6878 6879 default: 6880 /* unsupported! 
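		 * sp4_how values other than SP4_NONE and SP4_MACH_CRED are a
		 * bug in the caller: warn once and fail with -EINVAL.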
*/ 6881 WARN_ON_ONCE(1); 6882 status = -EINVAL; 6883 goto out_server_scope; 6884 } 6885 6886 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 6887 trace_nfs4_exchange_id(clp, status); 6888 if (status == 0) 6889 status = nfs4_check_cl_exchange_flags(res.flags); 6890 6891 if (status == 0) 6892 status = nfs4_sp4_select_mode(clp, &res.state_protect); 6893 6894 if (status == 0) { 6895 clp->cl_clientid = res.clientid; 6896 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R); 6897 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) 6898 clp->cl_seqid = res.seqid; 6899 6900 kfree(clp->cl_serverowner); 6901 clp->cl_serverowner = res.server_owner; 6902 res.server_owner = NULL; 6903 6904 /* use the most recent implementation id */ 6905 kfree(clp->cl_implid); 6906 clp->cl_implid = res.impl_id; 6907 6908 if (clp->cl_serverscope != NULL && 6909 !nfs41_same_server_scope(clp->cl_serverscope, 6910 res.server_scope)) { 6911 dprintk("%s: server_scope mismatch detected\n", 6912 __func__); 6913 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 6914 kfree(clp->cl_serverscope); 6915 clp->cl_serverscope = NULL; 6916 } 6917 6918 if (clp->cl_serverscope == NULL) { 6919 clp->cl_serverscope = res.server_scope; 6920 goto out; 6921 } 6922 } else 6923 kfree(res.impl_id); 6924 6925 out_server_owner: 6926 kfree(res.server_owner); 6927 out_server_scope: 6928 kfree(res.server_scope); 6929 out: 6930 if (clp->cl_implid != NULL) 6931 dprintk("NFS reply exchange_id: Server Implementation ID: " 6932 "domain: %s, name: %s, date: %llu,%u\n", 6933 clp->cl_implid->domain, clp->cl_implid->name, 6934 clp->cl_implid->date.seconds, 6935 clp->cl_implid->date.nseconds); 6936 dprintk("NFS reply exchange_id: %d\n", status); 6937 return status; 6938 } 6939 6940 /* 6941 * nfs4_proc_exchange_id() 6942 * 6943 * Returns zero, a negative errno, or a negative NFS4ERR status code. 6944 * 6945 * Since the clientid has expired, all compounds using sessions 6946 * associated with the stale clientid will be returning 6947 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 6948 * be in some phase of session reset. 6949 * 6950 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
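 *
 * If the SP4_MACH_CRED exchange fails for any reason, the call is
 * simply retried with SP4_NONE, so servers without state protection
 * support still mount normally.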
6951 */ 6952 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 6953 { 6954 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 6955 int status; 6956 6957 /* try SP4_MACH_CRED if krb5i/p */ 6958 if (authflavor == RPC_AUTH_GSS_KRB5I || 6959 authflavor == RPC_AUTH_GSS_KRB5P) { 6960 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 6961 if (!status) 6962 return 0; 6963 } 6964 6965 /* try SP4_NONE */ 6966 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 6967 } 6968 6969 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 6970 struct rpc_cred *cred) 6971 { 6972 struct rpc_message msg = { 6973 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 6974 .rpc_argp = clp, 6975 .rpc_cred = cred, 6976 }; 6977 int status; 6978 6979 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 6980 trace_nfs4_destroy_clientid(clp, status); 6981 if (status) 6982 dprintk("NFS: Got error %d from the server %s on " 6983 "DESTROY_CLIENTID.", status, clp->cl_hostname); 6984 return status; 6985 } 6986 6987 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 6988 struct rpc_cred *cred) 6989 { 6990 unsigned int loop; 6991 int ret; 6992 6993 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 6994 ret = _nfs4_proc_destroy_clientid(clp, cred); 6995 switch (ret) { 6996 case -NFS4ERR_DELAY: 6997 case -NFS4ERR_CLIENTID_BUSY: 6998 ssleep(1); 6999 break; 7000 default: 7001 return ret; 7002 } 7003 } 7004 return 0; 7005 } 7006 7007 int nfs4_destroy_clientid(struct nfs_client *clp) 7008 { 7009 struct rpc_cred *cred; 7010 int ret = 0; 7011 7012 if (clp->cl_mvops->minor_version < 1) 7013 goto out; 7014 if (clp->cl_exchange_flags == 0) 7015 goto out; 7016 if (clp->cl_preserve_clid) 7017 goto out; 7018 cred = nfs4_get_clid_cred(clp); 7019 ret = nfs4_proc_destroy_clientid(clp, cred); 7020 if (cred) 7021 put_rpccred(cred); 7022 switch (ret) { 7023 case 0: 7024 case -NFS4ERR_STALE_CLIENTID: 7025 clp->cl_exchange_flags = 0; 7026 } 7027 out: 7028 return ret; 7029 } 7030 7031 struct nfs4_get_lease_time_data { 7032 struct nfs4_get_lease_time_args *args; 7033 struct nfs4_get_lease_time_res *res; 7034 struct nfs_client *clp; 7035 }; 7036 7037 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 7038 void *calldata) 7039 { 7040 struct nfs4_get_lease_time_data *data = 7041 (struct nfs4_get_lease_time_data *)calldata; 7042 7043 dprintk("--> %s\n", __func__); 7044 /* just setup sequence, do not trigger session recovery 7045 since we're invoked within one */ 7046 nfs41_setup_sequence(data->clp->cl_session, 7047 &data->args->la_seq_args, 7048 &data->res->lr_seq_res, 7049 task); 7050 dprintk("<-- %s\n", __func__); 7051 } 7052 7053 /* 7054 * Called from nfs4_state_manager thread for session setup, so don't recover 7055 * from sequence operation or clientid errors. 
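 *
 * NFS4ERR_DELAY and NFS4ERR_GRACE are instead retried locally after a
 * short delay, since the lease time is needed before lease renewal can
 * be scheduled.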
7056 */ 7057 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 7058 { 7059 struct nfs4_get_lease_time_data *data = 7060 (struct nfs4_get_lease_time_data *)calldata; 7061 7062 dprintk("--> %s\n", __func__); 7063 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 7064 return; 7065 switch (task->tk_status) { 7066 case -NFS4ERR_DELAY: 7067 case -NFS4ERR_GRACE: 7068 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 7069 rpc_delay(task, NFS4_POLL_RETRY_MIN); 7070 task->tk_status = 0; 7071 /* fall through */ 7072 case -NFS4ERR_RETRY_UNCACHED_REP: 7073 rpc_restart_call_prepare(task); 7074 return; 7075 } 7076 dprintk("<-- %s\n", __func__); 7077 } 7078 7079 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 7080 .rpc_call_prepare = nfs4_get_lease_time_prepare, 7081 .rpc_call_done = nfs4_get_lease_time_done, 7082 }; 7083 7084 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 7085 { 7086 struct rpc_task *task; 7087 struct nfs4_get_lease_time_args args; 7088 struct nfs4_get_lease_time_res res = { 7089 .lr_fsinfo = fsinfo, 7090 }; 7091 struct nfs4_get_lease_time_data data = { 7092 .args = &args, 7093 .res = &res, 7094 .clp = clp, 7095 }; 7096 struct rpc_message msg = { 7097 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 7098 .rpc_argp = &args, 7099 .rpc_resp = &res, 7100 }; 7101 struct rpc_task_setup task_setup = { 7102 .rpc_client = clp->cl_rpcclient, 7103 .rpc_message = &msg, 7104 .callback_ops = &nfs4_get_lease_time_ops, 7105 .callback_data = &data, 7106 .flags = RPC_TASK_TIMEOUT, 7107 }; 7108 int status; 7109 7110 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 7111 nfs4_set_sequence_privileged(&args.la_seq_args); 7112 dprintk("--> %s\n", __func__); 7113 task = rpc_run_task(&task_setup); 7114 7115 if (IS_ERR(task)) 7116 status = PTR_ERR(task); 7117 else { 7118 status = task->tk_status; 7119 rpc_put_task(task); 7120 } 7121 dprintk("<-- %s return %d\n", __func__, status); 7122 7123 return status; 7124 } 7125 7126 /* 7127 * Initialize the values to be used by the client in CREATE_SESSION 7128 * If nfs4_init_session set the fore channel request and response sizes, 7129 * use them. 7130 * 7131 * Set the back channel max_resp_sz_cached to zero to force the client to 7132 * always set csa_cachethis to FALSE because the current implementation 7133 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
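 *
 * As a rough worked example: with the usual 1MB NFS_MAX_FILE_IO_SIZE,
 * the fore channel max_rqst_sz and max_resp_sz below come out to 1MB
 * plus the fixed compound overhead (nfs41_maxwrite_overhead and
 * nfs41_maxread_overhead respectively), while the back channel is
 * limited to a single page in each direction.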
7134 */ 7135 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) 7136 { 7137 unsigned int max_rqst_sz, max_resp_sz; 7138 7139 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 7140 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 7141 7142 /* Fore channel attributes */ 7143 args->fc_attrs.max_rqst_sz = max_rqst_sz; 7144 args->fc_attrs.max_resp_sz = max_resp_sz; 7145 args->fc_attrs.max_ops = NFS4_MAX_OPS; 7146 args->fc_attrs.max_reqs = max_session_slots; 7147 7148 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 7149 "max_ops=%u max_reqs=%u\n", 7150 __func__, 7151 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 7152 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 7153 7154 /* Back channel attributes */ 7155 args->bc_attrs.max_rqst_sz = PAGE_SIZE; 7156 args->bc_attrs.max_resp_sz = PAGE_SIZE; 7157 args->bc_attrs.max_resp_sz_cached = 0; 7158 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 7159 args->bc_attrs.max_reqs = 1; 7160 7161 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 7162 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 7163 __func__, 7164 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 7165 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 7166 args->bc_attrs.max_reqs); 7167 } 7168 7169 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 7170 struct nfs41_create_session_res *res) 7171 { 7172 struct nfs4_channel_attrs *sent = &args->fc_attrs; 7173 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 7174 7175 if (rcvd->max_resp_sz > sent->max_resp_sz) 7176 return -EINVAL; 7177 /* 7178 * Our requested max_ops is the minimum we need; we're not 7179 * prepared to break up compounds into smaller pieces than that. 7180 * So, no point even trying to continue if the server won't 7181 * cooperate: 7182 */ 7183 if (rcvd->max_ops < sent->max_ops) 7184 return -EINVAL; 7185 if (rcvd->max_reqs == 0) 7186 return -EINVAL; 7187 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 7188 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 7189 return 0; 7190 } 7191 7192 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 7193 struct nfs41_create_session_res *res) 7194 { 7195 struct nfs4_channel_attrs *sent = &args->bc_attrs; 7196 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 7197 7198 if (!(res->flags & SESSION4_BACK_CHAN)) 7199 goto out; 7200 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 7201 return -EINVAL; 7202 if (rcvd->max_resp_sz < sent->max_resp_sz) 7203 return -EINVAL; 7204 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 7205 return -EINVAL; 7206 /* These would render the backchannel useless: */ 7207 if (rcvd->max_ops != sent->max_ops) 7208 return -EINVAL; 7209 if (rcvd->max_reqs != sent->max_reqs) 7210 return -EINVAL; 7211 out: 7212 return 0; 7213 } 7214 7215 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 7216 struct nfs41_create_session_res *res) 7217 { 7218 int ret; 7219 7220 ret = nfs4_verify_fore_channel_attrs(args, res); 7221 if (ret) 7222 return ret; 7223 return nfs4_verify_back_channel_attrs(args, res); 7224 } 7225 7226 static void nfs4_update_session(struct nfs4_session *session, 7227 struct nfs41_create_session_res *res) 7228 { 7229 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 7230 session->flags = res->flags; 7231 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 7232 if (res->flags & SESSION4_BACK_CHAN) 7233 memcpy(&session->bc_attrs, &res->bc_attrs, 7234 
sizeof(session->bc_attrs)); 7235 } 7236 7237 static int _nfs4_proc_create_session(struct nfs_client *clp, 7238 struct rpc_cred *cred) 7239 { 7240 struct nfs4_session *session = clp->cl_session; 7241 struct nfs41_create_session_args args = { 7242 .client = clp, 7243 .clientid = clp->cl_clientid, 7244 .seqid = clp->cl_seqid, 7245 .cb_program = NFS4_CALLBACK, 7246 }; 7247 struct nfs41_create_session_res res; 7248 7249 struct rpc_message msg = { 7250 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 7251 .rpc_argp = &args, 7252 .rpc_resp = &res, 7253 .rpc_cred = cred, 7254 }; 7255 int status; 7256 7257 nfs4_init_channel_attrs(&args); 7258 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 7259 7260 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7261 trace_nfs4_create_session(clp, status); 7262 7263 if (!status) { 7264 /* Verify the session's negotiated channel_attrs values */ 7265 status = nfs4_verify_channel_attrs(&args, &res); 7266 /* Increment the clientid slot sequence id */ 7267 if (clp->cl_seqid == res.seqid) 7268 clp->cl_seqid++; 7269 if (status) 7270 goto out; 7271 nfs4_update_session(session, &res); 7272 } 7273 out: 7274 return status; 7275 } 7276 7277 /* 7278 * Issues a CREATE_SESSION operation to the server. 7279 * It is the responsibility of the caller to verify the session is 7280 * expired before calling this routine. 7281 */ 7282 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred) 7283 { 7284 int status; 7285 unsigned *ptr; 7286 struct nfs4_session *session = clp->cl_session; 7287 7288 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 7289 7290 status = _nfs4_proc_create_session(clp, cred); 7291 if (status) 7292 goto out; 7293 7294 /* Init or reset the session slot tables */ 7295 status = nfs4_setup_session_slot_tables(session); 7296 dprintk("slot table setup returned %d\n", status); 7297 if (status) 7298 goto out; 7299 7300 ptr = (unsigned *)&session->sess_id.data[0]; 7301 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 7302 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 7303 out: 7304 dprintk("<-- %s\n", __func__); 7305 return status; 7306 } 7307 7308 /* 7309 * Issue the over-the-wire RPC DESTROY_SESSION. 7310 * The caller must serialize access to this routine. 7311 */ 7312 int nfs4_proc_destroy_session(struct nfs4_session *session, 7313 struct rpc_cred *cred) 7314 { 7315 struct rpc_message msg = { 7316 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 7317 .rpc_argp = session, 7318 .rpc_cred = cred, 7319 }; 7320 int status = 0; 7321 7322 dprintk("--> nfs4_proc_destroy_session\n"); 7323 7324 /* session is still being setup */ 7325 if (session->clp->cl_cons_state != NFS_CS_READY) 7326 return status; 7327 7328 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7329 trace_nfs4_destroy_session(session->clp, status); 7330 7331 if (status) 7332 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 7333 "Session has been destroyed regardless...\n", status); 7334 7335 dprintk("<-- nfs4_proc_destroy_session\n"); 7336 return status; 7337 } 7338 7339 /* 7340 * Renew the cl_session lease. 
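 *
 * NFSv4.1 has no RENEW operation; a compound containing nothing but a
 * SEQUENCE operation, sent by the renewal machinery below, is what
 * keeps the lease alive.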
7341 */ 7342 struct nfs4_sequence_data { 7343 struct nfs_client *clp; 7344 struct nfs4_sequence_args args; 7345 struct nfs4_sequence_res res; 7346 }; 7347 7348 static void nfs41_sequence_release(void *data) 7349 { 7350 struct nfs4_sequence_data *calldata = data; 7351 struct nfs_client *clp = calldata->clp; 7352 7353 if (atomic_read(&clp->cl_count) > 1) 7354 nfs4_schedule_state_renewal(clp); 7355 nfs_put_client(clp); 7356 kfree(calldata); 7357 } 7358 7359 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7360 { 7361 switch(task->tk_status) { 7362 case -NFS4ERR_DELAY: 7363 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7364 return -EAGAIN; 7365 default: 7366 nfs4_schedule_lease_recovery(clp); 7367 } 7368 return 0; 7369 } 7370 7371 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 7372 { 7373 struct nfs4_sequence_data *calldata = data; 7374 struct nfs_client *clp = calldata->clp; 7375 7376 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 7377 return; 7378 7379 trace_nfs4_sequence(clp, task->tk_status); 7380 if (task->tk_status < 0) { 7381 dprintk("%s ERROR %d\n", __func__, task->tk_status); 7382 if (atomic_read(&clp->cl_count) == 1) 7383 goto out; 7384 7385 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 7386 rpc_restart_call_prepare(task); 7387 return; 7388 } 7389 } 7390 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 7391 out: 7392 dprintk("<-- %s\n", __func__); 7393 } 7394 7395 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 7396 { 7397 struct nfs4_sequence_data *calldata = data; 7398 struct nfs_client *clp = calldata->clp; 7399 struct nfs4_sequence_args *args; 7400 struct nfs4_sequence_res *res; 7401 7402 args = task->tk_msg.rpc_argp; 7403 res = task->tk_msg.rpc_resp; 7404 7405 nfs41_setup_sequence(clp->cl_session, args, res, task); 7406 } 7407 7408 static const struct rpc_call_ops nfs41_sequence_ops = { 7409 .rpc_call_done = nfs41_sequence_call_done, 7410 .rpc_call_prepare = nfs41_sequence_prepare, 7411 .rpc_release = nfs41_sequence_release, 7412 }; 7413 7414 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 7415 struct rpc_cred *cred, 7416 bool is_privileged) 7417 { 7418 struct nfs4_sequence_data *calldata; 7419 struct rpc_message msg = { 7420 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 7421 .rpc_cred = cred, 7422 }; 7423 struct rpc_task_setup task_setup_data = { 7424 .rpc_client = clp->cl_rpcclient, 7425 .rpc_message = &msg, 7426 .callback_ops = &nfs41_sequence_ops, 7427 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 7428 }; 7429 7430 if (!atomic_inc_not_zero(&clp->cl_count)) 7431 return ERR_PTR(-EIO); 7432 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7433 if (calldata == NULL) { 7434 nfs_put_client(clp); 7435 return ERR_PTR(-ENOMEM); 7436 } 7437 nfs4_init_sequence(&calldata->args, &calldata->res, 0); 7438 if (is_privileged) 7439 nfs4_set_sequence_privileged(&calldata->args); 7440 msg.rpc_argp = &calldata->args; 7441 msg.rpc_resp = &calldata->res; 7442 calldata->clp = clp; 7443 task_setup_data.callback_data = calldata; 7444 7445 return rpc_run_task(&task_setup_data); 7446 } 7447 7448 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 7449 { 7450 struct rpc_task *task; 7451 int ret = 0; 7452 7453 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 7454 return -EAGAIN; 7455 task = _nfs41_proc_sequence(clp, cred, false); 7456 if (IS_ERR(task)) 7457 ret = PTR_ERR(task); 7458 else 7459 rpc_put_task_async(task); 
7460 dprintk("<-- %s status=%d\n", __func__, ret); 7461 return ret; 7462 } 7463 7464 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 7465 { 7466 struct rpc_task *task; 7467 int ret; 7468 7469 task = _nfs41_proc_sequence(clp, cred, true); 7470 if (IS_ERR(task)) { 7471 ret = PTR_ERR(task); 7472 goto out; 7473 } 7474 ret = rpc_wait_for_completion_task(task); 7475 if (!ret) { 7476 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; 7477 7478 if (task->tk_status == 0) 7479 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 7480 ret = task->tk_status; 7481 } 7482 rpc_put_task(task); 7483 out: 7484 dprintk("<-- %s status=%d\n", __func__, ret); 7485 return ret; 7486 } 7487 7488 struct nfs4_reclaim_complete_data { 7489 struct nfs_client *clp; 7490 struct nfs41_reclaim_complete_args arg; 7491 struct nfs41_reclaim_complete_res res; 7492 }; 7493 7494 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 7495 { 7496 struct nfs4_reclaim_complete_data *calldata = data; 7497 7498 nfs41_setup_sequence(calldata->clp->cl_session, 7499 &calldata->arg.seq_args, 7500 &calldata->res.seq_res, 7501 task); 7502 } 7503 7504 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7505 { 7506 switch(task->tk_status) { 7507 case 0: 7508 case -NFS4ERR_COMPLETE_ALREADY: 7509 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 7510 break; 7511 case -NFS4ERR_DELAY: 7512 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7513 /* fall through */ 7514 case -NFS4ERR_RETRY_UNCACHED_REP: 7515 return -EAGAIN; 7516 default: 7517 nfs4_schedule_lease_recovery(clp); 7518 } 7519 return 0; 7520 } 7521 7522 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 7523 { 7524 struct nfs4_reclaim_complete_data *calldata = data; 7525 struct nfs_client *clp = calldata->clp; 7526 struct nfs4_sequence_res *res = &calldata->res.seq_res; 7527 7528 dprintk("--> %s\n", __func__); 7529 if (!nfs41_sequence_done(task, res)) 7530 return; 7531 7532 trace_nfs4_reclaim_complete(clp, task->tk_status); 7533 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 7534 rpc_restart_call_prepare(task); 7535 return; 7536 } 7537 dprintk("<-- %s\n", __func__); 7538 } 7539 7540 static void nfs4_free_reclaim_complete_data(void *data) 7541 { 7542 struct nfs4_reclaim_complete_data *calldata = data; 7543 7544 kfree(calldata); 7545 } 7546 7547 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 7548 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 7549 .rpc_call_done = nfs4_reclaim_complete_done, 7550 .rpc_release = nfs4_free_reclaim_complete_data, 7551 }; 7552 7553 /* 7554 * Issue a global reclaim complete. 
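 *
 * "Global" here means rca_one_fs is zero (arg.one_fs below): the
 * client has finished reclaiming state for every filesystem on this
 * server, which allows the server to stop waiting on this client
 * before ending its grace period.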
7555 */ 7556 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 7557 struct rpc_cred *cred) 7558 { 7559 struct nfs4_reclaim_complete_data *calldata; 7560 struct rpc_task *task; 7561 struct rpc_message msg = { 7562 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 7563 .rpc_cred = cred, 7564 }; 7565 struct rpc_task_setup task_setup_data = { 7566 .rpc_client = clp->cl_rpcclient, 7567 .rpc_message = &msg, 7568 .callback_ops = &nfs4_reclaim_complete_call_ops, 7569 .flags = RPC_TASK_ASYNC, 7570 }; 7571 int status = -ENOMEM; 7572 7573 dprintk("--> %s\n", __func__); 7574 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7575 if (calldata == NULL) 7576 goto out; 7577 calldata->clp = clp; 7578 calldata->arg.one_fs = 0; 7579 7580 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0); 7581 nfs4_set_sequence_privileged(&calldata->arg.seq_args); 7582 msg.rpc_argp = &calldata->arg; 7583 msg.rpc_resp = &calldata->res; 7584 task_setup_data.callback_data = calldata; 7585 task = rpc_run_task(&task_setup_data); 7586 if (IS_ERR(task)) { 7587 status = PTR_ERR(task); 7588 goto out; 7589 } 7590 status = nfs4_wait_for_completion_rpc_task(task); 7591 if (status == 0) 7592 status = task->tk_status; 7593 rpc_put_task(task); 7594 return 0; 7595 out: 7596 dprintk("<-- %s status=%d\n", __func__, status); 7597 return status; 7598 } 7599 7600 static void 7601 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 7602 { 7603 struct nfs4_layoutget *lgp = calldata; 7604 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 7605 struct nfs4_session *session = nfs4_get_session(server); 7606 7607 dprintk("--> %s\n", __func__); 7608 /* Note there is a race here, where a CB_LAYOUTRECALL can come in 7609 * right now covering the LAYOUTGET we are about to send. 7610 * However, that is not so catastrophic, and there seems 7611 * to be no way to prevent it completely. 7612 */ 7613 if (nfs41_setup_sequence(session, &lgp->args.seq_args, 7614 &lgp->res.seq_res, task)) 7615 return; 7616 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid, 7617 NFS_I(lgp->args.inode)->layout, 7618 &lgp->args.range, 7619 lgp->args.ctx->state)) { 7620 rpc_exit(task, NFS4_OK); 7621 } 7622 } 7623 7624 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 7625 { 7626 struct nfs4_layoutget *lgp = calldata; 7627 struct inode *inode = lgp->args.inode; 7628 struct nfs_server *server = NFS_SERVER(inode); 7629 struct pnfs_layout_hdr *lo; 7630 struct nfs4_state *state = NULL; 7631 unsigned long timeo, now, giveup; 7632 7633 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 7634 7635 if (!nfs41_sequence_done(task, &lgp->res.seq_res)) 7636 goto out; 7637 7638 switch (task->tk_status) { 7639 case 0: 7640 goto out; 7641 /* 7642 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 7643 * (or clients) writing to the same RAID stripe 7644 */ 7645 case -NFS4ERR_LAYOUTTRYLATER: 7646 /* 7647 * NFS4ERR_RECALLCONFLICT is a conflict with ourselves (the 7648 * existing layout must be recalled before a new one can be granted). 7649 */ 7650 case -NFS4ERR_RECALLCONFLICT: 7651 timeo = rpc_get_timeout(task->tk_client); 7652 giveup = lgp->args.timestamp + timeo; 7653 now = jiffies; 7654 if (time_after(giveup, now)) { 7655 unsigned long delay; 7656 7657 /* Delay for: 7658 * - Not less than NFS4_POLL_RETRY_MIN.
7659 * - At most one jiffy short of the give-up time (one last retry) 7660 * - Exponential backoff (time_now minus start_attempt) 7661 */ 7662 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN, 7663 min((giveup - now - 1), 7664 now - lgp->args.timestamp)); 7665 7666 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n", 7667 __func__, delay); 7668 rpc_delay(task, delay); 7669 task->tk_status = 0; 7670 rpc_restart_call_prepare(task); 7671 goto out; /* Do not call nfs4_async_handle_error() */ 7672 } 7673 break; 7674 case -NFS4ERR_EXPIRED: 7675 case -NFS4ERR_BAD_STATEID: 7676 spin_lock(&inode->i_lock); 7677 lo = NFS_I(inode)->layout; 7678 if (!lo || list_empty(&lo->plh_segs)) { 7679 spin_unlock(&inode->i_lock); 7680 /* If the open stateid was bad, then recover it. */ 7681 state = lgp->args.ctx->state; 7682 } else { 7683 LIST_HEAD(head); 7684 7685 /* 7686 * Mark the bad layout state as invalid, then retry 7687 * with the current stateid. 7688 */ 7689 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL); 7690 spin_unlock(&inode->i_lock); 7691 pnfs_free_lseg_list(&head); 7692 7693 task->tk_status = 0; 7694 rpc_restart_call_prepare(task); 7695 } 7696 } 7697 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) 7698 rpc_restart_call_prepare(task); 7699 out: 7700 dprintk("<-- %s\n", __func__); 7701 } 7702 7703 static size_t max_response_pages(struct nfs_server *server) 7704 { 7705 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 7706 return nfs_page_array_len(0, max_resp_sz); 7707 } 7708 7709 static void nfs4_free_pages(struct page **pages, size_t size) 7710 { 7711 int i; 7712 7713 if (!pages) 7714 return; 7715 7716 for (i = 0; i < size; i++) { 7717 if (!pages[i]) 7718 break; 7719 __free_page(pages[i]); 7720 } 7721 kfree(pages); 7722 } 7723 7724 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) 7725 { 7726 struct page **pages; 7727 int i; 7728 7729 pages = kcalloc(size, sizeof(struct page *), gfp_flags); 7730 if (!pages) { 7731 dprintk("%s: can't alloc array of %zu pages\n", __func__, size); 7732 return NULL; 7733 } 7734 7735 for (i = 0; i < size; i++) { 7736 pages[i] = alloc_page(gfp_flags); 7737 if (!pages[i]) { 7738 dprintk("%s: failed to allocate page\n", __func__); 7739 nfs4_free_pages(pages, size); 7740 return NULL; 7741 } 7742 } 7743 7744 return pages; 7745 } 7746 7747 static void nfs4_layoutget_release(void *calldata) 7748 { 7749 struct nfs4_layoutget *lgp = calldata; 7750 struct inode *inode = lgp->args.inode; 7751 struct nfs_server *server = NFS_SERVER(inode); 7752 size_t max_pages = max_response_pages(server); 7753 7754 dprintk("--> %s\n", __func__); 7755 nfs4_free_pages(lgp->args.layout.pages, max_pages); 7756 pnfs_put_layout_hdr(NFS_I(inode)->layout); 7757 put_nfs_open_context(lgp->args.ctx); 7758 kfree(calldata); 7759 dprintk("<-- %s\n", __func__); 7760 } 7761 7762 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 7763 .rpc_call_prepare = nfs4_layoutget_prepare, 7764 .rpc_call_done = nfs4_layoutget_done, 7765 .rpc_release = nfs4_layoutget_release, 7766 }; 7767 7768 struct pnfs_layout_segment * 7769 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) 7770 { 7771 struct inode *inode = lgp->args.inode; 7772 struct nfs_server *server = NFS_SERVER(inode); 7773 size_t max_pages = max_response_pages(server); 7774 struct rpc_task *task; 7775 struct rpc_message msg = { 7776 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 7777 .rpc_argp = &lgp->args, 7778 .rpc_resp = &lgp->res, 7779 .rpc_cred = lgp->cred, 7780 }; 7781 struct
rpc_task_setup task_setup_data = { 7782 .rpc_client = server->client, 7783 .rpc_message = &msg, 7784 .callback_ops = &nfs4_layoutget_call_ops, 7785 .callback_data = lgp, 7786 .flags = RPC_TASK_ASYNC, 7787 }; 7788 struct pnfs_layout_segment *lseg = NULL; 7789 int status = 0; 7790 7791 dprintk("--> %s\n", __func__); 7792 7793 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ 7794 pnfs_get_layout_hdr(NFS_I(inode)->layout); 7795 7796 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); 7797 if (!lgp->args.layout.pages) { 7798 nfs4_layoutget_release(lgp); 7799 return ERR_PTR(-ENOMEM); 7800 } 7801 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 7802 lgp->args.timestamp = jiffies; 7803 7804 lgp->res.layoutp = &lgp->args.layout; 7805 lgp->res.seq_res.sr_slot = NULL; 7806 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 7807 7808 task = rpc_run_task(&task_setup_data); 7809 if (IS_ERR(task)) 7810 return ERR_CAST(task); 7811 status = nfs4_wait_for_completion_rpc_task(task); 7812 if (status == 0) 7813 status = task->tk_status; 7814 trace_nfs4_layoutget(lgp->args.ctx, 7815 &lgp->args.range, 7816 &lgp->res.range, 7817 status); 7818 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ 7819 if (status == 0 && lgp->res.layoutp->len) 7820 lseg = pnfs_layout_process(lgp); 7821 rpc_put_task(task); 7822 dprintk("<-- %s status=%d\n", __func__, status); 7823 if (status) 7824 return ERR_PTR(status); 7825 return lseg; 7826 } 7827 7828 static void 7829 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 7830 { 7831 struct nfs4_layoutreturn *lrp = calldata; 7832 7833 dprintk("--> %s\n", __func__); 7834 nfs41_setup_sequence(lrp->clp->cl_session, 7835 &lrp->args.seq_args, 7836 &lrp->res.seq_res, 7837 task); 7838 } 7839 7840 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 7841 { 7842 struct nfs4_layoutreturn *lrp = calldata; 7843 struct nfs_server *server; 7844 7845 dprintk("--> %s\n", __func__); 7846 7847 if (!nfs41_sequence_done(task, &lrp->res.seq_res)) 7848 return; 7849 7850 server = NFS_SERVER(lrp->args.inode); 7851 switch (task->tk_status) { 7852 default: 7853 task->tk_status = 0; 7854 case 0: 7855 break; 7856 case -NFS4ERR_DELAY: 7857 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) 7858 break; 7859 rpc_restart_call_prepare(task); 7860 return; 7861 } 7862 dprintk("<-- %s\n", __func__); 7863 } 7864 7865 static void nfs4_layoutreturn_release(void *calldata) 7866 { 7867 struct nfs4_layoutreturn *lrp = calldata; 7868 struct pnfs_layout_hdr *lo = lrp->args.layout; 7869 7870 dprintk("--> %s\n", __func__); 7871 spin_lock(&lo->plh_inode->i_lock); 7872 if (lrp->res.lrs_present) 7873 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 7874 pnfs_clear_layoutreturn_waitbit(lo); 7875 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags); 7876 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq); 7877 lo->plh_block_lgets--; 7878 spin_unlock(&lo->plh_inode->i_lock); 7879 pnfs_put_layout_hdr(lrp->args.layout); 7880 nfs_iput_and_deactive(lrp->inode); 7881 kfree(calldata); 7882 dprintk("<-- %s\n", __func__); 7883 } 7884 7885 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 7886 .rpc_call_prepare = nfs4_layoutreturn_prepare, 7887 .rpc_call_done = nfs4_layoutreturn_done, 7888 .rpc_release = nfs4_layoutreturn_release, 7889 }; 7890 7891 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync) 7892 { 7893 struct rpc_task *task; 7894 struct rpc_message msg = { 7895 .rpc_proc = 
&nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 7896 .rpc_argp = &lrp->args, 7897 .rpc_resp = &lrp->res, 7898 .rpc_cred = lrp->cred, 7899 }; 7900 struct rpc_task_setup task_setup_data = { 7901 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 7902 .rpc_message = &msg, 7903 .callback_ops = &nfs4_layoutreturn_call_ops, 7904 .callback_data = lrp, 7905 }; 7906 int status = 0; 7907 7908 dprintk("--> %s\n", __func__); 7909 if (!sync) { 7910 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 7911 if (!lrp->inode) { 7912 nfs4_layoutreturn_release(lrp); 7913 return -EAGAIN; 7914 } 7915 task_setup_data.flags |= RPC_TASK_ASYNC; 7916 } 7917 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); 7918 task = rpc_run_task(&task_setup_data); 7919 if (IS_ERR(task)) 7920 return PTR_ERR(task); 7921 if (sync) 7922 status = task->tk_status; 7923 trace_nfs4_layoutreturn(lrp->args.inode, status); 7924 dprintk("<-- %s status=%d\n", __func__, status); 7925 rpc_put_task(task); 7926 return status; 7927 } 7928 7929 static int 7930 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 7931 struct pnfs_device *pdev, 7932 struct rpc_cred *cred) 7933 { 7934 struct nfs4_getdeviceinfo_args args = { 7935 .pdev = pdev, 7936 }; 7937 struct nfs4_getdeviceinfo_res res = { 7938 .pdev = pdev, 7939 }; 7940 struct rpc_message msg = { 7941 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 7942 .rpc_argp = &args, 7943 .rpc_resp = &res, 7944 .rpc_cred = cred, 7945 }; 7946 int status; 7947 7948 dprintk("--> %s\n", __func__); 7949 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 7950 dprintk("<-- %s status=%d\n", __func__, status); 7951 7952 return status; 7953 } 7954 7955 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 7956 struct pnfs_device *pdev, 7957 struct rpc_cred *cred) 7958 { 7959 struct nfs4_exception exception = { }; 7960 int err; 7961 7962 do { 7963 err = nfs4_handle_exception(server, 7964 _nfs4_proc_getdeviceinfo(server, pdev, cred), 7965 &exception); 7966 } while (exception.retry); 7967 return err; 7968 } 7969 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 7970 7971 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 7972 { 7973 struct nfs4_layoutcommit_data *data = calldata; 7974 struct nfs_server *server = NFS_SERVER(data->args.inode); 7975 struct nfs4_session *session = nfs4_get_session(server); 7976 7977 nfs41_setup_sequence(session, 7978 &data->args.seq_args, 7979 &data->res.seq_res, 7980 task); 7981 } 7982 7983 static void 7984 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 7985 { 7986 struct nfs4_layoutcommit_data *data = calldata; 7987 struct nfs_server *server = NFS_SERVER(data->args.inode); 7988 7989 if (!nfs41_sequence_done(task, &data->res.seq_res)) 7990 return; 7991 7992 switch (task->tk_status) { /* Just ignore these failures */ 7993 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 7994 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 7995 case -NFS4ERR_BADLAYOUT: /* no layout */ 7996 case -NFS4ERR_GRACE: /* loca_reclaim always false */ 7997 task->tk_status = 0; 7998 case 0: 7999 break; 8000 default: 8001 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 8002 rpc_restart_call_prepare(task); 8003 return; 8004 } 8005 } 8006 } 8007 8008 static void nfs4_layoutcommit_release(void *calldata) 8009 { 8010 struct nfs4_layoutcommit_data *data = calldata; 8011 8012 pnfs_cleanup_layoutcommit(data); 8013 nfs_post_op_update_inode_force_wcc(data->args.inode, 8014 data->res.fattr); 8015
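	/*
	 * Release the references held for this call: the RPC credential and,
	 * when the caller went through the async path, the active inode
	 * reference taken with nfs_igrab_and_active() in
	 * nfs4_proc_layoutcommit().
	 */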
put_rpccred(data->cred); 8016 nfs_iput_and_deactive(data->inode); 8017 kfree(data); 8018 } 8019 8020 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 8021 .rpc_call_prepare = nfs4_layoutcommit_prepare, 8022 .rpc_call_done = nfs4_layoutcommit_done, 8023 .rpc_release = nfs4_layoutcommit_release, 8024 }; 8025 8026 int 8027 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 8028 { 8029 struct rpc_message msg = { 8030 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 8031 .rpc_argp = &data->args, 8032 .rpc_resp = &data->res, 8033 .rpc_cred = data->cred, 8034 }; 8035 struct rpc_task_setup task_setup_data = { 8036 .task = &data->task, 8037 .rpc_client = NFS_CLIENT(data->args.inode), 8038 .rpc_message = &msg, 8039 .callback_ops = &nfs4_layoutcommit_ops, 8040 .callback_data = data, 8041 }; 8042 struct rpc_task *task; 8043 int status = 0; 8044 8045 dprintk("NFS: %4d initiating layoutcommit call. sync %d " 8046 "lbw: %llu inode %lu\n", 8047 data->task.tk_pid, sync, 8048 data->args.lastbytewritten, 8049 data->args.inode->i_ino); 8050 8051 if (!sync) { 8052 data->inode = nfs_igrab_and_active(data->args.inode); 8053 if (data->inode == NULL) { 8054 nfs4_layoutcommit_release(data); 8055 return -EAGAIN; 8056 } 8057 task_setup_data.flags = RPC_TASK_ASYNC; 8058 } 8059 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 8060 task = rpc_run_task(&task_setup_data); 8061 if (IS_ERR(task)) 8062 return PTR_ERR(task); 8063 if (sync) 8064 status = task->tk_status; 8065 trace_nfs4_layoutcommit(data->args.inode, status); 8066 dprintk("%s: status %d\n", __func__, status); 8067 rpc_put_task(task); 8068 return status; 8069 } 8070 8071 /** 8072 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if 8073 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 8074 */ 8075 static int 8076 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 8077 struct nfs_fsinfo *info, 8078 struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8079 { 8080 struct nfs41_secinfo_no_name_args args = { 8081 .style = SECINFO_STYLE_CURRENT_FH, 8082 }; 8083 struct nfs4_secinfo_res res = { 8084 .flavors = flavors, 8085 }; 8086 struct rpc_message msg = { 8087 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 8088 .rpc_argp = &args, 8089 .rpc_resp = &res, 8090 }; 8091 struct rpc_clnt *clnt = server->client; 8092 struct rpc_cred *cred = NULL; 8093 int status; 8094 8095 if (use_integrity) { 8096 clnt = server->nfs_client->cl_rpcclient; 8097 cred = nfs4_get_clid_cred(server->nfs_client); 8098 msg.rpc_cred = cred; 8099 } 8100 8101 dprintk("--> %s\n", __func__); 8102 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 8103 &res.seq_res, 0); 8104 dprintk("<-- %s status=%d\n", __func__, status); 8105 8106 if (cred) 8107 put_rpccred(cred); 8108 8109 return status; 8110 } 8111 8112 static int 8113 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 8114 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 8115 { 8116 struct nfs4_exception exception = { }; 8117 int err; 8118 do { 8119 /* first try using integrity protection */ 8120 err = -NFS4ERR_WRONGSEC; 8121 8122 /* try to use integrity protection with machine cred */ 8123 if (_nfs4_is_integrity_protected(server->nfs_client)) 8124 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8125 flavors, true); 8126 8127 /* 8128 * if unable to use integrity protection, or SECINFO with 8129 * integrity protection returns NFS4ERR_WRONGSEC
(which is 8130 * disallowed by spec, but exists in deployed servers) use 8131 * the current filesystem's rpc_client and the user cred. 8132 */ 8133 if (err == -NFS4ERR_WRONGSEC) 8134 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8135 flavors, false); 8136 8137 switch (err) { 8138 case 0: 8139 case -NFS4ERR_WRONGSEC: 8140 case -ENOTSUPP: 8141 goto out; 8142 default: 8143 err = nfs4_handle_exception(server, err, &exception); 8144 } 8145 } while (exception.retry); 8146 out: 8147 return err; 8148 } 8149 8150 static int 8151 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 8152 struct nfs_fsinfo *info) 8153 { 8154 int err; 8155 struct page *page; 8156 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 8157 struct nfs4_secinfo_flavors *flavors; 8158 struct nfs4_secinfo4 *secinfo; 8159 int i; 8160 8161 page = alloc_page(GFP_KERNEL); 8162 if (!page) { 8163 err = -ENOMEM; 8164 goto out; 8165 } 8166 8167 flavors = page_address(page); 8168 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 8169 8170 /* 8171 * Fall back on "guess and check" method if 8172 * the server doesn't support SECINFO_NO_NAME 8173 */ 8174 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 8175 err = nfs4_find_root_sec(server, fhandle, info); 8176 goto out_freepage; 8177 } 8178 if (err) 8179 goto out_freepage; 8180 8181 for (i = 0; i < flavors->num_flavors; i++) { 8182 secinfo = &flavors->flavors[i]; 8183 8184 switch (secinfo->flavor) { 8185 case RPC_AUTH_NULL: 8186 case RPC_AUTH_UNIX: 8187 case RPC_AUTH_GSS: 8188 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 8189 &secinfo->flavor_info); 8190 break; 8191 default: 8192 flavor = RPC_AUTH_MAXFLAVOR; 8193 break; 8194 } 8195 8196 if (!nfs_auth_info_match(&server->auth_info, flavor)) 8197 flavor = RPC_AUTH_MAXFLAVOR; 8198 8199 if (flavor != RPC_AUTH_MAXFLAVOR) { 8200 err = nfs4_lookup_root_sec(server, fhandle, 8201 info, flavor); 8202 if (!err) 8203 break; 8204 } 8205 } 8206 8207 if (flavor == RPC_AUTH_MAXFLAVOR) 8208 err = -EPERM; 8209 8210 out_freepage: 8211 put_page(page); 8212 if (err == -EACCES) 8213 return -EPERM; 8214 out: 8215 return err; 8216 } 8217 8218 static int _nfs41_test_stateid(struct nfs_server *server, 8219 nfs4_stateid *stateid, 8220 struct rpc_cred *cred) 8221 { 8222 int status; 8223 struct nfs41_test_stateid_args args = { 8224 .stateid = stateid, 8225 }; 8226 struct nfs41_test_stateid_res res; 8227 struct rpc_message msg = { 8228 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 8229 .rpc_argp = &args, 8230 .rpc_resp = &res, 8231 .rpc_cred = cred, 8232 }; 8233 struct rpc_clnt *rpc_client = server->client; 8234 8235 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8236 &rpc_client, &msg); 8237 8238 dprintk("NFS call test_stateid %p\n", stateid); 8239 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 8240 nfs4_set_sequence_privileged(&args.seq_args); 8241 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 8242 &args.seq_args, &res.seq_res); 8243 if (status != NFS_OK) { 8244 dprintk("NFS reply test_stateid: failed, %d\n", status); 8245 return status; 8246 } 8247 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 8248 return -res.status; 8249 } 8250 8251 /** 8252 * nfs41_test_stateid - perform a TEST_STATEID operation 8253 * 8254 * @server: server / transport on which to perform the operation 8255 * @stateid: state ID to test 8256 * @cred: credential 8257 * 8258 * Returns NFS_OK if the server recognizes that "stateid" is valid. 
8259 * Otherwise a negative NFS4ERR value is returned if the operation 8260 * failed or the state ID is not currently valid. 8261 */ 8262 static int nfs41_test_stateid(struct nfs_server *server, 8263 nfs4_stateid *stateid, 8264 struct rpc_cred *cred) 8265 { 8266 struct nfs4_exception exception = { }; 8267 int err; 8268 do { 8269 err = _nfs41_test_stateid(server, stateid, cred); 8270 if (err != -NFS4ERR_DELAY) 8271 break; 8272 nfs4_handle_exception(server, err, &exception); 8273 } while (exception.retry); 8274 return err; 8275 } 8276 8277 struct nfs_free_stateid_data { 8278 struct nfs_server *server; 8279 struct nfs41_free_stateid_args args; 8280 struct nfs41_free_stateid_res res; 8281 }; 8282 8283 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 8284 { 8285 struct nfs_free_stateid_data *data = calldata; 8286 nfs41_setup_sequence(nfs4_get_session(data->server), 8287 &data->args.seq_args, 8288 &data->res.seq_res, 8289 task); 8290 } 8291 8292 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 8293 { 8294 struct nfs_free_stateid_data *data = calldata; 8295 8296 nfs41_sequence_done(task, &data->res.seq_res); 8297 8298 switch (task->tk_status) { 8299 case -NFS4ERR_DELAY: 8300 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 8301 rpc_restart_call_prepare(task); 8302 } 8303 } 8304 8305 static void nfs41_free_stateid_release(void *calldata) 8306 { 8307 kfree(calldata); 8308 } 8309 8310 static const struct rpc_call_ops nfs41_free_stateid_ops = { 8311 .rpc_call_prepare = nfs41_free_stateid_prepare, 8312 .rpc_call_done = nfs41_free_stateid_done, 8313 .rpc_release = nfs41_free_stateid_release, 8314 }; 8315 8316 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server, 8317 nfs4_stateid *stateid, 8318 struct rpc_cred *cred, 8319 bool privileged) 8320 { 8321 struct rpc_message msg = { 8322 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 8323 .rpc_cred = cred, 8324 }; 8325 struct rpc_task_setup task_setup = { 8326 .rpc_client = server->client, 8327 .rpc_message = &msg, 8328 .callback_ops = &nfs41_free_stateid_ops, 8329 .flags = RPC_TASK_ASYNC, 8330 }; 8331 struct nfs_free_stateid_data *data; 8332 8333 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8334 &task_setup.rpc_client, &msg); 8335 8336 dprintk("NFS call free_stateid %p\n", stateid); 8337 data = kmalloc(sizeof(*data), GFP_NOFS); 8338 if (!data) 8339 return ERR_PTR(-ENOMEM); 8340 data->server = server; 8341 nfs4_stateid_copy(&data->args.stateid, stateid); 8342 8343 task_setup.callback_data = data; 8344 8345 msg.rpc_argp = &data->args; 8346 msg.rpc_resp = &data->res; 8347 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 8348 if (privileged) 8349 nfs4_set_sequence_privileged(&data->args.seq_args); 8350 8351 return rpc_run_task(&task_setup); 8352 } 8353 8354 /** 8355 * nfs41_free_stateid - perform a FREE_STATEID operation 8356 * 8357 * @server: server / transport on which to perform the operation 8358 * @stateid: state ID to release 8359 * @cred: credential 8360 * 8361 * Returns NFS_OK if the server freed "stateid". Otherwise a 8362 * negative NFS4ERR value is returned. 
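 *
 * A minimal caller sketch (illustrative only; both functions are defined in
 * this file, but the pairing shown is just one plausible use): dispose of a
 * stateid that TEST_STATEID has reported unusable:
 *
 *	if (nfs41_test_stateid(server, stateid, cred) != NFS_OK)
 *		nfs41_free_stateid(server, stateid, cred);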
8363 */ 8364 static int nfs41_free_stateid(struct nfs_server *server, 8365 nfs4_stateid *stateid, 8366 struct rpc_cred *cred) 8367 { 8368 struct rpc_task *task; 8369 int ret; 8370 8371 task = _nfs41_free_stateid(server, stateid, cred, true); 8372 if (IS_ERR(task)) 8373 return PTR_ERR(task); 8374 ret = rpc_wait_for_completion_task(task); 8375 if (!ret) 8376 ret = task->tk_status; 8377 rpc_put_task(task); 8378 return ret; 8379 } 8380 8381 static void 8382 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 8383 { 8384 struct rpc_task *task; 8385 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 8386 8387 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 8388 nfs4_free_lock_state(server, lsp); 8389 if (IS_ERR(task)) 8390 return; 8391 rpc_put_task(task); 8392 } 8393 8394 static bool nfs41_match_stateid(const nfs4_stateid *s1, 8395 const nfs4_stateid *s2) 8396 { 8397 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 8398 return false; 8399 8400 if (s1->seqid == s2->seqid) 8401 return true; 8402 if (s1->seqid == 0 || s2->seqid == 0) 8403 return true; 8404 8405 return false; 8406 } 8407 8408 #endif /* CONFIG_NFS_V4_1 */ 8409 8410 static bool nfs4_match_stateid(const nfs4_stateid *s1, 8411 const nfs4_stateid *s2) 8412 { 8413 return nfs4_stateid_match(s1, s2); 8414 } 8415 8416 8417 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 8418 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8419 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8420 .recover_open = nfs4_open_reclaim, 8421 .recover_lock = nfs4_lock_reclaim, 8422 .establish_clid = nfs4_init_clientid, 8423 .detect_trunking = nfs40_discover_server_trunking, 8424 }; 8425 8426 #if defined(CONFIG_NFS_V4_1) 8427 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 8428 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8429 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8430 .recover_open = nfs4_open_reclaim, 8431 .recover_lock = nfs4_lock_reclaim, 8432 .establish_clid = nfs41_init_clientid, 8433 .reclaim_complete = nfs41_proc_reclaim_complete, 8434 .detect_trunking = nfs41_discover_server_trunking, 8435 }; 8436 #endif /* CONFIG_NFS_V4_1 */ 8437 8438 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 8439 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8440 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8441 .recover_open = nfs40_open_expired, 8442 .recover_lock = nfs4_lock_expired, 8443 .establish_clid = nfs4_init_clientid, 8444 }; 8445 8446 #if defined(CONFIG_NFS_V4_1) 8447 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 8448 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8449 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8450 .recover_open = nfs41_open_expired, 8451 .recover_lock = nfs41_lock_expired, 8452 .establish_clid = nfs41_init_clientid, 8453 }; 8454 #endif /* CONFIG_NFS_V4_1 */ 8455 8456 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 8457 .sched_state_renewal = nfs4_proc_async_renew, 8458 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, 8459 .renew_lease = nfs4_proc_renew, 8460 }; 8461 8462 #if defined(CONFIG_NFS_V4_1) 8463 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 8464 .sched_state_renewal = nfs41_proc_async_sequence, 8465 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, 8466 .renew_lease = nfs4_proc_sequence, 8467 }; 8468 #endif 8469 8470 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 8471 .get_locations = 
_nfs40_proc_get_locations, 8472 .fsid_present = _nfs40_proc_fsid_present, 8473 }; 8474 8475 #if defined(CONFIG_NFS_V4_1) 8476 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 8477 .get_locations = _nfs41_proc_get_locations, 8478 .fsid_present = _nfs41_proc_fsid_present, 8479 }; 8480 #endif /* CONFIG_NFS_V4_1 */ 8481 8482 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 8483 .minor_version = 0, 8484 .init_caps = NFS_CAP_READDIRPLUS 8485 | NFS_CAP_ATOMIC_OPEN 8486 | NFS_CAP_CHANGE_ATTR 8487 | NFS_CAP_POSIX_LOCK, 8488 .init_client = nfs40_init_client, 8489 .shutdown_client = nfs40_shutdown_client, 8490 .match_stateid = nfs4_match_stateid, 8491 .find_root_sec = nfs4_find_root_sec, 8492 .free_lock_state = nfs4_release_lockowner, 8493 .alloc_seqid = nfs_alloc_seqid, 8494 .call_sync_ops = &nfs40_call_sync_ops, 8495 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 8496 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 8497 .state_renewal_ops = &nfs40_state_renewal_ops, 8498 .mig_recovery_ops = &nfs40_mig_recovery_ops, 8499 }; 8500 8501 #if defined(CONFIG_NFS_V4_1) 8502 static struct nfs_seqid * 8503 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 8504 { 8505 return NULL; 8506 } 8507 8508 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 8509 .minor_version = 1, 8510 .init_caps = NFS_CAP_READDIRPLUS 8511 | NFS_CAP_ATOMIC_OPEN 8512 | NFS_CAP_CHANGE_ATTR 8513 | NFS_CAP_POSIX_LOCK 8514 | NFS_CAP_STATEID_NFSV41 8515 | NFS_CAP_ATOMIC_OPEN_V1, 8516 .init_client = nfs41_init_client, 8517 .shutdown_client = nfs41_shutdown_client, 8518 .match_stateid = nfs41_match_stateid, 8519 .find_root_sec = nfs41_find_root_sec, 8520 .free_lock_state = nfs41_free_lock_state, 8521 .alloc_seqid = nfs_alloc_no_seqid, 8522 .call_sync_ops = &nfs41_call_sync_ops, 8523 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8524 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8525 .state_renewal_ops = &nfs41_state_renewal_ops, 8526 .mig_recovery_ops = &nfs41_mig_recovery_ops, 8527 }; 8528 #endif 8529 8530 #if defined(CONFIG_NFS_V4_2) 8531 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 8532 .minor_version = 2, 8533 .init_caps = NFS_CAP_READDIRPLUS 8534 | NFS_CAP_ATOMIC_OPEN 8535 | NFS_CAP_CHANGE_ATTR 8536 | NFS_CAP_POSIX_LOCK 8537 | NFS_CAP_STATEID_NFSV41 8538 | NFS_CAP_ATOMIC_OPEN_V1 8539 | NFS_CAP_ALLOCATE 8540 | NFS_CAP_DEALLOCATE 8541 | NFS_CAP_SEEK, 8542 .init_client = nfs41_init_client, 8543 .shutdown_client = nfs41_shutdown_client, 8544 .match_stateid = nfs41_match_stateid, 8545 .find_root_sec = nfs41_find_root_sec, 8546 .free_lock_state = nfs41_free_lock_state, 8547 .call_sync_ops = &nfs41_call_sync_ops, 8548 .alloc_seqid = nfs_alloc_no_seqid, 8549 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8550 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8551 .state_renewal_ops = &nfs41_state_renewal_ops, 8552 }; 8553 #endif 8554 8555 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 8556 [0] = &nfs_v4_0_minor_ops, 8557 #if defined(CONFIG_NFS_V4_1) 8558 [1] = &nfs_v4_1_minor_ops, 8559 #endif 8560 #if defined(CONFIG_NFS_V4_2) 8561 [2] = &nfs_v4_2_minor_ops, 8562 #endif 8563 }; 8564 8565 static const struct inode_operations nfs4_dir_inode_operations = { 8566 .create = nfs_create, 8567 .lookup = nfs_lookup, 8568 .atomic_open = nfs_atomic_open, 8569 .link = nfs_link, 8570 .unlink = nfs_unlink, 8571 .symlink = nfs_symlink, 8572 .mkdir = nfs_mkdir, 8573 .rmdir = nfs_rmdir, 8574 .mknod = nfs_mknod, 8575 .rename = nfs_rename, 
8576 .permission = nfs_permission, 8577 .getattr = nfs_getattr, 8578 .setattr = nfs_setattr, 8579 .getxattr = generic_getxattr, 8580 .setxattr = generic_setxattr, 8581 .listxattr = generic_listxattr, 8582 .removexattr = generic_removexattr, 8583 }; 8584 8585 static const struct inode_operations nfs4_file_inode_operations = { 8586 .permission = nfs_permission, 8587 .getattr = nfs_getattr, 8588 .setattr = nfs_setattr, 8589 .getxattr = generic_getxattr, 8590 .setxattr = generic_setxattr, 8591 .listxattr = generic_listxattr, 8592 .removexattr = generic_removexattr, 8593 }; 8594 8595 const struct nfs_rpc_ops nfs_v4_clientops = { 8596 .version = 4, /* protocol version */ 8597 .dentry_ops = &nfs4_dentry_operations, 8598 .dir_inode_ops = &nfs4_dir_inode_operations, 8599 .file_inode_ops = &nfs4_file_inode_operations, 8600 .file_ops = &nfs4_file_operations, 8601 .getroot = nfs4_proc_get_root, 8602 .submount = nfs4_submount, 8603 .try_mount = nfs4_try_mount, 8604 .getattr = nfs4_proc_getattr, 8605 .setattr = nfs4_proc_setattr, 8606 .lookup = nfs4_proc_lookup, 8607 .access = nfs4_proc_access, 8608 .readlink = nfs4_proc_readlink, 8609 .create = nfs4_proc_create, 8610 .remove = nfs4_proc_remove, 8611 .unlink_setup = nfs4_proc_unlink_setup, 8612 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 8613 .unlink_done = nfs4_proc_unlink_done, 8614 .rename_setup = nfs4_proc_rename_setup, 8615 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 8616 .rename_done = nfs4_proc_rename_done, 8617 .link = nfs4_proc_link, 8618 .symlink = nfs4_proc_symlink, 8619 .mkdir = nfs4_proc_mkdir, 8620 .rmdir = nfs4_proc_remove, 8621 .readdir = nfs4_proc_readdir, 8622 .mknod = nfs4_proc_mknod, 8623 .statfs = nfs4_proc_statfs, 8624 .fsinfo = nfs4_proc_fsinfo, 8625 .pathconf = nfs4_proc_pathconf, 8626 .set_capabilities = nfs4_server_capabilities, 8627 .decode_dirent = nfs4_decode_dirent, 8628 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 8629 .read_setup = nfs4_proc_read_setup, 8630 .read_done = nfs4_read_done, 8631 .write_setup = nfs4_proc_write_setup, 8632 .write_done = nfs4_write_done, 8633 .commit_setup = nfs4_proc_commit_setup, 8634 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 8635 .commit_done = nfs4_commit_done, 8636 .lock = nfs4_proc_lock, 8637 .clear_acl_cache = nfs4_zap_acl_attr, 8638 .close_context = nfs4_close_context, 8639 .open_context = nfs4_atomic_open, 8640 .have_delegation = nfs4_have_delegation, 8641 .return_delegation = nfs4_inode_return_delegation, 8642 .alloc_client = nfs4_alloc_client, 8643 .init_client = nfs4_init_client, 8644 .free_client = nfs4_free_client, 8645 .create_server = nfs4_create_server, 8646 .clone_server = nfs_clone_server, 8647 }; 8648 8649 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 8650 .prefix = XATTR_NAME_NFSV4_ACL, 8651 .list = nfs4_xattr_list_nfs4_acl, 8652 .get = nfs4_xattr_get_nfs4_acl, 8653 .set = nfs4_xattr_set_nfs4_acl, 8654 }; 8655 8656 const struct xattr_handler *nfs4_xattr_handlers[] = { 8657 &nfs4_xattr_nfs4_acl_handler, 8658 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 8659 &nfs4_xattr_nfs4_label_handler, 8660 #endif 8661 NULL 8662 }; 8663 8664 /* 8665 * Local variables: 8666 * c-basic-offset: 8 8667 * End: 8668 */ 8669
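/*
 * Illustrative sketch only (the real selection happens in the client setup
 * code, e.g. around nfs4_alloc_client(); "minorversion" below stands for a
 * minor version number that has already been validated against the table):
 *
 *	clp->cl_mvops = nfs_v4_minor_ops[minorversion];
 */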