/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"

#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *, long *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			   struct nfs_fattr *fattr, struct iattr *sattr,
			   struct nfs4_state *state, struct nfs4_label *ilabel,
			   struct nfs4_label *olabel);
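
/* NFSv4.1 only: TEST_STATEID and FREE_STATEID helpers */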
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
		struct rpc_cred *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
		struct rpc_cred *);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *label)
{
	int err;

	if (label == NULL)
		return NULL;

	if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
		return NULL;

	err = security_dentry_init_security(dentry, sattr->ia_mode,
				&dentry->d_name, (void **)&label->label, &label->len);
	if (err == 0)
		return label;

	return NULL;
}
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
	if (label)
		security_release_secctx(label->label, label->len);
}
static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
	if (label)
		return server->attr_bitmask;

	return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
	struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
	case -NFS4ERR_WRONG_CRED:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
	FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID,
};

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here. We let '.'
	 * have cookie 0 and '..' have cookie 1. Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
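	/*
	 * Each fake entry below is hand-encoded in XDR: a "next entry"
	 * marker, a 64-bit cookie, the name length and name, a one-word
	 * attribute bitmap requesting FILEID, and the 8-byte fileid value.
	 */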
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
		*p++ = htonl(8);			/* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);		/* bitmap */
	*p++ = htonl(8);				/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static long nfs4_update_delay(long *timeout)
{
	long ret;
	if (!timeout)
		return NFS4_POLL_RETRY_MAX;
	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	ret = *timeout;
	*timeout <<= 1;
	return ret;
}

static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	int res = 0;

	might_sleep();

	freezable_schedule_timeout_killable_unsafe(
		nfs4_update_delay(timeout));
	if (fatal_signal_pending(current))
		res = -ERESTARTSYS;
	return res;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->retry = 0;
	switch(errorcode) {
		case 0:
			return 0;
		case -NFS4ERR_OPENMODE:
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
			if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
				nfs4_inode_return_delegation(inode);
				exception->retry = 1;
				return 0;
			}
			if (state == NULL)
				break;
			ret = nfs4_schedule_stateid_recovery(server, state);
			if (ret < 0)
				break;
			goto wait_on_recovery;
		case -NFS4ERR_EXPIRED:
			if (state != NULL) {
				ret = nfs4_schedule_stateid_recovery(server, state);
				if (ret < 0)
					break;
			}
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_lease_recovery(clp);
			goto wait_on_recovery;
		case -NFS4ERR_MOVED:
			ret = nfs4_schedule_migration_recovery(server);
			if (ret < 0)
				break;
			goto wait_on_recovery;
		case -NFS4ERR_LEASE_MOVED:
			nfs4_schedule_lease_moved_recovery(clp);
			goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			dprintk("%s ERROR: %d Reset session\n", __func__,
				errorcode);
			nfs4_schedule_session_recovery(clp->cl_session, errorcode);
			goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
		case -NFS4ERR_FILE_OPEN:
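			/* Retry with the usual backoff below, but give up
			 * with -EBUSY once the delay has grown past ~1s.
			 */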
			if (exception->timeout > HZ) {
				/* We have retried a decent amount, time to
				 * fail
				 */
				ret = -EBUSY;
				break;
			}
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			ret = nfs4_delay(server->client, &exception->timeout);
			if (ret != 0)
				break;
		case -NFS4ERR_RETRY_UNCACHED_REP:
		case -NFS4ERR_OLD_STATEID:
			exception->retry = 1;
			break;
		case -NFS4ERR_BADOWNER:
			/* The following works around a Linux server bug! */
		case -NFS4ERR_BADNAME:
			if (server->caps & NFS_CAP_UIDGID_NOMAP) {
				server->caps &= ~NFS_CAP_UIDGID_NOMAP;
				exception->retry = 1;
				printk(KERN_WARNING "NFS: v4 server %s "
						"does not accept raw "
						"uid/gids. "
						"Reenabling the idmapper.\n",
						server->nfs_client->cl_hostname);
			}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	ret = nfs4_wait_clnt_recover(clp);
	if (test_bit(NFS_MIG_FAILED, &server->mig_status))
		return -EIO;
	if (ret == 0)
		exception->retry = 1;
	return ret;
}

/*
 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
 * or 'false' otherwise.
 */
static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
{
	rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;

	if (flavor == RPC_AUTH_GSS_KRB5I ||
	    flavor == RPC_AUTH_GSS_KRB5P)
		return true;

	return false;
}

static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal,timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	struct nfs_client *clp = server->nfs_client;

	if (!nfs4_has_session(clp))
		do_renew_lease(clp, timestamp);
}

struct nfs4_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

void nfs4_init_sequence(struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res, int cache_reply)
{
	args->sa_slot = NULL;
	args->sa_cache_this = cache_reply;
	args->sa_privileged = 0;

	res->sr_slot = NULL;
}

static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
{
	args->sa_privileged = 1;
}

int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
			 struct nfs4_sequence_args *args,
			 struct nfs4_sequence_res *res,
			 struct rpc_task *task)
{
	struct nfs4_slot *slot;

	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_start;

	spin_lock(&tbl->slot_tbl_lock);
	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		goto out_sleep;

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		if (slot == ERR_PTR(-ENOMEM))
			task->tk_timeout = HZ >> 2;
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	args->sa_slot = slot;
	res->sr_slot = slot;

out_start:
	rpc_call_start(task);
	return 0;

out_sleep:
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				NULL, RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs40_setup_sequence);

static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	if (slot == NULL)
		goto out;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);

	res->sr_slot = NULL;
out:
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot = res->sr_slot;
	bool send_new_highest_used_slotid = false;

	tbl = slot->table;
	session = tbl->session;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_user_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
	if (send_new_highest_used_slotid)
		nfs41_notify_server(session->clp);
}

int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs_client *clp;
	bool interrupted = false;
	int ret = 1;

	if (slot == NULL)
		goto out_noaction;
	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	session = slot->table->session;

	if (slot->interrupted) {
		slot->interrupted = 0;
		interrupted = true;
	}

	trace_nfs4_sequence_done(session, res);
	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++slot->seq_nr;
		clp = session->clp;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation..
		 * Mark the slot as having hosted an interrupted RPC call.
		 */
		slot->interrupted = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		/*
		 * Was the last operation on this sequence interrupted?
		 * If so, retry after bumping the sequence number.
		 */
		if (interrupted) {
			++slot->seq_nr;
			goto retry_nowait;
		}
		/*
		 * Could this slot have been previously retired?
		 * If so, then the server may be expecting seq_nr = 1!
		 */
		if (slot->seq_nr != 1) {
			slot->seq_nr = 1;
			goto retry_nowait;
		}
		break;
	case -NFS4ERR_SEQ_FALSE_RETRY:
		++slot->seq_nr;
		goto retry_nowait;
	default:
		/* Just update the slot sequence no. */
		++slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
out_noaction:
	return ret;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs41_sequence_done);

int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	if (!res->sr_slot->table->session)
		return nfs40_sequence_done(task, res);
	return nfs41_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

int nfs41_setup_sequence(struct nfs4_session *session,
			 struct nfs4_sequence_args *args,
			 struct nfs4_sequence_res *res,
			 struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_success;

	tbl = &session->fc_slot_table;

	task->tk_timeout = 0;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
	    !args->sa_privileged) {
		/* The state manager will wait until the slot table is empty */
		dprintk("%s session is draining\n", __func__);
		goto out_sleep;
	}

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		/* If out of memory, try again in 1/4 second */
		if (slot == ERR_PTR(-ENOMEM))
			task->tk_timeout = HZ >> 2;
		dprintk("<-- %s: no free slots\n", __func__);
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	args->sa_slot = slot;

	dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
		slot->slot_nr, slot->seq_nr);

	res->sr_slot = slot;
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
	trace_nfs4_setup_sequence(session, args);
out_success:
	rpc_call_start(task);
	return 0;
out_sleep:
	/* Privileged tasks are queued with top priority */
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				NULL, RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);

static int nfs4_setup_sequence(const struct nfs_server *server,
			       struct nfs4_sequence_args *args,
			       struct nfs4_sequence_res *res,
			       struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(server);
	int ret = 0;

	if (!session)
		return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
					    args, res, task);

	dprintk("--> %s clp %p session %p sr_slot %u\n",
		__func__, session->clp, session, res->sr_slot ?
			res->sr_slot->slot_nr : NFS4_NO_SLOT);

	ret = nfs41_setup_sequence(session, args, res, task);

	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	struct nfs4_session *session = nfs4_get_session(data->seq_server);

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

#else	/* !CONFIG_NFS_V4_1 */

static int nfs4_setup_sequence(const struct nfs_server *server,
			       struct nfs4_sequence_args *args,
			       struct nfs4_sequence_res *res,
			       struct rpc_task *task)
{
	return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
				    args, res, task);
}

int nfs4_sequence_done(struct rpc_task *task,
		       struct nfs4_sequence_res *res)
{
	return nfs40_sequence_done(task, res);
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);

#endif	/* !CONFIG_NFS_V4_1 */

static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server,
				data->seq_args, data->seq_res, task);
}

static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs40_call_sync_ops = {
	.rpc_call_prepare = nfs40_call_sync_prepare,
	.rpc_call_done = nfs40_call_sync_done,
};

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	int ret;
	struct rpc_task *task;
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = clp->cl_mvops->call_sync_ops,
		.callback_data = &data
	};

	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else {
		ret = task->tk_status;
		rpc_put_task(task);
	}
	return ret;
}

int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs4_init_sequence(args, res, cache_reply);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}

static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	nfs_fscache_invalidate(dir);
	spin_unlock(&dir->i_lock);
}

struct nfs4_opendata {
	struct kref kref;
	struct nfs_openargs o_arg;
	struct nfs_openres o_res;
	struct nfs_open_confirmargs c_arg;
	struct nfs_open_confirmres c_res;
	struct nfs4_string owner_name;
	struct nfs4_string group_name;
	struct nfs4_label *a_label;
	struct nfs_fattr f_attr;
	struct nfs4_label *f_label;
	struct dentry *dir;
	struct dentry *dentry;
	struct nfs4_state_owner *owner;
	struct nfs4_state *state;
	struct iattr attrs;
	unsigned long timestamp;
	unsigned int rpc_done : 1;
	unsigned int file_created : 1;
	unsigned int is_recover : 1;
	int rpc_status;
	int cancelled;
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static u32
nfs4_map_atomic_open_share(struct nfs_server *server,
		fmode_t fmode, int openflags)
{
	u32 res = 0;

	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		res = NFS4_SHARE_ACCESS_READ;
		break;
	case FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_WRITE;
		break;
	case FMODE_READ|FMODE_WRITE:
		res = NFS4_SHARE_ACCESS_BOTH;
	}
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		goto out;
	/* Want no delegation if we're using O_DIRECT */
	if (openflags & O_DIRECT)
		res |= NFS4_SHARE_WANT_NO_DELEG;
out:
	return res;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.f_label = p->f_label;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}
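
/*
 * Allocate the nfs4_opendata that carries the OPEN arguments and results.
 * Takes references on the dentry, its parent directory, the superblock and
 * the state owner; they are dropped again by nfs4_opendata_free() once the
 * last kref goes away.
 */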
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		struct nfs4_label *label,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = d_inode(parent);
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;

	p->f_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->f_label))
		goto err_free_p;

	p->a_label = nfs4_label_alloc(server, gfp_mask);
	if (IS_ERR(p->a_label))
		goto err_free_f;

	alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
	p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
	if (IS_ERR(p->o_arg.seqid))
		goto err_free_label;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	p->o_arg.share_access = nfs4_map_atomic_open_share(server,
			fmode, flags);
	/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
	 * will return permission denied for all bits until close */
	if (!(flags & O_EXCL)) {
		/* ask server to check for all possible rights as results
		 * are cached */
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = nfs4_bitmask(server, label);
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	p->o_arg.label = nfs4_label_copy(p->a_label, label);
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(d_inode(dentry));
	}
	if (attrs != NULL && attrs->ia_valid != 0) {
		__u32 verf[2];

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));

		verf[0] = jiffies;
		verf[1] = current->pid;
		memcpy(p->o_arg.u.verifier.data, verf,
				sizeof(p->o_arg.u.verifier.data));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;

err_free_label:
	nfs4_label_free(p->a_label);
err_free_f:
	nfs4_label_free(p->f_label);
err_free_p:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);
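
	/* Release the labels, owner/group name buffers and the dentry,
	 * directory and superblock references taken in nfs4_opendata_alloc().
	 */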
	nfs4_label_free(p->a_label);
	nfs4_label_free(p->f_label);

	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p->f_attr.mdsthreshold);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	int ret;

	ret = rpc_wait_for_completion_task(task);
	return ret;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
		case FMODE_READ:
			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
				&& state->n_rdonly != 0;
			break;
		case FMODE_WRITE:
			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
				&& state->n_wronly != 0;
			break;
		case FMODE_READ|FMODE_WRITE:
			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
				&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
		enum open_claim_type4 claim)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		return 0;
	switch (claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_FH:
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
			break;
	default:
		return 0;
	}
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
		case FMODE_WRITE:
			state->n_wronly++;
			break;
		case FMODE_READ:
			state->n_rdonly++;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
	struct nfs_client *clp = state->owner->so_server->nfs_client;
	bool need_recover = false;

	if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
		need_recover = true;
	if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
		need_recover = true;
	if (need_recover)
		nfs4_state_mark_reclaim_nograce(clp, state);
}

static bool nfs_need_update_open_stateid(struct nfs4_state *state,
		nfs4_stateid *stateid)
{
	if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
		return true;
	if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
		nfs_test_and_clear_all_open_stateid(state);
		return true;
	}
	if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
		return true;
	return false;
}

static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
{
	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
		return;
	if (state->n_wronly)
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
	if (state->n_rdonly)
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
	if (state->n_rdwr)
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	set_bit(NFS_OPEN_STATE, &state->flags);
}

static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
		nfs4_stateid *arg_stateid,
		nfs4_stateid *stateid, fmode_t fmode)
{
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	switch (fmode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_WRITE:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_READ:
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case 0:
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	if (stateid == NULL)
		return;
	/* Handle races with OPEN */
	if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
	    (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
	    !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
		nfs_resync_open_stateid_locked(state);
		return;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
}

static void nfs_clear_open_stateid(struct nfs4_state *state,
	nfs4_stateid *arg_stateid,
	nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
	write_sequnlock(&state->seqlock);
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	switch (fmode) {
		case FMODE_READ:
			set_bit(NFS_O_RDONLY_STATE, &state->flags);
			break;
		case FMODE_WRITE:
			set_bit(NFS_O_WRONLY_STATE, &state->flags);
			break;
		case FMODE_READ|FMODE_WRITE:
			set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	if (!nfs_need_update_open_stateid(state, stateid))
		return;
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
}

static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		nfs4_stateid_copy(&state->stateid, deleg_stateid);
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	spin_lock(&state->owner->so_lock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
}

static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	if (rcu_dereference(nfsi->delegation) != deleg_cur ||
	    test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;
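
	/* Use the cached delegation's stateid if the caller did not supply
	 * one; otherwise the two must match.
	 */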
	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	rcu_read_unlock();

	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);

	return ret;
}

static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
		const nfs4_stateid *stateid)
{
	struct nfs4_state *state = lsp->ls_state;
	bool ret = false;

	spin_lock(&state->state_lock);
	if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
		goto out_noupdate;
	nfs4_stateid_copy(&lsp->ls_stateid, stateid);
	ret = true;
out_noupdate:
	spin_unlock(&state->state_lock);
	return ret;
}

static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	nfs4_inode_return_delegation(inode);
}

static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags;
	fmode_t fmode = opendata->o_arg.fmode;
	enum open_claim_type4 claim = opendata->o_arg.claim;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		spin_lock(&state->owner->so_lock);
		if (can_open_cached(state, fmode, open_mode)) {
			update_open_stateflags(state, fmode);
			spin_unlock(&state->owner->so_lock);
			goto out_return_state;
		}
		spin_unlock(&state->owner->so_lock);
		rcu_read_lock();
		delegation = rcu_dereference(nfsi->delegation);
		if (!can_open_delegated(delegation, fmode, claim)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		rcu_read_unlock();
		nfs_release_seqid(opendata->o_arg.seqid);
		if (!opendata->is_recover) {
			ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
			if (ret != 0)
				goto out;
		}
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	atomic_inc(&state->count);
	return state;
}

static void
nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
{
	struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
	struct nfs_delegation *delegation;
	int delegation_flags = 0;
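
	/* Snapshot the delegation flags under the RCU read lock; they decide
	 * below whether this is a new delegation or one being reclaimed.
	 */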
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation)
		delegation_flags = delegation->flags;
	rcu_read_unlock();
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
		pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
				   "returning a delegation for "
				   "OPEN(CLAIM_DELEGATE_CUR)\n",
				   clp->cl_hostname);
	} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
		nfs_inode_set_delegation(state->inode,
					 data->owner->so_cred,
					 &data->o_res);
	else
		nfs_inode_reclaim_delegation(state->inode,
					     data->owner->so_cred,
					     &data->o_res);
}

/*
 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
 * and update the nfs4_state.
 */
static struct nfs4_state *
_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode = data->state->inode;
	struct nfs4_state *state = data->state;
	int ret;

	if (!data->rpc_done) {
		if (data->rpc_status) {
			ret = data->rpc_status;
			goto err;
		}
		/* cached opens have already been processed */
		goto update;
	}

	ret = nfs_refresh_inode(inode, &data->f_attr);
	if (ret)
		goto err;

	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
update:
	update_open_stateid(state, &data->o_res.stateid, NULL,
			    data->o_arg.fmode);
	atomic_inc(&state->count);

	return state;
err:
	return ERR_PTR(ret);

}

static struct nfs4_state *
_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode;
	struct nfs4_state *state = NULL;
	int ret;

	if (!data->rpc_done) {
		state = nfs4_try_open_cached(data);
		goto out;
	}

	ret = -EAGAIN;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
		goto err;
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
	ret = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto err;
	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err_put_inode;
	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
	update_open_stateid(state, &data->o_res.stateid, NULL,
			    data->o_arg.fmode);
	iput(inode);
out:
	nfs_release_seqid(data->o_arg.seqid);
	return state;
err_put_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static struct nfs4_state *
nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
		return _nfs4_opendata_reclaim_to_nfs4_state(data);
	return _nfs4_opendata_to_nfs4_state(data);
}

static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_open_context *ctx;

	spin_lock(&state->inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&state->inode->i_lock);
		return ctx;
	}
	spin_unlock(&state->inode->i_lock);
	return ERR_PTR(-ENOENT);
}
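
/*
 * Build an nfs4_opendata for state recovery: the OPEN is replayed against
 * an existing nfs4_state (reboot reclaim or delegation recall), so no open
 * flags, attributes or label are passed in.
 */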
static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
		struct nfs4_state *state, enum open_claim_type4 claim)
{
	struct nfs4_opendata *opendata;

	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
			NULL, NULL, claim, GFP_NOFS);
	if (opendata == NULL)
		return ERR_PTR(-ENOMEM);
	opendata->state = state;
	atomic_inc(&state->count);
	return opendata;
}

static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
{
	struct nfs4_state *newstate;
	int ret;

	if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	     opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) &&
	    (opendata->o_arg.u.delegation_type & fmode) != fmode)
		/* This mode can't have been delegated, so we must have
		 * a valid open_stateid to cover it - no need to reclaim.
		 */
		return 0;
	opendata->o_arg.open_flags = 0;
	opendata->o_arg.fmode = fmode;
	opendata->o_arg.share_access = nfs4_map_atomic_open_share(
			NFS_SB(opendata->dentry->d_sb),
			fmode, 0);
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	if (ret != 0)
		return ret;
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	nfs4_close_state(newstate, fmode);
	*res = newstate;
	return 0;
}

static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	struct nfs4_state *newstate;
	int ret;

	/* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
	smp_rmb();
	if (state->n_rdwr != 0) {
		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_wronly != 0) {
		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_rdonly != 0) {
		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
	}
	return 0;
}

/*
 * OPEN_RECLAIM:
 * 	reclaim state on the server after a reboot.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_PREVIOUS);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}

static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_do_open_reclaim(ctx, state);
		trace_nfs4_open_reclaim(ctx, 0, err);
		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
			continue;
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return -EAGAIN;
	ret = nfs4_do_open_reclaim(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}

static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
{
	switch (err) {
	default:
		printk(KERN_ERR "NFS: %s: unhandled error "
			"%d.\n", __func__, err);
	case 0:
	case -ENOENT:
	case -EAGAIN:
	case -ESTALE:
		break;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
		nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
		return -EAGAIN;
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_STALE_STATEID:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	case -NFS4ERR_EXPIRED:
		/* Don't recall a delegation if it was lost */
		nfs4_schedule_lease_recovery(server->nfs_client);
		return -EAGAIN;
	case -NFS4ERR_MOVED:
		nfs4_schedule_migration_recovery(server);
		return -EAGAIN;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(server->nfs_client);
		return -EAGAIN;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_OPENMODE:
		nfs_inode_find_state_and_recover(state->inode,
				stateid);
		nfs4_schedule_stateid_recovery(server, state);
		return -EAGAIN;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
		ssleep(1);
		return -EAGAIN;
	case -ENOMEM:
	case -NFS4ERR_DENIED:
		/* kill_proc(fl->fl_pid, SIGLOST, 1); */
		return 0;
	}
	return err;
}
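
/*
 * Called when the server recalls a delegation: replay the OPEN with
 * CLAIM_DELEG_CUR_FH so that a regular open stateid is established before
 * the delegation itself is returned.
 */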
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_opendata *opendata;
	int err;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_DELEG_CUR_FH);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
	err = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
}

static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
			     &data->c_arg.seq_args, &data->c_res.seq_res, task);
}

static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	nfs40_sequence_done(task, &data->c_res.seq_res);

	data->rpc_status = task->tk_status;
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
		nfs_confirm_seqid(&data->owner->so_seqid, 0);
		renew_lease(data->o_res.server, data->timestamp);
		data->rpc_done = 1;
	}
}

static void nfs4_open_confirm_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
		goto out_free;
	/* In case of error, no cleanup! */
	if (!data->rpc_done)
		goto out_free;
	state = nfs4_opendata_to_nfs4_state(data);
	if (!IS_ERR(state))
		nfs4_close_state(state, data->o_arg.fmode);
out_free:
	nfs4_opendata_put(data);
}

static const struct rpc_call_ops nfs4_open_confirm_ops = {
	.rpc_call_prepare = nfs4_open_confirm_prepare,
	.rpc_call_done = nfs4_open_confirm_done,
	.rpc_release = nfs4_open_confirm_release,
};

/*
 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
 */
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
	struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
		.rpc_argp = &data->c_arg,
		.rpc_resp = &data->c_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_confirm_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->timestamp = jiffies;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		data->cancelled = 1;
		smp_wmb();
	} else
		status = data->rpc_status;
	rpc_put_task(task);
	return status;
}

static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state_owner *sp = data->owner;
	struct nfs_client *clp = sp->so_server->nfs_client;
	enum open_claim_type4 claim = data->o_arg.claim;

	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
		goto out_wait;
(nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 1874 goto out_wait; 1875 /* 1876 * Check if we still need to send an OPEN call, or if we can use 1877 * a delegation instead. 1878 */ 1879 if (data->state != NULL) { 1880 struct nfs_delegation *delegation; 1881 1882 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) 1883 goto out_no_action; 1884 rcu_read_lock(); 1885 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); 1886 if (can_open_delegated(delegation, data->o_arg.fmode, claim)) 1887 goto unlock_no_action; 1888 rcu_read_unlock(); 1889 } 1890 /* Update client id. */ 1891 data->o_arg.clientid = clp->cl_clientid; 1892 switch (claim) { 1893 default: 1894 break; 1895 case NFS4_OPEN_CLAIM_PREVIOUS: 1896 case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1897 case NFS4_OPEN_CLAIM_DELEG_PREV_FH: 1898 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 1899 case NFS4_OPEN_CLAIM_FH: 1900 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 1901 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); 1902 } 1903 data->timestamp = jiffies; 1904 if (nfs4_setup_sequence(data->o_arg.server, 1905 &data->o_arg.seq_args, 1906 &data->o_res.seq_res, 1907 task) != 0) 1908 nfs_release_seqid(data->o_arg.seqid); 1909 1910 /* Set the create mode (note dependency on the session type) */ 1911 data->o_arg.createmode = NFS4_CREATE_UNCHECKED; 1912 if (data->o_arg.open_flags & O_EXCL) { 1913 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE; 1914 if (nfs4_has_persistent_session(clp)) 1915 data->o_arg.createmode = NFS4_CREATE_GUARDED; 1916 else if (clp->cl_mvops->minor_version > 0) 1917 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1; 1918 } 1919 return; 1920 unlock_no_action: 1921 rcu_read_unlock(); 1922 out_no_action: 1923 task->tk_action = NULL; 1924 out_wait: 1925 nfs4_sequence_done(task, &data->o_res.seq_res); 1926 } 1927 1928 static void nfs4_open_done(struct rpc_task *task, void *calldata) 1929 { 1930 struct nfs4_opendata *data = calldata; 1931 1932 data->rpc_status = task->tk_status; 1933 1934 if (!nfs4_sequence_done(task, &data->o_res.seq_res)) 1935 return; 1936 1937 if (task->tk_status == 0) { 1938 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 1939 switch (data->o_res.f_attr->mode & S_IFMT) { 1940 case S_IFREG: 1941 break; 1942 case S_IFLNK: 1943 data->rpc_status = -ELOOP; 1944 break; 1945 case S_IFDIR: 1946 data->rpc_status = -EISDIR; 1947 break; 1948 default: 1949 data->rpc_status = -ENOTDIR; 1950 } 1951 } 1952 renew_lease(data->o_res.server, data->timestamp); 1953 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 1954 nfs_confirm_seqid(&data->owner->so_seqid, 0); 1955 } 1956 data->rpc_done = 1; 1957 } 1958 1959 static void nfs4_open_release(void *calldata) 1960 { 1961 struct nfs4_opendata *data = calldata; 1962 struct nfs4_state *state = NULL; 1963 1964 /* If this request hasn't been cancelled, do nothing */ 1965 if (data->cancelled == 0) 1966 goto out_free; 1967 /* In case of error, no cleanup! */ 1968 if (data->rpc_status != 0 || !data->rpc_done) 1969 goto out_free; 1970 /* In case we need an open_confirm, no cleanup! 
*/ 1971 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 1972 goto out_free; 1973 state = nfs4_opendata_to_nfs4_state(data); 1974 if (!IS_ERR(state)) 1975 nfs4_close_state(state, data->o_arg.fmode); 1976 out_free: 1977 nfs4_opendata_put(data); 1978 } 1979 1980 static const struct rpc_call_ops nfs4_open_ops = { 1981 .rpc_call_prepare = nfs4_open_prepare, 1982 .rpc_call_done = nfs4_open_done, 1983 .rpc_release = nfs4_open_release, 1984 }; 1985 1986 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) 1987 { 1988 struct inode *dir = d_inode(data->dir); 1989 struct nfs_server *server = NFS_SERVER(dir); 1990 struct nfs_openargs *o_arg = &data->o_arg; 1991 struct nfs_openres *o_res = &data->o_res; 1992 struct rpc_task *task; 1993 struct rpc_message msg = { 1994 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 1995 .rpc_argp = o_arg, 1996 .rpc_resp = o_res, 1997 .rpc_cred = data->owner->so_cred, 1998 }; 1999 struct rpc_task_setup task_setup_data = { 2000 .rpc_client = server->client, 2001 .rpc_message = &msg, 2002 .callback_ops = &nfs4_open_ops, 2003 .callback_data = data, 2004 .workqueue = nfsiod_workqueue, 2005 .flags = RPC_TASK_ASYNC, 2006 }; 2007 int status; 2008 2009 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); 2010 kref_get(&data->kref); 2011 data->rpc_done = 0; 2012 data->rpc_status = 0; 2013 data->cancelled = 0; 2014 data->is_recover = 0; 2015 if (isrecover) { 2016 nfs4_set_sequence_privileged(&o_arg->seq_args); 2017 data->is_recover = 1; 2018 } 2019 task = rpc_run_task(&task_setup_data); 2020 if (IS_ERR(task)) 2021 return PTR_ERR(task); 2022 status = nfs4_wait_for_completion_rpc_task(task); 2023 if (status != 0) { 2024 data->cancelled = 1; 2025 smp_wmb(); 2026 } else 2027 status = data->rpc_status; 2028 rpc_put_task(task); 2029 2030 return status; 2031 } 2032 2033 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 2034 { 2035 struct inode *dir = d_inode(data->dir); 2036 struct nfs_openres *o_res = &data->o_res; 2037 int status; 2038 2039 status = nfs4_run_open_task(data, 1); 2040 if (status != 0 || !data->rpc_done) 2041 return status; 2042 2043 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 2044 2045 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2046 status = _nfs4_proc_open_confirm(data); 2047 if (status != 0) 2048 return status; 2049 } 2050 2051 return status; 2052 } 2053 2054 /* 2055 * Additional permission checks in order to distinguish between an 2056 * open for read, and an open for execute. This works around the 2057 * fact that NFSv4 OPEN treats read and execute permissions as being 2058 * the same. 2059 * Note that in the non-execute case, we want to turn off permission 2060 * checking if we just created a new file (POSIX open() semantics). 2061 */ 2062 static int nfs4_opendata_access(struct rpc_cred *cred, 2063 struct nfs4_opendata *opendata, 2064 struct nfs4_state *state, fmode_t fmode, 2065 int openflags) 2066 { 2067 struct nfs_access_entry cache; 2068 u32 mask; 2069 2070 /* access call failed or for some reason the server doesn't 2071 * support any access modes -- defer access call until later */ 2072 if (opendata->o_res.access_supported == 0) 2073 return 0; 2074 2075 mask = 0; 2076 /* 2077 * Use openflags to check for exec, because fmode won't 2078 * always have FMODE_EXEC set when file open for exec. 
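 *
 * Roughly, the mask computed below works out as:
 *
 *	openflags has __FMODE_EXEC          -> mask = MAY_EXEC only
 *	fmode has FMODE_READ, existing file -> mask = MAY_READ
 *	file we just created                -> mask = 0 (no check)
 *
 * The cached ACCESS result is then compared against that mask, and the
 * freshly opened state is closed with -EACCES only if a wanted bit is
 * missing from what the server granted.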
2079 */ 2080 if (openflags & __FMODE_EXEC) { 2081 /* ONLY check for exec rights */ 2082 mask = MAY_EXEC; 2083 } else if ((fmode & FMODE_READ) && !opendata->file_created) 2084 mask = MAY_READ; 2085 2086 cache.cred = cred; 2087 cache.jiffies = jiffies; 2088 nfs_access_set_mask(&cache, opendata->o_res.access_result); 2089 nfs_access_add_cache(state->inode, &cache); 2090 2091 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 2092 return 0; 2093 2094 /* even though OPEN succeeded, access is denied. Close the file */ 2095 nfs4_close_state(state, fmode); 2096 return -EACCES; 2097 } 2098 2099 /* 2100 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 2101 */ 2102 static int _nfs4_proc_open(struct nfs4_opendata *data) 2103 { 2104 struct inode *dir = d_inode(data->dir); 2105 struct nfs_server *server = NFS_SERVER(dir); 2106 struct nfs_openargs *o_arg = &data->o_arg; 2107 struct nfs_openres *o_res = &data->o_res; 2108 int status; 2109 2110 status = nfs4_run_open_task(data, 0); 2111 if (!data->rpc_done) 2112 return status; 2113 if (status != 0) { 2114 if (status == -NFS4ERR_BADNAME && 2115 !(o_arg->open_flags & O_CREAT)) 2116 return -ENOENT; 2117 return status; 2118 } 2119 2120 nfs_fattr_map_and_free_names(server, &data->f_attr); 2121 2122 if (o_arg->open_flags & O_CREAT) { 2123 update_changeattr(dir, &o_res->cinfo); 2124 if (o_arg->open_flags & O_EXCL) 2125 data->file_created = 1; 2126 else if (o_res->cinfo.before != o_res->cinfo.after) 2127 data->file_created = 1; 2128 } 2129 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 2130 server->caps &= ~NFS_CAP_POSIX_LOCK; 2131 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 2132 status = _nfs4_proc_open_confirm(data); 2133 if (status != 0) 2134 return status; 2135 } 2136 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 2137 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label); 2138 return 0; 2139 } 2140 2141 static int nfs4_recover_expired_lease(struct nfs_server *server) 2142 { 2143 return nfs4_client_recover_expired_lease(server->nfs_client); 2144 } 2145 2146 /* 2147 * OPEN_EXPIRED: 2148 * reclaim state on the server after a network partition. 
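 * (the OPEN is simply replayed with claim type CLAIM_FH against the
 * file handle we already hold, see _nfs4_open_expired() below).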
2149 * Assumes caller holds the appropriate lock 2150 */ 2151 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2152 { 2153 struct nfs4_opendata *opendata; 2154 int ret; 2155 2156 opendata = nfs4_open_recoverdata_alloc(ctx, state, 2157 NFS4_OPEN_CLAIM_FH); 2158 if (IS_ERR(opendata)) 2159 return PTR_ERR(opendata); 2160 ret = nfs4_open_recover(opendata, state); 2161 if (ret == -ESTALE) 2162 d_drop(ctx->dentry); 2163 nfs4_opendata_put(opendata); 2164 return ret; 2165 } 2166 2167 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 2168 { 2169 struct nfs_server *server = NFS_SERVER(state->inode); 2170 struct nfs4_exception exception = { }; 2171 int err; 2172 2173 do { 2174 err = _nfs4_open_expired(ctx, state); 2175 trace_nfs4_open_expired(ctx, 0, err); 2176 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception)) 2177 continue; 2178 switch (err) { 2179 default: 2180 goto out; 2181 case -NFS4ERR_GRACE: 2182 case -NFS4ERR_DELAY: 2183 nfs4_handle_exception(server, err, &exception); 2184 err = 0; 2185 } 2186 } while (exception.retry); 2187 out: 2188 return err; 2189 } 2190 2191 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2192 { 2193 struct nfs_open_context *ctx; 2194 int ret; 2195 2196 ctx = nfs4_state_find_open_context(state); 2197 if (IS_ERR(ctx)) 2198 return -EAGAIN; 2199 ret = nfs4_do_open_expired(ctx, state); 2200 put_nfs_open_context(ctx); 2201 return ret; 2202 } 2203 2204 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state) 2205 { 2206 nfs_remove_bad_delegation(state->inode); 2207 write_seqlock(&state->seqlock); 2208 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 2209 write_sequnlock(&state->seqlock); 2210 clear_bit(NFS_DELEGATED_STATE, &state->flags); 2211 } 2212 2213 static void nfs40_clear_delegation_stateid(struct nfs4_state *state) 2214 { 2215 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) 2216 nfs_finish_clear_delegation_stateid(state); 2217 } 2218 2219 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2220 { 2221 /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 2222 nfs40_clear_delegation_stateid(state); 2223 return nfs4_open_expired(sp, state); 2224 } 2225 2226 #if defined(CONFIG_NFS_V4_1) 2227 static void nfs41_check_delegation_stateid(struct nfs4_state *state) 2228 { 2229 struct nfs_server *server = NFS_SERVER(state->inode); 2230 nfs4_stateid stateid; 2231 struct nfs_delegation *delegation; 2232 struct rpc_cred *cred; 2233 int status; 2234 2235 /* Get the delegation credential for use by test/free_stateid */ 2236 rcu_read_lock(); 2237 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2238 if (delegation == NULL) { 2239 rcu_read_unlock(); 2240 return; 2241 } 2242 2243 nfs4_stateid_copy(&stateid, &delegation->stateid); 2244 cred = get_rpccred(delegation->cred); 2245 rcu_read_unlock(); 2246 status = nfs41_test_stateid(server, &stateid, cred); 2247 trace_nfs4_test_delegation_stateid(state, NULL, status); 2248 2249 if (status != NFS_OK) { 2250 /* Free the stateid unless the server explicitly 2251 * informs us the stateid is unrecognized. 
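 * (i.e. TEST_STATEID answered NFS4ERR_BAD_STATEID: the server no
 * longer recognizes this stateid, so a FREE_STATEID would be pointless)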
*/ 2252 if (status != -NFS4ERR_BAD_STATEID) 2253 nfs41_free_stateid(server, &stateid, cred); 2254 nfs_finish_clear_delegation_stateid(state); 2255 } 2256 2257 put_rpccred(cred); 2258 } 2259 2260 /** 2261 * nfs41_check_open_stateid - possibly free an open stateid 2262 * 2263 * @state: NFSv4 state for an inode 2264 * 2265 * Returns NFS_OK if recovery for this stateid is now finished. 2266 * Otherwise a negative NFS4ERR value is returned. 2267 */ 2268 static int nfs41_check_open_stateid(struct nfs4_state *state) 2269 { 2270 struct nfs_server *server = NFS_SERVER(state->inode); 2271 nfs4_stateid *stateid = &state->open_stateid; 2272 struct rpc_cred *cred = state->owner->so_cred; 2273 int status; 2274 2275 /* If a state reset has been done, test_stateid is unneeded */ 2276 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) && 2277 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) && 2278 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0)) 2279 return -NFS4ERR_BAD_STATEID; 2280 2281 status = nfs41_test_stateid(server, stateid, cred); 2282 trace_nfs4_test_open_stateid(state, NULL, status); 2283 if (status != NFS_OK) { 2284 /* Free the stateid unless the server explicitly 2285 * informs us the stateid is unrecognized. */ 2286 if (status != -NFS4ERR_BAD_STATEID) 2287 nfs41_free_stateid(server, stateid, cred); 2288 2289 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2290 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2291 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2292 clear_bit(NFS_OPEN_STATE, &state->flags); 2293 } 2294 return status; 2295 } 2296 2297 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 2298 { 2299 int status; 2300 2301 nfs41_check_delegation_stateid(state); 2302 status = nfs41_check_open_stateid(state); 2303 if (status != NFS_OK) 2304 status = nfs4_open_expired(sp, state); 2305 return status; 2306 } 2307 #endif 2308 2309 /* 2310 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 2311 * fields corresponding to attributes that were used to store the verifier. 2312 * Make sure we clobber those fields in the later setattr call 2313 */ 2314 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, 2315 struct iattr *sattr, struct nfs4_label **label) 2316 { 2317 const u32 *attrset = opendata->o_res.attrset; 2318 2319 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 2320 !(sattr->ia_valid & ATTR_ATIME_SET)) 2321 sattr->ia_valid |= ATTR_ATIME; 2322 2323 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 2324 !(sattr->ia_valid & ATTR_MTIME_SET)) 2325 sattr->ia_valid |= ATTR_MTIME; 2326 2327 /* Except MODE, it seems harmless of setting twice. 
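 * In other words: when the server reports that it stored the verifier in
 * the atime/mtime, re-sending those timestamps in the follow-up SETATTR
 * is harmless, so ATTR_ATIME/ATTR_MTIME are re-asserted above; MODE is
 * the exception, so ATTR_MODE is cleared below rather than being sent a
 * second time.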
*/ 2328 if ((attrset[1] & FATTR4_WORD1_MODE)) 2329 sattr->ia_valid &= ~ATTR_MODE; 2330 2331 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL) 2332 *label = NULL; 2333 } 2334 2335 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 2336 fmode_t fmode, 2337 int flags, 2338 struct nfs_open_context *ctx) 2339 { 2340 struct nfs4_state_owner *sp = opendata->owner; 2341 struct nfs_server *server = sp->so_server; 2342 struct dentry *dentry; 2343 struct nfs4_state *state; 2344 unsigned int seq; 2345 int ret; 2346 2347 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 2348 2349 ret = _nfs4_proc_open(opendata); 2350 if (ret != 0) 2351 goto out; 2352 2353 state = nfs4_opendata_to_nfs4_state(opendata); 2354 ret = PTR_ERR(state); 2355 if (IS_ERR(state)) 2356 goto out; 2357 if (server->caps & NFS_CAP_POSIX_LOCK) 2358 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 2359 2360 dentry = opendata->dentry; 2361 if (d_really_is_negative(dentry)) { 2362 /* FIXME: Is this d_drop() ever needed? */ 2363 d_drop(dentry); 2364 dentry = d_add_unique(dentry, igrab(state->inode)); 2365 if (dentry == NULL) { 2366 dentry = opendata->dentry; 2367 } else if (dentry != ctx->dentry) { 2368 dput(ctx->dentry); 2369 ctx->dentry = dget(dentry); 2370 } 2371 nfs_set_verifier(dentry, 2372 nfs_save_change_attribute(d_inode(opendata->dir))); 2373 } 2374 2375 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags); 2376 if (ret != 0) 2377 goto out; 2378 2379 ctx->state = state; 2380 if (d_inode(dentry) == state->inode) { 2381 nfs_inode_attach_open_context(ctx); 2382 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 2383 nfs4_schedule_stateid_recovery(server, state); 2384 } 2385 out: 2386 return ret; 2387 } 2388 2389 /* 2390 * Returns a referenced nfs4_state 2391 */ 2392 static int _nfs4_do_open(struct inode *dir, 2393 struct nfs_open_context *ctx, 2394 int flags, 2395 struct iattr *sattr, 2396 struct nfs4_label *label, 2397 int *opened) 2398 { 2399 struct nfs4_state_owner *sp; 2400 struct nfs4_state *state = NULL; 2401 struct nfs_server *server = NFS_SERVER(dir); 2402 struct nfs4_opendata *opendata; 2403 struct dentry *dentry = ctx->dentry; 2404 struct rpc_cred *cred = ctx->cred; 2405 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold; 2406 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC); 2407 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 2408 struct nfs4_label *olabel = NULL; 2409 int status; 2410 2411 /* Protect against reboot recovery conflicts */ 2412 status = -ENOMEM; 2413 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 2414 if (sp == NULL) { 2415 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 2416 goto out_err; 2417 } 2418 status = nfs4_recover_expired_lease(server); 2419 if (status != 0) 2420 goto err_put_state_owner; 2421 if (d_really_is_positive(dentry)) 2422 nfs4_return_incompatible_delegation(d_inode(dentry), fmode); 2423 status = -ENOMEM; 2424 if (d_really_is_positive(dentry)) 2425 claim = NFS4_OPEN_CLAIM_FH; 2426 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, 2427 label, claim, GFP_KERNEL); 2428 if (opendata == NULL) 2429 goto err_put_state_owner; 2430 2431 if (label) { 2432 olabel = nfs4_label_alloc(server, GFP_KERNEL); 2433 if (IS_ERR(olabel)) { 2434 status = PTR_ERR(olabel); 2435 goto err_opendata_put; 2436 } 2437 } 2438 2439 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 2440 if (!opendata->f_attr.mdsthreshold) { 2441 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 2442 if (!opendata->f_attr.mdsthreshold) 2443 
goto err_free_label; 2444 } 2445 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 2446 } 2447 if (d_really_is_positive(dentry)) 2448 opendata->state = nfs4_get_open_state(d_inode(dentry), sp); 2449 2450 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx); 2451 if (status != 0) 2452 goto err_free_label; 2453 state = ctx->state; 2454 2455 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && 2456 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 2457 nfs4_exclusive_attrset(opendata, sattr, &label); 2458 2459 nfs_fattr_init(opendata->o_res.f_attr); 2460 status = nfs4_do_setattr(state->inode, cred, 2461 opendata->o_res.f_attr, sattr, 2462 state, label, olabel); 2463 if (status == 0) { 2464 nfs_setattr_update_inode(state->inode, sattr, 2465 opendata->o_res.f_attr); 2466 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); 2467 } 2468 } 2469 if (opened && opendata->file_created) 2470 *opened |= FILE_CREATED; 2471 2472 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) { 2473 *ctx_th = opendata->f_attr.mdsthreshold; 2474 opendata->f_attr.mdsthreshold = NULL; 2475 } 2476 2477 nfs4_label_free(olabel); 2478 2479 nfs4_opendata_put(opendata); 2480 nfs4_put_state_owner(sp); 2481 return 0; 2482 err_free_label: 2483 nfs4_label_free(olabel); 2484 err_opendata_put: 2485 nfs4_opendata_put(opendata); 2486 err_put_state_owner: 2487 nfs4_put_state_owner(sp); 2488 out_err: 2489 return status; 2490 } 2491 2492 2493 static struct nfs4_state *nfs4_do_open(struct inode *dir, 2494 struct nfs_open_context *ctx, 2495 int flags, 2496 struct iattr *sattr, 2497 struct nfs4_label *label, 2498 int *opened) 2499 { 2500 struct nfs_server *server = NFS_SERVER(dir); 2501 struct nfs4_exception exception = { }; 2502 struct nfs4_state *res; 2503 int status; 2504 2505 do { 2506 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened); 2507 res = ctx->state; 2508 trace_nfs4_open_file(ctx, flags, status); 2509 if (status == 0) 2510 break; 2511 /* NOTE: BAD_SEQID means the server and client disagree about the 2512 * book-keeping w.r.t. state-changing operations 2513 * (OPEN/CLOSE/LOCK/LOCKU...) 2514 * It is actually a sign of a bug on the client or on the server. 2515 * 2516 * If we receive a BAD_SEQID error in the particular case of 2517 * doing an OPEN, we assume that nfs_increment_open_seqid() will 2518 * have unhashed the old state_owner for us, and that we can 2519 * therefore safely retry using a new one. We should still warn 2520 * the user though... 2521 */ 2522 if (status == -NFS4ERR_BAD_SEQID) { 2523 pr_warn_ratelimited("NFS: v4 server %s " 2524 " returned a bad sequence-id error!\n", 2525 NFS_SERVER(dir)->nfs_client->cl_hostname); 2526 exception.retry = 1; 2527 continue; 2528 } 2529 /* 2530 * BAD_STATEID on OPEN means that the server cancelled our 2531 * state before it received the OPEN_CONFIRM. 2532 * Recover by retrying the request as per the discussion 2533 * on Page 181 of RFC3530. 
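 * (OPEN_CONFIRM only exists in NFSv4.0, so this particular race is
 * specific to minor version 0 servers)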
2534 */ 2535 if (status == -NFS4ERR_BAD_STATEID) { 2536 exception.retry = 1; 2537 continue; 2538 } 2539 if (status == -EAGAIN) { 2540 /* We must have found a delegation */ 2541 exception.retry = 1; 2542 continue; 2543 } 2544 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 2545 continue; 2546 res = ERR_PTR(nfs4_handle_exception(server, 2547 status, &exception)); 2548 } while (exception.retry); 2549 return res; 2550 } 2551 2552 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2553 struct nfs_fattr *fattr, struct iattr *sattr, 2554 struct nfs4_state *state, struct nfs4_label *ilabel, 2555 struct nfs4_label *olabel) 2556 { 2557 struct nfs_server *server = NFS_SERVER(inode); 2558 struct nfs_setattrargs arg = { 2559 .fh = NFS_FH(inode), 2560 .iap = sattr, 2561 .server = server, 2562 .bitmask = server->attr_bitmask, 2563 .label = ilabel, 2564 }; 2565 struct nfs_setattrres res = { 2566 .fattr = fattr, 2567 .label = olabel, 2568 .server = server, 2569 }; 2570 struct rpc_message msg = { 2571 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2572 .rpc_argp = &arg, 2573 .rpc_resp = &res, 2574 .rpc_cred = cred, 2575 }; 2576 unsigned long timestamp = jiffies; 2577 fmode_t fmode; 2578 bool truncate; 2579 int status; 2580 2581 arg.bitmask = nfs4_bitmask(server, ilabel); 2582 if (ilabel) 2583 arg.bitmask = nfs4_bitmask(server, olabel); 2584 2585 nfs_fattr_init(fattr); 2586 2587 /* Servers should only apply open mode checks for file size changes */ 2588 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false; 2589 fmode = truncate ? FMODE_WRITE : FMODE_READ; 2590 2591 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) { 2592 /* Use that stateid */ 2593 } else if (truncate && state != NULL) { 2594 struct nfs_lockowner lockowner = { 2595 .l_owner = current->files, 2596 .l_pid = current->tgid, 2597 }; 2598 if (!nfs4_valid_open_stateid(state)) 2599 return -EBADF; 2600 if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2601 &lockowner) == -EIO) 2602 return -EBADF; 2603 } else 2604 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2605 2606 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2607 if (status == 0 && state != NULL) 2608 renew_lease(server, timestamp); 2609 return status; 2610 } 2611 2612 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2613 struct nfs_fattr *fattr, struct iattr *sattr, 2614 struct nfs4_state *state, struct nfs4_label *ilabel, 2615 struct nfs4_label *olabel) 2616 { 2617 struct nfs_server *server = NFS_SERVER(inode); 2618 struct nfs4_exception exception = { 2619 .state = state, 2620 .inode = inode, 2621 }; 2622 int err; 2623 do { 2624 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state, ilabel, olabel); 2625 trace_nfs4_setattr(inode, err); 2626 switch (err) { 2627 case -NFS4ERR_OPENMODE: 2628 if (!(sattr->ia_valid & ATTR_SIZE)) { 2629 pr_warn_once("NFSv4: server %s is incorrectly " 2630 "applying open mode checks to " 2631 "a SETATTR that is not " 2632 "changing file size.\n", 2633 server->nfs_client->cl_hostname); 2634 } 2635 if (state && !(state->state & FMODE_WRITE)) { 2636 err = -EBADF; 2637 if (sattr->ia_valid & ATTR_OPEN) 2638 err = -EACCES; 2639 goto out; 2640 } 2641 } 2642 err = nfs4_handle_exception(server, err, &exception); 2643 } while (exception.retry); 2644 out: 2645 return err; 2646 } 2647 2648 struct nfs4_closedata { 2649 struct inode *inode; 2650 struct nfs4_state *state; 2651 struct nfs_closeargs arg; 2652 struct nfs_closeres res; 2653 struct 
nfs_fattr fattr; 2654 unsigned long timestamp; 2655 bool roc; 2656 u32 roc_barrier; 2657 }; 2658 2659 static void nfs4_free_closedata(void *data) 2660 { 2661 struct nfs4_closedata *calldata = data; 2662 struct nfs4_state_owner *sp = calldata->state->owner; 2663 struct super_block *sb = calldata->state->inode->i_sb; 2664 2665 if (calldata->roc) 2666 pnfs_roc_release(calldata->state->inode); 2667 nfs4_put_open_state(calldata->state); 2668 nfs_free_seqid(calldata->arg.seqid); 2669 nfs4_put_state_owner(sp); 2670 nfs_sb_deactive(sb); 2671 kfree(calldata); 2672 } 2673 2674 static void nfs4_close_done(struct rpc_task *task, void *data) 2675 { 2676 struct nfs4_closedata *calldata = data; 2677 struct nfs4_state *state = calldata->state; 2678 struct nfs_server *server = NFS_SERVER(calldata->inode); 2679 nfs4_stateid *res_stateid = NULL; 2680 2681 dprintk("%s: begin!\n", __func__); 2682 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2683 return; 2684 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status); 2685 /* hmm. we are done with the inode, and in the process of freeing 2686 * the state_owner. we keep this around to process errors 2687 */ 2688 switch (task->tk_status) { 2689 case 0: 2690 res_stateid = &calldata->res.stateid; 2691 if (calldata->roc) 2692 pnfs_roc_set_barrier(state->inode, 2693 calldata->roc_barrier); 2694 renew_lease(server, calldata->timestamp); 2695 break; 2696 case -NFS4ERR_ADMIN_REVOKED: 2697 case -NFS4ERR_STALE_STATEID: 2698 case -NFS4ERR_OLD_STATEID: 2699 case -NFS4ERR_BAD_STATEID: 2700 case -NFS4ERR_EXPIRED: 2701 if (!nfs4_stateid_match(&calldata->arg.stateid, 2702 &state->open_stateid)) { 2703 rpc_restart_call_prepare(task); 2704 goto out_release; 2705 } 2706 if (calldata->arg.fmode == 0) 2707 break; 2708 default: 2709 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) { 2710 rpc_restart_call_prepare(task); 2711 goto out_release; 2712 } 2713 } 2714 nfs_clear_open_stateid(state, &calldata->arg.stateid, 2715 res_stateid, calldata->arg.fmode); 2716 out_release: 2717 nfs_release_seqid(calldata->arg.seqid); 2718 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2719 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2720 } 2721 2722 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2723 { 2724 struct nfs4_closedata *calldata = data; 2725 struct nfs4_state *state = calldata->state; 2726 struct inode *inode = calldata->inode; 2727 bool is_rdonly, is_wronly, is_rdwr; 2728 int call_close = 0; 2729 2730 dprintk("%s: begin!\n", __func__); 2731 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2732 goto out_wait; 2733 2734 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2735 spin_lock(&state->owner->so_lock); 2736 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); 2737 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); 2738 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); 2739 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid); 2740 /* Calculate the change in open mode */ 2741 calldata->arg.fmode = 0; 2742 if (state->n_rdwr == 0) { 2743 if (state->n_rdonly == 0) 2744 call_close |= is_rdonly; 2745 else if (is_rdonly) 2746 calldata->arg.fmode |= FMODE_READ; 2747 if (state->n_wronly == 0) 2748 call_close |= is_wronly; 2749 else if (is_wronly) 2750 calldata->arg.fmode |= FMODE_WRITE; 2751 } else if (is_rdwr) 2752 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; 2753 2754 if (calldata->arg.fmode == 0) 2755 call_close |= is_rdwr; 2756 2757 if 
(!nfs4_valid_open_stateid(state)) 2758 call_close = 0; 2759 spin_unlock(&state->owner->so_lock); 2760 2761 if (!call_close) { 2762 /* Note: exit _without_ calling nfs4_close_done */ 2763 goto out_no_action; 2764 } 2765 2766 if (calldata->arg.fmode == 0) 2767 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2768 if (calldata->roc) 2769 pnfs_roc_get_barrier(inode, &calldata->roc_barrier); 2770 2771 calldata->arg.share_access = 2772 nfs4_map_atomic_open_share(NFS_SERVER(inode), 2773 calldata->arg.fmode, 0); 2774 2775 nfs_fattr_init(calldata->res.fattr); 2776 calldata->timestamp = jiffies; 2777 if (nfs4_setup_sequence(NFS_SERVER(inode), 2778 &calldata->arg.seq_args, 2779 &calldata->res.seq_res, 2780 task) != 0) 2781 nfs_release_seqid(calldata->arg.seqid); 2782 dprintk("%s: done!\n", __func__); 2783 return; 2784 out_no_action: 2785 task->tk_action = NULL; 2786 out_wait: 2787 nfs4_sequence_done(task, &calldata->res.seq_res); 2788 } 2789 2790 static const struct rpc_call_ops nfs4_close_ops = { 2791 .rpc_call_prepare = nfs4_close_prepare, 2792 .rpc_call_done = nfs4_close_done, 2793 .rpc_release = nfs4_free_closedata, 2794 }; 2795 2796 static bool nfs4_roc(struct inode *inode) 2797 { 2798 if (!nfs_have_layout(inode)) 2799 return false; 2800 return pnfs_roc(inode); 2801 } 2802 2803 /* 2804 * It is possible for data to be read from or written to a mem-mapped file 2805 * after the sys_close call (which hits the vfs layer as a flush). 2806 * This means that we can't safely call NFSv4 close on a file until 2807 * the inode is cleared. This in turn means that we are not good 2808 * NFSv4 citizens - we do not indicate to the server to update the file's 2809 * share state even when we are done with one of the three share 2810 * stateids in the inode. 2811 * 2812 * NOTE: Caller must be holding the sp->so_owner semaphore!
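 *
 * As a rough sketch of what nfs4_close_prepare() above ends up sending
 * once there is a share mode to give back:
 *
 *	n_rdwr == 0, n_rdonly == 0, n_wronly == 0  ->  CLOSE
 *	n_rdwr == 0, n_rdonly  > 0, n_wronly == 0  ->  OPEN_DOWNGRADE(READ)
 *	n_rdwr == 0, n_rdonly == 0, n_wronly  > 0  ->  OPEN_DOWNGRADE(WRITE)
 *
 * and if read/write openers remain, no RPC is sent at all.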
2813 */ 2814 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 2815 { 2816 struct nfs_server *server = NFS_SERVER(state->inode); 2817 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 2818 struct nfs4_closedata *calldata; 2819 struct nfs4_state_owner *sp = state->owner; 2820 struct rpc_task *task; 2821 struct rpc_message msg = { 2822 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2823 .rpc_cred = state->owner->so_cred, 2824 }; 2825 struct rpc_task_setup task_setup_data = { 2826 .rpc_client = server->client, 2827 .rpc_message = &msg, 2828 .callback_ops = &nfs4_close_ops, 2829 .workqueue = nfsiod_workqueue, 2830 .flags = RPC_TASK_ASYNC, 2831 }; 2832 int status = -ENOMEM; 2833 2834 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP, 2835 &task_setup_data.rpc_client, &msg); 2836 2837 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2838 if (calldata == NULL) 2839 goto out; 2840 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2841 calldata->inode = state->inode; 2842 calldata->state = state; 2843 calldata->arg.fh = NFS_FH(state->inode); 2844 /* Serialization for the sequence id */ 2845 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 2846 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); 2847 if (IS_ERR(calldata->arg.seqid)) 2848 goto out_free_calldata; 2849 calldata->arg.fmode = 0; 2850 calldata->arg.bitmask = server->cache_consistency_bitmask; 2851 calldata->res.fattr = &calldata->fattr; 2852 calldata->res.seqid = calldata->arg.seqid; 2853 calldata->res.server = server; 2854 calldata->roc = nfs4_roc(state->inode); 2855 nfs_sb_active(calldata->inode->i_sb); 2856 2857 msg.rpc_argp = &calldata->arg; 2858 msg.rpc_resp = &calldata->res; 2859 task_setup_data.callback_data = calldata; 2860 task = rpc_run_task(&task_setup_data); 2861 if (IS_ERR(task)) 2862 return PTR_ERR(task); 2863 status = 0; 2864 if (wait) 2865 status = rpc_wait_for_completion_task(task); 2866 rpc_put_task(task); 2867 return status; 2868 out_free_calldata: 2869 kfree(calldata); 2870 out: 2871 nfs4_put_open_state(state); 2872 nfs4_put_state_owner(sp); 2873 return status; 2874 } 2875 2876 static struct inode * 2877 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, 2878 int open_flags, struct iattr *attr, int *opened) 2879 { 2880 struct nfs4_state *state; 2881 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL; 2882 2883 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l); 2884 2885 /* Protect against concurrent sillydeletes */ 2886 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened); 2887 2888 nfs4_label_release_security(label); 2889 2890 if (IS_ERR(state)) 2891 return ERR_CAST(state); 2892 return state->inode; 2893 } 2894 2895 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 2896 { 2897 if (ctx->state == NULL) 2898 return; 2899 if (is_sync) 2900 nfs4_close_sync(ctx->state, ctx->mode); 2901 else 2902 nfs4_close_state(ctx->state, ctx->mode); 2903 } 2904 2905 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) 2906 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) 2907 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL) 2908 2909 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2910 { 2911 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion; 2912 struct nfs4_server_caps_arg args = { 2913 .fhandle = fhandle, 2914 .bitmask = bitmask, 2915 }; 2916 
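	/*
	 * Note on the FATTR4_WORD*_MASK macros above: each FATTR4_WORD*
	 * constant is a single bit, so (2 * <bit> - 1UL) is that bit together
	 * with every lower bit, i.e. "all attributes that can exist in this
	 * bitmap word for the given minor version". The reply from the server
	 * is clamped with these masks below before any capability bits are
	 * trusted.
	 */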
struct nfs4_server_caps_res res = {}; 2917 struct rpc_message msg = { 2918 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 2919 .rpc_argp = &args, 2920 .rpc_resp = &res, 2921 }; 2922 int status; 2923 2924 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | 2925 FATTR4_WORD0_FH_EXPIRE_TYPE | 2926 FATTR4_WORD0_LINK_SUPPORT | 2927 FATTR4_WORD0_SYMLINK_SUPPORT | 2928 FATTR4_WORD0_ACLSUPPORT; 2929 if (minorversion) 2930 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT; 2931 2932 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2933 if (status == 0) { 2934 /* Sanity check the server answers */ 2935 switch (minorversion) { 2936 case 0: 2937 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK; 2938 res.attr_bitmask[2] = 0; 2939 break; 2940 case 1: 2941 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK; 2942 break; 2943 case 2: 2944 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK; 2945 } 2946 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 2947 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 2948 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 2949 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 2950 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 2951 NFS_CAP_CTIME|NFS_CAP_MTIME| 2952 NFS_CAP_SECURITY_LABEL); 2953 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL && 2954 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 2955 server->caps |= NFS_CAP_ACLS; 2956 if (res.has_links != 0) 2957 server->caps |= NFS_CAP_HARDLINKS; 2958 if (res.has_symlinks != 0) 2959 server->caps |= NFS_CAP_SYMLINKS; 2960 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 2961 server->caps |= NFS_CAP_FILEID; 2962 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 2963 server->caps |= NFS_CAP_MODE; 2964 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 2965 server->caps |= NFS_CAP_NLINK; 2966 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 2967 server->caps |= NFS_CAP_OWNER; 2968 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 2969 server->caps |= NFS_CAP_OWNER_GROUP; 2970 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 2971 server->caps |= NFS_CAP_ATIME; 2972 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 2973 server->caps |= NFS_CAP_CTIME; 2974 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 2975 server->caps |= NFS_CAP_MTIME; 2976 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 2977 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL) 2978 server->caps |= NFS_CAP_SECURITY_LABEL; 2979 #endif 2980 memcpy(server->attr_bitmask_nl, res.attr_bitmask, 2981 sizeof(server->attr_bitmask)); 2982 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL; 2983 2984 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 2985 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 2986 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 2987 server->cache_consistency_bitmask[2] = 0; 2988 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, 2989 sizeof(server->exclcreat_bitmask)); 2990 server->acl_bitmask = res.acl_bitmask; 2991 server->fh_expire_type = res.fh_expire_type; 2992 } 2993 2994 return status; 2995 } 2996 2997 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2998 { 2999 struct nfs4_exception exception = { }; 3000 int err; 3001 do { 3002 err = nfs4_handle_exception(server, 3003 _nfs4_server_capabilities(server, fhandle), 3004 &exception); 3005 } while (exception.retry); 3006 return err; 3007 } 3008 3009 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh 
*fhandle, 3010 struct nfs_fsinfo *info) 3011 { 3012 u32 bitmask[3]; 3013 struct nfs4_lookup_root_arg args = { 3014 .bitmask = bitmask, 3015 }; 3016 struct nfs4_lookup_res res = { 3017 .server = server, 3018 .fattr = info->fattr, 3019 .fh = fhandle, 3020 }; 3021 struct rpc_message msg = { 3022 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 3023 .rpc_argp = &args, 3024 .rpc_resp = &res, 3025 }; 3026 3027 bitmask[0] = nfs4_fattr_bitmap[0]; 3028 bitmask[1] = nfs4_fattr_bitmap[1]; 3029 /* 3030 * Process the label in the upcoming getfattr 3031 */ 3032 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL; 3033 3034 nfs_fattr_init(info->fattr); 3035 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3036 } 3037 3038 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 3039 struct nfs_fsinfo *info) 3040 { 3041 struct nfs4_exception exception = { }; 3042 int err; 3043 do { 3044 err = _nfs4_lookup_root(server, fhandle, info); 3045 trace_nfs4_lookup_root(server, fhandle, info->fattr, err); 3046 switch (err) { 3047 case 0: 3048 case -NFS4ERR_WRONGSEC: 3049 goto out; 3050 default: 3051 err = nfs4_handle_exception(server, err, &exception); 3052 } 3053 } while (exception.retry); 3054 out: 3055 return err; 3056 } 3057 3058 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3059 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 3060 { 3061 struct rpc_auth_create_args auth_args = { 3062 .pseudoflavor = flavor, 3063 }; 3064 struct rpc_auth *auth; 3065 int ret; 3066 3067 auth = rpcauth_create(&auth_args, server->client); 3068 if (IS_ERR(auth)) { 3069 ret = -EACCES; 3070 goto out; 3071 } 3072 ret = nfs4_lookup_root(server, fhandle, info); 3073 out: 3074 return ret; 3075 } 3076 3077 /* 3078 * Retry pseudoroot lookup with various security flavors. We do this when: 3079 * 3080 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 3081 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 3082 * 3083 * Returns zero on success, or a negative NFS4ERR value, or a 3084 * negative errno value. 3085 */ 3086 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 3087 struct nfs_fsinfo *info) 3088 { 3089 /* Per 3530bis 15.33.5 */ 3090 static const rpc_authflavor_t flav_array[] = { 3091 RPC_AUTH_GSS_KRB5P, 3092 RPC_AUTH_GSS_KRB5I, 3093 RPC_AUTH_GSS_KRB5, 3094 RPC_AUTH_UNIX, /* courtesy */ 3095 RPC_AUTH_NULL, 3096 }; 3097 int status = -EPERM; 3098 size_t i; 3099 3100 if (server->auth_info.flavor_len > 0) { 3101 /* try each flavor specified by user */ 3102 for (i = 0; i < server->auth_info.flavor_len; i++) { 3103 status = nfs4_lookup_root_sec(server, fhandle, info, 3104 server->auth_info.flavors[i]); 3105 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 3106 continue; 3107 break; 3108 } 3109 } else { 3110 /* no flavors specified by user, try default list */ 3111 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 3112 status = nfs4_lookup_root_sec(server, fhandle, info, 3113 flav_array[i]); 3114 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 3115 continue; 3116 break; 3117 } 3118 } 3119 3120 /* 3121 * -EACCESS could mean that the user doesn't have correct permissions 3122 * to access the mount. It could also mean that we tried to mount 3123 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 3124 * existing mount programs don't handle -EACCES very well so it should 3125 * be mapped to -EPERM instead. 
3126 */ 3127 if (status == -EACCES) 3128 status = -EPERM; 3129 return status; 3130 } 3131 3132 static int nfs4_do_find_root_sec(struct nfs_server *server, 3133 struct nfs_fh *fhandle, struct nfs_fsinfo *info) 3134 { 3135 int mv = server->nfs_client->cl_minorversion; 3136 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info); 3137 } 3138 3139 /** 3140 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 3141 * @server: initialized nfs_server handle 3142 * @fhandle: we fill in the pseudo-fs root file handle 3143 * @info: we fill in an FSINFO struct 3144 * @auth_probe: probe the auth flavours 3145 * 3146 * Returns zero on success, or a negative errno. 3147 */ 3148 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 3149 struct nfs_fsinfo *info, 3150 bool auth_probe) 3151 { 3152 int status = 0; 3153 3154 if (!auth_probe) 3155 status = nfs4_lookup_root(server, fhandle, info); 3156 3157 if (auth_probe || status == NFS4ERR_WRONGSEC) 3158 status = nfs4_do_find_root_sec(server, fhandle, info); 3159 3160 if (status == 0) 3161 status = nfs4_server_capabilities(server, fhandle); 3162 if (status == 0) 3163 status = nfs4_do_fsinfo(server, fhandle, info); 3164 3165 return nfs4_map_errors(status); 3166 } 3167 3168 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 3169 struct nfs_fsinfo *info) 3170 { 3171 int error; 3172 struct nfs_fattr *fattr = info->fattr; 3173 struct nfs4_label *label = NULL; 3174 3175 error = nfs4_server_capabilities(server, mntfh); 3176 if (error < 0) { 3177 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 3178 return error; 3179 } 3180 3181 label = nfs4_label_alloc(server, GFP_KERNEL); 3182 if (IS_ERR(label)) 3183 return PTR_ERR(label); 3184 3185 error = nfs4_proc_getattr(server, mntfh, fattr, label); 3186 if (error < 0) { 3187 dprintk("nfs4_get_root: getattr error = %d\n", -error); 3188 goto err_free_label; 3189 } 3190 3191 if (fattr->valid & NFS_ATTR_FATTR_FSID && 3192 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 3193 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 3194 3195 err_free_label: 3196 nfs4_label_free(label); 3197 3198 return error; 3199 } 3200 3201 /* 3202 * Get locations and (maybe) other attributes of a referral. 3203 * Note that we'll actually follow the referral later when 3204 * we detect fsid mismatch in inode revalidation 3205 */ 3206 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 3207 const struct qstr *name, struct nfs_fattr *fattr, 3208 struct nfs_fh *fhandle) 3209 { 3210 int status = -ENOMEM; 3211 struct page *page = NULL; 3212 struct nfs4_fs_locations *locations = NULL; 3213 3214 page = alloc_page(GFP_KERNEL); 3215 if (page == NULL) 3216 goto out; 3217 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 3218 if (locations == NULL) 3219 goto out; 3220 3221 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 3222 if (status != 0) 3223 goto out; 3224 3225 /* 3226 * If the fsid didn't change, this is a migration event, not a 3227 * referral. Cause us to drop into the exception handler, which 3228 * will kick off migration recovery. 
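 * (returning -NFS4ERR_MOVED below does exactly that: the caller,
 * nfs4_proc_lookup_common(), feeds it to nfs4_handle_exception(), which
 * schedules the migration recovery)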
3229 */ 3230 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 3231 dprintk("%s: server did not return a different fsid for" 3232 " a referral at %s\n", __func__, name->name); 3233 status = -NFS4ERR_MOVED; 3234 goto out; 3235 } 3236 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 3237 nfs_fixup_referral_attributes(&locations->fattr); 3238 3239 /* replace the lookup nfs_fattr with the locations nfs_fattr */ 3240 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 3241 memset(fhandle, 0, sizeof(struct nfs_fh)); 3242 out: 3243 if (page) 3244 __free_page(page); 3245 kfree(locations); 3246 return status; 3247 } 3248 3249 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 3250 struct nfs_fattr *fattr, struct nfs4_label *label) 3251 { 3252 struct nfs4_getattr_arg args = { 3253 .fh = fhandle, 3254 .bitmask = server->attr_bitmask, 3255 }; 3256 struct nfs4_getattr_res res = { 3257 .fattr = fattr, 3258 .label = label, 3259 .server = server, 3260 }; 3261 struct rpc_message msg = { 3262 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 3263 .rpc_argp = &args, 3264 .rpc_resp = &res, 3265 }; 3266 3267 args.bitmask = nfs4_bitmask(server, label); 3268 3269 nfs_fattr_init(fattr); 3270 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3271 } 3272 3273 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, 3274 struct nfs_fattr *fattr, struct nfs4_label *label) 3275 { 3276 struct nfs4_exception exception = { }; 3277 int err; 3278 do { 3279 err = _nfs4_proc_getattr(server, fhandle, fattr, label); 3280 trace_nfs4_getattr(server, fhandle, fattr, err); 3281 err = nfs4_handle_exception(server, err, 3282 &exception); 3283 } while (exception.retry); 3284 return err; 3285 } 3286 3287 /* 3288 * The file is not closed if it is opened due to a request to change 3289 * the size of the file. The open call will not be needed once the 3290 * VFS layer lookup-intents are implemented. 3291 * 3292 * Close is called when the inode is destroyed. 3293 * If we haven't opened the file for O_WRONLY, we 3294 * need to do so in the size_change case in order to obtain a stateid. 3295 * 3296 * Got race? 3297 * Because OPEN is always done by name in NFSv4, it is 3298 * possible that we opened a different file by the same 3299 * name. We can recognize this race condition, but we 3300 * can't do anything about it besides returning an error. 3301 * 3302 * This will be fixed with VFS changes (lookup-intent).
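 *
 * For the SETATTR itself, _nfs4_do_setattr() picks a stateid roughly
 * as follows:
 *
 *	suitable delegation held      ->  delegation stateid
 *	size change with open state   ->  open/lock stateid for the caller
 *	everything else               ->  the zero stateid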
3303 */ 3304 static int 3305 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 3306 struct iattr *sattr) 3307 { 3308 struct inode *inode = d_inode(dentry); 3309 struct rpc_cred *cred = NULL; 3310 struct nfs4_state *state = NULL; 3311 struct nfs4_label *label = NULL; 3312 int status; 3313 3314 if (pnfs_ld_layoutret_on_setattr(inode) && 3315 sattr->ia_valid & ATTR_SIZE && 3316 sattr->ia_size < i_size_read(inode)) 3317 pnfs_commit_and_return_layout(inode); 3318 3319 nfs_fattr_init(fattr); 3320 3321 /* Deal with open(O_TRUNC) */ 3322 if (sattr->ia_valid & ATTR_OPEN) 3323 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME); 3324 3325 /* Optimization: if the end result is no change, don't RPC */ 3326 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0) 3327 return 0; 3328 3329 /* Search for an existing open(O_WRITE) file */ 3330 if (sattr->ia_valid & ATTR_FILE) { 3331 struct nfs_open_context *ctx; 3332 3333 ctx = nfs_file_open_context(sattr->ia_file); 3334 if (ctx) { 3335 cred = ctx->cred; 3336 state = ctx->state; 3337 } 3338 } 3339 3340 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 3341 if (IS_ERR(label)) 3342 return PTR_ERR(label); 3343 3344 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label); 3345 if (status == 0) { 3346 nfs_setattr_update_inode(inode, sattr, fattr); 3347 nfs_setsecurity(inode, fattr, label); 3348 } 3349 nfs4_label_free(label); 3350 return status; 3351 } 3352 3353 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 3354 const struct qstr *name, struct nfs_fh *fhandle, 3355 struct nfs_fattr *fattr, struct nfs4_label *label) 3356 { 3357 struct nfs_server *server = NFS_SERVER(dir); 3358 int status; 3359 struct nfs4_lookup_arg args = { 3360 .bitmask = server->attr_bitmask, 3361 .dir_fh = NFS_FH(dir), 3362 .name = name, 3363 }; 3364 struct nfs4_lookup_res res = { 3365 .server = server, 3366 .fattr = fattr, 3367 .label = label, 3368 .fh = fhandle, 3369 }; 3370 struct rpc_message msg = { 3371 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 3372 .rpc_argp = &args, 3373 .rpc_resp = &res, 3374 }; 3375 3376 args.bitmask = nfs4_bitmask(server, label); 3377 3378 nfs_fattr_init(fattr); 3379 3380 dprintk("NFS call lookup %s\n", name->name); 3381 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 3382 dprintk("NFS reply lookup: %d\n", status); 3383 return status; 3384 } 3385 3386 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 3387 { 3388 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 3389 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 3390 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 3391 fattr->nlink = 2; 3392 } 3393 3394 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 3395 struct qstr *name, struct nfs_fh *fhandle, 3396 struct nfs_fattr *fattr, struct nfs4_label *label) 3397 { 3398 struct nfs4_exception exception = { }; 3399 struct rpc_clnt *client = *clnt; 3400 int err; 3401 do { 3402 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label); 3403 trace_nfs4_lookup(dir, name, err); 3404 switch (err) { 3405 case -NFS4ERR_BADNAME: 3406 err = -ENOENT; 3407 goto out; 3408 case -NFS4ERR_MOVED: 3409 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 3410 if (err == -NFS4ERR_MOVED) 3411 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3412 goto out; 3413 case -NFS4ERR_WRONGSEC: 3414 err = -EPERM; 3415 if (client != *clnt) 3416 goto out; 3417 client = nfs4_negotiate_security(client, dir, name); 3418 if 
(IS_ERR(client)) 3419 return PTR_ERR(client); 3420 3421 exception.retry = 1; 3422 break; 3423 default: 3424 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 3425 } 3426 } while (exception.retry); 3427 3428 out: 3429 if (err == 0) 3430 *clnt = client; 3431 else if (client != *clnt) 3432 rpc_shutdown_client(client); 3433 3434 return err; 3435 } 3436 3437 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 3438 struct nfs_fh *fhandle, struct nfs_fattr *fattr, 3439 struct nfs4_label *label) 3440 { 3441 int status; 3442 struct rpc_clnt *client = NFS_CLIENT(dir); 3443 3444 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label); 3445 if (client != NFS_CLIENT(dir)) { 3446 rpc_shutdown_client(client); 3447 nfs_fixup_secinfo_attributes(fattr); 3448 } 3449 return status; 3450 } 3451 3452 struct rpc_clnt * 3453 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 3454 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 3455 { 3456 struct rpc_clnt *client = NFS_CLIENT(dir); 3457 int status; 3458 3459 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL); 3460 if (status < 0) 3461 return ERR_PTR(status); 3462 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client; 3463 } 3464 3465 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3466 { 3467 struct nfs_server *server = NFS_SERVER(inode); 3468 struct nfs4_accessargs args = { 3469 .fh = NFS_FH(inode), 3470 .bitmask = server->cache_consistency_bitmask, 3471 }; 3472 struct nfs4_accessres res = { 3473 .server = server, 3474 }; 3475 struct rpc_message msg = { 3476 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 3477 .rpc_argp = &args, 3478 .rpc_resp = &res, 3479 .rpc_cred = entry->cred, 3480 }; 3481 int mode = entry->mask; 3482 int status = 0; 3483 3484 /* 3485 * Determine which access bits we want to ask for... 3486 */ 3487 if (mode & MAY_READ) 3488 args.access |= NFS4_ACCESS_READ; 3489 if (S_ISDIR(inode->i_mode)) { 3490 if (mode & MAY_WRITE) 3491 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 3492 if (mode & MAY_EXEC) 3493 args.access |= NFS4_ACCESS_LOOKUP; 3494 } else { 3495 if (mode & MAY_WRITE) 3496 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 3497 if (mode & MAY_EXEC) 3498 args.access |= NFS4_ACCESS_EXECUTE; 3499 } 3500 3501 res.fattr = nfs_alloc_fattr(); 3502 if (res.fattr == NULL) 3503 return -ENOMEM; 3504 3505 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3506 if (!status) { 3507 nfs_access_set_mask(entry, res.access); 3508 nfs_refresh_inode(inode, res.fattr); 3509 } 3510 nfs_free_fattr(res.fattr); 3511 return status; 3512 } 3513 3514 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 3515 { 3516 struct nfs4_exception exception = { }; 3517 int err; 3518 do { 3519 err = _nfs4_proc_access(inode, entry); 3520 trace_nfs4_access(inode, err); 3521 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3522 &exception); 3523 } while (exception.retry); 3524 return err; 3525 } 3526 3527 /* 3528 * TODO: For the time being, we don't try to get any attributes 3529 * along with any of the zero-copy operations READ, READDIR, 3530 * READLINK, WRITE. 3531 * 3532 * In the case of the first three, we want to put the GETATTR 3533 * after the read-type operation -- this is because it is hard 3534 * to predict the length of a GETATTR response in v4, and thus 3535 * align the READ data correctly. 
This means that the GETATTR 3536 * may end up partially falling into the page cache, and we should 3537 * shift it into the 'tail' of the xdr_buf before processing. 3538 * To do this efficiently, we need to know the total length 3539 * of data received, which doesn't seem to be available outside 3540 * of the RPC layer. 3541 * 3542 * In the case of WRITE, we also want to put the GETATTR after 3543 * the operation -- in this case because we want to make sure 3544 * we get the post-operation mtime and size. 3545 * 3546 * Both of these changes to the XDR layer would in fact be quite 3547 * minor, but I decided to leave them for a subsequent patch. 3548 */ 3549 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 3550 unsigned int pgbase, unsigned int pglen) 3551 { 3552 struct nfs4_readlink args = { 3553 .fh = NFS_FH(inode), 3554 .pgbase = pgbase, 3555 .pglen = pglen, 3556 .pages = &page, 3557 }; 3558 struct nfs4_readlink_res res; 3559 struct rpc_message msg = { 3560 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 3561 .rpc_argp = &args, 3562 .rpc_resp = &res, 3563 }; 3564 3565 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 3566 } 3567 3568 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 3569 unsigned int pgbase, unsigned int pglen) 3570 { 3571 struct nfs4_exception exception = { }; 3572 int err; 3573 do { 3574 err = _nfs4_proc_readlink(inode, page, pgbase, pglen); 3575 trace_nfs4_readlink(inode, err); 3576 err = nfs4_handle_exception(NFS_SERVER(inode), err, 3577 &exception); 3578 } while (exception.retry); 3579 return err; 3580 } 3581 3582 /* 3583 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 3584 */ 3585 static int 3586 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 3587 int flags) 3588 { 3589 struct nfs4_label l, *ilabel = NULL; 3590 struct nfs_open_context *ctx; 3591 struct nfs4_state *state; 3592 int status = 0; 3593 3594 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 3595 if (IS_ERR(ctx)) 3596 return PTR_ERR(ctx); 3597 3598 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); 3599 3600 sattr->ia_mode &= ~current_umask(); 3601 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); 3602 if (IS_ERR(state)) { 3603 status = PTR_ERR(state); 3604 goto out; 3605 } 3606 out: 3607 nfs4_label_release_security(ilabel); 3608 put_nfs_open_context(ctx); 3609 return status; 3610 } 3611 3612 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 3613 { 3614 struct nfs_server *server = NFS_SERVER(dir); 3615 struct nfs_removeargs args = { 3616 .fh = NFS_FH(dir), 3617 .name = *name, 3618 }; 3619 struct nfs_removeres res = { 3620 .server = server, 3621 }; 3622 struct rpc_message msg = { 3623 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 3624 .rpc_argp = &args, 3625 .rpc_resp = &res, 3626 }; 3627 int status; 3628 3629 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 3630 if (status == 0) 3631 update_changeattr(dir, &res.cinfo); 3632 return status; 3633 } 3634 3635 static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 3636 { 3637 struct nfs4_exception exception = { }; 3638 int err; 3639 do { 3640 err = _nfs4_proc_remove(dir, name); 3641 trace_nfs4_remove(dir, name, err); 3642 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3643 &exception); 3644 } while (exception.retry); 3645 return err; 3646 } 3647 3648 static void nfs4_proc_unlink_setup(struct rpc_message 
*msg, struct inode *dir) 3649 { 3650 struct nfs_server *server = NFS_SERVER(dir); 3651 struct nfs_removeargs *args = msg->rpc_argp; 3652 struct nfs_removeres *res = msg->rpc_resp; 3653 3654 res->server = server; 3655 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 3656 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1); 3657 3658 nfs_fattr_init(res->dir_attr); 3659 } 3660 3661 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 3662 { 3663 nfs4_setup_sequence(NFS_SERVER(data->dir), 3664 &data->args.seq_args, 3665 &data->res.seq_res, 3666 task); 3667 } 3668 3669 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 3670 { 3671 struct nfs_unlinkdata *data = task->tk_calldata; 3672 struct nfs_removeres *res = &data->res; 3673 3674 if (!nfs4_sequence_done(task, &res->seq_res)) 3675 return 0; 3676 if (nfs4_async_handle_error(task, res->server, NULL, 3677 &data->timeout) == -EAGAIN) 3678 return 0; 3679 update_changeattr(dir, &res->cinfo); 3680 return 1; 3681 } 3682 3683 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 3684 { 3685 struct nfs_server *server = NFS_SERVER(dir); 3686 struct nfs_renameargs *arg = msg->rpc_argp; 3687 struct nfs_renameres *res = msg->rpc_resp; 3688 3689 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 3690 res->server = server; 3691 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1); 3692 } 3693 3694 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 3695 { 3696 nfs4_setup_sequence(NFS_SERVER(data->old_dir), 3697 &data->args.seq_args, 3698 &data->res.seq_res, 3699 task); 3700 } 3701 3702 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 3703 struct inode *new_dir) 3704 { 3705 struct nfs_renamedata *data = task->tk_calldata; 3706 struct nfs_renameres *res = &data->res; 3707 3708 if (!nfs4_sequence_done(task, &res->seq_res)) 3709 return 0; 3710 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN) 3711 return 0; 3712 3713 update_changeattr(old_dir, &res->old_cinfo); 3714 update_changeattr(new_dir, &res->new_cinfo); 3715 return 1; 3716 } 3717 3718 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3719 { 3720 struct nfs_server *server = NFS_SERVER(inode); 3721 struct nfs4_link_arg arg = { 3722 .fh = NFS_FH(inode), 3723 .dir_fh = NFS_FH(dir), 3724 .name = name, 3725 .bitmask = server->attr_bitmask, 3726 }; 3727 struct nfs4_link_res res = { 3728 .server = server, 3729 .label = NULL, 3730 }; 3731 struct rpc_message msg = { 3732 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3733 .rpc_argp = &arg, 3734 .rpc_resp = &res, 3735 }; 3736 int status = -ENOMEM; 3737 3738 res.fattr = nfs_alloc_fattr(); 3739 if (res.fattr == NULL) 3740 goto out; 3741 3742 res.label = nfs4_label_alloc(server, GFP_KERNEL); 3743 if (IS_ERR(res.label)) { 3744 status = PTR_ERR(res.label); 3745 goto out; 3746 } 3747 arg.bitmask = nfs4_bitmask(server, res.label); 3748 3749 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3750 if (!status) { 3751 update_changeattr(dir, &res.cinfo); 3752 status = nfs_post_op_update_inode(inode, res.fattr); 3753 if (!status) 3754 nfs_setsecurity(inode, res.fattr, res.label); 3755 } 3756 3757 3758 nfs4_label_free(res.label); 3759 3760 out: 3761 nfs_free_fattr(res.fattr); 3762 return status; 3763 } 3764 3765 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3766 { 
3767 struct nfs4_exception exception = { }; 3768 int err; 3769 do { 3770 err = nfs4_handle_exception(NFS_SERVER(inode), 3771 _nfs4_proc_link(inode, dir, name), 3772 &exception); 3773 } while (exception.retry); 3774 return err; 3775 } 3776 3777 struct nfs4_createdata { 3778 struct rpc_message msg; 3779 struct nfs4_create_arg arg; 3780 struct nfs4_create_res res; 3781 struct nfs_fh fh; 3782 struct nfs_fattr fattr; 3783 struct nfs4_label *label; 3784 }; 3785 3786 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3787 struct qstr *name, struct iattr *sattr, u32 ftype) 3788 { 3789 struct nfs4_createdata *data; 3790 3791 data = kzalloc(sizeof(*data), GFP_KERNEL); 3792 if (data != NULL) { 3793 struct nfs_server *server = NFS_SERVER(dir); 3794 3795 data->label = nfs4_label_alloc(server, GFP_KERNEL); 3796 if (IS_ERR(data->label)) 3797 goto out_free; 3798 3799 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3800 data->msg.rpc_argp = &data->arg; 3801 data->msg.rpc_resp = &data->res; 3802 data->arg.dir_fh = NFS_FH(dir); 3803 data->arg.server = server; 3804 data->arg.name = name; 3805 data->arg.attrs = sattr; 3806 data->arg.ftype = ftype; 3807 data->arg.bitmask = nfs4_bitmask(server, data->label); 3808 data->res.server = server; 3809 data->res.fh = &data->fh; 3810 data->res.fattr = &data->fattr; 3811 data->res.label = data->label; 3812 nfs_fattr_init(data->res.fattr); 3813 } 3814 return data; 3815 out_free: 3816 kfree(data); 3817 return NULL; 3818 } 3819 3820 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3821 { 3822 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3823 &data->arg.seq_args, &data->res.seq_res, 1); 3824 if (status == 0) { 3825 update_changeattr(dir, &data->res.dir_cinfo); 3826 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label); 3827 } 3828 return status; 3829 } 3830 3831 static void nfs4_free_createdata(struct nfs4_createdata *data) 3832 { 3833 nfs4_label_free(data->label); 3834 kfree(data); 3835 } 3836 3837 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3838 struct page *page, unsigned int len, struct iattr *sattr, 3839 struct nfs4_label *label) 3840 { 3841 struct nfs4_createdata *data; 3842 int status = -ENAMETOOLONG; 3843 3844 if (len > NFS4_MAXPATHLEN) 3845 goto out; 3846 3847 status = -ENOMEM; 3848 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3849 if (data == NULL) 3850 goto out; 3851 3852 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3853 data->arg.u.symlink.pages = &page; 3854 data->arg.u.symlink.len = len; 3855 data->arg.label = label; 3856 3857 status = nfs4_do_create(dir, dentry, data); 3858 3859 nfs4_free_createdata(data); 3860 out: 3861 return status; 3862 } 3863 3864 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3865 struct page *page, unsigned int len, struct iattr *sattr) 3866 { 3867 struct nfs4_exception exception = { }; 3868 struct nfs4_label l, *label = NULL; 3869 int err; 3870 3871 label = nfs4_label_init_security(dir, dentry, sattr, &l); 3872 3873 do { 3874 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label); 3875 trace_nfs4_symlink(dir, &dentry->d_name, err); 3876 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3877 &exception); 3878 } while (exception.retry); 3879 3880 nfs4_label_release_security(label); 3881 return err; 3882 } 3883 3884 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3885 struct 
iattr *sattr, struct nfs4_label *label) 3886 { 3887 struct nfs4_createdata *data; 3888 int status = -ENOMEM; 3889 3890 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 3891 if (data == NULL) 3892 goto out; 3893 3894 data->arg.label = label; 3895 status = nfs4_do_create(dir, dentry, data); 3896 3897 nfs4_free_createdata(data); 3898 out: 3899 return status; 3900 } 3901 3902 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3903 struct iattr *sattr) 3904 { 3905 struct nfs4_exception exception = { }; 3906 struct nfs4_label l, *label = NULL; 3907 int err; 3908 3909 label = nfs4_label_init_security(dir, dentry, sattr, &l); 3910 3911 sattr->ia_mode &= ~current_umask(); 3912 do { 3913 err = _nfs4_proc_mkdir(dir, dentry, sattr, label); 3914 trace_nfs4_mkdir(dir, &dentry->d_name, err); 3915 err = nfs4_handle_exception(NFS_SERVER(dir), err, 3916 &exception); 3917 } while (exception.retry); 3918 nfs4_label_release_security(label); 3919 3920 return err; 3921 } 3922 3923 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3924 u64 cookie, struct page **pages, unsigned int count, int plus) 3925 { 3926 struct inode *dir = d_inode(dentry); 3927 struct nfs4_readdir_arg args = { 3928 .fh = NFS_FH(dir), 3929 .pages = pages, 3930 .pgbase = 0, 3931 .count = count, 3932 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask, 3933 .plus = plus, 3934 }; 3935 struct nfs4_readdir_res res; 3936 struct rpc_message msg = { 3937 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 3938 .rpc_argp = &args, 3939 .rpc_resp = &res, 3940 .rpc_cred = cred, 3941 }; 3942 int status; 3943 3944 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__, 3945 dentry, 3946 (unsigned long long)cookie); 3947 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 3948 res.pgbase = args.pgbase; 3949 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 3950 if (status >= 0) { 3951 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 3952 status += args.pgbase; 3953 } 3954 3955 nfs_invalidate_atime(dir); 3956 3957 dprintk("%s: returns %d\n", __func__, status); 3958 return status; 3959 } 3960 3961 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3962 u64 cookie, struct page **pages, unsigned int count, int plus) 3963 { 3964 struct nfs4_exception exception = { }; 3965 int err; 3966 do { 3967 err = _nfs4_proc_readdir(dentry, cred, cookie, 3968 pages, count, plus); 3969 trace_nfs4_readdir(d_inode(dentry), err); 3970 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err, 3971 &exception); 3972 } while (exception.retry); 3973 return err; 3974 } 3975 3976 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3977 struct iattr *sattr, struct nfs4_label *label, dev_t rdev) 3978 { 3979 struct nfs4_createdata *data; 3980 int mode = sattr->ia_mode; 3981 int status = -ENOMEM; 3982 3983 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 3984 if (data == NULL) 3985 goto out; 3986 3987 if (S_ISFIFO(mode)) 3988 data->arg.ftype = NF4FIFO; 3989 else if (S_ISBLK(mode)) { 3990 data->arg.ftype = NF4BLK; 3991 data->arg.u.device.specdata1 = MAJOR(rdev); 3992 data->arg.u.device.specdata2 = MINOR(rdev); 3993 } 3994 else if (S_ISCHR(mode)) { 3995 data->arg.ftype = NF4CHR; 3996 data->arg.u.device.specdata1 = MAJOR(rdev); 3997 data->arg.u.device.specdata2 = MINOR(rdev); 3998 } else if (!S_ISSOCK(mode)) { 3999 status = -EINVAL; 4000 goto out_free; 4001 } 4002 4003 
data->arg.label = label; 4004 status = nfs4_do_create(dir, dentry, data); 4005 out_free: 4006 nfs4_free_createdata(data); 4007 out: 4008 return status; 4009 } 4010 4011 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 4012 struct iattr *sattr, dev_t rdev) 4013 { 4014 struct nfs4_exception exception = { }; 4015 struct nfs4_label l, *label = NULL; 4016 int err; 4017 4018 label = nfs4_label_init_security(dir, dentry, sattr, &l); 4019 4020 sattr->ia_mode &= ~current_umask(); 4021 do { 4022 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev); 4023 trace_nfs4_mknod(dir, &dentry->d_name, err); 4024 err = nfs4_handle_exception(NFS_SERVER(dir), err, 4025 &exception); 4026 } while (exception.retry); 4027 4028 nfs4_label_release_security(label); 4029 4030 return err; 4031 } 4032 4033 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 4034 struct nfs_fsstat *fsstat) 4035 { 4036 struct nfs4_statfs_arg args = { 4037 .fh = fhandle, 4038 .bitmask = server->attr_bitmask, 4039 }; 4040 struct nfs4_statfs_res res = { 4041 .fsstat = fsstat, 4042 }; 4043 struct rpc_message msg = { 4044 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 4045 .rpc_argp = &args, 4046 .rpc_resp = &res, 4047 }; 4048 4049 nfs_fattr_init(fsstat->fattr); 4050 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4051 } 4052 4053 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 4054 { 4055 struct nfs4_exception exception = { }; 4056 int err; 4057 do { 4058 err = nfs4_handle_exception(server, 4059 _nfs4_proc_statfs(server, fhandle, fsstat), 4060 &exception); 4061 } while (exception.retry); 4062 return err; 4063 } 4064 4065 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 4066 struct nfs_fsinfo *fsinfo) 4067 { 4068 struct nfs4_fsinfo_arg args = { 4069 .fh = fhandle, 4070 .bitmask = server->attr_bitmask, 4071 }; 4072 struct nfs4_fsinfo_res res = { 4073 .fsinfo = fsinfo, 4074 }; 4075 struct rpc_message msg = { 4076 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 4077 .rpc_argp = &args, 4078 .rpc_resp = &res, 4079 }; 4080 4081 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4082 } 4083 4084 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4085 { 4086 struct nfs4_exception exception = { }; 4087 unsigned long now = jiffies; 4088 int err; 4089 4090 do { 4091 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 4092 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err); 4093 if (err == 0) { 4094 struct nfs_client *clp = server->nfs_client; 4095 4096 spin_lock(&clp->cl_lock); 4097 clp->cl_lease_time = fsinfo->lease_time * HZ; 4098 clp->cl_last_renewal = now; 4099 spin_unlock(&clp->cl_lock); 4100 break; 4101 } 4102 err = nfs4_handle_exception(server, err, &exception); 4103 } while (exception.retry); 4104 return err; 4105 } 4106 4107 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 4108 { 4109 int error; 4110 4111 nfs_fattr_init(fsinfo->fattr); 4112 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 4113 if (error == 0) { 4114 /* block layout checks this! 
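 * (i.e. record the block size reported by FSINFO before set_pnfs_layoutdriver()
 * is called below, which is where the block layout driver gets a chance to
 * look at it)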
*/ 4115 server->pnfs_blksize = fsinfo->blksize; 4116 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 4117 } 4118 4119 return error; 4120 } 4121
4122 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4123 struct nfs_pathconf *pathconf) 4124 { 4125 struct nfs4_pathconf_arg args = { 4126 .fh = fhandle, 4127 .bitmask = server->attr_bitmask, 4128 }; 4129 struct nfs4_pathconf_res res = { 4130 .pathconf = pathconf, 4131 }; 4132 struct rpc_message msg = { 4133 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 4134 .rpc_argp = &args, 4135 .rpc_resp = &res, 4136 }; 4137 4138 /* None of the pathconf attributes are mandatory to implement */ 4139 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 4140 memset(pathconf, 0, sizeof(*pathconf)); 4141 return 0; 4142 } 4143 4144 nfs_fattr_init(pathconf->fattr); 4145 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 4146 } 4147
4148 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 4149 struct nfs_pathconf *pathconf) 4150 { 4151 struct nfs4_exception exception = { }; 4152 int err; 4153 4154 do { 4155 err = nfs4_handle_exception(server, 4156 _nfs4_proc_pathconf(server, fhandle, pathconf), 4157 &exception); 4158 } while (exception.retry); 4159 return err; 4160 } 4161
4162 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 4163 const struct nfs_open_context *ctx, 4164 const struct nfs_lock_context *l_ctx, 4165 fmode_t fmode) 4166 { 4167 const struct nfs_lockowner *lockowner = NULL; 4168 4169 if (l_ctx != NULL) 4170 lockowner = &l_ctx->lockowner; 4171 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner); 4172 } 4173 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 4174
4175 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 4176 const struct nfs_open_context *ctx, 4177 const struct nfs_lock_context *l_ctx, 4178 fmode_t fmode) 4179 { 4180 nfs4_stateid current_stateid; 4181 4182 /* If the current stateid represents a lost lock, then exit */ 4183 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO) 4184 return true; 4185 return nfs4_stateid_match(stateid, &current_stateid); 4186 } 4187
4188 static bool nfs4_error_stateid_expired(int err) 4189 { 4190 switch (err) { 4191 case -NFS4ERR_DELEG_REVOKED: 4192 case -NFS4ERR_ADMIN_REVOKED: 4193 case -NFS4ERR_BAD_STATEID: 4194 case -NFS4ERR_STALE_STATEID: 4195 case -NFS4ERR_OLD_STATEID: 4196 case -NFS4ERR_OPENMODE: 4197 case -NFS4ERR_EXPIRED: 4198 return true; 4199 } 4200 return false; 4201 } 4202
4203 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr) 4204 { 4205 nfs_invalidate_atime(hdr->inode); 4206 } 4207
4208 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) 4209 { 4210 struct nfs_server *server = NFS_SERVER(hdr->inode); 4211 4212 trace_nfs4_read(hdr, task->tk_status); 4213 if (nfs4_async_handle_error(task, server, 4214 hdr->args.context->state, 4215 NULL) == -EAGAIN) { 4216 rpc_restart_call_prepare(task); 4217 return -EAGAIN; 4218 } 4219 4220 __nfs4_read_done_cb(hdr); 4221 if (task->tk_status > 0) 4222 renew_lease(server, hdr->timestamp); 4223 return 0; 4224 } 4225
4226 static bool nfs4_read_stateid_changed(struct rpc_task *task, 4227 struct nfs_pgio_args *args) 4228 { 4229 4230 if (!nfs4_error_stateid_expired(task->tk_status) || 4231 nfs4_stateid_is_current(&args->stateid, 4232 args->context, 4233 args->lock_context, 4234 FMODE_READ)) 4235 return false; 4236 rpc_restart_call_prepare(task); 4237 return true; 4238 } 4239 4240 static int
nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4241 { 4242 4243 dprintk("--> %s\n", __func__); 4244 4245 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4246 return -EAGAIN; 4247 if (nfs4_read_stateid_changed(task, &hdr->args)) 4248 return -EAGAIN; 4249 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) : 4250 nfs4_read_done_cb(task, hdr); 4251 } 4252 4253 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr, 4254 struct rpc_message *msg) 4255 { 4256 hdr->timestamp = jiffies; 4257 hdr->pgio_done_cb = nfs4_read_done_cb; 4258 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 4259 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0); 4260 } 4261 4262 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task, 4263 struct nfs_pgio_header *hdr) 4264 { 4265 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode), 4266 &hdr->args.seq_args, 4267 &hdr->res.seq_res, 4268 task)) 4269 return 0; 4270 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, 4271 hdr->args.lock_context, 4272 hdr->rw_ops->rw_mode) == -EIO) 4273 return -EIO; 4274 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) 4275 return -EIO; 4276 return 0; 4277 } 4278 4279 static int nfs4_write_done_cb(struct rpc_task *task, 4280 struct nfs_pgio_header *hdr) 4281 { 4282 struct inode *inode = hdr->inode; 4283 4284 trace_nfs4_write(hdr, task->tk_status); 4285 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4286 hdr->args.context->state, 4287 NULL) == -EAGAIN) { 4288 rpc_restart_call_prepare(task); 4289 return -EAGAIN; 4290 } 4291 if (task->tk_status >= 0) { 4292 renew_lease(NFS_SERVER(inode), hdr->timestamp); 4293 nfs_writeback_update_inode(hdr); 4294 } 4295 return 0; 4296 } 4297 4298 static bool nfs4_write_stateid_changed(struct rpc_task *task, 4299 struct nfs_pgio_args *args) 4300 { 4301 4302 if (!nfs4_error_stateid_expired(task->tk_status) || 4303 nfs4_stateid_is_current(&args->stateid, 4304 args->context, 4305 args->lock_context, 4306 FMODE_WRITE)) 4307 return false; 4308 rpc_restart_call_prepare(task); 4309 return true; 4310 } 4311 4312 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) 4313 { 4314 if (!nfs4_sequence_done(task, &hdr->res.seq_res)) 4315 return -EAGAIN; 4316 if (nfs4_write_stateid_changed(task, &hdr->args)) 4317 return -EAGAIN; 4318 return hdr->pgio_done_cb ? 
hdr->pgio_done_cb(task, hdr) : 4319 nfs4_write_done_cb(task, hdr); 4320 } 4321 4322 static 4323 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr) 4324 { 4325 /* Don't request attributes for pNFS or O_DIRECT writes */ 4326 if (hdr->ds_clp != NULL || hdr->dreq != NULL) 4327 return false; 4328 /* Otherwise, request attributes if and only if we don't hold 4329 * a delegation 4330 */ 4331 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 4332 } 4333 4334 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, 4335 struct rpc_message *msg) 4336 { 4337 struct nfs_server *server = NFS_SERVER(hdr->inode); 4338 4339 if (!nfs4_write_need_cache_consistency_data(hdr)) { 4340 hdr->args.bitmask = NULL; 4341 hdr->res.fattr = NULL; 4342 } else 4343 hdr->args.bitmask = server->cache_consistency_bitmask; 4344 4345 if (!hdr->pgio_done_cb) 4346 hdr->pgio_done_cb = nfs4_write_done_cb; 4347 hdr->res.server = server; 4348 hdr->timestamp = jiffies; 4349 4350 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 4351 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1); 4352 } 4353 4354 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 4355 { 4356 nfs4_setup_sequence(NFS_SERVER(data->inode), 4357 &data->args.seq_args, 4358 &data->res.seq_res, 4359 task); 4360 } 4361 4362 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 4363 { 4364 struct inode *inode = data->inode; 4365 4366 trace_nfs4_commit(data, task->tk_status); 4367 if (nfs4_async_handle_error(task, NFS_SERVER(inode), 4368 NULL, NULL) == -EAGAIN) { 4369 rpc_restart_call_prepare(task); 4370 return -EAGAIN; 4371 } 4372 return 0; 4373 } 4374 4375 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 4376 { 4377 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4378 return -EAGAIN; 4379 return data->commit_done_cb(task, data); 4380 } 4381 4382 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 4383 { 4384 struct nfs_server *server = NFS_SERVER(data->inode); 4385 4386 if (data->commit_done_cb == NULL) 4387 data->commit_done_cb = nfs4_commit_done_cb; 4388 data->res.server = server; 4389 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 4390 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4391 } 4392 4393 struct nfs4_renewdata { 4394 struct nfs_client *client; 4395 unsigned long timestamp; 4396 }; 4397 4398 /* 4399 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 4400 * standalone procedure for queueing an asynchronous RENEW. 4401 */ 4402 static void nfs4_renew_release(void *calldata) 4403 { 4404 struct nfs4_renewdata *data = calldata; 4405 struct nfs_client *clp = data->client; 4406 4407 if (atomic_read(&clp->cl_count) > 1) 4408 nfs4_schedule_state_renewal(clp); 4409 nfs_put_client(clp); 4410 kfree(data); 4411 } 4412 4413 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 4414 { 4415 struct nfs4_renewdata *data = calldata; 4416 struct nfs_client *clp = data->client; 4417 unsigned long timestamp = data->timestamp; 4418 4419 trace_nfs4_renew_async(clp, task->tk_status); 4420 switch (task->tk_status) { 4421 case 0: 4422 break; 4423 case -NFS4ERR_LEASE_MOVED: 4424 nfs4_schedule_lease_moved_recovery(clp); 4425 break; 4426 default: 4427 /* Unless we're shutting down, schedule state recovery! 
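 * The common outcome below is full lease recovery; the one exception is
 * NFS4ERR_CB_PATH_DOWN, where the lease itself is still valid and only the
 * callback channel needs re-establishing, so path-down recovery is scheduled
 * instead.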
*/ 4428 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 4429 return; 4430 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 4431 nfs4_schedule_lease_recovery(clp); 4432 return; 4433 } 4434 nfs4_schedule_path_down_recovery(clp); 4435 } 4436 do_renew_lease(clp, timestamp); 4437 } 4438 4439 static const struct rpc_call_ops nfs4_renew_ops = { 4440 .rpc_call_done = nfs4_renew_done, 4441 .rpc_release = nfs4_renew_release, 4442 }; 4443 4444 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 4445 { 4446 struct rpc_message msg = { 4447 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4448 .rpc_argp = clp, 4449 .rpc_cred = cred, 4450 }; 4451 struct nfs4_renewdata *data; 4452 4453 if (renew_flags == 0) 4454 return 0; 4455 if (!atomic_inc_not_zero(&clp->cl_count)) 4456 return -EIO; 4457 data = kmalloc(sizeof(*data), GFP_NOFS); 4458 if (data == NULL) 4459 return -ENOMEM; 4460 data->client = clp; 4461 data->timestamp = jiffies; 4462 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 4463 &nfs4_renew_ops, data); 4464 } 4465 4466 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 4467 { 4468 struct rpc_message msg = { 4469 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 4470 .rpc_argp = clp, 4471 .rpc_cred = cred, 4472 }; 4473 unsigned long now = jiffies; 4474 int status; 4475 4476 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4477 if (status < 0) 4478 return status; 4479 do_renew_lease(clp, now); 4480 return 0; 4481 } 4482 4483 static inline int nfs4_server_supports_acls(struct nfs_server *server) 4484 { 4485 return server->caps & NFS_CAP_ACLS; 4486 } 4487 4488 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 4489 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 4490 * the stack. 
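 * (With the usual 64KiB XATTR_SIZE_MAX and 4KiB pages that works out to
 * NFS4ACL_MAXPAGES == 16, i.e. an array of 16 page pointers per caller.)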
4491 */ 4492 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 4493 4494 static int buf_to_pages_noslab(const void *buf, size_t buflen, 4495 struct page **pages, unsigned int *pgbase) 4496 { 4497 struct page *newpage, **spages; 4498 int rc = 0; 4499 size_t len; 4500 spages = pages; 4501 4502 do { 4503 len = min_t(size_t, PAGE_SIZE, buflen); 4504 newpage = alloc_page(GFP_KERNEL); 4505 4506 if (newpage == NULL) 4507 goto unwind; 4508 memcpy(page_address(newpage), buf, len); 4509 buf += len; 4510 buflen -= len; 4511 *pages++ = newpage; 4512 rc++; 4513 } while (buflen != 0); 4514 4515 return rc; 4516 4517 unwind: 4518 for(; rc > 0; rc--) 4519 __free_page(spages[rc-1]); 4520 return -ENOMEM; 4521 } 4522 4523 struct nfs4_cached_acl { 4524 int cached; 4525 size_t len; 4526 char data[0]; 4527 }; 4528 4529 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 4530 { 4531 struct nfs_inode *nfsi = NFS_I(inode); 4532 4533 spin_lock(&inode->i_lock); 4534 kfree(nfsi->nfs4_acl); 4535 nfsi->nfs4_acl = acl; 4536 spin_unlock(&inode->i_lock); 4537 } 4538 4539 static void nfs4_zap_acl_attr(struct inode *inode) 4540 { 4541 nfs4_set_cached_acl(inode, NULL); 4542 } 4543 4544 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 4545 { 4546 struct nfs_inode *nfsi = NFS_I(inode); 4547 struct nfs4_cached_acl *acl; 4548 int ret = -ENOENT; 4549 4550 spin_lock(&inode->i_lock); 4551 acl = nfsi->nfs4_acl; 4552 if (acl == NULL) 4553 goto out; 4554 if (buf == NULL) /* user is just asking for length */ 4555 goto out_len; 4556 if (acl->cached == 0) 4557 goto out; 4558 ret = -ERANGE; /* see getxattr(2) man page */ 4559 if (acl->len > buflen) 4560 goto out; 4561 memcpy(buf, acl->data, acl->len); 4562 out_len: 4563 ret = acl->len; 4564 out: 4565 spin_unlock(&inode->i_lock); 4566 return ret; 4567 } 4568 4569 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 4570 { 4571 struct nfs4_cached_acl *acl; 4572 size_t buflen = sizeof(*acl) + acl_len; 4573 4574 if (buflen <= PAGE_SIZE) { 4575 acl = kmalloc(buflen, GFP_KERNEL); 4576 if (acl == NULL) 4577 goto out; 4578 acl->cached = 1; 4579 _copy_from_pages(acl->data, pages, pgbase, acl_len); 4580 } else { 4581 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 4582 if (acl == NULL) 4583 goto out; 4584 acl->cached = 0; 4585 } 4586 acl->len = acl_len; 4587 out: 4588 nfs4_set_cached_acl(inode, acl); 4589 } 4590 4591 /* 4592 * The getxattr API returns the required buffer length when called with a 4593 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 4594 * the required buf. On a NULL buf, we send a page of data to the server 4595 * guessing that the ACL request can be serviced by a page. If so, we cache 4596 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 4597 * the cache. If not so, we throw away the page, and cache the required 4598 * length. The next getxattr call will then produce another round trip to 4599 * the server, this time with the input buf of the required size. 
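 *
 * Roughly, the calling pattern this is tuned for is:
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);	length probe; may
 *								cache a page of ACL data
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);		served from the cache
 *								when the ACL fit in a page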
4600 */ 4601 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4602 { 4603 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 4604 struct nfs_getaclargs args = { 4605 .fh = NFS_FH(inode), 4606 .acl_pages = pages, 4607 .acl_len = buflen, 4608 }; 4609 struct nfs_getaclres res = { 4610 .acl_len = buflen, 4611 }; 4612 struct rpc_message msg = { 4613 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 4614 .rpc_argp = &args, 4615 .rpc_resp = &res, 4616 }; 4617 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4618 int ret = -ENOMEM, i; 4619 4620 /* As long as we're doing a round trip to the server anyway, 4621 * let's be prepared for a page of acl data. */ 4622 if (npages == 0) 4623 npages = 1; 4624 if (npages > ARRAY_SIZE(pages)) 4625 return -ERANGE; 4626 4627 for (i = 0; i < npages; i++) { 4628 pages[i] = alloc_page(GFP_KERNEL); 4629 if (!pages[i]) 4630 goto out_free; 4631 } 4632 4633 /* for decoding across pages */ 4634 res.acl_scratch = alloc_page(GFP_KERNEL); 4635 if (!res.acl_scratch) 4636 goto out_free; 4637 4638 args.acl_len = npages * PAGE_SIZE; 4639 args.acl_pgbase = 0; 4640 4641 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 4642 __func__, buf, buflen, npages, args.acl_len); 4643 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 4644 &msg, &args.seq_args, &res.seq_res, 0); 4645 if (ret) 4646 goto out_free; 4647 4648 /* Handle the case where the passed-in buffer is too short */ 4649 if (res.acl_flags & NFS4_ACL_TRUNC) { 4650 /* Did the user only issue a request for the acl length? */ 4651 if (buf == NULL) 4652 goto out_ok; 4653 ret = -ERANGE; 4654 goto out_free; 4655 } 4656 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 4657 if (buf) { 4658 if (res.acl_len > buflen) { 4659 ret = -ERANGE; 4660 goto out_free; 4661 } 4662 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 4663 } 4664 out_ok: 4665 ret = res.acl_len; 4666 out_free: 4667 for (i = 0; i < npages; i++) 4668 if (pages[i]) 4669 __free_page(pages[i]); 4670 if (res.acl_scratch) 4671 __free_page(res.acl_scratch); 4672 return ret; 4673 } 4674 4675 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4676 { 4677 struct nfs4_exception exception = { }; 4678 ssize_t ret; 4679 do { 4680 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 4681 trace_nfs4_get_acl(inode, ret); 4682 if (ret >= 0) 4683 break; 4684 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 4685 } while (exception.retry); 4686 return ret; 4687 } 4688 4689 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 4690 { 4691 struct nfs_server *server = NFS_SERVER(inode); 4692 int ret; 4693 4694 if (!nfs4_server_supports_acls(server)) 4695 return -EOPNOTSUPP; 4696 ret = nfs_revalidate_inode(server, inode); 4697 if (ret < 0) 4698 return ret; 4699 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 4700 nfs_zap_acl_cache(inode); 4701 ret = nfs4_read_cached_acl(inode, buf, buflen); 4702 if (ret != -ENOENT) 4703 /* -ENOENT is returned if there is no ACL or if there is an ACL 4704 * but no cached acl data, just the acl length */ 4705 return ret; 4706 return nfs4_get_acl_uncached(inode, buf, buflen); 4707 } 4708 4709 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4710 { 4711 struct nfs_server *server = NFS_SERVER(inode); 4712 struct page *pages[NFS4ACL_MAXPAGES]; 4713 struct nfs_setaclargs arg = { 4714 .fh = NFS_FH(inode), 4715 .acl_pages = pages, 4716 .acl_len = 
buflen, 4717 }; 4718 struct nfs_setaclres res; 4719 struct rpc_message msg = { 4720 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 4721 .rpc_argp = &arg, 4722 .rpc_resp = &res, 4723 }; 4724 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4725 int ret, i; 4726 4727 if (!nfs4_server_supports_acls(server)) 4728 return -EOPNOTSUPP; 4729 if (npages > ARRAY_SIZE(pages)) 4730 return -ERANGE; 4731 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 4732 if (i < 0) 4733 return i; 4734 nfs4_inode_return_delegation(inode); 4735 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4736 4737 /* 4738 * Free each page after tx, so the only ref left is 4739 * held by the network stack 4740 */ 4741 for (; i > 0; i--) 4742 put_page(pages[i-1]); 4743 4744 /* 4745 * Acl update can result in inode attribute update. 4746 * so mark the attribute cache invalid. 4747 */ 4748 spin_lock(&inode->i_lock); 4749 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 4750 spin_unlock(&inode->i_lock); 4751 nfs_access_zap_cache(inode); 4752 nfs_zap_acl_cache(inode); 4753 return ret; 4754 } 4755 4756 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4757 { 4758 struct nfs4_exception exception = { }; 4759 int err; 4760 do { 4761 err = __nfs4_proc_set_acl(inode, buf, buflen); 4762 trace_nfs4_set_acl(inode, err); 4763 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4764 &exception); 4765 } while (exception.retry); 4766 return err; 4767 } 4768 4769 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 4770 static int _nfs4_get_security_label(struct inode *inode, void *buf, 4771 size_t buflen) 4772 { 4773 struct nfs_server *server = NFS_SERVER(inode); 4774 struct nfs_fattr fattr; 4775 struct nfs4_label label = {0, 0, buflen, buf}; 4776 4777 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4778 struct nfs4_getattr_arg arg = { 4779 .fh = NFS_FH(inode), 4780 .bitmask = bitmask, 4781 }; 4782 struct nfs4_getattr_res res = { 4783 .fattr = &fattr, 4784 .label = &label, 4785 .server = server, 4786 }; 4787 struct rpc_message msg = { 4788 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 4789 .rpc_argp = &arg, 4790 .rpc_resp = &res, 4791 }; 4792 int ret; 4793 4794 nfs_fattr_init(&fattr); 4795 4796 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0); 4797 if (ret) 4798 return ret; 4799 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) 4800 return -ENOENT; 4801 if (buflen < label.len) 4802 return -ERANGE; 4803 return 0; 4804 } 4805 4806 static int nfs4_get_security_label(struct inode *inode, void *buf, 4807 size_t buflen) 4808 { 4809 struct nfs4_exception exception = { }; 4810 int err; 4811 4812 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 4813 return -EOPNOTSUPP; 4814 4815 do { 4816 err = _nfs4_get_security_label(inode, buf, buflen); 4817 trace_nfs4_get_security_label(inode, err); 4818 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4819 &exception); 4820 } while (exception.retry); 4821 return err; 4822 } 4823 4824 static int _nfs4_do_set_security_label(struct inode *inode, 4825 struct nfs4_label *ilabel, 4826 struct nfs_fattr *fattr, 4827 struct nfs4_label *olabel) 4828 { 4829 4830 struct iattr sattr = {0}; 4831 struct nfs_server *server = NFS_SERVER(inode); 4832 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; 4833 struct nfs_setattrargs arg = { 4834 .fh = NFS_FH(inode), 4835 .iap = &sattr, 4836 .server = server, 4837 .bitmask = bitmask, 4838 .label = ilabel, 4839 }; 4840 struct 
nfs_setattrres res = { 4841 .fattr = fattr, 4842 .label = olabel, 4843 .server = server, 4844 }; 4845 struct rpc_message msg = { 4846 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 4847 .rpc_argp = &arg, 4848 .rpc_resp = &res, 4849 }; 4850 int status; 4851 4852 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 4853 4854 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4855 if (status) 4856 dprintk("%s failed: %d\n", __func__, status); 4857 4858 return status; 4859 } 4860
4861 static int nfs4_do_set_security_label(struct inode *inode, 4862 struct nfs4_label *ilabel, 4863 struct nfs_fattr *fattr, 4864 struct nfs4_label *olabel) 4865 { 4866 struct nfs4_exception exception = { }; 4867 int err; 4868 4869 do { 4870 err = _nfs4_do_set_security_label(inode, ilabel, 4871 fattr, olabel); 4872 trace_nfs4_set_security_label(inode, err); 4873 err = nfs4_handle_exception(NFS_SERVER(inode), err, 4874 &exception); 4875 } while (exception.retry); 4876 return err; 4877 } 4878
4879 static int 4880 nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen) 4881 { 4882 struct nfs4_label ilabel, *olabel = NULL; 4883 struct nfs_fattr fattr; 4884 struct rpc_cred *cred; 4885 struct inode *inode = d_inode(dentry); 4886 int status; 4887 4888 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) 4889 return -EOPNOTSUPP; 4890 4891 nfs_fattr_init(&fattr); 4892 4893 ilabel.pi = 0; 4894 ilabel.lfs = 0; 4895 ilabel.label = (char *)buf; 4896 ilabel.len = buflen; 4897 4898 cred = rpc_lookup_cred(); 4899 if (IS_ERR(cred)) 4900 return PTR_ERR(cred); 4901 4902 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL); 4903 if (IS_ERR(olabel)) { 4904 status = PTR_ERR(olabel); 4905 goto out; 4906 } 4907 4908 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel); 4909 if (status == 0) 4910 nfs_setsecurity(inode, &fattr, olabel); 4911 4912 nfs4_label_free(olabel); 4913 out: 4914 put_rpccred(cred); 4915 return status; 4916 } 4917 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 4918 4919
4920 static int 4921 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, 4922 struct nfs4_state *state, long *timeout) 4923 { 4924 struct nfs_client *clp = server->nfs_client; 4925 4926 if (task->tk_status >= 0) 4927 return 0; 4928 switch(task->tk_status) { 4929 case -NFS4ERR_DELEG_REVOKED: 4930 case -NFS4ERR_ADMIN_REVOKED: 4931 case -NFS4ERR_BAD_STATEID: 4932 case -NFS4ERR_OPENMODE: 4933 if (state == NULL) 4934 break; 4935 if (nfs4_schedule_stateid_recovery(server, state) < 0) 4936 goto recovery_failed; 4937 goto wait_on_recovery; 4938 case -NFS4ERR_EXPIRED: 4939 if (state != NULL) { 4940 if (nfs4_schedule_stateid_recovery(server, state) < 0) 4941 goto recovery_failed; 4942 } 4943 case -NFS4ERR_STALE_STATEID: 4944 case -NFS4ERR_STALE_CLIENTID: 4945 nfs4_schedule_lease_recovery(clp); 4946 goto wait_on_recovery; 4947 case -NFS4ERR_MOVED: 4948 if (nfs4_schedule_migration_recovery(server) < 0) 4949 goto recovery_failed; 4950 goto wait_on_recovery; 4951 case -NFS4ERR_LEASE_MOVED: 4952 nfs4_schedule_lease_moved_recovery(clp); 4953 goto wait_on_recovery; 4954 #if defined(CONFIG_NFS_V4_1) 4955 case -NFS4ERR_BADSESSION: 4956 case -NFS4ERR_BADSLOT: 4957 case -NFS4ERR_BAD_HIGH_SLOT: 4958 case -NFS4ERR_DEADSESSION: 4959 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 4960 case -NFS4ERR_SEQ_FALSE_RETRY: 4961 case -NFS4ERR_SEQ_MISORDERED: 4962 dprintk("%s ERROR %d, Reset session\n", __func__, 4963 task->tk_status); 4964 nfs4_schedule_session_recovery(clp->cl_session,
task->tk_status); 4965 goto wait_on_recovery; 4966 #endif /* CONFIG_NFS_V4_1 */ 4967 case -NFS4ERR_DELAY: 4968 nfs_inc_server_stats(server, NFSIOS_DELAY); 4969 rpc_delay(task, nfs4_update_delay(timeout)); 4970 goto restart_call; 4971 case -NFS4ERR_GRACE: 4972 rpc_delay(task, NFS4_POLL_RETRY_MAX); 4973 case -NFS4ERR_RETRY_UNCACHED_REP: 4974 case -NFS4ERR_OLD_STATEID: 4975 goto restart_call; 4976 } 4977 task->tk_status = nfs4_map_errors(task->tk_status); 4978 return 0; 4979 recovery_failed: 4980 task->tk_status = -EIO; 4981 return 0; 4982 wait_on_recovery: 4983 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 4984 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 4985 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 4986 if (test_bit(NFS_MIG_FAILED, &server->mig_status)) 4987 goto recovery_failed; 4988 restart_call: 4989 task->tk_status = 0; 4990 return -EAGAIN; 4991 } 4992 4993 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 4994 nfs4_verifier *bootverf) 4995 { 4996 __be32 verf[2]; 4997 4998 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 4999 /* An impossible timestamp guarantees this value 5000 * will never match a generated boot time. */ 5001 verf[0] = 0; 5002 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1); 5003 } else { 5004 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 5005 verf[0] = cpu_to_be32(nn->boot_time.tv_sec); 5006 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec); 5007 } 5008 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 5009 } 5010 5011 static int 5012 nfs4_init_nonuniform_client_string(struct nfs_client *clp) 5013 { 5014 int result; 5015 size_t len; 5016 char *str; 5017 5018 if (clp->cl_owner_id != NULL) 5019 return 0; 5020 5021 rcu_read_lock(); 5022 len = 14 + strlen(clp->cl_ipaddr) + 1 + 5023 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + 5024 1 + 5025 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) + 5026 1; 5027 rcu_read_unlock(); 5028 5029 if (len > NFS4_OPAQUE_LIMIT + 1) 5030 return -EINVAL; 5031 5032 /* 5033 * Since this string is allocated at mount time, and held until the 5034 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5035 * about a memory-reclaim deadlock. 5036 */ 5037 str = kmalloc(len, GFP_KERNEL); 5038 if (!str) 5039 return -ENOMEM; 5040 5041 rcu_read_lock(); 5042 result = scnprintf(str, len, "Linux NFSv4.0 %s/%s %s", 5043 clp->cl_ipaddr, 5044 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR), 5045 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)); 5046 rcu_read_unlock(); 5047 5048 clp->cl_owner_id = str; 5049 return 0; 5050 } 5051 5052 static int 5053 nfs4_init_uniquifier_client_string(struct nfs_client *clp) 5054 { 5055 int result; 5056 size_t len; 5057 char *str; 5058 5059 len = 10 + 10 + 1 + 10 + 1 + 5060 strlen(nfs4_client_id_uniquifier) + 1 + 5061 strlen(clp->cl_rpcclient->cl_nodename) + 1; 5062 5063 if (len > NFS4_OPAQUE_LIMIT + 1) 5064 return -EINVAL; 5065 5066 /* 5067 * Since this string is allocated at mount time, and held until the 5068 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5069 * about a memory-reclaim deadlock. 
5070 */ 5071 str = kmalloc(len, GFP_KERNEL); 5072 if (!str) 5073 return -ENOMEM; 5074 5075 result = scnprintf(str, len, "Linux NFSv%u.%u %s/%s", 5076 clp->rpc_ops->version, clp->cl_minorversion, 5077 nfs4_client_id_uniquifier, 5078 clp->cl_rpcclient->cl_nodename); 5079 clp->cl_owner_id = str; 5080 return 0; 5081 } 5082 5083 static int 5084 nfs4_init_uniform_client_string(struct nfs_client *clp) 5085 { 5086 int result; 5087 size_t len; 5088 char *str; 5089 5090 if (clp->cl_owner_id != NULL) 5091 return 0; 5092 5093 if (nfs4_client_id_uniquifier[0] != '\0') 5094 return nfs4_init_uniquifier_client_string(clp); 5095 5096 len = 10 + 10 + 1 + 10 + 1 + 5097 strlen(clp->cl_rpcclient->cl_nodename) + 1; 5098 5099 if (len > NFS4_OPAQUE_LIMIT + 1) 5100 return -EINVAL; 5101 5102 /* 5103 * Since this string is allocated at mount time, and held until the 5104 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying 5105 * about a memory-reclaim deadlock. 5106 */ 5107 str = kmalloc(len, GFP_KERNEL); 5108 if (!str) 5109 return -ENOMEM; 5110 5111 result = scnprintf(str, len, "Linux NFSv%u.%u %s", 5112 clp->rpc_ops->version, clp->cl_minorversion, 5113 clp->cl_rpcclient->cl_nodename); 5114 clp->cl_owner_id = str; 5115 return 0; 5116 } 5117 5118 /* 5119 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback 5120 * services. Advertise one based on the address family of the 5121 * clientaddr. 5122 */ 5123 static unsigned int 5124 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len) 5125 { 5126 if (strchr(clp->cl_ipaddr, ':') != NULL) 5127 return scnprintf(buf, len, "tcp6"); 5128 else 5129 return scnprintf(buf, len, "tcp"); 5130 } 5131 5132 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata) 5133 { 5134 struct nfs4_setclientid *sc = calldata; 5135 5136 if (task->tk_status == 0) 5137 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred); 5138 } 5139 5140 static const struct rpc_call_ops nfs4_setclientid_ops = { 5141 .rpc_call_done = nfs4_setclientid_done, 5142 }; 5143 5144 /** 5145 * nfs4_proc_setclientid - Negotiate client ID 5146 * @clp: state data structure 5147 * @program: RPC program for NFSv4 callback service 5148 * @port: IP port number for NFS4 callback service 5149 * @cred: RPC credential to use for this call 5150 * @res: where to place the result 5151 * 5152 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
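 *
 * The client ID established here is only tentative; it does not become
 * usable until it has been confirmed via nfs4_proc_setclientid_confirm().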
5153 */ 5154 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 5155 unsigned short port, struct rpc_cred *cred, 5156 struct nfs4_setclientid_res *res) 5157 { 5158 nfs4_verifier sc_verifier; 5159 struct nfs4_setclientid setclientid = { 5160 .sc_verifier = &sc_verifier, 5161 .sc_prog = program, 5162 .sc_clnt = clp, 5163 }; 5164 struct rpc_message msg = { 5165 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 5166 .rpc_argp = &setclientid, 5167 .rpc_resp = res, 5168 .rpc_cred = cred, 5169 }; 5170 struct rpc_task *task; 5171 struct rpc_task_setup task_setup_data = { 5172 .rpc_client = clp->cl_rpcclient, 5173 .rpc_message = &msg, 5174 .callback_ops = &nfs4_setclientid_ops, 5175 .callback_data = &setclientid, 5176 .flags = RPC_TASK_TIMEOUT, 5177 }; 5178 int status; 5179 5180 /* nfs_client_id4 */ 5181 nfs4_init_boot_verifier(clp, &sc_verifier); 5182 5183 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 5184 status = nfs4_init_uniform_client_string(clp); 5185 else 5186 status = nfs4_init_nonuniform_client_string(clp); 5187 5188 if (status) 5189 goto out; 5190 5191 /* cb_client4 */ 5192 setclientid.sc_netid_len = 5193 nfs4_init_callback_netid(clp, 5194 setclientid.sc_netid, 5195 sizeof(setclientid.sc_netid)); 5196 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 5197 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 5198 clp->cl_ipaddr, port >> 8, port & 255); 5199 5200 dprintk("NFS call setclientid auth=%s, '%s'\n", 5201 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5202 clp->cl_owner_id); 5203 task = rpc_run_task(&task_setup_data); 5204 if (IS_ERR(task)) { 5205 status = PTR_ERR(task); 5206 goto out; 5207 } 5208 status = task->tk_status; 5209 if (setclientid.sc_cred) { 5210 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); 5211 put_rpccred(setclientid.sc_cred); 5212 } 5213 rpc_put_task(task); 5214 out: 5215 trace_nfs4_setclientid(clp, status); 5216 dprintk("NFS reply setclientid: %d\n", status); 5217 return status; 5218 } 5219
5220 /** 5221 * nfs4_proc_setclientid_confirm - Confirm client ID 5222 * @clp: state data structure 5223 * @arg: result of a previous SETCLIENTID 5224 * @cred: RPC credential to use for this call 5225 * 5226 * Returns zero, a negative errno, or a negative NFS4ERR status code.
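 *
 * The clientid and confirmation verifier in @arg are taken from the
 * server's reply to the earlier SETCLIENTID call.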
5227 */ 5228 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 5229 struct nfs4_setclientid_res *arg, 5230 struct rpc_cred *cred) 5231 { 5232 struct rpc_message msg = { 5233 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 5234 .rpc_argp = arg, 5235 .rpc_cred = cred, 5236 }; 5237 int status; 5238 5239 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 5240 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5241 clp->cl_clientid); 5242 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5243 trace_nfs4_setclientid_confirm(clp, status); 5244 dprintk("NFS reply setclientid_confirm: %d\n", status); 5245 return status; 5246 } 5247 5248 struct nfs4_delegreturndata { 5249 struct nfs4_delegreturnargs args; 5250 struct nfs4_delegreturnres res; 5251 struct nfs_fh fh; 5252 nfs4_stateid stateid; 5253 unsigned long timestamp; 5254 struct nfs_fattr fattr; 5255 int rpc_status; 5256 struct inode *inode; 5257 bool roc; 5258 u32 roc_barrier; 5259 }; 5260 5261 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 5262 { 5263 struct nfs4_delegreturndata *data = calldata; 5264 5265 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5266 return; 5267 5268 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status); 5269 switch (task->tk_status) { 5270 case 0: 5271 renew_lease(data->res.server, data->timestamp); 5272 case -NFS4ERR_ADMIN_REVOKED: 5273 case -NFS4ERR_DELEG_REVOKED: 5274 case -NFS4ERR_BAD_STATEID: 5275 case -NFS4ERR_OLD_STATEID: 5276 case -NFS4ERR_STALE_STATEID: 5277 case -NFS4ERR_EXPIRED: 5278 task->tk_status = 0; 5279 if (data->roc) 5280 pnfs_roc_set_barrier(data->inode, data->roc_barrier); 5281 break; 5282 default: 5283 if (nfs4_async_handle_error(task, data->res.server, 5284 NULL, NULL) == -EAGAIN) { 5285 rpc_restart_call_prepare(task); 5286 return; 5287 } 5288 } 5289 data->rpc_status = task->tk_status; 5290 } 5291 5292 static void nfs4_delegreturn_release(void *calldata) 5293 { 5294 struct nfs4_delegreturndata *data = calldata; 5295 struct inode *inode = data->inode; 5296 5297 if (inode) { 5298 if (data->roc) 5299 pnfs_roc_release(inode); 5300 nfs_iput_and_deactive(inode); 5301 } 5302 kfree(calldata); 5303 } 5304 5305 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 5306 { 5307 struct nfs4_delegreturndata *d_data; 5308 5309 d_data = (struct nfs4_delegreturndata *)data; 5310 5311 if (d_data->roc) 5312 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier); 5313 5314 nfs4_setup_sequence(d_data->res.server, 5315 &d_data->args.seq_args, 5316 &d_data->res.seq_res, 5317 task); 5318 } 5319 5320 static const struct rpc_call_ops nfs4_delegreturn_ops = { 5321 .rpc_call_prepare = nfs4_delegreturn_prepare, 5322 .rpc_call_done = nfs4_delegreturn_done, 5323 .rpc_release = nfs4_delegreturn_release, 5324 }; 5325 5326 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5327 { 5328 struct nfs4_delegreturndata *data; 5329 struct nfs_server *server = NFS_SERVER(inode); 5330 struct rpc_task *task; 5331 struct rpc_message msg = { 5332 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 5333 .rpc_cred = cred, 5334 }; 5335 struct rpc_task_setup task_setup_data = { 5336 .rpc_client = server->client, 5337 .rpc_message = &msg, 5338 .callback_ops = &nfs4_delegreturn_ops, 5339 .flags = RPC_TASK_ASYNC, 5340 }; 5341 int status = 0; 5342 5343 data = kzalloc(sizeof(*data), GFP_NOFS); 5344 if (data == NULL) 5345 return -ENOMEM; 5346 
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 5347 data->args.fhandle = &data->fh; 5348 data->args.stateid = &data->stateid; 5349 data->args.bitmask = server->cache_consistency_bitmask; 5350 nfs_copy_fh(&data->fh, NFS_FH(inode)); 5351 nfs4_stateid_copy(&data->stateid, stateid); 5352 data->res.fattr = &data->fattr; 5353 data->res.server = server; 5354 nfs_fattr_init(data->res.fattr); 5355 data->timestamp = jiffies; 5356 data->rpc_status = 0; 5357 data->inode = nfs_igrab_and_active(inode); 5358 if (data->inode) 5359 data->roc = nfs4_roc(inode); 5360 5361 task_setup_data.callback_data = data; 5362 msg.rpc_argp = &data->args; 5363 msg.rpc_resp = &data->res; 5364 task = rpc_run_task(&task_setup_data); 5365 if (IS_ERR(task)) 5366 return PTR_ERR(task); 5367 if (!issync) 5368 goto out; 5369 status = nfs4_wait_for_completion_rpc_task(task); 5370 if (status != 0) 5371 goto out; 5372 status = data->rpc_status; 5373 if (status == 0) 5374 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 5375 else 5376 nfs_refresh_inode(inode, &data->fattr); 5377 out: 5378 rpc_put_task(task); 5379 return status; 5380 } 5381 5382 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 5383 { 5384 struct nfs_server *server = NFS_SERVER(inode); 5385 struct nfs4_exception exception = { }; 5386 int err; 5387 do { 5388 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 5389 trace_nfs4_delegreturn(inode, err); 5390 switch (err) { 5391 case -NFS4ERR_STALE_STATEID: 5392 case -NFS4ERR_EXPIRED: 5393 case 0: 5394 return 0; 5395 } 5396 err = nfs4_handle_exception(server, err, &exception); 5397 } while (exception.retry); 5398 return err; 5399 } 5400 5401 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 5402 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 5403 5404 /* 5405 * sleep, with exponential backoff, and retry the LOCK operation. 
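 * The caller seeds the timeout with NFS4_LOCK_MINTIMEOUT; each retry then
 * doubles it here until it is capped at NFS4_LOCK_MAXTIMEOUT, i.e. waits of
 * roughly 1s, 2s, 4s, 8s, 16s and then 30s while the lock remains contended.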
5406 */ 5407 static unsigned long 5408 nfs4_set_lock_task_retry(unsigned long timeout) 5409 { 5410 freezable_schedule_timeout_killable_unsafe(timeout); 5411 timeout <<= 1; 5412 if (timeout > NFS4_LOCK_MAXTIMEOUT) 5413 return NFS4_LOCK_MAXTIMEOUT; 5414 return timeout; 5415 } 5416 5417 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5418 { 5419 struct inode *inode = state->inode; 5420 struct nfs_server *server = NFS_SERVER(inode); 5421 struct nfs_client *clp = server->nfs_client; 5422 struct nfs_lockt_args arg = { 5423 .fh = NFS_FH(inode), 5424 .fl = request, 5425 }; 5426 struct nfs_lockt_res res = { 5427 .denied = request, 5428 }; 5429 struct rpc_message msg = { 5430 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 5431 .rpc_argp = &arg, 5432 .rpc_resp = &res, 5433 .rpc_cred = state->owner->so_cred, 5434 }; 5435 struct nfs4_lock_state *lsp; 5436 int status; 5437 5438 arg.lock_owner.clientid = clp->cl_clientid; 5439 status = nfs4_set_lock_state(state, request); 5440 if (status != 0) 5441 goto out; 5442 lsp = request->fl_u.nfs4_fl.owner; 5443 arg.lock_owner.id = lsp->ls_seqid.owner_id; 5444 arg.lock_owner.s_dev = server->s_dev; 5445 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 5446 switch (status) { 5447 case 0: 5448 request->fl_type = F_UNLCK; 5449 break; 5450 case -NFS4ERR_DENIED: 5451 status = 0; 5452 } 5453 request->fl_ops->fl_release_private(request); 5454 request->fl_ops = NULL; 5455 out: 5456 return status; 5457 } 5458 5459 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5460 { 5461 struct nfs4_exception exception = { }; 5462 int err; 5463 5464 do { 5465 err = _nfs4_proc_getlk(state, cmd, request); 5466 trace_nfs4_get_lock(request, state, cmd, err); 5467 err = nfs4_handle_exception(NFS_SERVER(state->inode), err, 5468 &exception); 5469 } while (exception.retry); 5470 return err; 5471 } 5472 5473 static int do_vfs_lock(struct inode *inode, struct file_lock *fl) 5474 { 5475 int res = 0; 5476 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 5477 case FL_POSIX: 5478 res = posix_lock_inode_wait(inode, fl); 5479 break; 5480 case FL_FLOCK: 5481 res = flock_lock_inode_wait(inode, fl); 5482 break; 5483 default: 5484 BUG(); 5485 } 5486 return res; 5487 } 5488 5489 struct nfs4_unlockdata { 5490 struct nfs_locku_args arg; 5491 struct nfs_locku_res res; 5492 struct nfs4_lock_state *lsp; 5493 struct nfs_open_context *ctx; 5494 struct file_lock fl; 5495 const struct nfs_server *server; 5496 unsigned long timestamp; 5497 }; 5498 5499 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 5500 struct nfs_open_context *ctx, 5501 struct nfs4_lock_state *lsp, 5502 struct nfs_seqid *seqid) 5503 { 5504 struct nfs4_unlockdata *p; 5505 struct inode *inode = lsp->ls_state->inode; 5506 5507 p = kzalloc(sizeof(*p), GFP_NOFS); 5508 if (p == NULL) 5509 return NULL; 5510 p->arg.fh = NFS_FH(inode); 5511 p->arg.fl = &p->fl; 5512 p->arg.seqid = seqid; 5513 p->res.seqid = seqid; 5514 p->lsp = lsp; 5515 atomic_inc(&lsp->ls_count); 5516 /* Ensure we don't close file until we're done freeing locks! 
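 * The open context reference taken just below is only dropped in
 * nfs4_locku_release_calldata(), once the LOCKU has completed.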
*/ 5517 p->ctx = get_nfs_open_context(ctx); 5518 memcpy(&p->fl, fl, sizeof(p->fl)); 5519 p->server = NFS_SERVER(inode); 5520 return p; 5521 } 5522 5523 static void nfs4_locku_release_calldata(void *data) 5524 { 5525 struct nfs4_unlockdata *calldata = data; 5526 nfs_free_seqid(calldata->arg.seqid); 5527 nfs4_put_lock_state(calldata->lsp); 5528 put_nfs_open_context(calldata->ctx); 5529 kfree(calldata); 5530 } 5531 5532 static void nfs4_locku_done(struct rpc_task *task, void *data) 5533 { 5534 struct nfs4_unlockdata *calldata = data; 5535 5536 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 5537 return; 5538 switch (task->tk_status) { 5539 case 0: 5540 renew_lease(calldata->server, calldata->timestamp); 5541 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl); 5542 if (nfs4_update_lock_stateid(calldata->lsp, 5543 &calldata->res.stateid)) 5544 break; 5545 case -NFS4ERR_BAD_STATEID: 5546 case -NFS4ERR_OLD_STATEID: 5547 case -NFS4ERR_STALE_STATEID: 5548 case -NFS4ERR_EXPIRED: 5549 if (!nfs4_stateid_match(&calldata->arg.stateid, 5550 &calldata->lsp->ls_stateid)) 5551 rpc_restart_call_prepare(task); 5552 break; 5553 default: 5554 if (nfs4_async_handle_error(task, calldata->server, 5555 NULL, NULL) == -EAGAIN) 5556 rpc_restart_call_prepare(task); 5557 } 5558 nfs_release_seqid(calldata->arg.seqid); 5559 } 5560 5561 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 5562 { 5563 struct nfs4_unlockdata *calldata = data; 5564 5565 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 5566 goto out_wait; 5567 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid); 5568 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 5569 /* Note: exit _without_ running nfs4_locku_done */ 5570 goto out_no_action; 5571 } 5572 calldata->timestamp = jiffies; 5573 if (nfs4_setup_sequence(calldata->server, 5574 &calldata->arg.seq_args, 5575 &calldata->res.seq_res, 5576 task) != 0) 5577 nfs_release_seqid(calldata->arg.seqid); 5578 return; 5579 out_no_action: 5580 task->tk_action = NULL; 5581 out_wait: 5582 nfs4_sequence_done(task, &calldata->res.seq_res); 5583 } 5584 5585 static const struct rpc_call_ops nfs4_locku_ops = { 5586 .rpc_call_prepare = nfs4_locku_prepare, 5587 .rpc_call_done = nfs4_locku_done, 5588 .rpc_release = nfs4_locku_release_calldata, 5589 }; 5590 5591 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 5592 struct nfs_open_context *ctx, 5593 struct nfs4_lock_state *lsp, 5594 struct nfs_seqid *seqid) 5595 { 5596 struct nfs4_unlockdata *data; 5597 struct rpc_message msg = { 5598 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 5599 .rpc_cred = ctx->cred, 5600 }; 5601 struct rpc_task_setup task_setup_data = { 5602 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 5603 .rpc_message = &msg, 5604 .callback_ops = &nfs4_locku_ops, 5605 .workqueue = nfsiod_workqueue, 5606 .flags = RPC_TASK_ASYNC, 5607 }; 5608 5609 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client, 5610 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg); 5611 5612 /* Ensure this is an unlock - when canceling a lock, the 5613 * canceled lock is passed in, and it won't be an unlock. 
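 * (nfs4_lock_release() relies on this: when a LOCK request is cancelled
 * part-way through, it hands the original read/write lock back to
 * nfs4_do_unlck() so that any lock the server may already have granted
 * gets released.)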
5614 */ 5615 fl->fl_type = F_UNLCK; 5616 5617 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 5618 if (data == NULL) { 5619 nfs_free_seqid(seqid); 5620 return ERR_PTR(-ENOMEM); 5621 } 5622 5623 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5624 msg.rpc_argp = &data->arg; 5625 msg.rpc_resp = &data->res; 5626 task_setup_data.callback_data = data; 5627 return rpc_run_task(&task_setup_data); 5628 } 5629 5630 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 5631 { 5632 struct inode *inode = state->inode; 5633 struct nfs4_state_owner *sp = state->owner; 5634 struct nfs_inode *nfsi = NFS_I(inode); 5635 struct nfs_seqid *seqid; 5636 struct nfs4_lock_state *lsp; 5637 struct rpc_task *task; 5638 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5639 int status = 0; 5640 unsigned char fl_flags = request->fl_flags; 5641 5642 status = nfs4_set_lock_state(state, request); 5643 /* Unlock _before_ we do the RPC call */ 5644 request->fl_flags |= FL_EXISTS; 5645 /* Exclude nfs_delegation_claim_locks() */ 5646 mutex_lock(&sp->so_delegreturn_mutex); 5647 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 5648 down_read(&nfsi->rwsem); 5649 if (do_vfs_lock(inode, request) == -ENOENT) { 5650 up_read(&nfsi->rwsem); 5651 mutex_unlock(&sp->so_delegreturn_mutex); 5652 goto out; 5653 } 5654 up_read(&nfsi->rwsem); 5655 mutex_unlock(&sp->so_delegreturn_mutex); 5656 if (status != 0) 5657 goto out; 5658 /* Is this a delegated lock? */ 5659 lsp = request->fl_u.nfs4_fl.owner; 5660 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 5661 goto out; 5662 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; 5663 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 5664 status = -ENOMEM; 5665 if (IS_ERR(seqid)) 5666 goto out; 5667 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 5668 status = PTR_ERR(task); 5669 if (IS_ERR(task)) 5670 goto out; 5671 status = nfs4_wait_for_completion_rpc_task(task); 5672 rpc_put_task(task); 5673 out: 5674 request->fl_flags = fl_flags; 5675 trace_nfs4_unlock(request, state, F_SETLK, status); 5676 return status; 5677 } 5678 5679 struct nfs4_lockdata { 5680 struct nfs_lock_args arg; 5681 struct nfs_lock_res res; 5682 struct nfs4_lock_state *lsp; 5683 struct nfs_open_context *ctx; 5684 struct file_lock fl; 5685 unsigned long timestamp; 5686 int rpc_status; 5687 int cancelled; 5688 struct nfs_server *server; 5689 }; 5690 5691 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 5692 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 5693 gfp_t gfp_mask) 5694 { 5695 struct nfs4_lockdata *p; 5696 struct inode *inode = lsp->ls_state->inode; 5697 struct nfs_server *server = NFS_SERVER(inode); 5698 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); 5699 5700 p = kzalloc(sizeof(*p), gfp_mask); 5701 if (p == NULL) 5702 return NULL; 5703 5704 p->arg.fh = NFS_FH(inode); 5705 p->arg.fl = &p->fl; 5706 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 5707 if (IS_ERR(p->arg.open_seqid)) 5708 goto out_free; 5709 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 5710 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); 5711 if (IS_ERR(p->arg.lock_seqid)) 5712 goto out_free_seqid; 5713 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 5714 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 5715 p->arg.lock_owner.s_dev = server->s_dev; 5716 p->res.lock_seqid = p->arg.lock_seqid; 5717 
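/*
 * Pin everything the asynchronous LOCK call will touch: the lock state,
 * the open context and the struct file are all referenced here and
 * released again in nfs4_lock_release().
 */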
p->lsp = lsp; 5718 p->server = server; 5719 atomic_inc(&lsp->ls_count); 5720 p->ctx = get_nfs_open_context(ctx); 5721 get_file(fl->fl_file); 5722 memcpy(&p->fl, fl, sizeof(p->fl)); 5723 return p; 5724 out_free_seqid: 5725 nfs_free_seqid(p->arg.open_seqid); 5726 out_free: 5727 kfree(p); 5728 return NULL; 5729 } 5730 5731 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 5732 { 5733 struct nfs4_lockdata *data = calldata; 5734 struct nfs4_state *state = data->lsp->ls_state; 5735 5736 dprintk("%s: begin!\n", __func__); 5737 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 5738 goto out_wait; 5739 /* Do we need to do an open_to_lock_owner? */ 5740 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { 5741 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 5742 goto out_release_lock_seqid; 5743 } 5744 nfs4_stateid_copy(&data->arg.open_stateid, 5745 &state->open_stateid); 5746 data->arg.new_lock_owner = 1; 5747 data->res.open_seqid = data->arg.open_seqid; 5748 } else { 5749 data->arg.new_lock_owner = 0; 5750 nfs4_stateid_copy(&data->arg.lock_stateid, 5751 &data->lsp->ls_stateid); 5752 } 5753 if (!nfs4_valid_open_stateid(state)) { 5754 data->rpc_status = -EBADF; 5755 task->tk_action = NULL; 5756 goto out_release_open_seqid; 5757 } 5758 data->timestamp = jiffies; 5759 if (nfs4_setup_sequence(data->server, 5760 &data->arg.seq_args, 5761 &data->res.seq_res, 5762 task) == 0) 5763 return; 5764 out_release_open_seqid: 5765 nfs_release_seqid(data->arg.open_seqid); 5766 out_release_lock_seqid: 5767 nfs_release_seqid(data->arg.lock_seqid); 5768 out_wait: 5769 nfs4_sequence_done(task, &data->res.seq_res); 5770 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 5771 } 5772 5773 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 5774 { 5775 struct nfs4_lockdata *data = calldata; 5776 struct nfs4_lock_state *lsp = data->lsp; 5777 5778 dprintk("%s: begin!\n", __func__); 5779 5780 if (!nfs4_sequence_done(task, &data->res.seq_res)) 5781 return; 5782 5783 data->rpc_status = task->tk_status; 5784 switch (task->tk_status) { 5785 case 0: 5786 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)), 5787 data->timestamp); 5788 if (data->arg.new_lock) { 5789 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); 5790 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) { 5791 rpc_restart_call_prepare(task); 5792 break; 5793 } 5794 } 5795 if (data->arg.new_lock_owner != 0) { 5796 nfs_confirm_seqid(&lsp->ls_seqid, 0); 5797 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); 5798 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 5799 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) 5800 rpc_restart_call_prepare(task); 5801 break; 5802 case -NFS4ERR_BAD_STATEID: 5803 case -NFS4ERR_OLD_STATEID: 5804 case -NFS4ERR_STALE_STATEID: 5805 case -NFS4ERR_EXPIRED: 5806 if (data->arg.new_lock_owner != 0) { 5807 if (!nfs4_stateid_match(&data->arg.open_stateid, 5808 &lsp->ls_state->open_stateid)) 5809 rpc_restart_call_prepare(task); 5810 } else if (!nfs4_stateid_match(&data->arg.lock_stateid, 5811 &lsp->ls_stateid)) 5812 rpc_restart_call_prepare(task); 5813 } 5814 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 5815 } 5816 5817 static void nfs4_lock_release(void *calldata) 5818 { 5819 struct nfs4_lockdata *data = calldata; 5820 5821 dprintk("%s: begin!\n", __func__); 5822 nfs_free_seqid(data->arg.open_seqid); 5823 if (data->cancelled != 0) { 5824 struct rpc_task *task; 5825 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 5826 
data->arg.lock_seqid); 5827 if (!IS_ERR(task)) 5828 rpc_put_task_async(task); 5829 dprintk("%s: cancelling lock!\n", __func__); 5830 } else 5831 nfs_free_seqid(data->arg.lock_seqid); 5832 nfs4_put_lock_state(data->lsp); 5833 put_nfs_open_context(data->ctx); 5834 fput(data->fl.fl_file); 5835 kfree(data); 5836 dprintk("%s: done!\n", __func__); 5837 } 5838 5839 static const struct rpc_call_ops nfs4_lock_ops = { 5840 .rpc_call_prepare = nfs4_lock_prepare, 5841 .rpc_call_done = nfs4_lock_done, 5842 .rpc_release = nfs4_lock_release, 5843 }; 5844 5845 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 5846 { 5847 switch (error) { 5848 case -NFS4ERR_ADMIN_REVOKED: 5849 case -NFS4ERR_BAD_STATEID: 5850 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5851 if (new_lock_owner != 0 || 5852 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 5853 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 5854 break; 5855 case -NFS4ERR_STALE_STATEID: 5856 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 5857 case -NFS4ERR_EXPIRED: 5858 nfs4_schedule_lease_recovery(server->nfs_client); 5859 }; 5860 } 5861 5862 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 5863 { 5864 struct nfs4_lockdata *data; 5865 struct rpc_task *task; 5866 struct rpc_message msg = { 5867 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 5868 .rpc_cred = state->owner->so_cred, 5869 }; 5870 struct rpc_task_setup task_setup_data = { 5871 .rpc_client = NFS_CLIENT(state->inode), 5872 .rpc_message = &msg, 5873 .callback_ops = &nfs4_lock_ops, 5874 .workqueue = nfsiod_workqueue, 5875 .flags = RPC_TASK_ASYNC, 5876 }; 5877 int ret; 5878 5879 dprintk("%s: begin!\n", __func__); 5880 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 5881 fl->fl_u.nfs4_fl.owner, 5882 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 5883 if (data == NULL) 5884 return -ENOMEM; 5885 if (IS_SETLKW(cmd)) 5886 data->arg.block = 1; 5887 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 5888 msg.rpc_argp = &data->arg; 5889 msg.rpc_resp = &data->res; 5890 task_setup_data.callback_data = data; 5891 if (recovery_type > NFS_LOCK_NEW) { 5892 if (recovery_type == NFS_LOCK_RECLAIM) 5893 data->arg.reclaim = NFS_LOCK_RECLAIM; 5894 nfs4_set_sequence_privileged(&data->arg.seq_args); 5895 } else 5896 data->arg.new_lock = 1; 5897 task = rpc_run_task(&task_setup_data); 5898 if (IS_ERR(task)) 5899 return PTR_ERR(task); 5900 ret = nfs4_wait_for_completion_rpc_task(task); 5901 if (ret == 0) { 5902 ret = data->rpc_status; 5903 if (ret) 5904 nfs4_handle_setlk_error(data->server, data->lsp, 5905 data->arg.new_lock_owner, ret); 5906 } else 5907 data->cancelled = 1; 5908 rpc_put_task(task); 5909 dprintk("%s: done, ret = %d!\n", __func__, ret); 5910 return ret; 5911 } 5912 5913 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 5914 { 5915 struct nfs_server *server = NFS_SERVER(state->inode); 5916 struct nfs4_exception exception = { 5917 .inode = state->inode, 5918 }; 5919 int err; 5920 5921 do { 5922 /* Cache the lock if possible... 
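 * While a delegation is held the lock does not need to be re-established
 * on the server, so the test below simply reports success.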
*/ 5923 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5924 return 0; 5925 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 5926 trace_nfs4_lock_reclaim(request, state, F_SETLK, err); 5927 if (err != -NFS4ERR_DELAY) 5928 break; 5929 nfs4_handle_exception(server, err, &exception); 5930 } while (exception.retry); 5931 return err; 5932 } 5933 5934 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 5935 { 5936 struct nfs_server *server = NFS_SERVER(state->inode); 5937 struct nfs4_exception exception = { 5938 .inode = state->inode, 5939 }; 5940 int err; 5941 5942 err = nfs4_set_lock_state(state, request); 5943 if (err != 0) 5944 return err; 5945 if (!recover_lost_locks) { 5946 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags); 5947 return 0; 5948 } 5949 do { 5950 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5951 return 0; 5952 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 5953 trace_nfs4_lock_expired(request, state, F_SETLK, err); 5954 switch (err) { 5955 default: 5956 goto out; 5957 case -NFS4ERR_GRACE: 5958 case -NFS4ERR_DELAY: 5959 nfs4_handle_exception(server, err, &exception); 5960 err = 0; 5961 } 5962 } while (exception.retry); 5963 out: 5964 return err; 5965 } 5966 5967 #if defined(CONFIG_NFS_V4_1) 5968 /** 5969 * nfs41_check_expired_locks - possibly free a lock stateid 5970 * 5971 * @state: NFSv4 state for an inode 5972 * 5973 * Returns NFS_OK if recovery for this stateid is now finished. 5974 * Otherwise a negative NFS4ERR value is returned. 5975 */ 5976 static int nfs41_check_expired_locks(struct nfs4_state *state) 5977 { 5978 int status, ret = -NFS4ERR_BAD_STATEID; 5979 struct nfs4_lock_state *lsp; 5980 struct nfs_server *server = NFS_SERVER(state->inode); 5981 5982 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 5983 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 5984 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 5985 5986 status = nfs41_test_stateid(server, 5987 &lsp->ls_stateid, 5988 cred); 5989 trace_nfs4_test_lock_stateid(state, lsp, status); 5990 if (status != NFS_OK) { 5991 /* Free the stateid unless the server 5992 * informs us the stateid is unrecognized. */ 5993 if (status != -NFS4ERR_BAD_STATEID) 5994 nfs41_free_stateid(server, 5995 &lsp->ls_stateid, 5996 cred); 5997 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 5998 ret = status; 5999 } 6000 } 6001 }; 6002 6003 return ret; 6004 } 6005 6006 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 6007 { 6008 int status = NFS_OK; 6009 6010 if (test_bit(LK_STATE_IN_USE, &state->flags)) 6011 status = nfs41_check_expired_locks(state); 6012 if (status != NFS_OK) 6013 status = nfs4_lock_expired(state, request); 6014 return status; 6015 } 6016 #endif 6017 6018 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6019 { 6020 struct nfs_inode *nfsi = NFS_I(state->inode); 6021 unsigned char fl_flags = request->fl_flags; 6022 int status = -ENOLCK; 6023 6024 if ((fl_flags & FL_POSIX) && 6025 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 6026 goto out; 6027 /* Is this a delegated open? */ 6028 status = nfs4_set_lock_state(state, request); 6029 if (status != 0) 6030 goto out; 6031 request->fl_flags |= FL_ACCESS; 6032 status = do_vfs_lock(state->inode, request); 6033 if (status < 0) 6034 goto out; 6035 down_read(&nfsi->rwsem); 6036 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 6037 /* Yes: cache locks! 
*/ 6038 /* ...but avoid races with delegation recall... */ 6039 request->fl_flags = fl_flags & ~FL_SLEEP; 6040 status = do_vfs_lock(state->inode, request); 6041 up_read(&nfsi->rwsem); 6042 goto out; 6043 } 6044 up_read(&nfsi->rwsem); 6045 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 6046 out: 6047 request->fl_flags = fl_flags; 6048 return status; 6049 } 6050 6051 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 6052 { 6053 struct nfs4_exception exception = { 6054 .state = state, 6055 .inode = state->inode, 6056 }; 6057 int err; 6058 6059 do { 6060 err = _nfs4_proc_setlk(state, cmd, request); 6061 trace_nfs4_set_lock(request, state, cmd, err); 6062 if (err == -NFS4ERR_DENIED) 6063 err = -EAGAIN; 6064 err = nfs4_handle_exception(NFS_SERVER(state->inode), 6065 err, &exception); 6066 } while (exception.retry); 6067 return err; 6068 } 6069 6070 static int 6071 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 6072 { 6073 struct nfs_open_context *ctx; 6074 struct nfs4_state *state; 6075 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 6076 int status; 6077 6078 /* verify open state */ 6079 ctx = nfs_file_open_context(filp); 6080 state = ctx->state; 6081 6082 if (request->fl_start < 0 || request->fl_end < 0) 6083 return -EINVAL; 6084 6085 if (IS_GETLK(cmd)) { 6086 if (state != NULL) 6087 return nfs4_proc_getlk(state, F_GETLK, request); 6088 return 0; 6089 } 6090 6091 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 6092 return -EINVAL; 6093 6094 if (request->fl_type == F_UNLCK) { 6095 if (state != NULL) 6096 return nfs4_proc_unlck(state, cmd, request); 6097 return 0; 6098 } 6099 6100 if (state == NULL) 6101 return -ENOLCK; 6102 /* 6103 * Don't rely on the VFS having checked the file open mode, 6104 * since it won't do this for flock() locks. 
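 * For example, flock(fd, LOCK_EX) on a descriptor opened O_RDONLY
 * reaches this point as F_WRLCK and must be rejected with -EBADF,
 * even though the VFS let it through.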
6105 */ 6106 switch (request->fl_type) { 6107 case F_RDLCK: 6108 if (!(filp->f_mode & FMODE_READ)) 6109 return -EBADF; 6110 break; 6111 case F_WRLCK: 6112 if (!(filp->f_mode & FMODE_WRITE)) 6113 return -EBADF; 6114 } 6115 6116 do { 6117 status = nfs4_proc_setlk(state, cmd, request); 6118 if ((status != -EAGAIN) || IS_SETLK(cmd)) 6119 break; 6120 timeout = nfs4_set_lock_task_retry(timeout); 6121 status = -ERESTARTSYS; 6122 if (signalled()) 6123 break; 6124 } while(status < 0); 6125 return status; 6126 } 6127 6128 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 6129 { 6130 struct nfs_server *server = NFS_SERVER(state->inode); 6131 int err; 6132 6133 err = nfs4_set_lock_state(state, fl); 6134 if (err != 0) 6135 return err; 6136 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 6137 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 6138 } 6139 6140 struct nfs_release_lockowner_data { 6141 struct nfs4_lock_state *lsp; 6142 struct nfs_server *server; 6143 struct nfs_release_lockowner_args args; 6144 struct nfs_release_lockowner_res res; 6145 unsigned long timestamp; 6146 }; 6147 6148 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata) 6149 { 6150 struct nfs_release_lockowner_data *data = calldata; 6151 struct nfs_server *server = data->server; 6152 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, 6153 &data->args.seq_args, &data->res.seq_res, task); 6154 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6155 data->timestamp = jiffies; 6156 } 6157 6158 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) 6159 { 6160 struct nfs_release_lockowner_data *data = calldata; 6161 struct nfs_server *server = data->server; 6162 6163 nfs40_sequence_done(task, &data->res.seq_res); 6164 6165 switch (task->tk_status) { 6166 case 0: 6167 renew_lease(server, data->timestamp); 6168 break; 6169 case -NFS4ERR_STALE_CLIENTID: 6170 case -NFS4ERR_EXPIRED: 6171 nfs4_schedule_lease_recovery(server->nfs_client); 6172 break; 6173 case -NFS4ERR_LEASE_MOVED: 6174 case -NFS4ERR_DELAY: 6175 if (nfs4_async_handle_error(task, server, 6176 NULL, NULL) == -EAGAIN) 6177 rpc_restart_call_prepare(task); 6178 } 6179 } 6180 6181 static void nfs4_release_lockowner_release(void *calldata) 6182 { 6183 struct nfs_release_lockowner_data *data = calldata; 6184 nfs4_free_lock_state(data->server, data->lsp); 6185 kfree(calldata); 6186 } 6187 6188 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 6189 .rpc_call_prepare = nfs4_release_lockowner_prepare, 6190 .rpc_call_done = nfs4_release_lockowner_done, 6191 .rpc_release = nfs4_release_lockowner_release, 6192 }; 6193 6194 static void 6195 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 6196 { 6197 struct nfs_release_lockowner_data *data; 6198 struct rpc_message msg = { 6199 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 6200 }; 6201 6202 if (server->nfs_client->cl_mvops->minor_version != 0) 6203 return; 6204 6205 data = kmalloc(sizeof(*data), GFP_NOFS); 6206 if (!data) 6207 return; 6208 data->lsp = lsp; 6209 data->server = server; 6210 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 6211 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 6212 data->args.lock_owner.s_dev = server->s_dev; 6213 6214 msg.rpc_argp = &data->args; 6215 msg.rpc_resp = &data->res; 6216 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 6217 
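/*
 * Fire and forget: nobody waits for the reply; the lock state is freed
 * from nfs4_release_lockowner_release() once the RPC has run.
 */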
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 6218 } 6219 6220 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 6221 6222 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, 6223 const void *buf, size_t buflen, 6224 int flags, int type) 6225 { 6226 if (strcmp(key, "") != 0) 6227 return -EINVAL; 6228 6229 return nfs4_proc_set_acl(d_inode(dentry), buf, buflen); 6230 } 6231 6232 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, 6233 void *buf, size_t buflen, int type) 6234 { 6235 if (strcmp(key, "") != 0) 6236 return -EINVAL; 6237 6238 return nfs4_proc_get_acl(d_inode(dentry), buf, buflen); 6239 } 6240 6241 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, 6242 size_t list_len, const char *name, 6243 size_t name_len, int type) 6244 { 6245 size_t len = sizeof(XATTR_NAME_NFSV4_ACL); 6246 6247 if (!nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)))) 6248 return 0; 6249 6250 if (list && len <= list_len) 6251 memcpy(list, XATTR_NAME_NFSV4_ACL, len); 6252 return len; 6253 } 6254 6255 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 6256 static inline int nfs4_server_supports_labels(struct nfs_server *server) 6257 { 6258 return server->caps & NFS_CAP_SECURITY_LABEL; 6259 } 6260 6261 static int nfs4_xattr_set_nfs4_label(struct dentry *dentry, const char *key, 6262 const void *buf, size_t buflen, 6263 int flags, int type) 6264 { 6265 if (security_ismaclabel(key)) 6266 return nfs4_set_security_label(dentry, buf, buflen); 6267 6268 return -EOPNOTSUPP; 6269 } 6270 6271 static int nfs4_xattr_get_nfs4_label(struct dentry *dentry, const char *key, 6272 void *buf, size_t buflen, int type) 6273 { 6274 if (security_ismaclabel(key)) 6275 return nfs4_get_security_label(d_inode(dentry), buf, buflen); 6276 return -EOPNOTSUPP; 6277 } 6278 6279 static size_t nfs4_xattr_list_nfs4_label(struct dentry *dentry, char *list, 6280 size_t list_len, const char *name, 6281 size_t name_len, int type) 6282 { 6283 size_t len = 0; 6284 6285 if (nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) { 6286 len = security_inode_listsecurity(d_inode(dentry), NULL, 0); 6287 if (list && len <= list_len) 6288 security_inode_listsecurity(d_inode(dentry), list, len); 6289 } 6290 return len; 6291 } 6292 6293 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = { 6294 .prefix = XATTR_SECURITY_PREFIX, 6295 .list = nfs4_xattr_list_nfs4_label, 6296 .get = nfs4_xattr_get_nfs4_label, 6297 .set = nfs4_xattr_set_nfs4_label, 6298 }; 6299 #endif 6300 6301 6302 /* 6303 * nfs_fhget will use either the mounted_on_fileid or the fileid 6304 */ 6305 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 6306 { 6307 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 6308 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 6309 (fattr->valid & NFS_ATTR_FATTR_FSID) && 6310 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 6311 return; 6312 6313 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 6314 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 6315 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 6316 fattr->nlink = 2; 6317 } 6318 6319 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6320 const struct qstr *name, 6321 struct nfs4_fs_locations *fs_locations, 6322 struct page *page) 6323 { 6324 struct nfs_server *server = NFS_SERVER(dir); 6325 u32 bitmask[3] = { 6326 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6327 }; 6328 struct nfs4_fs_locations_arg args = { 6329 .dir_fh = NFS_FH(dir), 6330 
.name = name, 6331 .page = page, 6332 .bitmask = bitmask, 6333 }; 6334 struct nfs4_fs_locations_res res = { 6335 .fs_locations = fs_locations, 6336 }; 6337 struct rpc_message msg = { 6338 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6339 .rpc_argp = &args, 6340 .rpc_resp = &res, 6341 }; 6342 int status; 6343 6344 dprintk("%s: start\n", __func__); 6345 6346 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 6347 * is not supported */ 6348 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 6349 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 6350 else 6351 bitmask[0] |= FATTR4_WORD0_FILEID; 6352 6353 nfs_fattr_init(&fs_locations->fattr); 6354 fs_locations->server = server; 6355 fs_locations->nlocations = 0; 6356 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 6357 dprintk("%s: returned status = %d\n", __func__, status); 6358 return status; 6359 } 6360 6361 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 6362 const struct qstr *name, 6363 struct nfs4_fs_locations *fs_locations, 6364 struct page *page) 6365 { 6366 struct nfs4_exception exception = { }; 6367 int err; 6368 do { 6369 err = _nfs4_proc_fs_locations(client, dir, name, 6370 fs_locations, page); 6371 trace_nfs4_get_fs_locations(dir, name, err); 6372 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6373 &exception); 6374 } while (exception.retry); 6375 return err; 6376 } 6377 6378 /* 6379 * This operation also signals the server that this client is 6380 * performing migration recovery. The server can stop returning 6381 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is 6382 * appended to this compound to identify the client ID which is 6383 * performing recovery. 6384 */ 6385 static int _nfs40_proc_get_locations(struct inode *inode, 6386 struct nfs4_fs_locations *locations, 6387 struct page *page, struct rpc_cred *cred) 6388 { 6389 struct nfs_server *server = NFS_SERVER(inode); 6390 struct rpc_clnt *clnt = server->client; 6391 u32 bitmask[2] = { 6392 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6393 }; 6394 struct nfs4_fs_locations_arg args = { 6395 .clientid = server->nfs_client->cl_clientid, 6396 .fh = NFS_FH(inode), 6397 .page = page, 6398 .bitmask = bitmask, 6399 .migration = 1, /* skip LOOKUP */ 6400 .renew = 1, /* append RENEW */ 6401 }; 6402 struct nfs4_fs_locations_res res = { 6403 .fs_locations = locations, 6404 .migration = 1, 6405 .renew = 1, 6406 }; 6407 struct rpc_message msg = { 6408 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6409 .rpc_argp = &args, 6410 .rpc_resp = &res, 6411 .rpc_cred = cred, 6412 }; 6413 unsigned long now = jiffies; 6414 int status; 6415 6416 nfs_fattr_init(&locations->fattr); 6417 locations->server = server; 6418 locations->nlocations = 0; 6419 6420 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6421 nfs4_set_sequence_privileged(&args.seq_args); 6422 status = nfs4_call_sync_sequence(clnt, server, &msg, 6423 &args.seq_args, &res.seq_res); 6424 if (status) 6425 return status; 6426 6427 renew_lease(server, now); 6428 return 0; 6429 } 6430 6431 #ifdef CONFIG_NFS_V4_1 6432 6433 /* 6434 * This operation also signals the server that this client is 6435 * performing migration recovery. The server can stop asserting 6436 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID 6437 * performing this operation is identified in the SEQUENCE 6438 * operation in this compound. 
6439 * 6440 * When the client supports GETATTR(fs_locations_info), it can 6441 * be plumbed in here. 6442 */ 6443 static int _nfs41_proc_get_locations(struct inode *inode, 6444 struct nfs4_fs_locations *locations, 6445 struct page *page, struct rpc_cred *cred) 6446 { 6447 struct nfs_server *server = NFS_SERVER(inode); 6448 struct rpc_clnt *clnt = server->client; 6449 u32 bitmask[2] = { 6450 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 6451 }; 6452 struct nfs4_fs_locations_arg args = { 6453 .fh = NFS_FH(inode), 6454 .page = page, 6455 .bitmask = bitmask, 6456 .migration = 1, /* skip LOOKUP */ 6457 }; 6458 struct nfs4_fs_locations_res res = { 6459 .fs_locations = locations, 6460 .migration = 1, 6461 }; 6462 struct rpc_message msg = { 6463 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 6464 .rpc_argp = &args, 6465 .rpc_resp = &res, 6466 .rpc_cred = cred, 6467 }; 6468 int status; 6469 6470 nfs_fattr_init(&locations->fattr); 6471 locations->server = server; 6472 locations->nlocations = 0; 6473 6474 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6475 nfs4_set_sequence_privileged(&args.seq_args); 6476 status = nfs4_call_sync_sequence(clnt, server, &msg, 6477 &args.seq_args, &res.seq_res); 6478 if (status == NFS4_OK && 6479 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6480 status = -NFS4ERR_LEASE_MOVED; 6481 return status; 6482 } 6483 6484 #endif /* CONFIG_NFS_V4_1 */ 6485 6486 /** 6487 * nfs4_proc_get_locations - discover locations for a migrated FSID 6488 * @inode: inode on FSID that is migrating 6489 * @locations: result of query 6490 * @page: buffer 6491 * @cred: credential to use for this operation 6492 * 6493 * Returns NFS4_OK on success, a negative NFS4ERR status code if the 6494 * operation failed, or a negative errno if a local error occurred. 6495 * 6496 * On success, "locations" is filled in, but if the server has 6497 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not 6498 * asserted. 6499 * 6500 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases 6501 * from this client that require migration recovery. 6502 */ 6503 int nfs4_proc_get_locations(struct inode *inode, 6504 struct nfs4_fs_locations *locations, 6505 struct page *page, struct rpc_cred *cred) 6506 { 6507 struct nfs_server *server = NFS_SERVER(inode); 6508 struct nfs_client *clp = server->nfs_client; 6509 const struct nfs4_mig_recovery_ops *ops = 6510 clp->cl_mvops->mig_recovery_ops; 6511 struct nfs4_exception exception = { }; 6512 int status; 6513 6514 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6515 (unsigned long long)server->fsid.major, 6516 (unsigned long long)server->fsid.minor, 6517 clp->cl_hostname); 6518 nfs_display_fhandle(NFS_FH(inode), __func__); 6519 6520 do { 6521 status = ops->get_locations(inode, locations, page, cred); 6522 if (status != -NFS4ERR_DELAY) 6523 break; 6524 nfs4_handle_exception(server, status, &exception); 6525 } while (exception.retry); 6526 return status; 6527 } 6528 6529 /* 6530 * This operation also signals the server that this client is 6531 * performing "lease moved" recovery. The server can stop 6532 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation 6533 * is appended to this compound to identify the client ID which is 6534 * performing recovery. 
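 * The RENEW is requested through the .renew flags in the argument and
 * result structures below; on success the lease is refreshed locally
 * via do_renew_lease().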
6535 */ 6536 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6537 { 6538 struct nfs_server *server = NFS_SERVER(inode); 6539 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 6540 struct rpc_clnt *clnt = server->client; 6541 struct nfs4_fsid_present_arg args = { 6542 .fh = NFS_FH(inode), 6543 .clientid = clp->cl_clientid, 6544 .renew = 1, /* append RENEW */ 6545 }; 6546 struct nfs4_fsid_present_res res = { 6547 .renew = 1, 6548 }; 6549 struct rpc_message msg = { 6550 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6551 .rpc_argp = &args, 6552 .rpc_resp = &res, 6553 .rpc_cred = cred, 6554 }; 6555 unsigned long now = jiffies; 6556 int status; 6557 6558 res.fh = nfs_alloc_fhandle(); 6559 if (res.fh == NULL) 6560 return -ENOMEM; 6561 6562 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6563 nfs4_set_sequence_privileged(&args.seq_args); 6564 status = nfs4_call_sync_sequence(clnt, server, &msg, 6565 &args.seq_args, &res.seq_res); 6566 nfs_free_fhandle(res.fh); 6567 if (status) 6568 return status; 6569 6570 do_renew_lease(clp, now); 6571 return 0; 6572 } 6573 6574 #ifdef CONFIG_NFS_V4_1 6575 6576 /* 6577 * This operation also signals the server that this client is 6578 * performing "lease moved" recovery. The server can stop asserting 6579 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing 6580 * this operation is identified in the SEQUENCE operation in this 6581 * compound. 6582 */ 6583 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6584 { 6585 struct nfs_server *server = NFS_SERVER(inode); 6586 struct rpc_clnt *clnt = server->client; 6587 struct nfs4_fsid_present_arg args = { 6588 .fh = NFS_FH(inode), 6589 }; 6590 struct nfs4_fsid_present_res res = { 6591 }; 6592 struct rpc_message msg = { 6593 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT], 6594 .rpc_argp = &args, 6595 .rpc_resp = &res, 6596 .rpc_cred = cred, 6597 }; 6598 int status; 6599 6600 res.fh = nfs_alloc_fhandle(); 6601 if (res.fh == NULL) 6602 return -ENOMEM; 6603 6604 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 6605 nfs4_set_sequence_privileged(&args.seq_args); 6606 status = nfs4_call_sync_sequence(clnt, server, &msg, 6607 &args.seq_args, &res.seq_res); 6608 nfs_free_fhandle(res.fh); 6609 if (status == NFS4_OK && 6610 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED) 6611 status = -NFS4ERR_LEASE_MOVED; 6612 return status; 6613 } 6614 6615 #endif /* CONFIG_NFS_V4_1 */ 6616 6617 /** 6618 * nfs4_proc_fsid_present - Is this FSID present or absent on server? 6619 * @inode: inode on FSID to check 6620 * @cred: credential to use for this operation 6621 * 6622 * Server indicates whether the FSID is present, moved, or not 6623 * recognized. This operation is necessary to clear a LEASE_MOVED 6624 * condition for this client ID. 6625 * 6626 * Returns NFS4_OK if the FSID is present on this server, 6627 * -NFS4ERR_MOVED if the FSID is no longer present, a negative 6628 * NFS4ERR code if some error occurred on the server, or a 6629 * negative errno if a local failure occurred. 
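 *
 * A minimal sketch of the expected calling pattern (illustrative only;
 * the real caller is the state manager's migration recovery path):
 *
 *	status = nfs4_proc_fsid_present(inode, cred);
 *	if (status == -NFS4ERR_MOVED)
 *		(start migration recovery for this FSID)
 *	else if (status != NFS4_OK)
 *		(report the NFS4ERR or local errno)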
6630 */ 6631 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred) 6632 { 6633 struct nfs_server *server = NFS_SERVER(inode); 6634 struct nfs_client *clp = server->nfs_client; 6635 const struct nfs4_mig_recovery_ops *ops = 6636 clp->cl_mvops->mig_recovery_ops; 6637 struct nfs4_exception exception = { }; 6638 int status; 6639 6640 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__, 6641 (unsigned long long)server->fsid.major, 6642 (unsigned long long)server->fsid.minor, 6643 clp->cl_hostname); 6644 nfs_display_fhandle(NFS_FH(inode), __func__); 6645 6646 do { 6647 status = ops->fsid_present(inode, cred); 6648 if (status != -NFS4ERR_DELAY) 6649 break; 6650 nfs4_handle_exception(server, status, &exception); 6651 } while (exception.retry); 6652 return status; 6653 } 6654 6655 /* 6656 * If 'use_integrity' is true and the state management nfs_client 6657 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient 6658 * and the machine credential as per RFC3530bis and RFC5661 Security 6659 * Considerations sections. Otherwise, just use the user cred with the 6660 * filesystem's rpc_client. 6661 */ 6662 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity) 6663 { 6664 int status; 6665 struct nfs4_secinfo_arg args = { 6666 .dir_fh = NFS_FH(dir), 6667 .name = name, 6668 }; 6669 struct nfs4_secinfo_res res = { 6670 .flavors = flavors, 6671 }; 6672 struct rpc_message msg = { 6673 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 6674 .rpc_argp = &args, 6675 .rpc_resp = &res, 6676 }; 6677 struct rpc_clnt *clnt = NFS_SERVER(dir)->client; 6678 struct rpc_cred *cred = NULL; 6679 6680 if (use_integrity) { 6681 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient; 6682 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client); 6683 msg.rpc_cred = cred; 6684 } 6685 6686 dprintk("NFS call secinfo %s\n", name->name); 6687 6688 nfs4_state_protect(NFS_SERVER(dir)->nfs_client, 6689 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg); 6690 6691 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args, 6692 &res.seq_res, 0); 6693 dprintk("NFS reply secinfo: %d\n", status); 6694 6695 if (cred) 6696 put_rpccred(cred); 6697 6698 return status; 6699 } 6700 6701 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 6702 struct nfs4_secinfo_flavors *flavors) 6703 { 6704 struct nfs4_exception exception = { }; 6705 int err; 6706 do { 6707 err = -NFS4ERR_WRONGSEC; 6708 6709 /* try to use integrity protection with machine cred */ 6710 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client)) 6711 err = _nfs4_proc_secinfo(dir, name, flavors, true); 6712 6713 /* 6714 * If integrity protection is unavailable, or if SECINFO with 6715 * integrity protection returns NFS4ERR_WRONGSEC (which is 6716 * disallowed by the spec but seen from deployed servers), fall 6717 * back to the current filesystem's rpc_client and the user cred. 6718 */ 6719 if (err == -NFS4ERR_WRONGSEC) 6720 err = _nfs4_proc_secinfo(dir, name, flavors, false); 6721 6722 trace_nfs4_secinfo(dir, name, err); 6723 err = nfs4_handle_exception(NFS_SERVER(dir), err, 6724 &exception); 6725 } while (exception.retry); 6726 return err; 6727 } 6728 6729 #ifdef CONFIG_NFS_V4_1 6730 /* 6731 * Check the exchange flags returned by the server. Reject unknown flags, 6732 * the contradictory case where both the PNFS and NON_PNFS flags are set, 6733 * and the case where none of the NON_PNFS, PNFS or DS flags is set.
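 * For example, EXCHGID4_FLAG_USE_PNFS_MDS | EXCHGID4_FLAG_USE_PNFS_DS is
 * accepted, while EXCHGID4_FLAG_USE_PNFS_MDS | EXCHGID4_FLAG_USE_NON_PNFS
 * is rejected as contradictory.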
6734 */ 6735 static int nfs4_check_cl_exchange_flags(u32 flags) 6736 { 6737 if (flags & ~EXCHGID4_FLAG_MASK_R) 6738 goto out_inval; 6739 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 6740 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 6741 goto out_inval; 6742 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 6743 goto out_inval; 6744 return NFS_OK; 6745 out_inval: 6746 return -NFS4ERR_INVAL; 6747 } 6748 6749 static bool 6750 nfs41_same_server_scope(struct nfs41_server_scope *a, 6751 struct nfs41_server_scope *b) 6752 { 6753 if (a->server_scope_sz == b->server_scope_sz && 6754 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 6755 return true; 6756 6757 return false; 6758 } 6759 6760 /* 6761 * nfs4_proc_bind_conn_to_session() 6762 * 6763 * The 4.1 client currently uses the same TCP connection for the 6764 * fore and backchannel. 6765 */ 6766 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 6767 { 6768 int status; 6769 struct nfs41_bind_conn_to_session_args args = { 6770 .client = clp, 6771 .dir = NFS4_CDFC4_FORE_OR_BOTH, 6772 }; 6773 struct nfs41_bind_conn_to_session_res res; 6774 struct rpc_message msg = { 6775 .rpc_proc = 6776 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 6777 .rpc_argp = &args, 6778 .rpc_resp = &res, 6779 .rpc_cred = cred, 6780 }; 6781 6782 dprintk("--> %s\n", __func__); 6783 6784 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); 6785 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) 6786 args.dir = NFS4_CDFC4_FORE; 6787 6788 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 6789 trace_nfs4_bind_conn_to_session(clp, status); 6790 if (status == 0) { 6791 if (memcmp(res.sessionid.data, 6792 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 6793 dprintk("NFS: %s: Session ID mismatch\n", __func__); 6794 status = -EIO; 6795 goto out; 6796 } 6797 if ((res.dir & args.dir) != res.dir || res.dir == 0) { 6798 dprintk("NFS: %s: Unexpected direction from server\n", 6799 __func__); 6800 status = -EIO; 6801 goto out; 6802 } 6803 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { 6804 dprintk("NFS: %s: Server returned RDMA mode = true\n", 6805 __func__); 6806 status = -EIO; 6807 goto out; 6808 } 6809 } 6810 out: 6811 dprintk("<-- %s status= %d\n", __func__, status); 6812 return status; 6813 } 6814 6815 /* 6816 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map 6817 * and operations we'd like to see to enable certain features in the allow map 6818 */ 6819 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = { 6820 .how = SP4_MACH_CRED, 6821 .enforce.u.words = { 6822 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6823 1 << (OP_EXCHANGE_ID - 32) | 6824 1 << (OP_CREATE_SESSION - 32) | 6825 1 << (OP_DESTROY_SESSION - 32) | 6826 1 << (OP_DESTROY_CLIENTID - 32) 6827 }, 6828 .allow.u.words = { 6829 [0] = 1 << (OP_CLOSE) | 6830 1 << (OP_LOCKU) | 6831 1 << (OP_COMMIT), 6832 [1] = 1 << (OP_SECINFO - 32) | 6833 1 << (OP_SECINFO_NO_NAME - 32) | 6834 1 << (OP_TEST_STATEID - 32) | 6835 1 << (OP_FREE_STATEID - 32) | 6836 1 << (OP_WRITE - 32) 6837 } 6838 }; 6839 6840 /* 6841 * Select the state protection mode for client `clp' given the server results 6842 * from exchange_id in `sp'. 6843 * 6844 * Returns 0 on success, negative errno otherwise. 
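 * For example, a server that sets both OP_CLOSE and OP_LOCKU in the
 * allow map enables "cleanup" mode below, which lets the client send
 * CLOSE and LOCKU with the machine credential.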
6845 */ 6846 static int nfs4_sp4_select_mode(struct nfs_client *clp, 6847 struct nfs41_state_protection *sp) 6848 { 6849 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = { 6850 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) | 6851 1 << (OP_EXCHANGE_ID - 32) | 6852 1 << (OP_CREATE_SESSION - 32) | 6853 1 << (OP_DESTROY_SESSION - 32) | 6854 1 << (OP_DESTROY_CLIENTID - 32) 6855 }; 6856 unsigned int i; 6857 6858 if (sp->how == SP4_MACH_CRED) { 6859 /* Print state protect result */ 6860 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n"); 6861 for (i = 0; i <= LAST_NFS4_OP; i++) { 6862 if (test_bit(i, sp->enforce.u.longs)) 6863 dfprintk(MOUNT, " enforce op %d\n", i); 6864 if (test_bit(i, sp->allow.u.longs)) 6865 dfprintk(MOUNT, " allow op %d\n", i); 6866 } 6867 6868 /* make sure nothing is on enforce list that isn't supported */ 6869 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) { 6870 if (sp->enforce.u.words[i] & ~supported_enforce[i]) { 6871 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 6872 return -EINVAL; 6873 } 6874 } 6875 6876 /* 6877 * Minimal mode - state operations are allowed to use machine 6878 * credential. Note this already happens by default, so the 6879 * client doesn't have to do anything more than the negotiation. 6880 * 6881 * NOTE: we don't care if EXCHANGE_ID is in the list - 6882 * we're already using the machine cred for exchange_id 6883 * and will never use a different cred. 6884 */ 6885 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) && 6886 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) && 6887 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) && 6888 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) { 6889 dfprintk(MOUNT, "sp4_mach_cred:\n"); 6890 dfprintk(MOUNT, " minimal mode enabled\n"); 6891 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags); 6892 } else { 6893 dfprintk(MOUNT, "sp4_mach_cred: disabled\n"); 6894 return -EINVAL; 6895 } 6896 6897 if (test_bit(OP_CLOSE, sp->allow.u.longs) && 6898 test_bit(OP_LOCKU, sp->allow.u.longs)) { 6899 dfprintk(MOUNT, " cleanup mode enabled\n"); 6900 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags); 6901 } 6902 6903 if (test_bit(OP_SECINFO, sp->allow.u.longs) && 6904 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) { 6905 dfprintk(MOUNT, " secinfo mode enabled\n"); 6906 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags); 6907 } 6908 6909 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) && 6910 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) { 6911 dfprintk(MOUNT, " stateid mode enabled\n"); 6912 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags); 6913 } 6914 6915 if (test_bit(OP_WRITE, sp->allow.u.longs)) { 6916 dfprintk(MOUNT, " write mode enabled\n"); 6917 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags); 6918 } 6919 6920 if (test_bit(OP_COMMIT, sp->allow.u.longs)) { 6921 dfprintk(MOUNT, " commit mode enabled\n"); 6922 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags); 6923 } 6924 } 6925 6926 return 0; 6927 } 6928 6929 /* 6930 * _nfs4_proc_exchange_id() 6931 * 6932 * Wrapper for EXCHANGE_ID operation. 
6933 */ 6934 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, 6935 u32 sp4_how) 6936 { 6937 nfs4_verifier verifier; 6938 struct nfs41_exchange_id_args args = { 6939 .verifier = &verifier, 6940 .client = clp, 6941 #ifdef CONFIG_NFS_V4_1_MIGRATION 6942 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 6943 EXCHGID4_FLAG_BIND_PRINC_STATEID | 6944 EXCHGID4_FLAG_SUPP_MOVED_MIGR, 6945 #else 6946 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER | 6947 EXCHGID4_FLAG_BIND_PRINC_STATEID, 6948 #endif 6949 }; 6950 struct nfs41_exchange_id_res res = { 6951 0 6952 }; 6953 int status; 6954 struct rpc_message msg = { 6955 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 6956 .rpc_argp = &args, 6957 .rpc_resp = &res, 6958 .rpc_cred = cred, 6959 }; 6960 6961 nfs4_init_boot_verifier(clp, &verifier); 6962 6963 status = nfs4_init_uniform_client_string(clp); 6964 if (status) 6965 goto out; 6966 6967 dprintk("NFS call exchange_id auth=%s, '%s'\n", 6968 clp->cl_rpcclient->cl_auth->au_ops->au_name, 6969 clp->cl_owner_id); 6970 6971 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 6972 GFP_NOFS); 6973 if (unlikely(res.server_owner == NULL)) { 6974 status = -ENOMEM; 6975 goto out; 6976 } 6977 6978 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 6979 GFP_NOFS); 6980 if (unlikely(res.server_scope == NULL)) { 6981 status = -ENOMEM; 6982 goto out_server_owner; 6983 } 6984 6985 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 6986 if (unlikely(res.impl_id == NULL)) { 6987 status = -ENOMEM; 6988 goto out_server_scope; 6989 } 6990 6991 switch (sp4_how) { 6992 case SP4_NONE: 6993 args.state_protect.how = SP4_NONE; 6994 break; 6995 6996 case SP4_MACH_CRED: 6997 args.state_protect = nfs4_sp4_mach_cred_request; 6998 break; 6999 7000 default: 7001 /* unsupported! 
*/ 7002 WARN_ON_ONCE(1); 7003 status = -EINVAL; 7004 goto out_impl_id; 7005 } 7006 7007 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7008 trace_nfs4_exchange_id(clp, status); 7009 if (status == 0) 7010 status = nfs4_check_cl_exchange_flags(res.flags); 7011 7012 if (status == 0) 7013 status = nfs4_sp4_select_mode(clp, &res.state_protect); 7014 7015 if (status == 0) { 7016 clp->cl_clientid = res.clientid; 7017 clp->cl_exchange_flags = res.flags; 7018 /* Client ID is not confirmed */ 7019 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) { 7020 clear_bit(NFS4_SESSION_ESTABLISHED, 7021 &clp->cl_session->session_state); 7022 clp->cl_seqid = res.seqid; 7023 } 7024 7025 kfree(clp->cl_serverowner); 7026 clp->cl_serverowner = res.server_owner; 7027 res.server_owner = NULL; 7028 7029 /* use the most recent implementation id */ 7030 kfree(clp->cl_implid); 7031 clp->cl_implid = res.impl_id; 7032 res.impl_id = NULL; 7033 7034 if (clp->cl_serverscope != NULL && 7035 !nfs41_same_server_scope(clp->cl_serverscope, 7036 res.server_scope)) { 7037 dprintk("%s: server_scope mismatch detected\n", 7038 __func__); 7039 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 7040 kfree(clp->cl_serverscope); 7041 clp->cl_serverscope = NULL; 7042 } 7043 7044 if (clp->cl_serverscope == NULL) { 7045 clp->cl_serverscope = res.server_scope; 7046 res.server_scope = NULL; 7047 } 7048 } 7049 7050 out_impl_id: 7051 kfree(res.impl_id); 7052 out_server_scope: 7053 kfree(res.server_scope); 7054 out_server_owner: 7055 kfree(res.server_owner); 7056 out: 7057 if (clp->cl_implid != NULL) 7058 dprintk("NFS reply exchange_id: Server Implementation ID: " 7059 "domain: %s, name: %s, date: %llu,%u\n", 7060 clp->cl_implid->domain, clp->cl_implid->name, 7061 clp->cl_implid->date.seconds, 7062 clp->cl_implid->date.nseconds); 7063 dprintk("NFS reply exchange_id: %d\n", status); 7064 return status; 7065 } 7066 7067 /* 7068 * nfs4_proc_exchange_id() 7069 * 7070 * Returns zero, a negative errno, or a negative NFS4ERR status code. 7071 * 7072 * Since the clientid has expired, all compounds using sessions 7073 * associated with the stale clientid will be returning 7074 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 7075 * be in some phase of session reset. 7076 * 7077 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used. 
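 * If the SP4_MACH_CRED negotiation fails for any reason, the exchange
 * is simply retried with SP4_NONE, so the cost of a failed negotiation
 * is one extra EXCHANGE_ID round trip.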
7078 */ 7079 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 7080 { 7081 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor; 7082 int status; 7083 7084 /* try SP4_MACH_CRED if krb5i/p */ 7085 if (authflavor == RPC_AUTH_GSS_KRB5I || 7086 authflavor == RPC_AUTH_GSS_KRB5P) { 7087 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED); 7088 if (!status) 7089 return 0; 7090 } 7091 7092 /* try SP4_NONE */ 7093 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE); 7094 } 7095 7096 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 7097 struct rpc_cred *cred) 7098 { 7099 struct rpc_message msg = { 7100 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 7101 .rpc_argp = clp, 7102 .rpc_cred = cred, 7103 }; 7104 int status; 7105 7106 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7107 trace_nfs4_destroy_clientid(clp, status); 7108 if (status) 7109 dprintk("NFS: Got error %d from the server %s on " 7110 "DESTROY_CLIENTID.", status, clp->cl_hostname); 7111 return status; 7112 } 7113 7114 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 7115 struct rpc_cred *cred) 7116 { 7117 unsigned int loop; 7118 int ret; 7119 7120 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 7121 ret = _nfs4_proc_destroy_clientid(clp, cred); 7122 switch (ret) { 7123 case -NFS4ERR_DELAY: 7124 case -NFS4ERR_CLIENTID_BUSY: 7125 ssleep(1); 7126 break; 7127 default: 7128 return ret; 7129 } 7130 } 7131 return 0; 7132 } 7133 7134 int nfs4_destroy_clientid(struct nfs_client *clp) 7135 { 7136 struct rpc_cred *cred; 7137 int ret = 0; 7138 7139 if (clp->cl_mvops->minor_version < 1) 7140 goto out; 7141 if (clp->cl_exchange_flags == 0) 7142 goto out; 7143 if (clp->cl_preserve_clid) 7144 goto out; 7145 cred = nfs4_get_clid_cred(clp); 7146 ret = nfs4_proc_destroy_clientid(clp, cred); 7147 if (cred) 7148 put_rpccred(cred); 7149 switch (ret) { 7150 case 0: 7151 case -NFS4ERR_STALE_CLIENTID: 7152 clp->cl_exchange_flags = 0; 7153 } 7154 out: 7155 return ret; 7156 } 7157 7158 struct nfs4_get_lease_time_data { 7159 struct nfs4_get_lease_time_args *args; 7160 struct nfs4_get_lease_time_res *res; 7161 struct nfs_client *clp; 7162 }; 7163 7164 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 7165 void *calldata) 7166 { 7167 struct nfs4_get_lease_time_data *data = 7168 (struct nfs4_get_lease_time_data *)calldata; 7169 7170 dprintk("--> %s\n", __func__); 7171 /* just setup sequence, do not trigger session recovery 7172 since we're invoked within one */ 7173 nfs41_setup_sequence(data->clp->cl_session, 7174 &data->args->la_seq_args, 7175 &data->res->lr_seq_res, 7176 task); 7177 dprintk("<-- %s\n", __func__); 7178 } 7179 7180 /* 7181 * Called from nfs4_state_manager thread for session setup, so don't recover 7182 * from sequence operation or clientid errors. 
7183 */ 7184 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 7185 { 7186 struct nfs4_get_lease_time_data *data = 7187 (struct nfs4_get_lease_time_data *)calldata; 7188 7189 dprintk("--> %s\n", __func__); 7190 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 7191 return; 7192 switch (task->tk_status) { 7193 case -NFS4ERR_DELAY: 7194 case -NFS4ERR_GRACE: 7195 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 7196 rpc_delay(task, NFS4_POLL_RETRY_MIN); 7197 task->tk_status = 0; 7198 /* fall through */ 7199 case -NFS4ERR_RETRY_UNCACHED_REP: 7200 rpc_restart_call_prepare(task); 7201 return; 7202 } 7203 dprintk("<-- %s\n", __func__); 7204 } 7205 7206 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 7207 .rpc_call_prepare = nfs4_get_lease_time_prepare, 7208 .rpc_call_done = nfs4_get_lease_time_done, 7209 }; 7210 7211 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 7212 { 7213 struct rpc_task *task; 7214 struct nfs4_get_lease_time_args args; 7215 struct nfs4_get_lease_time_res res = { 7216 .lr_fsinfo = fsinfo, 7217 }; 7218 struct nfs4_get_lease_time_data data = { 7219 .args = &args, 7220 .res = &res, 7221 .clp = clp, 7222 }; 7223 struct rpc_message msg = { 7224 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 7225 .rpc_argp = &args, 7226 .rpc_resp = &res, 7227 }; 7228 struct rpc_task_setup task_setup = { 7229 .rpc_client = clp->cl_rpcclient, 7230 .rpc_message = &msg, 7231 .callback_ops = &nfs4_get_lease_time_ops, 7232 .callback_data = &data, 7233 .flags = RPC_TASK_TIMEOUT, 7234 }; 7235 int status; 7236 7237 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 7238 nfs4_set_sequence_privileged(&args.la_seq_args); 7239 dprintk("--> %s\n", __func__); 7240 task = rpc_run_task(&task_setup); 7241 7242 if (IS_ERR(task)) 7243 status = PTR_ERR(task); 7244 else { 7245 status = task->tk_status; 7246 rpc_put_task(task); 7247 } 7248 dprintk("<-- %s return %d\n", __func__, status); 7249 7250 return status; 7251 } 7252 7253 /* 7254 * Initialize the values to be used by the client in CREATE_SESSION 7255 * If nfs4_init_session set the fore channel request and response sizes, 7256 * use them. 7257 * 7258 * Set the back channel max_resp_sz_cached to zero to force the client to 7259 * always set csa_cachethis to FALSE because the current implementation 7260 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
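 * As a rough illustration (assuming the usual 1MB NFS_MAX_FILE_IO_SIZE):
 * the fore channel advertises roughly 1MB plus the compound WRITE/READ
 * overhead as max_rqst_sz/max_resp_sz, while the back channel is capped
 * at a single request of PAGE_SIZE.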
7261 */ 7262 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) 7263 { 7264 unsigned int max_rqst_sz, max_resp_sz; 7265 7266 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead; 7267 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead; 7268 7269 /* Fore channel attributes */ 7270 args->fc_attrs.max_rqst_sz = max_rqst_sz; 7271 args->fc_attrs.max_resp_sz = max_resp_sz; 7272 args->fc_attrs.max_ops = NFS4_MAX_OPS; 7273 args->fc_attrs.max_reqs = max_session_slots; 7274 7275 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 7276 "max_ops=%u max_reqs=%u\n", 7277 __func__, 7278 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 7279 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 7280 7281 /* Back channel attributes */ 7282 args->bc_attrs.max_rqst_sz = PAGE_SIZE; 7283 args->bc_attrs.max_resp_sz = PAGE_SIZE; 7284 args->bc_attrs.max_resp_sz_cached = 0; 7285 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 7286 args->bc_attrs.max_reqs = 1; 7287 7288 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 7289 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 7290 __func__, 7291 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 7292 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 7293 args->bc_attrs.max_reqs); 7294 } 7295 7296 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, 7297 struct nfs41_create_session_res *res) 7298 { 7299 struct nfs4_channel_attrs *sent = &args->fc_attrs; 7300 struct nfs4_channel_attrs *rcvd = &res->fc_attrs; 7301 7302 if (rcvd->max_resp_sz > sent->max_resp_sz) 7303 return -EINVAL; 7304 /* 7305 * Our requested max_ops is the minimum we need; we're not 7306 * prepared to break up compounds into smaller pieces than that. 7307 * So, no point even trying to continue if the server won't 7308 * cooperate: 7309 */ 7310 if (rcvd->max_ops < sent->max_ops) 7311 return -EINVAL; 7312 if (rcvd->max_reqs == 0) 7313 return -EINVAL; 7314 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 7315 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 7316 return 0; 7317 } 7318 7319 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, 7320 struct nfs41_create_session_res *res) 7321 { 7322 struct nfs4_channel_attrs *sent = &args->bc_attrs; 7323 struct nfs4_channel_attrs *rcvd = &res->bc_attrs; 7324 7325 if (!(res->flags & SESSION4_BACK_CHAN)) 7326 goto out; 7327 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 7328 return -EINVAL; 7329 if (rcvd->max_resp_sz < sent->max_resp_sz) 7330 return -EINVAL; 7331 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 7332 return -EINVAL; 7333 /* These would render the backchannel useless: */ 7334 if (rcvd->max_ops != sent->max_ops) 7335 return -EINVAL; 7336 if (rcvd->max_reqs != sent->max_reqs) 7337 return -EINVAL; 7338 out: 7339 return 0; 7340 } 7341 7342 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 7343 struct nfs41_create_session_res *res) 7344 { 7345 int ret; 7346 7347 ret = nfs4_verify_fore_channel_attrs(args, res); 7348 if (ret) 7349 return ret; 7350 return nfs4_verify_back_channel_attrs(args, res); 7351 } 7352 7353 static void nfs4_update_session(struct nfs4_session *session, 7354 struct nfs41_create_session_res *res) 7355 { 7356 nfs4_copy_sessionid(&session->sess_id, &res->sessionid); 7357 /* Mark client id and session as being confirmed */ 7358 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 7359 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); 7360 session->flags = res->flags; 
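/*
 * Cache the negotiated channel attributes; the back channel set is only
 * meaningful if the server actually granted SESSION4_BACK_CHAN.
 */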
7361 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); 7362 if (res->flags & SESSION4_BACK_CHAN) 7363 memcpy(&session->bc_attrs, &res->bc_attrs, 7364 sizeof(session->bc_attrs)); 7365 } 7366 7367 static int _nfs4_proc_create_session(struct nfs_client *clp, 7368 struct rpc_cred *cred) 7369 { 7370 struct nfs4_session *session = clp->cl_session; 7371 struct nfs41_create_session_args args = { 7372 .client = clp, 7373 .clientid = clp->cl_clientid, 7374 .seqid = clp->cl_seqid, 7375 .cb_program = NFS4_CALLBACK, 7376 }; 7377 struct nfs41_create_session_res res; 7378 7379 struct rpc_message msg = { 7380 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 7381 .rpc_argp = &args, 7382 .rpc_resp = &res, 7383 .rpc_cred = cred, 7384 }; 7385 int status; 7386 7387 nfs4_init_channel_attrs(&args); 7388 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 7389 7390 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7391 trace_nfs4_create_session(clp, status); 7392 7393 if (!status) { 7394 /* Verify the session's negotiated channel_attrs values */ 7395 status = nfs4_verify_channel_attrs(&args, &res); 7396 /* Increment the clientid slot sequence id */ 7397 if (clp->cl_seqid == res.seqid) 7398 clp->cl_seqid++; 7399 if (status) 7400 goto out; 7401 nfs4_update_session(session, &res); 7402 } 7403 out: 7404 return status; 7405 } 7406 7407 /* 7408 * Issues a CREATE_SESSION operation to the server. 7409 * It is the responsibility of the caller to verify the session is 7410 * expired before calling this routine. 7411 */ 7412 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred) 7413 { 7414 int status; 7415 unsigned *ptr; 7416 struct nfs4_session *session = clp->cl_session; 7417 7418 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 7419 7420 status = _nfs4_proc_create_session(clp, cred); 7421 if (status) 7422 goto out; 7423 7424 /* Init or reset the session slot tables */ 7425 status = nfs4_setup_session_slot_tables(session); 7426 dprintk("slot table setup returned %d\n", status); 7427 if (status) 7428 goto out; 7429 7430 ptr = (unsigned *)&session->sess_id.data[0]; 7431 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 7432 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 7433 out: 7434 dprintk("<-- %s\n", __func__); 7435 return status; 7436 } 7437 7438 /* 7439 * Issue the over-the-wire RPC DESTROY_SESSION. 7440 * The caller must serialize access to this routine. 7441 */ 7442 int nfs4_proc_destroy_session(struct nfs4_session *session, 7443 struct rpc_cred *cred) 7444 { 7445 struct rpc_message msg = { 7446 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 7447 .rpc_argp = session, 7448 .rpc_cred = cred, 7449 }; 7450 int status = 0; 7451 7452 dprintk("--> nfs4_proc_destroy_session\n"); 7453 7454 /* session is still being setup */ 7455 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) 7456 return 0; 7457 7458 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 7459 trace_nfs4_destroy_session(session->clp, status); 7460 7461 if (status) 7462 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 7463 "Session has been destroyed regardless...\n", status); 7464 7465 dprintk("<-- nfs4_proc_destroy_session\n"); 7466 return status; 7467 } 7468 7469 /* 7470 * Renew the cl_session lease. 
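 * A stand-alone SEQUENCE compound is sent: NFS4ERR_DELAY is retried
 * after NFS4_POLL_RETRY_MAX, while any other error schedules lease
 * recovery (see nfs41_sequence_handle_errors()).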
7471 */ 7472 struct nfs4_sequence_data { 7473 struct nfs_client *clp; 7474 struct nfs4_sequence_args args; 7475 struct nfs4_sequence_res res; 7476 }; 7477 7478 static void nfs41_sequence_release(void *data) 7479 { 7480 struct nfs4_sequence_data *calldata = data; 7481 struct nfs_client *clp = calldata->clp; 7482 7483 if (atomic_read(&clp->cl_count) > 1) 7484 nfs4_schedule_state_renewal(clp); 7485 nfs_put_client(clp); 7486 kfree(calldata); 7487 } 7488 7489 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7490 { 7491 switch(task->tk_status) { 7492 case -NFS4ERR_DELAY: 7493 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7494 return -EAGAIN; 7495 default: 7496 nfs4_schedule_lease_recovery(clp); 7497 } 7498 return 0; 7499 } 7500 7501 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 7502 { 7503 struct nfs4_sequence_data *calldata = data; 7504 struct nfs_client *clp = calldata->clp; 7505 7506 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 7507 return; 7508 7509 trace_nfs4_sequence(clp, task->tk_status); 7510 if (task->tk_status < 0) { 7511 dprintk("%s ERROR %d\n", __func__, task->tk_status); 7512 if (atomic_read(&clp->cl_count) == 1) 7513 goto out; 7514 7515 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 7516 rpc_restart_call_prepare(task); 7517 return; 7518 } 7519 } 7520 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 7521 out: 7522 dprintk("<-- %s\n", __func__); 7523 } 7524 7525 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 7526 { 7527 struct nfs4_sequence_data *calldata = data; 7528 struct nfs_client *clp = calldata->clp; 7529 struct nfs4_sequence_args *args; 7530 struct nfs4_sequence_res *res; 7531 7532 args = task->tk_msg.rpc_argp; 7533 res = task->tk_msg.rpc_resp; 7534 7535 nfs41_setup_sequence(clp->cl_session, args, res, task); 7536 } 7537 7538 static const struct rpc_call_ops nfs41_sequence_ops = { 7539 .rpc_call_done = nfs41_sequence_call_done, 7540 .rpc_call_prepare = nfs41_sequence_prepare, 7541 .rpc_release = nfs41_sequence_release, 7542 }; 7543 7544 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 7545 struct rpc_cred *cred, 7546 bool is_privileged) 7547 { 7548 struct nfs4_sequence_data *calldata; 7549 struct rpc_message msg = { 7550 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 7551 .rpc_cred = cred, 7552 }; 7553 struct rpc_task_setup task_setup_data = { 7554 .rpc_client = clp->cl_rpcclient, 7555 .rpc_message = &msg, 7556 .callback_ops = &nfs41_sequence_ops, 7557 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 7558 }; 7559 7560 if (!atomic_inc_not_zero(&clp->cl_count)) 7561 return ERR_PTR(-EIO); 7562 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7563 if (calldata == NULL) { 7564 nfs_put_client(clp); 7565 return ERR_PTR(-ENOMEM); 7566 } 7567 nfs4_init_sequence(&calldata->args, &calldata->res, 0); 7568 if (is_privileged) 7569 nfs4_set_sequence_privileged(&calldata->args); 7570 msg.rpc_argp = &calldata->args; 7571 msg.rpc_resp = &calldata->res; 7572 calldata->clp = clp; 7573 task_setup_data.callback_data = calldata; 7574 7575 return rpc_run_task(&task_setup_data); 7576 } 7577 7578 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 7579 { 7580 struct rpc_task *task; 7581 int ret = 0; 7582 7583 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 7584 return -EAGAIN; 7585 task = _nfs41_proc_sequence(clp, cred, false); 7586 if (IS_ERR(task)) 7587 ret = PTR_ERR(task); 7588 else 7589 rpc_put_task_async(task); 
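	/*
	 * Note: rpc_put_task_async() defers the final teardown of the task
	 * to a workqueue rather than doing it in the caller; this renewal
	 * path can run from contexts where performing the last put
	 * synchronously (as plain rpc_put_task() would) is presumably best
	 * avoided.
	 */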
7590 dprintk("<-- %s status=%d\n", __func__, ret); 7591 return ret; 7592 } 7593 7594 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 7595 { 7596 struct rpc_task *task; 7597 int ret; 7598 7599 task = _nfs41_proc_sequence(clp, cred, true); 7600 if (IS_ERR(task)) { 7601 ret = PTR_ERR(task); 7602 goto out; 7603 } 7604 ret = rpc_wait_for_completion_task(task); 7605 if (!ret) 7606 ret = task->tk_status; 7607 rpc_put_task(task); 7608 out: 7609 dprintk("<-- %s status=%d\n", __func__, ret); 7610 return ret; 7611 } 7612 7613 struct nfs4_reclaim_complete_data { 7614 struct nfs_client *clp; 7615 struct nfs41_reclaim_complete_args arg; 7616 struct nfs41_reclaim_complete_res res; 7617 }; 7618 7619 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 7620 { 7621 struct nfs4_reclaim_complete_data *calldata = data; 7622 7623 nfs41_setup_sequence(calldata->clp->cl_session, 7624 &calldata->arg.seq_args, 7625 &calldata->res.seq_res, 7626 task); 7627 } 7628 7629 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 7630 { 7631 switch(task->tk_status) { 7632 case 0: 7633 case -NFS4ERR_COMPLETE_ALREADY: 7634 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 7635 break; 7636 case -NFS4ERR_DELAY: 7637 rpc_delay(task, NFS4_POLL_RETRY_MAX); 7638 /* fall through */ 7639 case -NFS4ERR_RETRY_UNCACHED_REP: 7640 return -EAGAIN; 7641 default: 7642 nfs4_schedule_lease_recovery(clp); 7643 } 7644 return 0; 7645 } 7646 7647 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 7648 { 7649 struct nfs4_reclaim_complete_data *calldata = data; 7650 struct nfs_client *clp = calldata->clp; 7651 struct nfs4_sequence_res *res = &calldata->res.seq_res; 7652 7653 dprintk("--> %s\n", __func__); 7654 if (!nfs41_sequence_done(task, res)) 7655 return; 7656 7657 trace_nfs4_reclaim_complete(clp, task->tk_status); 7658 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 7659 rpc_restart_call_prepare(task); 7660 return; 7661 } 7662 dprintk("<-- %s\n", __func__); 7663 } 7664 7665 static void nfs4_free_reclaim_complete_data(void *data) 7666 { 7667 struct nfs4_reclaim_complete_data *calldata = data; 7668 7669 kfree(calldata); 7670 } 7671 7672 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 7673 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 7674 .rpc_call_done = nfs4_reclaim_complete_done, 7675 .rpc_release = nfs4_free_reclaim_complete_data, 7676 }; 7677 7678 /* 7679 * Issue a global reclaim complete. 
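 *
 * RECLAIM_COMPLETE tells the server that this client has finished
 * reclaiming its state (e.g. following a server reboot).  The caller
 * below always passes one_fs = 0, i.e. the "whole client" form of the
 * operation, rather than the per-filesystem form allowed by RFC 5661.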
7680 */ 7681 static int nfs41_proc_reclaim_complete(struct nfs_client *clp, 7682 struct rpc_cred *cred) 7683 { 7684 struct nfs4_reclaim_complete_data *calldata; 7685 struct rpc_task *task; 7686 struct rpc_message msg = { 7687 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 7688 .rpc_cred = cred, 7689 }; 7690 struct rpc_task_setup task_setup_data = { 7691 .rpc_client = clp->cl_rpcclient, 7692 .rpc_message = &msg, 7693 .callback_ops = &nfs4_reclaim_complete_call_ops, 7694 .flags = RPC_TASK_ASYNC, 7695 }; 7696 int status = -ENOMEM; 7697 7698 dprintk("--> %s\n", __func__); 7699 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 7700 if (calldata == NULL) 7701 goto out; 7702 calldata->clp = clp; 7703 calldata->arg.one_fs = 0; 7704 7705 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0); 7706 nfs4_set_sequence_privileged(&calldata->arg.seq_args); 7707 msg.rpc_argp = &calldata->arg; 7708 msg.rpc_resp = &calldata->res; 7709 task_setup_data.callback_data = calldata; 7710 task = rpc_run_task(&task_setup_data); 7711 if (IS_ERR(task)) { 7712 status = PTR_ERR(task); 7713 goto out; 7714 } 7715 status = nfs4_wait_for_completion_rpc_task(task); 7716 if (status == 0) 7717 status = task->tk_status; 7718 rpc_put_task(task); 7719 return 0; 7720 out: 7721 dprintk("<-- %s status=%d\n", __func__, status); 7722 return status; 7723 } 7724 7725 static void 7726 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 7727 { 7728 struct nfs4_layoutget *lgp = calldata; 7729 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 7730 struct nfs4_session *session = nfs4_get_session(server); 7731 7732 dprintk("--> %s\n", __func__); 7733 /* Note there is a race here, where a CB_LAYOUTRECALL can come in 7734 * right now covering the LAYOUTGET we are about to send. 7735 * However, that is not so catastrophic, and there seems 7736 * to be no way to prevent it completely. 7737 */ 7738 if (nfs41_setup_sequence(session, &lgp->args.seq_args, 7739 &lgp->res.seq_res, task)) 7740 return; 7741 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid, 7742 NFS_I(lgp->args.inode)->layout, 7743 &lgp->args.range, 7744 lgp->args.ctx->state)) { 7745 rpc_exit(task, NFS4_OK); 7746 } 7747 } 7748 7749 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 7750 { 7751 struct nfs4_layoutget *lgp = calldata; 7752 struct inode *inode = lgp->args.inode; 7753 struct nfs_server *server = NFS_SERVER(inode); 7754 struct pnfs_layout_hdr *lo; 7755 struct nfs4_state *state = NULL; 7756 unsigned long timeo, now, giveup; 7757 7758 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); 7759 7760 if (!nfs41_sequence_done(task, &lgp->res.seq_res)) 7761 goto out; 7762 7763 switch (task->tk_status) { 7764 case 0: 7765 goto out; 7766 /* 7767 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of 7768 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3). 7769 */ 7770 case -NFS4ERR_BADLAYOUT: 7771 goto out_overflow; 7772 /* 7773 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client 7774 * (or clients) writing to the same RAID stripe except when 7775 * the minlength argument is 0 (see RFC5661 section 18.43.3). 7776 */ 7777 case -NFS4ERR_LAYOUTTRYLATER: 7778 if (lgp->args.minlength == 0) 7779 goto out_overflow; 7780 /* 7781 * NFS4ERR_RECALLCONFLICT means the conflict is with this client itself 7782 * (it must recall an existing layout before getting a new one).
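 *
 * The retry below backs off by roughly the time already spent waiting,
 * clamped between NFS4_POLL_RETRY_MIN and one jiffy short of the point
 * at which we give up, so repeated conflicts are polled progressively
 * less often.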
7783 */ 7784 case -NFS4ERR_RECALLCONFLICT: 7785 timeo = rpc_get_timeout(task->tk_client); 7786 giveup = lgp->args.timestamp + timeo; 7787 now = jiffies; 7788 if (time_after(giveup, now)) { 7789 unsigned long delay; 7790 7791 /* Delay for: 7792 * - Not less than NFS4_POLL_RETRY_MIN. 7793 * - At most one jiffy before the give-up time. 7794 * - Exponential backoff: the time elapsed since the first attempt. 7795 */ 7796 delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN, 7797 min((giveup - now - 1), 7798 now - lgp->args.timestamp)); 7799 7800 dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n", 7801 __func__, delay); 7802 rpc_delay(task, delay); 7803 task->tk_status = 0; 7804 rpc_restart_call_prepare(task); 7805 goto out; /* Do not call nfs4_async_handle_error() */ 7806 } 7807 break; 7808 case -NFS4ERR_EXPIRED: 7809 case -NFS4ERR_BAD_STATEID: 7810 spin_lock(&inode->i_lock); 7811 lo = NFS_I(inode)->layout; 7812 if (!lo || list_empty(&lo->plh_segs)) { 7813 spin_unlock(&inode->i_lock); 7814 /* If the open stateid was bad, then recover it. */ 7815 state = lgp->args.ctx->state; 7816 } else { 7817 LIST_HEAD(head); 7818 7819 /* 7820 * Mark the bad layout state as invalid, then retry 7821 * with the current stateid. 7822 */ 7823 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL); 7824 spin_unlock(&inode->i_lock); 7825 pnfs_free_lseg_list(&head); 7826 7827 task->tk_status = 0; 7828 rpc_restart_call_prepare(task); 7829 } 7830 } 7831 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) 7832 rpc_restart_call_prepare(task); 7833 out: 7834 dprintk("<-- %s\n", __func__); 7835 return; 7836 out_overflow: 7837 task->tk_status = -EOVERFLOW; 7838 goto out; 7839 } 7840 7841 static size_t max_response_pages(struct nfs_server *server) 7842 { 7843 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 7844 return nfs_page_array_len(0, max_resp_sz); 7845 } 7846 7847 static void nfs4_free_pages(struct page **pages, size_t size) 7848 { 7849 int i; 7850 7851 if (!pages) 7852 return; 7853 7854 for (i = 0; i < size; i++) { 7855 if (!pages[i]) 7856 break; 7857 __free_page(pages[i]); 7858 } 7859 kfree(pages); 7860 } 7861 7862 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) 7863 { 7864 struct page **pages; 7865 int i; 7866 7867 pages = kcalloc(size, sizeof(struct page *), gfp_flags); 7868 if (!pages) { 7869 dprintk("%s: can't alloc array of %zu pages\n", __func__, size); 7870 return NULL; 7871 } 7872 7873 for (i = 0; i < size; i++) { 7874 pages[i] = alloc_page(gfp_flags); 7875 if (!pages[i]) { 7876 dprintk("%s: failed to allocate page\n", __func__); 7877 nfs4_free_pages(pages, size); 7878 return NULL; 7879 } 7880 } 7881 7882 return pages; 7883 } 7884 7885 static void nfs4_layoutget_release(void *calldata) 7886 { 7887 struct nfs4_layoutget *lgp = calldata; 7888 struct inode *inode = lgp->args.inode; 7889 struct nfs_server *server = NFS_SERVER(inode); 7890 size_t max_pages = max_response_pages(server); 7891 7892 dprintk("--> %s\n", __func__); 7893 nfs4_free_pages(lgp->args.layout.pages, max_pages); 7894 pnfs_put_layout_hdr(NFS_I(inode)->layout); 7895 put_nfs_open_context(lgp->args.ctx); 7896 kfree(calldata); 7897 dprintk("<-- %s\n", __func__); 7898 } 7899 7900 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 7901 .rpc_call_prepare = nfs4_layoutget_prepare, 7902 .rpc_call_done = nfs4_layoutget_done, 7903 .rpc_release = nfs4_layoutget_release, 7904 }; 7905 7906 struct pnfs_layout_segment * 7907 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) 7908 { 7909 struct inode
*inode = lgp->args.inode; 7910 struct nfs_server *server = NFS_SERVER(inode); 7911 size_t max_pages = max_response_pages(server); 7912 struct rpc_task *task; 7913 struct rpc_message msg = { 7914 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 7915 .rpc_argp = &lgp->args, 7916 .rpc_resp = &lgp->res, 7917 .rpc_cred = lgp->cred, 7918 }; 7919 struct rpc_task_setup task_setup_data = { 7920 .rpc_client = server->client, 7921 .rpc_message = &msg, 7922 .callback_ops = &nfs4_layoutget_call_ops, 7923 .callback_data = lgp, 7924 .flags = RPC_TASK_ASYNC, 7925 }; 7926 struct pnfs_layout_segment *lseg = NULL; 7927 int status = 0; 7928 7929 dprintk("--> %s\n", __func__); 7930 7931 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ 7932 pnfs_get_layout_hdr(NFS_I(inode)->layout); 7933 7934 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); 7935 if (!lgp->args.layout.pages) { 7936 nfs4_layoutget_release(lgp); 7937 return ERR_PTR(-ENOMEM); 7938 } 7939 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 7940 lgp->args.timestamp = jiffies; 7941 7942 lgp->res.layoutp = &lgp->args.layout; 7943 lgp->res.seq_res.sr_slot = NULL; 7944 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 7945 7946 task = rpc_run_task(&task_setup_data); 7947 if (IS_ERR(task)) 7948 return ERR_CAST(task); 7949 status = nfs4_wait_for_completion_rpc_task(task); 7950 if (status == 0) 7951 status = task->tk_status; 7952 trace_nfs4_layoutget(lgp->args.ctx, 7953 &lgp->args.range, 7954 &lgp->res.range, 7955 status); 7956 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ 7957 if (status == 0 && lgp->res.layoutp->len) 7958 lseg = pnfs_layout_process(lgp); 7959 rpc_put_task(task); 7960 dprintk("<-- %s status=%d\n", __func__, status); 7961 if (status) 7962 return ERR_PTR(status); 7963 return lseg; 7964 } 7965 7966 static void 7967 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 7968 { 7969 struct nfs4_layoutreturn *lrp = calldata; 7970 7971 dprintk("--> %s\n", __func__); 7972 nfs41_setup_sequence(lrp->clp->cl_session, 7973 &lrp->args.seq_args, 7974 &lrp->res.seq_res, 7975 task); 7976 } 7977 7978 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 7979 { 7980 struct nfs4_layoutreturn *lrp = calldata; 7981 struct nfs_server *server; 7982 7983 dprintk("--> %s\n", __func__); 7984 7985 if (!nfs41_sequence_done(task, &lrp->res.seq_res)) 7986 return; 7987 7988 server = NFS_SERVER(lrp->args.inode); 7989 switch (task->tk_status) { 7990 default: 7991 task->tk_status = 0; 7992 case 0: 7993 break; 7994 case -NFS4ERR_DELAY: 7995 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN) 7996 break; 7997 rpc_restart_call_prepare(task); 7998 return; 7999 } 8000 dprintk("<-- %s\n", __func__); 8001 } 8002 8003 static void nfs4_layoutreturn_release(void *calldata) 8004 { 8005 struct nfs4_layoutreturn *lrp = calldata; 8006 struct pnfs_layout_hdr *lo = lrp->args.layout; 8007 LIST_HEAD(freeme); 8008 8009 dprintk("--> %s\n", __func__); 8010 spin_lock(&lo->plh_inode->i_lock); 8011 if (lrp->res.lrs_present) 8012 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 8013 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range); 8014 pnfs_clear_layoutreturn_waitbit(lo); 8015 lo->plh_block_lgets--; 8016 spin_unlock(&lo->plh_inode->i_lock); 8017 pnfs_free_lseg_list(&freeme); 8018 pnfs_put_layout_hdr(lrp->args.layout); 8019 nfs_iput_and_deactive(lrp->inode); 8020 kfree(calldata); 8021 dprintk("<-- %s\n", __func__); 8022 } 8023 8024 static const struct rpc_call_ops 
nfs4_layoutreturn_call_ops = { 8025 .rpc_call_prepare = nfs4_layoutreturn_prepare, 8026 .rpc_call_done = nfs4_layoutreturn_done, 8027 .rpc_release = nfs4_layoutreturn_release, 8028 }; 8029 8030 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync) 8031 { 8032 struct rpc_task *task; 8033 struct rpc_message msg = { 8034 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 8035 .rpc_argp = &lrp->args, 8036 .rpc_resp = &lrp->res, 8037 .rpc_cred = lrp->cred, 8038 }; 8039 struct rpc_task_setup task_setup_data = { 8040 .rpc_client = NFS_SERVER(lrp->args.inode)->client, 8041 .rpc_message = &msg, 8042 .callback_ops = &nfs4_layoutreturn_call_ops, 8043 .callback_data = lrp, 8044 }; 8045 int status = 0; 8046 8047 dprintk("--> %s\n", __func__); 8048 if (!sync) { 8049 lrp->inode = nfs_igrab_and_active(lrp->args.inode); 8050 if (!lrp->inode) { 8051 nfs4_layoutreturn_release(lrp); 8052 return -EAGAIN; 8053 } 8054 task_setup_data.flags |= RPC_TASK_ASYNC; 8055 } 8056 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); 8057 task = rpc_run_task(&task_setup_data); 8058 if (IS_ERR(task)) 8059 return PTR_ERR(task); 8060 if (sync) 8061 status = task->tk_status; 8062 trace_nfs4_layoutreturn(lrp->args.inode, status); 8063 dprintk("<-- %s status=%d\n", __func__, status); 8064 rpc_put_task(task); 8065 return status; 8066 } 8067 8068 static int 8069 _nfs4_proc_getdeviceinfo(struct nfs_server *server, 8070 struct pnfs_device *pdev, 8071 struct rpc_cred *cred) 8072 { 8073 struct nfs4_getdeviceinfo_args args = { 8074 .pdev = pdev, 8075 .notify_types = NOTIFY_DEVICEID4_CHANGE | 8076 NOTIFY_DEVICEID4_DELETE, 8077 }; 8078 struct nfs4_getdeviceinfo_res res = { 8079 .pdev = pdev, 8080 }; 8081 struct rpc_message msg = { 8082 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 8083 .rpc_argp = &args, 8084 .rpc_resp = &res, 8085 .rpc_cred = cred, 8086 }; 8087 int status; 8088 8089 dprintk("--> %s\n", __func__); 8090 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 8091 if (res.notification & ~args.notify_types) 8092 dprintk("%s: unsupported notification\n", __func__); 8093 if (res.notification != args.notify_types) 8094 pdev->nocache = 1; 8095 8096 dprintk("<-- %s status=%d\n", __func__, status); 8097 8098 return status; 8099 } 8100 8101 int nfs4_proc_getdeviceinfo(struct nfs_server *server, 8102 struct pnfs_device *pdev, 8103 struct rpc_cred *cred) 8104 { 8105 struct nfs4_exception exception = { }; 8106 int err; 8107 8108 do { 8109 err = nfs4_handle_exception(server, 8110 _nfs4_proc_getdeviceinfo(server, pdev, cred), 8111 &exception); 8112 } while (exception.retry); 8113 return err; 8114 } 8115 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 8116 8117 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 8118 { 8119 struct nfs4_layoutcommit_data *data = calldata; 8120 struct nfs_server *server = NFS_SERVER(data->args.inode); 8121 struct nfs4_session *session = nfs4_get_session(server); 8122 8123 nfs41_setup_sequence(session, 8124 &data->args.seq_args, 8125 &data->res.seq_res, 8126 task); 8127 } 8128 8129 static void 8130 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 8131 { 8132 struct nfs4_layoutcommit_data *data = calldata; 8133 struct nfs_server *server = NFS_SERVER(data->args.inode); 8134 8135 if (!nfs41_sequence_done(task, &data->res.seq_res)) 8136 return; 8137 8138 switch (task->tk_status) { /* Just ignore these failures */ 8139 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 8140 case -NFS4ERR_BADIOMODE: 
/* no IOMODE_RW layout for range */ 8141 case -NFS4ERR_BADLAYOUT: /* no layout */ 8142 case -NFS4ERR_GRACE: /* loca_reclaim is always false */ 8143 task->tk_status = 0; 8144 case 0: 8145 break; 8146 default: 8147 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) { 8148 rpc_restart_call_prepare(task); 8149 return; 8150 } 8151 } 8152 } 8153 8154 static void nfs4_layoutcommit_release(void *calldata) 8155 { 8156 struct nfs4_layoutcommit_data *data = calldata; 8157 8158 pnfs_cleanup_layoutcommit(data); 8159 nfs_post_op_update_inode_force_wcc(data->args.inode, 8160 data->res.fattr); 8161 put_rpccred(data->cred); 8162 nfs_iput_and_deactive(data->inode); 8163 kfree(data); 8164 } 8165 8166 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 8167 .rpc_call_prepare = nfs4_layoutcommit_prepare, 8168 .rpc_call_done = nfs4_layoutcommit_done, 8169 .rpc_release = nfs4_layoutcommit_release, 8170 }; 8171 8172 int 8173 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 8174 { 8175 struct rpc_message msg = { 8176 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 8177 .rpc_argp = &data->args, 8178 .rpc_resp = &data->res, 8179 .rpc_cred = data->cred, 8180 }; 8181 struct rpc_task_setup task_setup_data = { 8182 .task = &data->task, 8183 .rpc_client = NFS_CLIENT(data->args.inode), 8184 .rpc_message = &msg, 8185 .callback_ops = &nfs4_layoutcommit_ops, 8186 .callback_data = data, 8187 }; 8188 struct rpc_task *task; 8189 int status = 0; 8190 8191 dprintk("NFS: initiating layoutcommit call. sync %d " 8192 "lbw: %llu inode %lu\n", sync, 8193 data->args.lastbytewritten, 8194 data->args.inode->i_ino); 8195 8196 if (!sync) { 8197 data->inode = nfs_igrab_and_active(data->args.inode); 8198 if (data->inode == NULL) { 8199 nfs4_layoutcommit_release(data); 8200 return -EAGAIN; 8201 } 8202 task_setup_data.flags = RPC_TASK_ASYNC; 8203 } 8204 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 8205 task = rpc_run_task(&task_setup_data); 8206 if (IS_ERR(task)) 8207 return PTR_ERR(task); 8208 if (sync) 8209 status = task->tk_status; 8210 trace_nfs4_layoutcommit(data->args.inode, status); 8211 dprintk("%s: status %d\n", __func__, status); 8212 rpc_put_task(task); 8213 return status; 8214 } 8215 8216 /* 8217 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if 8218 * possible) as per RFC3530bis and RFC5661 Security Considerations sections 8219 */ 8220 static int 8221 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 8222 struct nfs_fsinfo *info, 8223 struct nfs4_secinfo_flavors *flavors, bool use_integrity) 8224 { 8225 struct nfs41_secinfo_no_name_args args = { 8226 .style = SECINFO_STYLE_CURRENT_FH, 8227 }; 8228 struct nfs4_secinfo_res res = { 8229 .flavors = flavors, 8230 }; 8231 struct rpc_message msg = { 8232 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 8233 .rpc_argp = &args, 8234 .rpc_resp = &res, 8235 }; 8236 struct rpc_clnt *clnt = server->client; 8237 struct rpc_cred *cred = NULL; 8238 int status; 8239 8240 if (use_integrity) { 8241 clnt = server->nfs_client->cl_rpcclient; 8242 cred = nfs4_get_clid_cred(server->nfs_client); 8243 msg.rpc_cred = cred; 8244 } 8245 8246 dprintk("--> %s\n", __func__); 8247 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, 8248 &res.seq_res, 0); 8249 dprintk("<-- %s status=%d\n", __func__, status); 8250 8251 if (cred) 8252 put_rpccred(cred); 8253 8254 return status; 8255 } 8256 8257 static int 8258 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct
nfs_fh *fhandle, 8259 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 8260 { 8261 struct nfs4_exception exception = { }; 8262 int err; 8263 do { 8264 /* first try using integrity protection */ 8265 err = -NFS4ERR_WRONGSEC; 8266 8267 /* try to use integrity protection with machine cred */ 8268 if (_nfs4_is_integrity_protected(server->nfs_client)) 8269 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8270 flavors, true); 8271 8272 /* 8273 * if unable to use integrity protection, or SECINFO with 8274 * integrity protection returns NFS4ERR_WRONGSEC (which is 8275 * disallowed by spec, but exists in deployed servers) use 8276 * the current filesystem's rpc_client and the user cred. 8277 */ 8278 if (err == -NFS4ERR_WRONGSEC) 8279 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, 8280 flavors, false); 8281 8282 switch (err) { 8283 case 0: 8284 case -NFS4ERR_WRONGSEC: 8285 case -ENOTSUPP: 8286 goto out; 8287 default: 8288 err = nfs4_handle_exception(server, err, &exception); 8289 } 8290 } while (exception.retry); 8291 out: 8292 return err; 8293 } 8294 8295 static int 8296 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 8297 struct nfs_fsinfo *info) 8298 { 8299 int err; 8300 struct page *page; 8301 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR; 8302 struct nfs4_secinfo_flavors *flavors; 8303 struct nfs4_secinfo4 *secinfo; 8304 int i; 8305 8306 page = alloc_page(GFP_KERNEL); 8307 if (!page) { 8308 err = -ENOMEM; 8309 goto out; 8310 } 8311 8312 flavors = page_address(page); 8313 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 8314 8315 /* 8316 * Fall back on "guess and check" method if 8317 * the server doesn't support SECINFO_NO_NAME 8318 */ 8319 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { 8320 err = nfs4_find_root_sec(server, fhandle, info); 8321 goto out_freepage; 8322 } 8323 if (err) 8324 goto out_freepage; 8325 8326 for (i = 0; i < flavors->num_flavors; i++) { 8327 secinfo = &flavors->flavors[i]; 8328 8329 switch (secinfo->flavor) { 8330 case RPC_AUTH_NULL: 8331 case RPC_AUTH_UNIX: 8332 case RPC_AUTH_GSS: 8333 flavor = rpcauth_get_pseudoflavor(secinfo->flavor, 8334 &secinfo->flavor_info); 8335 break; 8336 default: 8337 flavor = RPC_AUTH_MAXFLAVOR; 8338 break; 8339 } 8340 8341 if (!nfs_auth_info_match(&server->auth_info, flavor)) 8342 flavor = RPC_AUTH_MAXFLAVOR; 8343 8344 if (flavor != RPC_AUTH_MAXFLAVOR) { 8345 err = nfs4_lookup_root_sec(server, fhandle, 8346 info, flavor); 8347 if (!err) 8348 break; 8349 } 8350 } 8351 8352 if (flavor == RPC_AUTH_MAXFLAVOR) 8353 err = -EPERM; 8354 8355 out_freepage: 8356 put_page(page); 8357 if (err == -EACCES) 8358 return -EPERM; 8359 out: 8360 return err; 8361 } 8362 8363 static int _nfs41_test_stateid(struct nfs_server *server, 8364 nfs4_stateid *stateid, 8365 struct rpc_cred *cred) 8366 { 8367 int status; 8368 struct nfs41_test_stateid_args args = { 8369 .stateid = stateid, 8370 }; 8371 struct nfs41_test_stateid_res res; 8372 struct rpc_message msg = { 8373 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 8374 .rpc_argp = &args, 8375 .rpc_resp = &res, 8376 .rpc_cred = cred, 8377 }; 8378 struct rpc_clnt *rpc_client = server->client; 8379 8380 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8381 &rpc_client, &msg); 8382 8383 dprintk("NFS call test_stateid %p\n", stateid); 8384 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0); 8385 nfs4_set_sequence_privileged(&args.seq_args); 8386 status = nfs4_call_sync_sequence(rpc_client, server, &msg, 8387 
&args.seq_args, &res.seq_res); 8388 if (status != NFS_OK) { 8389 dprintk("NFS reply test_stateid: failed, %d\n", status); 8390 return status; 8391 } 8392 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 8393 return -res.status; 8394 } 8395 8396 /** 8397 * nfs41_test_stateid - perform a TEST_STATEID operation 8398 * 8399 * @server: server / transport on which to perform the operation 8400 * @stateid: state ID to test 8401 * @cred: credential 8402 * 8403 * Returns NFS_OK if the server recognizes that "stateid" is valid. 8404 * Otherwise a negative NFS4ERR value is returned if the operation 8405 * failed or the state ID is not currently valid. 8406 */ 8407 static int nfs41_test_stateid(struct nfs_server *server, 8408 nfs4_stateid *stateid, 8409 struct rpc_cred *cred) 8410 { 8411 struct nfs4_exception exception = { }; 8412 int err; 8413 do { 8414 err = _nfs41_test_stateid(server, stateid, cred); 8415 if (err != -NFS4ERR_DELAY) 8416 break; 8417 nfs4_handle_exception(server, err, &exception); 8418 } while (exception.retry); 8419 return err; 8420 } 8421 8422 struct nfs_free_stateid_data { 8423 struct nfs_server *server; 8424 struct nfs41_free_stateid_args args; 8425 struct nfs41_free_stateid_res res; 8426 }; 8427 8428 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 8429 { 8430 struct nfs_free_stateid_data *data = calldata; 8431 nfs41_setup_sequence(nfs4_get_session(data->server), 8432 &data->args.seq_args, 8433 &data->res.seq_res, 8434 task); 8435 } 8436 8437 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 8438 { 8439 struct nfs_free_stateid_data *data = calldata; 8440 8441 nfs41_sequence_done(task, &data->res.seq_res); 8442 8443 switch (task->tk_status) { 8444 case -NFS4ERR_DELAY: 8445 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN) 8446 rpc_restart_call_prepare(task); 8447 } 8448 } 8449 8450 static void nfs41_free_stateid_release(void *calldata) 8451 { 8452 kfree(calldata); 8453 } 8454 8455 static const struct rpc_call_ops nfs41_free_stateid_ops = { 8456 .rpc_call_prepare = nfs41_free_stateid_prepare, 8457 .rpc_call_done = nfs41_free_stateid_done, 8458 .rpc_release = nfs41_free_stateid_release, 8459 }; 8460 8461 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server, 8462 nfs4_stateid *stateid, 8463 struct rpc_cred *cred, 8464 bool privileged) 8465 { 8466 struct rpc_message msg = { 8467 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 8468 .rpc_cred = cred, 8469 }; 8470 struct rpc_task_setup task_setup = { 8471 .rpc_client = server->client, 8472 .rpc_message = &msg, 8473 .callback_ops = &nfs41_free_stateid_ops, 8474 .flags = RPC_TASK_ASYNC, 8475 }; 8476 struct nfs_free_stateid_data *data; 8477 8478 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID, 8479 &task_setup.rpc_client, &msg); 8480 8481 dprintk("NFS call free_stateid %p\n", stateid); 8482 data = kmalloc(sizeof(*data), GFP_NOFS); 8483 if (!data) 8484 return ERR_PTR(-ENOMEM); 8485 data->server = server; 8486 nfs4_stateid_copy(&data->args.stateid, stateid); 8487 8488 task_setup.callback_data = data; 8489 8490 msg.rpc_argp = &data->args; 8491 msg.rpc_resp = &data->res; 8492 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 8493 if (privileged) 8494 nfs4_set_sequence_privileged(&data->args.seq_args); 8495 8496 return rpc_run_task(&task_setup); 8497 } 8498 8499 /** 8500 * nfs41_free_stateid - perform a FREE_STATEID operation 8501 * 8502 * @server: server / transport on which to perform the 
operation 8503 * @stateid: state ID to release 8504 * @cred: credential 8505 * 8506 * Returns NFS_OK if the server freed "stateid". Otherwise a 8507 * negative NFS4ERR value is returned. 8508 */ 8509 static int nfs41_free_stateid(struct nfs_server *server, 8510 nfs4_stateid *stateid, 8511 struct rpc_cred *cred) 8512 { 8513 struct rpc_task *task; 8514 int ret; 8515 8516 task = _nfs41_free_stateid(server, stateid, cred, true); 8517 if (IS_ERR(task)) 8518 return PTR_ERR(task); 8519 ret = rpc_wait_for_completion_task(task); 8520 if (!ret) 8521 ret = task->tk_status; 8522 rpc_put_task(task); 8523 return ret; 8524 } 8525 8526 static void 8527 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 8528 { 8529 struct rpc_task *task; 8530 struct rpc_cred *cred = lsp->ls_state->owner->so_cred; 8531 8532 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false); 8533 nfs4_free_lock_state(server, lsp); 8534 if (IS_ERR(task)) 8535 return; 8536 rpc_put_task(task); 8537 } 8538 8539 static bool nfs41_match_stateid(const nfs4_stateid *s1, 8540 const nfs4_stateid *s2) 8541 { 8542 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 8543 return false; 8544 8545 if (s1->seqid == s2->seqid) 8546 return true; 8547 if (s1->seqid == 0 || s2->seqid == 0) 8548 return true; 8549 8550 return false; 8551 } 8552 8553 #endif /* CONFIG_NFS_V4_1 */ 8554 8555 static bool nfs4_match_stateid(const nfs4_stateid *s1, 8556 const nfs4_stateid *s2) 8557 { 8558 return nfs4_stateid_match(s1, s2); 8559 } 8560 8561 8562 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 8563 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8564 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8565 .recover_open = nfs4_open_reclaim, 8566 .recover_lock = nfs4_lock_reclaim, 8567 .establish_clid = nfs4_init_clientid, 8568 .detect_trunking = nfs40_discover_server_trunking, 8569 }; 8570 8571 #if defined(CONFIG_NFS_V4_1) 8572 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 8573 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 8574 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 8575 .recover_open = nfs4_open_reclaim, 8576 .recover_lock = nfs4_lock_reclaim, 8577 .establish_clid = nfs41_init_clientid, 8578 .reclaim_complete = nfs41_proc_reclaim_complete, 8579 .detect_trunking = nfs41_discover_server_trunking, 8580 }; 8581 #endif /* CONFIG_NFS_V4_1 */ 8582 8583 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 8584 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8585 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8586 .recover_open = nfs40_open_expired, 8587 .recover_lock = nfs4_lock_expired, 8588 .establish_clid = nfs4_init_clientid, 8589 }; 8590 8591 #if defined(CONFIG_NFS_V4_1) 8592 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 8593 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8594 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8595 .recover_open = nfs41_open_expired, 8596 .recover_lock = nfs41_lock_expired, 8597 .establish_clid = nfs41_init_clientid, 8598 }; 8599 #endif /* CONFIG_NFS_V4_1 */ 8600 8601 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 8602 .sched_state_renewal = nfs4_proc_async_renew, 8603 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, 8604 .renew_lease = nfs4_proc_renew, 8605 }; 8606 8607 #if defined(CONFIG_NFS_V4_1) 8608 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 8609 .sched_state_renewal = nfs41_proc_async_sequence, 8610 .get_state_renewal_cred_locked = 
nfs4_get_machine_cred_locked, 8611 .renew_lease = nfs4_proc_sequence, 8612 }; 8613 #endif 8614 8615 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = { 8616 .get_locations = _nfs40_proc_get_locations, 8617 .fsid_present = _nfs40_proc_fsid_present, 8618 }; 8619 8620 #if defined(CONFIG_NFS_V4_1) 8621 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = { 8622 .get_locations = _nfs41_proc_get_locations, 8623 .fsid_present = _nfs41_proc_fsid_present, 8624 }; 8625 #endif /* CONFIG_NFS_V4_1 */ 8626 8627 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 8628 .minor_version = 0, 8629 .init_caps = NFS_CAP_READDIRPLUS 8630 | NFS_CAP_ATOMIC_OPEN 8631 | NFS_CAP_POSIX_LOCK, 8632 .init_client = nfs40_init_client, 8633 .shutdown_client = nfs40_shutdown_client, 8634 .match_stateid = nfs4_match_stateid, 8635 .find_root_sec = nfs4_find_root_sec, 8636 .free_lock_state = nfs4_release_lockowner, 8637 .alloc_seqid = nfs_alloc_seqid, 8638 .call_sync_ops = &nfs40_call_sync_ops, 8639 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 8640 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 8641 .state_renewal_ops = &nfs40_state_renewal_ops, 8642 .mig_recovery_ops = &nfs40_mig_recovery_ops, 8643 }; 8644 8645 #if defined(CONFIG_NFS_V4_1) 8646 static struct nfs_seqid * 8647 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) 8648 { 8649 return NULL; 8650 } 8651 8652 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 8653 .minor_version = 1, 8654 .init_caps = NFS_CAP_READDIRPLUS 8655 | NFS_CAP_ATOMIC_OPEN 8656 | NFS_CAP_POSIX_LOCK 8657 | NFS_CAP_STATEID_NFSV41 8658 | NFS_CAP_ATOMIC_OPEN_V1, 8659 .init_client = nfs41_init_client, 8660 .shutdown_client = nfs41_shutdown_client, 8661 .match_stateid = nfs41_match_stateid, 8662 .find_root_sec = nfs41_find_root_sec, 8663 .free_lock_state = nfs41_free_lock_state, 8664 .alloc_seqid = nfs_alloc_no_seqid, 8665 .call_sync_ops = &nfs41_call_sync_ops, 8666 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8667 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8668 .state_renewal_ops = &nfs41_state_renewal_ops, 8669 .mig_recovery_ops = &nfs41_mig_recovery_ops, 8670 }; 8671 #endif 8672 8673 #if defined(CONFIG_NFS_V4_2) 8674 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { 8675 .minor_version = 2, 8676 .init_caps = NFS_CAP_READDIRPLUS 8677 | NFS_CAP_ATOMIC_OPEN 8678 | NFS_CAP_POSIX_LOCK 8679 | NFS_CAP_STATEID_NFSV41 8680 | NFS_CAP_ATOMIC_OPEN_V1 8681 | NFS_CAP_ALLOCATE 8682 | NFS_CAP_DEALLOCATE 8683 | NFS_CAP_SEEK 8684 | NFS_CAP_LAYOUTSTATS, 8685 .init_client = nfs41_init_client, 8686 .shutdown_client = nfs41_shutdown_client, 8687 .match_stateid = nfs41_match_stateid, 8688 .find_root_sec = nfs41_find_root_sec, 8689 .free_lock_state = nfs41_free_lock_state, 8690 .call_sync_ops = &nfs41_call_sync_ops, 8691 .alloc_seqid = nfs_alloc_no_seqid, 8692 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 8693 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 8694 .state_renewal_ops = &nfs41_state_renewal_ops, 8695 .mig_recovery_ops = &nfs41_mig_recovery_ops, 8696 }; 8697 #endif 8698 8699 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 8700 [0] = &nfs_v4_0_minor_ops, 8701 #if defined(CONFIG_NFS_V4_1) 8702 [1] = &nfs_v4_1_minor_ops, 8703 #endif 8704 #if defined(CONFIG_NFS_V4_2) 8705 [2] = &nfs_v4_2_minor_ops, 8706 #endif 8707 }; 8708 8709 static const struct inode_operations nfs4_dir_inode_operations = { 8710 .create = nfs_create, 8711 .lookup = nfs_lookup, 8712 .atomic_open = 
nfs_atomic_open, 8713 .link = nfs_link, 8714 .unlink = nfs_unlink, 8715 .symlink = nfs_symlink, 8716 .mkdir = nfs_mkdir, 8717 .rmdir = nfs_rmdir, 8718 .mknod = nfs_mknod, 8719 .rename = nfs_rename, 8720 .permission = nfs_permission, 8721 .getattr = nfs_getattr, 8722 .setattr = nfs_setattr, 8723 .getxattr = generic_getxattr, 8724 .setxattr = generic_setxattr, 8725 .listxattr = generic_listxattr, 8726 .removexattr = generic_removexattr, 8727 }; 8728 8729 static const struct inode_operations nfs4_file_inode_operations = { 8730 .permission = nfs_permission, 8731 .getattr = nfs_getattr, 8732 .setattr = nfs_setattr, 8733 .getxattr = generic_getxattr, 8734 .setxattr = generic_setxattr, 8735 .listxattr = generic_listxattr, 8736 .removexattr = generic_removexattr, 8737 }; 8738 8739 const struct nfs_rpc_ops nfs_v4_clientops = { 8740 .version = 4, /* protocol version */ 8741 .dentry_ops = &nfs4_dentry_operations, 8742 .dir_inode_ops = &nfs4_dir_inode_operations, 8743 .file_inode_ops = &nfs4_file_inode_operations, 8744 .file_ops = &nfs4_file_operations, 8745 .getroot = nfs4_proc_get_root, 8746 .submount = nfs4_submount, 8747 .try_mount = nfs4_try_mount, 8748 .getattr = nfs4_proc_getattr, 8749 .setattr = nfs4_proc_setattr, 8750 .lookup = nfs4_proc_lookup, 8751 .access = nfs4_proc_access, 8752 .readlink = nfs4_proc_readlink, 8753 .create = nfs4_proc_create, 8754 .remove = nfs4_proc_remove, 8755 .unlink_setup = nfs4_proc_unlink_setup, 8756 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 8757 .unlink_done = nfs4_proc_unlink_done, 8758 .rename_setup = nfs4_proc_rename_setup, 8759 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 8760 .rename_done = nfs4_proc_rename_done, 8761 .link = nfs4_proc_link, 8762 .symlink = nfs4_proc_symlink, 8763 .mkdir = nfs4_proc_mkdir, 8764 .rmdir = nfs4_proc_remove, 8765 .readdir = nfs4_proc_readdir, 8766 .mknod = nfs4_proc_mknod, 8767 .statfs = nfs4_proc_statfs, 8768 .fsinfo = nfs4_proc_fsinfo, 8769 .pathconf = nfs4_proc_pathconf, 8770 .set_capabilities = nfs4_server_capabilities, 8771 .decode_dirent = nfs4_decode_dirent, 8772 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare, 8773 .read_setup = nfs4_proc_read_setup, 8774 .read_done = nfs4_read_done, 8775 .write_setup = nfs4_proc_write_setup, 8776 .write_done = nfs4_write_done, 8777 .commit_setup = nfs4_proc_commit_setup, 8778 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 8779 .commit_done = nfs4_commit_done, 8780 .lock = nfs4_proc_lock, 8781 .clear_acl_cache = nfs4_zap_acl_attr, 8782 .close_context = nfs4_close_context, 8783 .open_context = nfs4_atomic_open, 8784 .have_delegation = nfs4_have_delegation, 8785 .return_delegation = nfs4_inode_return_delegation, 8786 .alloc_client = nfs4_alloc_client, 8787 .init_client = nfs4_init_client, 8788 .free_client = nfs4_free_client, 8789 .create_server = nfs4_create_server, 8790 .clone_server = nfs_clone_server, 8791 }; 8792 8793 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 8794 .prefix = XATTR_NAME_NFSV4_ACL, 8795 .list = nfs4_xattr_list_nfs4_acl, 8796 .get = nfs4_xattr_get_nfs4_acl, 8797 .set = nfs4_xattr_set_nfs4_acl, 8798 }; 8799 8800 const struct xattr_handler *nfs4_xattr_handlers[] = { 8801 &nfs4_xattr_nfs4_acl_handler, 8802 #ifdef CONFIG_NFS_V4_SECURITY_LABEL 8803 &nfs4_xattr_nfs4_label_handler, 8804 #endif 8805 NULL 8806 }; 8807 8808 /* 8809 * Local variables: 8810 * c-basic-offset: 8 8811 * End: 8812 */ 8813