/*
 *  fs/nfs/nfs4proc.c
 *
 *  Client-side procedure declarations for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/nfs_idmap.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4session.h"
#include "fscache.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
#endif
/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_ACCESS:
		return -EACCES;
	case -NFS4ERR_FILE_OPEN:
		return -EBUSY;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[2] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[2] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[2] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID
};

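/*
 * nfs4_setup_readdir() prepares the READDIR arguments.  Because NFSv4
 * servers do not return "." and ".." entries, cookies 0 and 1 are handled
 * by synthesizing those entries locally below, cookie 2 requests a fresh
 * listing from the server, and larger cookies are passed through to the
 * server together with the verifier from the previous reply.
 */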
static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
		*p++ = htonl(8);			/* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);		/* bitmap */
	*p++ = htonl(8);				/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

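/*
 * Exponential backoff helper: sleep (killable, freezer-aware) for the
 * current timeout, clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX],
 * then double the timeout for the next retry.  Returns -ERESTARTSYS if a
 * fatal signal was received while sleeping.
 */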
static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	int res = 0;

	might_sleep();

	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	freezable_schedule_timeout_killable(*timeout);
	if (fatal_signal_pending(current))
		res = -ERESTARTSYS;
	*timeout <<= 1;
	return res;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->retry = 0;
	switch (errorcode) {
	case 0:
		return 0;
	case -NFS4ERR_OPENMODE:
		if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
			nfs4_inode_return_delegation(inode);
			exception->retry = 1;
			return 0;
		}
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
		if (inode != NULL && nfs4_have_delegation(inode, FMODE_READ)) {
			nfs_remove_bad_delegation(inode);
			exception->retry = 1;
			break;
		}
		if (state == NULL)
			break;
		ret = nfs4_schedule_stateid_recovery(server, state);
		if (ret < 0)
			break;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			ret = nfs4_schedule_stateid_recovery(server, state);
			if (ret < 0)
				break;
		}
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_STALE_CLIENTID:
		nfs4_schedule_lease_recovery(clp);
		goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR: %d Reset session\n", __func__,
			errorcode);
		nfs4_schedule_session_recovery(clp->cl_session, errorcode);
		goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
	case -NFS4ERR_FILE_OPEN:
		if (exception->timeout > HZ) {
			/* We have retried a decent amount, time to
			 * fail
			 */
			ret = -EBUSY;
			break;
		}
	case -NFS4ERR_GRACE:
	case -NFS4ERR_DELAY:
		ret = nfs4_delay(server->client, &exception->timeout);
		if (ret != 0)
			break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
	case -NFS4ERR_OLD_STATEID:
		exception->retry = 1;
		break;
	case -NFS4ERR_BADOWNER:
		/* The following works around a Linux server bug! */
	case -NFS4ERR_BADNAME:
		if (server->caps & NFS_CAP_UIDGID_NOMAP) {
			server->caps &= ~NFS_CAP_UIDGID_NOMAP;
			exception->retry = 1;
			printk(KERN_WARNING "NFS: v4 server %s "
					"does not accept raw "
					"uid/gids. "
					"Reenabling the idmapper.\n",
					server->nfs_client->cl_hostname);
		}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	ret = nfs4_wait_clnt_recover(clp);
	if (ret == 0)
		exception->retry = 1;
	return ret;
}


static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	do_renew_lease(server->nfs_client, timestamp);
}

#if defined(CONFIG_NFS_V4_1)

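/*
 * Release the session slot used by a completed SEQUENCE operation.  If the
 * last transmitted highest_used_slotid exceeded the server's
 * target_highest_slotid and no other request ends up reusing the slot,
 * schedule a notification so the server sees the lower value.
 */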
static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	bool send_new_highest_used_slotid = false;

	if (!res->sr_slot) {
		/* just wake up the next guy waiting since
		 * we may have not consumed a slot after all */
		dprintk("%s: No slot\n", __func__);
		return;
	}
	tbl = res->sr_slot->table;
	session = tbl->session;

	spin_lock(&tbl->slot_tbl_lock);
	/* Be nice to the server: try to ensure that the last transmitted
	 * value for highest_used_slotid <= target_highest_slotid
	 */
	if (tbl->highest_used_slotid > tbl->target_highest_slotid)
		send_new_highest_used_slotid = true;

	if (nfs41_wake_and_assign_slot(tbl, res->sr_slot)) {
		send_new_highest_used_slotid = false;
		goto out_unlock;
	}
	nfs4_free_slot(tbl, res->sr_slot);

	if (tbl->highest_used_slotid != NFS4_NO_SLOT)
		send_new_highest_used_slotid = false;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
	if (send_new_highest_used_slotid)
		nfs41_server_notify_highest_slotid_update(session->clp);
}

static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	struct nfs4_session *session;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	bool interrupted = false;
	int ret = 1;

	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	slot = res->sr_slot;
	session = slot->table->session;

	if (slot->interrupted) {
		slot->interrupted = 0;
		interrupted = true;
	}

	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++slot->seq_nr;
		clp = session->clp;
		do_renew_lease(clp, res->sr_timestamp);
		/* Check sequence flags */
		if (res->sr_status_flags != 0)
			nfs4_schedule_lease_recovery(clp);
		nfs41_update_target_slotid(slot->table, slot, res);
		break;
	case 1:
		/*
		 * sr_status remains 1 if an RPC level error occurred.
		 * The server may or may not have processed the sequence
		 * operation.
		 * Mark the slot as having hosted an interrupted RPC call.
		 */
		slot->interrupted = 1;
		goto out;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%u seq=%u: Operation in progress\n",
			__func__,
			slot->slot_nr,
			slot->seq_nr);
		goto out_retry;
	case -NFS4ERR_BADSLOT:
		/*
		 * The slot id we used was probably retired. Try again
		 * using a different slot id.
		 */
		goto retry_nowait;
	case -NFS4ERR_SEQ_MISORDERED:
		/*
		 * Was the last operation on this sequence interrupted?
		 * If so, retry after bumping the sequence number.
		 */
		if (interrupted) {
			++slot->seq_nr;
			goto retry_nowait;
		}
		/*
		 * Could this slot have been previously retired?
		 * If so, then the server may be expecting seq_nr = 1!
		 */
		if (slot->seq_nr != 1) {
			slot->seq_nr = 1;
			goto retry_nowait;
		}
		break;
	case -NFS4ERR_SEQ_FALSE_RETRY:
		++slot->seq_nr;
		goto retry_nowait;
	default:
		/* Just update the slot sequence no. */
		++slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
	return ret;
retry_nowait:
	if (rpc_restart_call_prepare(task)) {
		task->tk_status = 0;
		ret = 0;
	}
	goto out;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_slot == NULL)
		return 1;
	return nfs41_sequence_done(task, res);
}

static void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
	args->sa_slot = NULL;
	args->sa_cache_this = 0;
	args->sa_privileged = 0;
	if (cache_reply)
		args->sa_cache_this = 1;
	res->sr_slot = NULL;
}

static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
{
	args->sa_privileged = 1;
}

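/*
 * Allocate a session slot for this request and attach it to the sequence
 * arguments and results.  Non-privileged tasks are put to sleep on the
 * slot table waitqueue while the table is draining; any task sleeps when
 * no free slot is available, with privileged (state recovery) tasks queued
 * at RPC_PRIORITY_PRIVILEGED so they are woken first.
 */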
int nfs41_setup_sequence(struct nfs4_session *session,
				struct nfs4_sequence_args *args,
				struct nfs4_sequence_res *res,
				struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		goto out_success;

	tbl = &session->fc_slot_table;

	task->tk_timeout = 0;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
	    !args->sa_privileged) {
		/* The state manager will wait until the slot table is empty */
		dprintk("%s session is draining\n", __func__);
		goto out_sleep;
	}

	slot = nfs4_alloc_slot(tbl);
	if (IS_ERR(slot)) {
		/* If out of memory, try again in 1/4 second */
		if (slot == ERR_PTR(-ENOMEM))
			task->tk_timeout = HZ >> 2;
		dprintk("<-- %s: no free slots\n", __func__);
		goto out_sleep;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	args->sa_slot = slot;

	dprintk("<-- %s slotid=%d seqid=%d\n", __func__,
			slot->slot_nr, slot->seq_nr);

	res->sr_slot = slot;
	res->sr_timestamp = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
out_success:
	rpc_call_start(task);
	return 0;
out_sleep:
	/* Privileged tasks are queued with top priority */
	if (args->sa_privileged)
		rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
				NULL, RPC_PRIORITY_PRIVILEGED);
	else
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
	spin_unlock(&tbl->slot_tbl_lock);
	return -EAGAIN;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);

int nfs4_setup_sequence(const struct nfs_server *server,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(server);
	int ret = 0;

	if (session == NULL) {
		rpc_call_start(task);
		goto out;
	}

	dprintk("--> %s clp %p session %p sr_slot %d\n",
		__func__, session->clp, session, res->sr_slot ?
			res->sr_slot->slot_nr : -1);

	ret = nfs41_setup_sequence(session, args, res, task);
out:
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}

struct nfs41_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;
	struct nfs4_session *session = nfs4_get_session(data->seq_server);

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res)
{
	int ret;
	struct rpc_task *task;
	struct nfs41_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &nfs41_call_sync_ops,
		.callback_data = &data
	};

	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else {
		ret = task->tk_status;
		rpc_put_task(task);
	}
	return ret;
}

#else
static
void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
}

static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
{
}


static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	return 1;
}
#endif /* CONFIG_NFS_V4_1 */

static
int _nfs4_call_sync(struct rpc_clnt *clnt,
		    struct nfs_server *server,
		    struct rpc_message *msg,
		    struct nfs4_sequence_args *args,
		    struct nfs4_sequence_res *res)
{
	return rpc_call_sync(clnt, msg, 0);
}

static
int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	nfs41_init_sequence(args, res, cache_reply);
	return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
						args, res);
}

static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	nfs_fscache_invalidate(dir);
	spin_unlock(&dir->i_lock);
}

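/*
 * struct nfs4_opendata carries everything needed to issue an OPEN (and, if
 * required, OPEN_CONFIRM) compound: the encoded arguments and results, the
 * parent directory and dentry, and the state owner.  It is reference
 * counted so that an interrupted ("cancelled") open can still be cleaned
 * up by the rpc_release callback once the RPC completes.
 */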
struct nfs4_opendata {
	struct kref kref;
	struct nfs_openargs o_arg;
	struct nfs_openres o_res;
	struct nfs_open_confirmargs c_arg;
	struct nfs_open_confirmres c_res;
	struct nfs4_string owner_name;
	struct nfs4_string group_name;
	struct nfs_fattr f_attr;
	struct dentry *dir;
	struct dentry *dentry;
	struct nfs4_state_owner *owner;
	struct nfs4_state *state;
	struct iattr attrs;
	unsigned long timestamp;
	unsigned int rpc_done : 1;
	unsigned int is_recover : 1;
	int rpc_status;
	int cancelled;
};

static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
		int err, struct nfs4_exception *exception)
{
	if (err != -EINVAL)
		return false;
	if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
		return false;
	server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
	exception->retry = 1;
	return true;
}

static enum open_claim_type4
nfs4_map_atomic_open_claim(struct nfs_server *server,
		enum open_claim_type4 claim)
{
	if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
		return claim;
	switch (claim) {
	default:
		return claim;
	case NFS4_OPEN_CLAIM_FH:
		return NFS4_OPEN_CLAIM_NULL;
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_CUR;
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		return NFS4_OPEN_CLAIM_DELEGATE_PREV;
	}
}

static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		enum open_claim_type4 claim,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = parent->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;
	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
	if (p->o_arg.seqid == NULL)
		goto err_free;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
	 * will return permission denied for all bits until close */
	if (!(flags & O_EXCL)) {
		/* ask server to check for all possible rights as results
		 * are cached */
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = server->attr_bitmask;
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
	switch (p->o_arg.claim) {
	case NFS4_OPEN_CLAIM_NULL:
	case NFS4_OPEN_CLAIM_DELEGATE_CUR:
	case NFS4_OPEN_CLAIM_DELEGATE_PREV:
		p->o_arg.fh = NFS_FH(dir);
		break;
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_FH:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		p->o_arg.fh = NFS_FH(dentry->d_inode);
	}
	if (attrs != NULL && attrs->ia_valid != 0) {
		__be32 verf[2];

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));

		verf[0] = jiffies;
		verf[1] = current->pid;
		memcpy(p->o_arg.u.verifier.data, verf,
				sizeof(p->o_arg.u.verifier.data));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;
err_free:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);
	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	int ret;

	ret = rpc_wait_for_completion_task(task);
	return ret;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
	case FMODE_READ:
		ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
			&& state->n_rdonly != 0;
		break;
	case FMODE_WRITE:
		ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
			&& state->n_wronly != 0;
		break;
	case FMODE_READ|FMODE_WRITE:
		ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
			&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
		return 0;
	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
		return 0;
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
	case FMODE_WRITE:
		state->n_wronly++;
		break;
	case FMODE_READ:
		state->n_rdonly++;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	set_bit(NFS_OPEN_STATE, &state->flags);
	switch (fmode) {
	case FMODE_READ:
		set_bit(NFS_O_RDONLY_STATE, &state->flags);
		break;
	case FMODE_WRITE:
		set_bit(NFS_O_WRONLY_STATE, &state->flags);
		break;
	case FMODE_READ|FMODE_WRITE:
		set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
}

static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
}

static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		nfs4_stateid_copy(&state->stateid, deleg_stateid);
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	spin_lock(&state->owner->so_lock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
}

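/*
 * Update the open state from an OPEN result.  If a delegation is currently
 * held that covers the requested open mode (and matches the delegation
 * stateid supplied by the caller, when any), record it alongside the open
 * stateid; otherwise fall back to recording just the open stateid.
 * Returns 1 if the state was updated, 0 otherwise.
 */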
static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	if (nfsi->delegation != deleg_cur ||
	    test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	rcu_read_unlock();

	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}

	return ret;
}


static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	nfs4_inode_return_delegation(inode);
}

static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags;
	fmode_t fmode = opendata->o_arg.fmode;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		if (can_open_cached(state, fmode, open_mode)) {
			spin_lock(&state->owner->so_lock);
			if (can_open_cached(state, fmode, open_mode)) {
				update_open_stateflags(state, fmode);
				spin_unlock(&state->owner->so_lock);
				goto out_return_state;
			}
			spin_unlock(&state->owner->so_lock);
		}
		rcu_read_lock();
		delegation = rcu_dereference(nfsi->delegation);
		if (!can_open_delegated(delegation, fmode)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		rcu_read_unlock();
		nfs_release_seqid(opendata->o_arg.seqid);
		if (!opendata->is_recover) {
			ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
			if (ret != 0)
				goto out;
		}
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	atomic_inc(&state->count);
	return state;
}

static void
nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
{
	struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
	struct nfs_delegation *delegation;
	int delegation_flags = 0;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation)
		delegation_flags = delegation->flags;
	rcu_read_unlock();
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
		pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
				   "returning a delegation for "
				   "OPEN(CLAIM_DELEGATE_CUR)\n",
				   clp->cl_hostname);
	} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
		nfs_inode_set_delegation(state->inode,
					 data->owner->so_cred,
					 &data->o_res);
	else
		nfs_inode_reclaim_delegation(state->inode,
					     data->owner->so_cred,
					     &data->o_res);
}

/*
 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
 * and update the nfs4_state.
 */
static struct nfs4_state *
_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode = data->state->inode;
	struct nfs4_state *state = data->state;
	int ret;

	if (!data->rpc_done) {
		ret = data->rpc_status;
		goto err;
	}

	ret = -ESTALE;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR_TYPE) ||
	    !(data->f_attr.valid & NFS_ATTR_FATTR_FILEID) ||
	    !(data->f_attr.valid & NFS_ATTR_FATTR_CHANGE))
		goto err;

	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err;

	ret = nfs_refresh_inode(inode, &data->f_attr);
	if (ret)
		goto err;

	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
	update_open_stateid(state, &data->o_res.stateid, NULL,
			    data->o_arg.fmode);

	return state;
err:
	return ERR_PTR(ret);

}

static struct nfs4_state *
_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode;
	struct nfs4_state *state = NULL;
	int ret;

	if (!data->rpc_done) {
		state = nfs4_try_open_cached(data);
		goto out;
	}

	ret = -EAGAIN;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
		goto err;
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
	ret = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto err;
	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err_put_inode;
	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
	update_open_stateid(state, &data->o_res.stateid, NULL,
			    data->o_arg.fmode);
	iput(inode);
out:
	nfs_release_seqid(data->o_arg.seqid);
	return state;
err_put_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static struct nfs4_state *
nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
		return _nfs4_opendata_reclaim_to_nfs4_state(data);
	return _nfs4_opendata_to_nfs4_state(data);
}

static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_open_context *ctx;

	spin_lock(&state->inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&state->inode->i_lock);
		return ctx;
	}
	spin_unlock(&state->inode->i_lock);
	return ERR_PTR(-ENOENT);
}

static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
		struct nfs4_state *state, enum open_claim_type4 claim)
{
	struct nfs4_opendata *opendata;

	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
			NULL, claim, GFP_NOFS);
	if (opendata == NULL)
		return ERR_PTR(-ENOMEM);
	opendata->state = state;
	atomic_inc(&state->count);
	return opendata;
}

static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
{
	struct nfs4_state *newstate;
	int ret;

	opendata->o_arg.open_flags = 0;
	opendata->o_arg.fmode = fmode;
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	if (ret != 0)
		return ret;
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	nfs4_close_state(newstate, fmode);
	*res = newstate;
	return 0;
}

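/*
 * Re-establish open state after it has been lost on the server: re-send an
 * OPEN for each open mode (read/write, write-only, read-only) that still
 * has users, then resynchronise state->stateid with the recovered open
 * stateid if no delegation is held.
 */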
static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	struct nfs4_state *newstate;
	int ret;

	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	clear_bit(NFS_OPEN_STATE, &state->flags);
	smp_rmb();
	if (state->n_rdwr != 0) {
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_wronly != 0) {
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_rdonly != 0) {
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
	}
	return 0;
}

/*
 * OPEN_RECLAIM:
 * 	reclaim state on the server after a reboot.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_PREVIOUS);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}

static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_do_open_reclaim(ctx, state);
		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
			continue;
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return -EAGAIN;
	ret = nfs4_do_open_reclaim(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}

static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
{
	switch (err) {
	default:
		printk(KERN_ERR "NFS: %s: unhandled error "
			"%d.\n", __func__, err);
	case 0:
	case -ENOENT:
	case -ESTALE:
		break;
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_DEADSESSION:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
		nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
		return -EAGAIN;
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_STALE_STATEID:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	case -NFS4ERR_EXPIRED:
		/* Don't recall a delegation if it was lost */
		nfs4_schedule_lease_recovery(server->nfs_client);
		return -EAGAIN;
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_OPENMODE:
		nfs_inode_find_state_and_recover(state->inode,
				stateid);
		nfs4_schedule_stateid_recovery(server, state);
		return 0;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		set_bit(NFS_DELEGATED_STATE, &state->flags);
		ssleep(1);
		return -EAGAIN;
	case -ENOMEM:
	case -NFS4ERR_DENIED:
		/* kill_proc(fl->fl_pid, SIGLOST, 1); */
		return 0;
	}
	return err;
}

int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_opendata *opendata;
	int err;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_DELEG_CUR_FH);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
	err = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return nfs4_handle_delegation_recall_error(server, state, stateid, err);
}

static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
		nfs_confirm_seqid(&data->owner->so_seqid, 0);
		renew_lease(data->o_res.server, data->timestamp);
		data->rpc_done = 1;
	}
}

static void nfs4_open_confirm_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
		goto out_free;
	/* In case of error, no cleanup! */
	if (!data->rpc_done)
		goto out_free;
	state = nfs4_opendata_to_nfs4_state(data);
	if (!IS_ERR(state))
		nfs4_close_state(state, data->o_arg.fmode);
out_free:
	nfs4_opendata_put(data);
}

static const struct rpc_call_ops nfs4_open_confirm_ops = {
	.rpc_call_done = nfs4_open_confirm_done,
	.rpc_release = nfs4_open_confirm_release,
};

/*
 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
 */
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
		.rpc_argp = &data->c_arg,
		.rpc_resp = &data->c_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_confirm_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->timestamp = jiffies;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		data->cancelled = 1;
		smp_wmb();
	} else
		status = data->rpc_status;
	rpc_put_task(task);
	return status;
}

static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state_owner *sp = data->owner;
	struct nfs_client *clp = sp->so_server->nfs_client;

	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
		goto out_wait;
	/*
	 * Check if we still need to send an OPEN call, or if we can use
	 * a delegation instead.
	 */
	if (data->state != NULL) {
		struct nfs_delegation *delegation;

		if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
			goto out_no_action;
		rcu_read_lock();
		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
		if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
		    data->o_arg.claim != NFS4_OPEN_CLAIM_DELEG_CUR_FH &&
		    can_open_delegated(delegation, data->o_arg.fmode))
			goto unlock_no_action;
		rcu_read_unlock();
	}
	/* Update client id. */
	data->o_arg.clientid = clp->cl_clientid;
	switch (data->o_arg.claim) {
	case NFS4_OPEN_CLAIM_PREVIOUS:
	case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
	case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
		data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
	case NFS4_OPEN_CLAIM_FH:
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
	}
	data->timestamp = jiffies;
	if (nfs4_setup_sequence(data->o_arg.server,
				&data->o_arg.seq_args,
				&data->o_res.seq_res,
				task) != 0)
		nfs_release_seqid(data->o_arg.seqid);

	/* Set the create mode (note dependency on the session type) */
	data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
	if (data->o_arg.open_flags & O_EXCL) {
		data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
		if (nfs4_has_persistent_session(clp))
			data->o_arg.createmode = NFS4_CREATE_GUARDED;
		else if (clp->cl_mvops->minor_version > 0)
			data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
	}
	return;
unlock_no_action:
	rcu_read_unlock();
out_no_action:
	task->tk_action = NULL;
out_wait:
	nfs4_sequence_done(task, &data->o_res.seq_res);
}

static void nfs4_open_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;

	if (!nfs4_sequence_done(task, &data->o_res.seq_res))
		return;

	if (task->tk_status == 0) {
		if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
			switch (data->o_res.f_attr->mode & S_IFMT) {
			case S_IFREG:
				break;
			case S_IFLNK:
				data->rpc_status = -ELOOP;
				break;
			case S_IFDIR:
				data->rpc_status = -EISDIR;
				break;
			default:
				data->rpc_status = -ENOTDIR;
			}
		}
		renew_lease(data->o_res.server, data->timestamp);
		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
			nfs_confirm_seqid(&data->owner->so_seqid, 0);
	}
	data->rpc_done = 1;
}

static void nfs4_open_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
		goto out_free;
	/* In case of error, no cleanup! */
	if (data->rpc_status != 0 || !data->rpc_done)
		goto out_free;
	/* In case we need an open_confirm, no cleanup! */
	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
		goto out_free;
	state = nfs4_opendata_to_nfs4_state(data);
	if (!IS_ERR(state))
		nfs4_close_state(state, data->o_arg.fmode);
out_free:
	nfs4_opendata_put(data);
}

static const struct rpc_call_ops nfs4_open_ops = {
	.rpc_call_prepare = nfs4_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,
};

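/*
 * Issue the OPEN compound as an asynchronous RPC and wait for it to
 * complete.  If the wait is interrupted, the request is marked cancelled
 * so that nfs4_open_release()/nfs4_open_confirm_release() can close any
 * state that the server may nevertheless have established.
 */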
static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_openargs *o_arg = &data->o_arg;
	struct nfs_openres *o_res = &data->o_res;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
		.rpc_argp = o_arg,
		.rpc_resp = o_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->cancelled = 0;
	data->is_recover = 0;
	if (isrecover) {
		nfs4_set_sequence_privileged(&o_arg->seq_args);
		data->is_recover = 1;
	}
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		data->cancelled = 1;
		smp_wmb();
	} else
		status = data->rpc_status;
	rpc_put_task(task);

	return status;
}

static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_openres *o_res = &data->o_res;
	int status;

	status = nfs4_run_open_task(data, 1);
	if (status != 0 || !data->rpc_done)
		return status;

	nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);

	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
		status = _nfs4_proc_open_confirm(data);
		if (status != 0)
			return status;
	}

	return status;
}

static int nfs4_opendata_access(struct rpc_cred *cred,
				struct nfs4_opendata *opendata,
				struct nfs4_state *state, fmode_t fmode,
				int openflags)
{
	struct nfs_access_entry cache;
	u32 mask;

	/* access call failed or for some reason the server doesn't
	 * support any access modes -- defer access call until later */
	if (opendata->o_res.access_supported == 0)
		return 0;

	mask = 0;
	/* don't check MAY_WRITE - a newly created file may not have
	 * write mode bits, but POSIX allows the creating process to write.
	 * use openflags to check for exec, because fmode won't
	 * always have FMODE_EXEC set when file open for exec. */
	if (openflags & __FMODE_EXEC) {
		/* ONLY check for exec rights */
		mask = MAY_EXEC;
	} else if (fmode & FMODE_READ)
		mask = MAY_READ;

	cache.cred = cred;
	cache.jiffies = jiffies;
	nfs_access_set_mask(&cache, opendata->o_res.access_result);
	nfs_access_add_cache(state->inode, &cache);

	if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
		return 0;

	/* even though OPEN succeeded, access is denied. Close the file */
	nfs4_close_state(state, fmode);
	return -EACCES;
}

/*
 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
 */
static int _nfs4_proc_open(struct nfs4_opendata *data)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_openargs *o_arg = &data->o_arg;
	struct nfs_openres *o_res = &data->o_res;
	int status;

	status = nfs4_run_open_task(data, 0);
	if (!data->rpc_done)
		return status;
	if (status != 0) {
		if (status == -NFS4ERR_BADNAME &&
		    !(o_arg->open_flags & O_CREAT))
			return -ENOENT;
		return status;
	}

	nfs_fattr_map_and_free_names(server, &data->f_attr);

	if (o_arg->open_flags & O_CREAT)
		update_changeattr(dir, &o_res->cinfo);
	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
		server->caps &= ~NFS_CAP_POSIX_LOCK;
	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
		status = _nfs4_proc_open_confirm(data);
		if (status != 0)
			return status;
	}
	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
		_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
	return 0;
}

static int nfs4_recover_expired_lease(struct nfs_server *server)
{
	return nfs4_client_recover_expired_lease(server->nfs_client);
}

/*
 * OPEN_EXPIRED:
 * 	reclaim state on the server after a network partition.
 * 	Assumes caller holds the appropriate lock
 */
static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs4_opendata *opendata;
	int ret;

	opendata = nfs4_open_recoverdata_alloc(ctx, state,
			NFS4_OPEN_CLAIM_FH);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	ret = nfs4_open_recover(opendata, state);
	if (ret == -ESTALE)
		d_drop(ctx->dentry);
	nfs4_opendata_put(opendata);
	return ret;
}

static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs4_open_expired(ctx, state);
		if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
			continue;
		switch (err) {
		default:
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			nfs4_handle_exception(server, err, &exception);
			err = 0;
		}
	} while (exception.retry);
out:
	return err;
}

static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return -EAGAIN;
	ret = nfs4_do_open_expired(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}

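/*
 * NFSv4.1 variant of the OPEN_EXPIRED recovery path: use TEST_STATEID to
 * check whether the delegation and open stateids are still valid on the
 * server, free any that are reported invalid (unless the server simply no
 * longer recognises them), and fall back to a full re-open only when the
 * open stateid needs to be re-established.
 */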
#if defined(CONFIG_NFS_V4_1)
static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	nfs4_stateid *stateid = &state->stateid;
	int status;

	/* If a state reset has been done, test_stateid is unneeded */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		return;

	status = nfs41_test_stateid(server, stateid);
	if (status != NFS_OK) {
		/* Free the stateid unless the server explicitly
		 * informs us the stateid is unrecognized. */
		if (status != -NFS4ERR_BAD_STATEID)
			nfs41_free_stateid(server, stateid);
		nfs_remove_bad_delegation(state->inode);

		write_seqlock(&state->seqlock);
		nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
		clear_bit(NFS_DELEGATED_STATE, &state->flags);
	}
}

/**
 * nfs41_check_open_stateid - possibly free an open stateid
 *
 * @state: NFSv4 state for an inode
 *
 * Returns NFS_OK if recovery for this stateid is now finished.
 * Otherwise a negative NFS4ERR value is returned.
 */
static int nfs41_check_open_stateid(struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	nfs4_stateid *stateid = &state->open_stateid;
	int status;

	/* If a state reset has been done, test_stateid is unneeded */
	if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
	    (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
	    (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
		return -NFS4ERR_BAD_STATEID;

	status = nfs41_test_stateid(server, stateid);
	if (status != NFS_OK) {
		/* Free the stateid unless the server explicitly
		 * informs us the stateid is unrecognized. */
		if (status != -NFS4ERR_BAD_STATEID)
			nfs41_free_stateid(server, stateid);

		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
		clear_bit(NFS_OPEN_STATE, &state->flags);
	}
	return status;
}

static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	int status;

	nfs41_clear_delegation_stateid(state);
	status = nfs41_check_open_stateid(state);
	if (status != NFS_OK)
		status = nfs4_open_expired(sp, state);
	return status;
}
#endif

1929 * Make sure we clobber those fields in the later setattr call 1930 */ 1931 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) 1932 { 1933 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 1934 !(sattr->ia_valid & ATTR_ATIME_SET)) 1935 sattr->ia_valid |= ATTR_ATIME; 1936 1937 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 1938 !(sattr->ia_valid & ATTR_MTIME_SET)) 1939 sattr->ia_valid |= ATTR_MTIME; 1940 } 1941 1942 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 1943 fmode_t fmode, 1944 int flags, 1945 struct nfs4_state **res) 1946 { 1947 struct nfs4_state_owner *sp = opendata->owner; 1948 struct nfs_server *server = sp->so_server; 1949 struct nfs4_state *state; 1950 unsigned int seq; 1951 int ret; 1952 1953 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 1954 1955 ret = _nfs4_proc_open(opendata); 1956 if (ret != 0) 1957 goto out; 1958 1959 state = nfs4_opendata_to_nfs4_state(opendata); 1960 ret = PTR_ERR(state); 1961 if (IS_ERR(state)) 1962 goto out; 1963 if (server->caps & NFS_CAP_POSIX_LOCK) 1964 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 1965 1966 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags); 1967 if (ret != 0) 1968 goto out; 1969 1970 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) 1971 nfs4_schedule_stateid_recovery(server, state); 1972 *res = state; 1973 out: 1974 return ret; 1975 } 1976 1977 /* 1978 * Returns a referenced nfs4_state 1979 */ 1980 static int _nfs4_do_open(struct inode *dir, 1981 struct dentry *dentry, 1982 fmode_t fmode, 1983 int flags, 1984 struct iattr *sattr, 1985 struct rpc_cred *cred, 1986 struct nfs4_state **res, 1987 struct nfs4_threshold **ctx_th) 1988 { 1989 struct nfs4_state_owner *sp; 1990 struct nfs4_state *state = NULL; 1991 struct nfs_server *server = NFS_SERVER(dir); 1992 struct nfs4_opendata *opendata; 1993 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL; 1994 int status; 1995 1996 /* Protect against reboot recovery conflicts */ 1997 status = -ENOMEM; 1998 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 1999 if (sp == NULL) { 2000 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 2001 goto out_err; 2002 } 2003 status = nfs4_recover_expired_lease(server); 2004 if (status != 0) 2005 goto err_put_state_owner; 2006 if (dentry->d_inode != NULL) 2007 nfs4_return_incompatible_delegation(dentry->d_inode, fmode); 2008 status = -ENOMEM; 2009 if (dentry->d_inode) 2010 claim = NFS4_OPEN_CLAIM_FH; 2011 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, 2012 claim, GFP_KERNEL); 2013 if (opendata == NULL) 2014 goto err_put_state_owner; 2015 2016 if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 2017 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 2018 if (!opendata->f_attr.mdsthreshold) 2019 goto err_opendata_put; 2020 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 2021 } 2022 if (dentry->d_inode != NULL) 2023 opendata->state = nfs4_get_open_state(dentry->d_inode, sp); 2024 2025 status = _nfs4_open_and_get_state(opendata, fmode, flags, &state); 2026 if (status != 0) 2027 goto err_opendata_put; 2028 2029 if ((opendata->o_arg.open_flags & O_EXCL) && 2030 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { 2031 nfs4_exclusive_attrset(opendata, sattr); 2032 2033 nfs_fattr_init(opendata->o_res.f_attr); 2034 status = nfs4_do_setattr(state->inode, cred, 2035 opendata->o_res.f_attr, sattr, 2036 state); 2037 if (status == 0) 2038 nfs_setattr_update_inode(state->inode, 
sattr); 2039 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr); 2040 } 2041 2042 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) 2043 *ctx_th = opendata->f_attr.mdsthreshold; 2044 else 2045 kfree(opendata->f_attr.mdsthreshold); 2046 opendata->f_attr.mdsthreshold = NULL; 2047 2048 nfs4_opendata_put(opendata); 2049 nfs4_put_state_owner(sp); 2050 *res = state; 2051 return 0; 2052 err_opendata_put: 2053 kfree(opendata->f_attr.mdsthreshold); 2054 nfs4_opendata_put(opendata); 2055 err_put_state_owner: 2056 nfs4_put_state_owner(sp); 2057 out_err: 2058 *res = NULL; 2059 return status; 2060 } 2061 2062 2063 static struct nfs4_state *nfs4_do_open(struct inode *dir, 2064 struct dentry *dentry, 2065 fmode_t fmode, 2066 int flags, 2067 struct iattr *sattr, 2068 struct rpc_cred *cred, 2069 struct nfs4_threshold **ctx_th) 2070 { 2071 struct nfs_server *server = NFS_SERVER(dir); 2072 struct nfs4_exception exception = { }; 2073 struct nfs4_state *res; 2074 int status; 2075 2076 fmode &= FMODE_READ|FMODE_WRITE|FMODE_EXEC; 2077 do { 2078 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, 2079 &res, ctx_th); 2080 if (status == 0) 2081 break; 2082 /* NOTE: BAD_SEQID means the server and client disagree about the 2083 * book-keeping w.r.t. state-changing operations 2084 * (OPEN/CLOSE/LOCK/LOCKU...) 2085 * It is actually a sign of a bug on the client or on the server. 2086 * 2087 * If we receive a BAD_SEQID error in the particular case of 2088 * doing an OPEN, we assume that nfs_increment_open_seqid() will 2089 * have unhashed the old state_owner for us, and that we can 2090 * therefore safely retry using a new one. We should still warn 2091 * the user though... 2092 */ 2093 if (status == -NFS4ERR_BAD_SEQID) { 2094 pr_warn_ratelimited("NFS: v4 server %s " 2095 " returned a bad sequence-id error!\n", 2096 NFS_SERVER(dir)->nfs_client->cl_hostname); 2097 exception.retry = 1; 2098 continue; 2099 } 2100 /* 2101 * BAD_STATEID on OPEN means that the server cancelled our 2102 * state before it received the OPEN_CONFIRM. 2103 * Recover by retrying the request as per the discussion 2104 * on Page 181 of RFC3530. 2105 */ 2106 if (status == -NFS4ERR_BAD_STATEID) { 2107 exception.retry = 1; 2108 continue; 2109 } 2110 if (status == -EAGAIN) { 2111 /* We must have found a delegation */ 2112 exception.retry = 1; 2113 continue; 2114 } 2115 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception)) 2116 continue; 2117 res = ERR_PTR(nfs4_handle_exception(server, 2118 status, &exception)); 2119 } while (exception.retry); 2120 return res; 2121 } 2122 2123 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2124 struct nfs_fattr *fattr, struct iattr *sattr, 2125 struct nfs4_state *state) 2126 { 2127 struct nfs_server *server = NFS_SERVER(inode); 2128 struct nfs_setattrargs arg = { 2129 .fh = NFS_FH(inode), 2130 .iap = sattr, 2131 .server = server, 2132 .bitmask = server->attr_bitmask, 2133 }; 2134 struct nfs_setattrres res = { 2135 .fattr = fattr, 2136 .server = server, 2137 }; 2138 struct rpc_message msg = { 2139 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2140 .rpc_argp = &arg, 2141 .rpc_resp = &res, 2142 .rpc_cred = cred, 2143 }; 2144 unsigned long timestamp = jiffies; 2145 fmode_t fmode; 2146 bool truncate; 2147 int status; 2148 2149 nfs_fattr_init(fattr); 2150 2151 /* Servers should only apply open mode checks for file size changes */ 2152 truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false; 2153 fmode = truncate ? 
FMODE_WRITE : FMODE_READ; 2154 2155 if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) { 2156 /* Use that stateid */ 2157 } else if (truncate && state != NULL && nfs4_valid_open_stateid(state)) { 2158 struct nfs_lockowner lockowner = { 2159 .l_owner = current->files, 2160 .l_pid = current->tgid, 2161 }; 2162 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2163 &lockowner); 2164 } else 2165 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2166 2167 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2168 if (status == 0 && state != NULL) 2169 renew_lease(server, timestamp); 2170 return status; 2171 } 2172 2173 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2174 struct nfs_fattr *fattr, struct iattr *sattr, 2175 struct nfs4_state *state) 2176 { 2177 struct nfs_server *server = NFS_SERVER(inode); 2178 struct nfs4_exception exception = { 2179 .state = state, 2180 .inode = inode, 2181 }; 2182 int err; 2183 do { 2184 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state); 2185 switch (err) { 2186 case -NFS4ERR_OPENMODE: 2187 if (!(sattr->ia_valid & ATTR_SIZE)) { 2188 pr_warn_once("NFSv4: server %s is incorrectly " 2189 "applying open mode checks to " 2190 "a SETATTR that is not " 2191 "changing file size.\n", 2192 server->nfs_client->cl_hostname); 2193 } 2194 if (state && !(state->state & FMODE_WRITE)) { 2195 err = -EBADF; 2196 if (sattr->ia_valid & ATTR_OPEN) 2197 err = -EACCES; 2198 goto out; 2199 } 2200 } 2201 err = nfs4_handle_exception(server, err, &exception); 2202 } while (exception.retry); 2203 out: 2204 return err; 2205 } 2206 2207 struct nfs4_closedata { 2208 struct inode *inode; 2209 struct nfs4_state *state; 2210 struct nfs_closeargs arg; 2211 struct nfs_closeres res; 2212 struct nfs_fattr fattr; 2213 unsigned long timestamp; 2214 bool roc; 2215 u32 roc_barrier; 2216 }; 2217 2218 static void nfs4_free_closedata(void *data) 2219 { 2220 struct nfs4_closedata *calldata = data; 2221 struct nfs4_state_owner *sp = calldata->state->owner; 2222 struct super_block *sb = calldata->state->inode->i_sb; 2223 2224 if (calldata->roc) 2225 pnfs_roc_release(calldata->state->inode); 2226 nfs4_put_open_state(calldata->state); 2227 nfs_free_seqid(calldata->arg.seqid); 2228 nfs4_put_state_owner(sp); 2229 nfs_sb_deactive(sb); 2230 kfree(calldata); 2231 } 2232 2233 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state, 2234 fmode_t fmode) 2235 { 2236 spin_lock(&state->owner->so_lock); 2237 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2238 switch (fmode & (FMODE_READ|FMODE_WRITE)) { 2239 case FMODE_WRITE: 2240 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2241 break; 2242 case FMODE_READ: 2243 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2244 break; 2245 case 0: 2246 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2247 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2248 clear_bit(NFS_OPEN_STATE, &state->flags); 2249 } 2250 spin_unlock(&state->owner->so_lock); 2251 } 2252 2253 static void nfs4_close_done(struct rpc_task *task, void *data) 2254 { 2255 struct nfs4_closedata *calldata = data; 2256 struct nfs4_state *state = calldata->state; 2257 struct nfs_server *server = NFS_SERVER(calldata->inode); 2258 2259 dprintk("%s: begin!\n", __func__); 2260 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2261 return; 2262 /* hmm. we are done with the inode, and in the process of freeing 2263 * the state_owner. 
we keep this around to process errors 2264 */ 2265 switch (task->tk_status) { 2266 case 0: 2267 if (calldata->roc) 2268 pnfs_roc_set_barrier(state->inode, 2269 calldata->roc_barrier); 2270 nfs_set_open_stateid(state, &calldata->res.stateid, 0); 2271 renew_lease(server, calldata->timestamp); 2272 nfs4_close_clear_stateid_flags(state, 2273 calldata->arg.fmode); 2274 break; 2275 case -NFS4ERR_STALE_STATEID: 2276 case -NFS4ERR_OLD_STATEID: 2277 case -NFS4ERR_BAD_STATEID: 2278 case -NFS4ERR_EXPIRED: 2279 if (calldata->arg.fmode == 0) 2280 break; 2281 default: 2282 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) 2283 rpc_restart_call_prepare(task); 2284 } 2285 nfs_release_seqid(calldata->arg.seqid); 2286 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2287 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2288 } 2289 2290 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2291 { 2292 struct nfs4_closedata *calldata = data; 2293 struct nfs4_state *state = calldata->state; 2294 struct inode *inode = calldata->inode; 2295 int call_close = 0; 2296 2297 dprintk("%s: begin!\n", __func__); 2298 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2299 goto out_wait; 2300 2301 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2302 calldata->arg.fmode = FMODE_READ|FMODE_WRITE; 2303 spin_lock(&state->owner->so_lock); 2304 /* Calculate the change in open mode */ 2305 if (state->n_rdwr == 0) { 2306 if (state->n_rdonly == 0) { 2307 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); 2308 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2309 calldata->arg.fmode &= ~FMODE_READ; 2310 } 2311 if (state->n_wronly == 0) { 2312 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); 2313 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2314 calldata->arg.fmode &= ~FMODE_WRITE; 2315 } 2316 } 2317 if (!nfs4_valid_open_stateid(state)) 2318 call_close = 0; 2319 spin_unlock(&state->owner->so_lock); 2320 2321 if (!call_close) { 2322 /* Note: exit _without_ calling nfs4_close_done */ 2323 goto out_no_action; 2324 } 2325 2326 if (calldata->arg.fmode == 0) { 2327 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2328 if (calldata->roc && 2329 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) { 2330 nfs_release_seqid(calldata->arg.seqid); 2331 goto out_wait; 2332 } 2333 } 2334 2335 nfs_fattr_init(calldata->res.fattr); 2336 calldata->timestamp = jiffies; 2337 if (nfs4_setup_sequence(NFS_SERVER(inode), 2338 &calldata->arg.seq_args, 2339 &calldata->res.seq_res, 2340 task) != 0) 2341 nfs_release_seqid(calldata->arg.seqid); 2342 dprintk("%s: done!\n", __func__); 2343 return; 2344 out_no_action: 2345 task->tk_action = NULL; 2346 out_wait: 2347 nfs4_sequence_done(task, &calldata->res.seq_res); 2348 } 2349 2350 static const struct rpc_call_ops nfs4_close_ops = { 2351 .rpc_call_prepare = nfs4_close_prepare, 2352 .rpc_call_done = nfs4_close_done, 2353 .rpc_release = nfs4_free_closedata, 2354 }; 2355 2356 /* 2357 * It is possible for data to be read/written from a mem-mapped file 2358 * after the sys_close call (which hits the vfs layer as a flush). 2359 * This means that we can't safely call nfsv4 close on a file until 2360 * the inode is cleared. This in turn means that we are not good 2361 * NFSv4 citizens - we do not indicate to the server to update the file's 2362 * share state even when we are done with one of the three share 2363 * stateid's in the inode. 
2364 * 2365 * NOTE: Caller must be holding the sp->so_owner semaphore! 2366 */ 2367 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 2368 { 2369 struct nfs_server *server = NFS_SERVER(state->inode); 2370 struct nfs4_closedata *calldata; 2371 struct nfs4_state_owner *sp = state->owner; 2372 struct rpc_task *task; 2373 struct rpc_message msg = { 2374 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2375 .rpc_cred = state->owner->so_cred, 2376 }; 2377 struct rpc_task_setup task_setup_data = { 2378 .rpc_client = server->client, 2379 .rpc_message = &msg, 2380 .callback_ops = &nfs4_close_ops, 2381 .workqueue = nfsiod_workqueue, 2382 .flags = RPC_TASK_ASYNC, 2383 }; 2384 int status = -ENOMEM; 2385 2386 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2387 if (calldata == NULL) 2388 goto out; 2389 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2390 calldata->inode = state->inode; 2391 calldata->state = state; 2392 calldata->arg.fh = NFS_FH(state->inode); 2393 calldata->arg.stateid = &state->open_stateid; 2394 /* Serialization for the sequence id */ 2395 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask); 2396 if (calldata->arg.seqid == NULL) 2397 goto out_free_calldata; 2398 calldata->arg.fmode = 0; 2399 calldata->arg.bitmask = server->cache_consistency_bitmask; 2400 calldata->res.fattr = &calldata->fattr; 2401 calldata->res.seqid = calldata->arg.seqid; 2402 calldata->res.server = server; 2403 calldata->roc = pnfs_roc(state->inode); 2404 nfs_sb_active(calldata->inode->i_sb); 2405 2406 msg.rpc_argp = &calldata->arg; 2407 msg.rpc_resp = &calldata->res; 2408 task_setup_data.callback_data = calldata; 2409 task = rpc_run_task(&task_setup_data); 2410 if (IS_ERR(task)) 2411 return PTR_ERR(task); 2412 status = 0; 2413 if (wait) 2414 status = rpc_wait_for_completion_task(task); 2415 rpc_put_task(task); 2416 return status; 2417 out_free_calldata: 2418 kfree(calldata); 2419 out: 2420 nfs4_put_open_state(state); 2421 nfs4_put_state_owner(sp); 2422 return status; 2423 } 2424 2425 static struct inode * 2426 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) 2427 { 2428 struct nfs4_state *state; 2429 2430 /* Protect against concurrent sillydeletes */ 2431 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, 2432 ctx->cred, &ctx->mdsthreshold); 2433 if (IS_ERR(state)) 2434 return ERR_CAST(state); 2435 ctx->state = state; 2436 return igrab(state->inode); 2437 } 2438 2439 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 2440 { 2441 if (ctx->state == NULL) 2442 return; 2443 if (is_sync) 2444 nfs4_close_sync(ctx->state, ctx->mode); 2445 else 2446 nfs4_close_state(ctx->state, ctx->mode); 2447 } 2448 2449 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2450 { 2451 struct nfs4_server_caps_arg args = { 2452 .fhandle = fhandle, 2453 }; 2454 struct nfs4_server_caps_res res = {}; 2455 struct rpc_message msg = { 2456 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 2457 .rpc_argp = &args, 2458 .rpc_resp = &res, 2459 }; 2460 int status; 2461 2462 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2463 if (status == 0) { 2464 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 2465 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 2466 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 2467 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 2468 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 2469 
NFS_CAP_CTIME|NFS_CAP_MTIME); 2470 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL) 2471 server->caps |= NFS_CAP_ACLS; 2472 if (res.has_links != 0) 2473 server->caps |= NFS_CAP_HARDLINKS; 2474 if (res.has_symlinks != 0) 2475 server->caps |= NFS_CAP_SYMLINKS; 2476 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 2477 server->caps |= NFS_CAP_FILEID; 2478 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 2479 server->caps |= NFS_CAP_MODE; 2480 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 2481 server->caps |= NFS_CAP_NLINK; 2482 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 2483 server->caps |= NFS_CAP_OWNER; 2484 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 2485 server->caps |= NFS_CAP_OWNER_GROUP; 2486 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 2487 server->caps |= NFS_CAP_ATIME; 2488 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 2489 server->caps |= NFS_CAP_CTIME; 2490 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 2491 server->caps |= NFS_CAP_MTIME; 2492 2493 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 2494 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 2495 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 2496 server->acl_bitmask = res.acl_bitmask; 2497 server->fh_expire_type = res.fh_expire_type; 2498 } 2499 2500 return status; 2501 } 2502 2503 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2504 { 2505 struct nfs4_exception exception = { }; 2506 int err; 2507 do { 2508 err = nfs4_handle_exception(server, 2509 _nfs4_server_capabilities(server, fhandle), 2510 &exception); 2511 } while (exception.retry); 2512 return err; 2513 } 2514 2515 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2516 struct nfs_fsinfo *info) 2517 { 2518 struct nfs4_lookup_root_arg args = { 2519 .bitmask = nfs4_fattr_bitmap, 2520 }; 2521 struct nfs4_lookup_res res = { 2522 .server = server, 2523 .fattr = info->fattr, 2524 .fh = fhandle, 2525 }; 2526 struct rpc_message msg = { 2527 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 2528 .rpc_argp = &args, 2529 .rpc_resp = &res, 2530 }; 2531 2532 nfs_fattr_init(info->fattr); 2533 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2534 } 2535 2536 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2537 struct nfs_fsinfo *info) 2538 { 2539 struct nfs4_exception exception = { }; 2540 int err; 2541 do { 2542 err = _nfs4_lookup_root(server, fhandle, info); 2543 switch (err) { 2544 case 0: 2545 case -NFS4ERR_WRONGSEC: 2546 goto out; 2547 default: 2548 err = nfs4_handle_exception(server, err, &exception); 2549 } 2550 } while (exception.retry); 2551 out: 2552 return err; 2553 } 2554 2555 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 2556 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 2557 { 2558 struct rpc_auth *auth; 2559 int ret; 2560 2561 auth = rpcauth_create(flavor, server->client); 2562 if (IS_ERR(auth)) { 2563 ret = -EACCES; 2564 goto out; 2565 } 2566 ret = nfs4_lookup_root(server, fhandle, info); 2567 out: 2568 return ret; 2569 } 2570 2571 /* 2572 * Retry pseudoroot lookup with various security flavors. 
We do this when: 2573 * 2574 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC 2575 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation 2576 * 2577 * Returns zero on success, or a negative NFS4ERR value, or a 2578 * negative errno value. 2579 */ 2580 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 2581 struct nfs_fsinfo *info) 2582 { 2583 /* Per 3530bis 15.33.5 */ 2584 static const rpc_authflavor_t flav_array[] = { 2585 RPC_AUTH_GSS_KRB5P, 2586 RPC_AUTH_GSS_KRB5I, 2587 RPC_AUTH_GSS_KRB5, 2588 RPC_AUTH_UNIX, /* courtesy */ 2589 RPC_AUTH_NULL, 2590 }; 2591 int status = -EPERM; 2592 size_t i; 2593 2594 for (i = 0; i < ARRAY_SIZE(flav_array); i++) { 2595 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]); 2596 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 2597 continue; 2598 break; 2599 } 2600 2601 /* 2602 * -EACCES could mean that the user doesn't have correct permissions 2603 * to access the mount. It could also mean that we tried to mount 2604 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 2605 * existing mount programs don't handle -EACCES very well so it should 2606 * be mapped to -EPERM instead. 2607 */ 2608 if (status == -EACCES) 2609 status = -EPERM; 2610 return status; 2611 } 2612 2613 static int nfs4_do_find_root_sec(struct nfs_server *server, 2614 struct nfs_fh *fhandle, struct nfs_fsinfo *info) 2615 { 2616 int mv = server->nfs_client->cl_minorversion; 2617 return nfs_v4_minor_ops[mv]->find_root_sec(server, fhandle, info); 2618 } 2619 2620 /** 2621 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot 2622 * @server: initialized nfs_server handle 2623 * @fhandle: we fill in the pseudo-fs root file handle 2624 * @info: we fill in an FSINFO struct 2625 * 2626 * Returns zero on success, or a negative errno. 2627 */ 2628 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 2629 struct nfs_fsinfo *info) 2630 { 2631 int status; 2632 2633 status = nfs4_lookup_root(server, fhandle, info); 2634 if ((status == -NFS4ERR_WRONGSEC) && 2635 !(server->flags & NFS_MOUNT_SECFLAVOUR)) 2636 status = nfs4_do_find_root_sec(server, fhandle, info); 2637 2638 if (status == 0) 2639 status = nfs4_server_capabilities(server, fhandle); 2640 if (status == 0) 2641 status = nfs4_do_fsinfo(server, fhandle, info); 2642 2643 return nfs4_map_errors(status); 2644 } 2645 2646 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 2647 struct nfs_fsinfo *info) 2648 { 2649 int error; 2650 struct nfs_fattr *fattr = info->fattr; 2651 2652 error = nfs4_server_capabilities(server, mntfh); 2653 if (error < 0) { 2654 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 2655 return error; 2656 } 2657 2658 error = nfs4_proc_getattr(server, mntfh, fattr); 2659 if (error < 0) { 2660 dprintk("nfs4_get_root: getattr error = %d\n", -error); 2661 return error; 2662 } 2663 2664 if (fattr->valid & NFS_ATTR_FATTR_FSID && 2665 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 2666 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 2667 2668 return error; 2669 } 2670 2671 /* 2672 * Get locations and (maybe) other attributes of a referral.
2673 * Note that we'll actually follow the referral later when 2674 * we detect fsid mismatch in inode revalidation 2675 */ 2676 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 2677 const struct qstr *name, struct nfs_fattr *fattr, 2678 struct nfs_fh *fhandle) 2679 { 2680 int status = -ENOMEM; 2681 struct page *page = NULL; 2682 struct nfs4_fs_locations *locations = NULL; 2683 2684 page = alloc_page(GFP_KERNEL); 2685 if (page == NULL) 2686 goto out; 2687 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 2688 if (locations == NULL) 2689 goto out; 2690 2691 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 2692 if (status != 0) 2693 goto out; 2694 /* Make sure server returned a different fsid for the referral */ 2695 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 2696 dprintk("%s: server did not return a different fsid for" 2697 " a referral at %s\n", __func__, name->name); 2698 status = -EIO; 2699 goto out; 2700 } 2701 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 2702 nfs_fixup_referral_attributes(&locations->fattr); 2703 2704 /* replace the lookup nfs_fattr with the locations nfs_fattr */ 2705 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 2706 memset(fhandle, 0, sizeof(struct nfs_fh)); 2707 out: 2708 if (page) 2709 __free_page(page); 2710 kfree(locations); 2711 return status; 2712 } 2713 2714 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2715 { 2716 struct nfs4_getattr_arg args = { 2717 .fh = fhandle, 2718 .bitmask = server->attr_bitmask, 2719 }; 2720 struct nfs4_getattr_res res = { 2721 .fattr = fattr, 2722 .server = server, 2723 }; 2724 struct rpc_message msg = { 2725 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 2726 .rpc_argp = &args, 2727 .rpc_resp = &res, 2728 }; 2729 2730 nfs_fattr_init(fattr); 2731 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2732 } 2733 2734 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2735 { 2736 struct nfs4_exception exception = { }; 2737 int err; 2738 do { 2739 err = nfs4_handle_exception(server, 2740 _nfs4_proc_getattr(server, fhandle, fattr), 2741 &exception); 2742 } while (exception.retry); 2743 return err; 2744 } 2745 2746 /* 2747 * The file is not closed if it is opened due to a request to change 2748 * the size of the file. The open call will not be needed once the 2749 * VFS layer lookup-intents are implemented. 2750 * 2751 * Close is called when the inode is destroyed. 2752 * If we haven't opened the file for O_WRONLY, we 2753 * need to in the size_change case to obtain a stateid. 2754 * 2755 * Got race? 2756 * Because OPEN is always done by name in nfsv4, it is 2757 * possible that we opened a different file by the same 2758 * name. We can recognize this race condition, but we 2759 * can't do anything about it besides returning an error. 2760 * 2761 * This will be fixed with VFS changes (lookup-intent).
2762 */ 2763 static int 2764 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 2765 struct iattr *sattr) 2766 { 2767 struct inode *inode = dentry->d_inode; 2768 struct rpc_cred *cred = NULL; 2769 struct nfs4_state *state = NULL; 2770 int status; 2771 2772 if (pnfs_ld_layoutret_on_setattr(inode)) 2773 pnfs_commit_and_return_layout(inode); 2774 2775 nfs_fattr_init(fattr); 2776 2777 /* Deal with open(O_TRUNC) */ 2778 if (sattr->ia_valid & ATTR_OPEN) 2779 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); 2780 2781 /* Optimization: if the end result is no change, don't RPC */ 2782 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0) 2783 return 0; 2784 2785 /* Search for an existing open(O_WRITE) file */ 2786 if (sattr->ia_valid & ATTR_FILE) { 2787 struct nfs_open_context *ctx; 2788 2789 ctx = nfs_file_open_context(sattr->ia_file); 2790 if (ctx) { 2791 cred = ctx->cred; 2792 state = ctx->state; 2793 } 2794 } 2795 2796 status = nfs4_do_setattr(inode, cred, fattr, sattr, state); 2797 if (status == 0) 2798 nfs_setattr_update_inode(inode, sattr); 2799 return status; 2800 } 2801 2802 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 2803 const struct qstr *name, struct nfs_fh *fhandle, 2804 struct nfs_fattr *fattr) 2805 { 2806 struct nfs_server *server = NFS_SERVER(dir); 2807 int status; 2808 struct nfs4_lookup_arg args = { 2809 .bitmask = server->attr_bitmask, 2810 .dir_fh = NFS_FH(dir), 2811 .name = name, 2812 }; 2813 struct nfs4_lookup_res res = { 2814 .server = server, 2815 .fattr = fattr, 2816 .fh = fhandle, 2817 }; 2818 struct rpc_message msg = { 2819 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 2820 .rpc_argp = &args, 2821 .rpc_resp = &res, 2822 }; 2823 2824 nfs_fattr_init(fattr); 2825 2826 dprintk("NFS call lookup %s\n", name->name); 2827 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 2828 dprintk("NFS reply lookup: %d\n", status); 2829 return status; 2830 } 2831 2832 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 2833 { 2834 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 2835 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 2836 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 2837 fattr->nlink = 2; 2838 } 2839 2840 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 2841 struct qstr *name, struct nfs_fh *fhandle, 2842 struct nfs_fattr *fattr) 2843 { 2844 struct nfs4_exception exception = { }; 2845 struct rpc_clnt *client = *clnt; 2846 int err; 2847 do { 2848 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr); 2849 switch (err) { 2850 case -NFS4ERR_BADNAME: 2851 err = -ENOENT; 2852 goto out; 2853 case -NFS4ERR_MOVED: 2854 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 2855 goto out; 2856 case -NFS4ERR_WRONGSEC: 2857 err = -EPERM; 2858 if (client != *clnt) 2859 goto out; 2860 2861 client = nfs4_create_sec_client(client, dir, name); 2862 if (IS_ERR(client)) 2863 return PTR_ERR(client); 2864 2865 exception.retry = 1; 2866 break; 2867 default: 2868 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 2869 } 2870 } while (exception.retry); 2871 2872 out: 2873 if (err == 0) 2874 *clnt = client; 2875 else if (client != *clnt) 2876 rpc_shutdown_client(client); 2877 2878 return err; 2879 } 2880 2881 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 2882 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2883 { 2884 int status; 2885 struct rpc_clnt *client = NFS_CLIENT(dir); 2886 2887 status = nfs4_proc_lookup_common(&client, dir, 
name, fhandle, fattr); 2888 if (client != NFS_CLIENT(dir)) { 2889 rpc_shutdown_client(client); 2890 nfs_fixup_secinfo_attributes(fattr); 2891 } 2892 return status; 2893 } 2894 2895 struct rpc_clnt * 2896 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 2897 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2898 { 2899 int status; 2900 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir)); 2901 2902 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr); 2903 if (status < 0) { 2904 rpc_shutdown_client(client); 2905 return ERR_PTR(status); 2906 } 2907 return client; 2908 } 2909 2910 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2911 { 2912 struct nfs_server *server = NFS_SERVER(inode); 2913 struct nfs4_accessargs args = { 2914 .fh = NFS_FH(inode), 2915 .bitmask = server->cache_consistency_bitmask, 2916 }; 2917 struct nfs4_accessres res = { 2918 .server = server, 2919 }; 2920 struct rpc_message msg = { 2921 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 2922 .rpc_argp = &args, 2923 .rpc_resp = &res, 2924 .rpc_cred = entry->cred, 2925 }; 2926 int mode = entry->mask; 2927 int status; 2928 2929 /* 2930 * Determine which access bits we want to ask for... 2931 */ 2932 if (mode & MAY_READ) 2933 args.access |= NFS4_ACCESS_READ; 2934 if (S_ISDIR(inode->i_mode)) { 2935 if (mode & MAY_WRITE) 2936 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 2937 if (mode & MAY_EXEC) 2938 args.access |= NFS4_ACCESS_LOOKUP; 2939 } else { 2940 if (mode & MAY_WRITE) 2941 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 2942 if (mode & MAY_EXEC) 2943 args.access |= NFS4_ACCESS_EXECUTE; 2944 } 2945 2946 res.fattr = nfs_alloc_fattr(); 2947 if (res.fattr == NULL) 2948 return -ENOMEM; 2949 2950 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2951 if (!status) { 2952 nfs_access_set_mask(entry, res.access); 2953 nfs_refresh_inode(inode, res.fattr); 2954 } 2955 nfs_free_fattr(res.fattr); 2956 return status; 2957 } 2958 2959 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2960 { 2961 struct nfs4_exception exception = { }; 2962 int err; 2963 do { 2964 err = nfs4_handle_exception(NFS_SERVER(inode), 2965 _nfs4_proc_access(inode, entry), 2966 &exception); 2967 } while (exception.retry); 2968 return err; 2969 } 2970 2971 /* 2972 * TODO: For the time being, we don't try to get any attributes 2973 * along with any of the zero-copy operations READ, READDIR, 2974 * READLINK, WRITE. 2975 * 2976 * In the case of the first three, we want to put the GETATTR 2977 * after the read-type operation -- this is because it is hard 2978 * to predict the length of a GETATTR response in v4, and thus 2979 * align the READ data correctly. This means that the GETATTR 2980 * may end up partially falling into the page cache, and we should 2981 * shift it into the 'tail' of the xdr_buf before processing. 2982 * To do this efficiently, we need to know the total length 2983 * of data received, which doesn't seem to be available outside 2984 * of the RPC layer. 2985 * 2986 * In the case of WRITE, we also want to put the GETATTR after 2987 * the operation -- in this case because we want to make sure 2988 * we get the post-operation mtime and size. 2989 * 2990 * Both of these changes to the XDR layer would in fact be quite 2991 * minor, but I decided to leave them for a subsequent patch. 
2992 */ 2993 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 2994 unsigned int pgbase, unsigned int pglen) 2995 { 2996 struct nfs4_readlink args = { 2997 .fh = NFS_FH(inode), 2998 .pgbase = pgbase, 2999 .pglen = pglen, 3000 .pages = &page, 3001 }; 3002 struct nfs4_readlink_res res; 3003 struct rpc_message msg = { 3004 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 3005 .rpc_argp = &args, 3006 .rpc_resp = &res, 3007 }; 3008 3009 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 3010 } 3011 3012 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 3013 unsigned int pgbase, unsigned int pglen) 3014 { 3015 struct nfs4_exception exception = { }; 3016 int err; 3017 do { 3018 err = nfs4_handle_exception(NFS_SERVER(inode), 3019 _nfs4_proc_readlink(inode, page, pgbase, pglen), 3020 &exception); 3021 } while (exception.retry); 3022 return err; 3023 } 3024 3025 /* 3026 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 3027 */ 3028 static int 3029 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 3030 int flags) 3031 { 3032 struct nfs_open_context *ctx; 3033 struct nfs4_state *state; 3034 int status = 0; 3035 3036 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 3037 if (IS_ERR(ctx)) 3038 return PTR_ERR(ctx); 3039 3040 sattr->ia_mode &= ~current_umask(); 3041 state = nfs4_do_open(dir, dentry, ctx->mode, 3042 flags, sattr, ctx->cred, 3043 &ctx->mdsthreshold); 3044 d_drop(dentry); 3045 if (IS_ERR(state)) { 3046 status = PTR_ERR(state); 3047 goto out; 3048 } 3049 d_add(dentry, igrab(state->inode)); 3050 nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); 3051 ctx->state = state; 3052 out: 3053 put_nfs_open_context(ctx); 3054 return status; 3055 } 3056 3057 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 3058 { 3059 struct nfs_server *server = NFS_SERVER(dir); 3060 struct nfs_removeargs args = { 3061 .fh = NFS_FH(dir), 3062 .name = *name, 3063 }; 3064 struct nfs_removeres res = { 3065 .server = server, 3066 }; 3067 struct rpc_message msg = { 3068 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 3069 .rpc_argp = &args, 3070 .rpc_resp = &res, 3071 }; 3072 int status; 3073 3074 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 3075 if (status == 0) 3076 update_changeattr(dir, &res.cinfo); 3077 return status; 3078 } 3079 3080 static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 3081 { 3082 struct nfs4_exception exception = { }; 3083 int err; 3084 do { 3085 err = nfs4_handle_exception(NFS_SERVER(dir), 3086 _nfs4_proc_remove(dir, name), 3087 &exception); 3088 } while (exception.retry); 3089 return err; 3090 } 3091 3092 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) 3093 { 3094 struct nfs_server *server = NFS_SERVER(dir); 3095 struct nfs_removeargs *args = msg->rpc_argp; 3096 struct nfs_removeres *res = msg->rpc_resp; 3097 3098 res->server = server; 3099 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 3100 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1); 3101 } 3102 3103 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 3104 { 3105 nfs4_setup_sequence(NFS_SERVER(data->dir), 3106 &data->args.seq_args, 3107 &data->res.seq_res, 3108 task); 3109 } 3110 3111 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 3112 { 3113 struct nfs_removeres *res = task->tk_msg.rpc_resp; 
3114 3115 if (!nfs4_sequence_done(task, &res->seq_res)) 3116 return 0; 3117 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 3118 return 0; 3119 update_changeattr(dir, &res->cinfo); 3120 return 1; 3121 } 3122 3123 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 3124 { 3125 struct nfs_server *server = NFS_SERVER(dir); 3126 struct nfs_renameargs *arg = msg->rpc_argp; 3127 struct nfs_renameres *res = msg->rpc_resp; 3128 3129 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 3130 res->server = server; 3131 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1); 3132 } 3133 3134 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 3135 { 3136 nfs4_setup_sequence(NFS_SERVER(data->old_dir), 3137 &data->args.seq_args, 3138 &data->res.seq_res, 3139 task); 3140 } 3141 3142 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 3143 struct inode *new_dir) 3144 { 3145 struct nfs_renameres *res = task->tk_msg.rpc_resp; 3146 3147 if (!nfs4_sequence_done(task, &res->seq_res)) 3148 return 0; 3149 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 3150 return 0; 3151 3152 update_changeattr(old_dir, &res->old_cinfo); 3153 update_changeattr(new_dir, &res->new_cinfo); 3154 return 1; 3155 } 3156 3157 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, 3158 struct inode *new_dir, struct qstr *new_name) 3159 { 3160 struct nfs_server *server = NFS_SERVER(old_dir); 3161 struct nfs_renameargs arg = { 3162 .old_dir = NFS_FH(old_dir), 3163 .new_dir = NFS_FH(new_dir), 3164 .old_name = old_name, 3165 .new_name = new_name, 3166 }; 3167 struct nfs_renameres res = { 3168 .server = server, 3169 }; 3170 struct rpc_message msg = { 3171 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME], 3172 .rpc_argp = &arg, 3173 .rpc_resp = &res, 3174 }; 3175 int status = -ENOMEM; 3176 3177 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3178 if (!status) { 3179 update_changeattr(old_dir, &res.old_cinfo); 3180 update_changeattr(new_dir, &res.new_cinfo); 3181 } 3182 return status; 3183 } 3184 3185 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, 3186 struct inode *new_dir, struct qstr *new_name) 3187 { 3188 struct nfs4_exception exception = { }; 3189 int err; 3190 do { 3191 err = nfs4_handle_exception(NFS_SERVER(old_dir), 3192 _nfs4_proc_rename(old_dir, old_name, 3193 new_dir, new_name), 3194 &exception); 3195 } while (exception.retry); 3196 return err; 3197 } 3198 3199 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3200 { 3201 struct nfs_server *server = NFS_SERVER(inode); 3202 struct nfs4_link_arg arg = { 3203 .fh = NFS_FH(inode), 3204 .dir_fh = NFS_FH(dir), 3205 .name = name, 3206 .bitmask = server->attr_bitmask, 3207 }; 3208 struct nfs4_link_res res = { 3209 .server = server, 3210 }; 3211 struct rpc_message msg = { 3212 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3213 .rpc_argp = &arg, 3214 .rpc_resp = &res, 3215 }; 3216 int status = -ENOMEM; 3217 3218 res.fattr = nfs_alloc_fattr(); 3219 if (res.fattr == NULL) 3220 goto out; 3221 3222 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3223 if (!status) { 3224 update_changeattr(dir, &res.cinfo); 3225 nfs_post_op_update_inode(inode, res.fattr); 3226 } 3227 out: 3228 nfs_free_fattr(res.fattr); 3229 return status; 3230 } 3231 3232 static int nfs4_proc_link(struct inode *inode, struct inode *dir, 
struct qstr *name) 3233 { 3234 struct nfs4_exception exception = { }; 3235 int err; 3236 do { 3237 err = nfs4_handle_exception(NFS_SERVER(inode), 3238 _nfs4_proc_link(inode, dir, name), 3239 &exception); 3240 } while (exception.retry); 3241 return err; 3242 } 3243 3244 struct nfs4_createdata { 3245 struct rpc_message msg; 3246 struct nfs4_create_arg arg; 3247 struct nfs4_create_res res; 3248 struct nfs_fh fh; 3249 struct nfs_fattr fattr; 3250 }; 3251 3252 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3253 struct qstr *name, struct iattr *sattr, u32 ftype) 3254 { 3255 struct nfs4_createdata *data; 3256 3257 data = kzalloc(sizeof(*data), GFP_KERNEL); 3258 if (data != NULL) { 3259 struct nfs_server *server = NFS_SERVER(dir); 3260 3261 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3262 data->msg.rpc_argp = &data->arg; 3263 data->msg.rpc_resp = &data->res; 3264 data->arg.dir_fh = NFS_FH(dir); 3265 data->arg.server = server; 3266 data->arg.name = name; 3267 data->arg.attrs = sattr; 3268 data->arg.ftype = ftype; 3269 data->arg.bitmask = server->attr_bitmask; 3270 data->res.server = server; 3271 data->res.fh = &data->fh; 3272 data->res.fattr = &data->fattr; 3273 nfs_fattr_init(data->res.fattr); 3274 } 3275 return data; 3276 } 3277 3278 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3279 { 3280 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3281 &data->arg.seq_args, &data->res.seq_res, 1); 3282 if (status == 0) { 3283 update_changeattr(dir, &data->res.dir_cinfo); 3284 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 3285 } 3286 return status; 3287 } 3288 3289 static void nfs4_free_createdata(struct nfs4_createdata *data) 3290 { 3291 kfree(data); 3292 } 3293 3294 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3295 struct page *page, unsigned int len, struct iattr *sattr) 3296 { 3297 struct nfs4_createdata *data; 3298 int status = -ENAMETOOLONG; 3299 3300 if (len > NFS4_MAXPATHLEN) 3301 goto out; 3302 3303 status = -ENOMEM; 3304 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3305 if (data == NULL) 3306 goto out; 3307 3308 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3309 data->arg.u.symlink.pages = &page; 3310 data->arg.u.symlink.len = len; 3311 3312 status = nfs4_do_create(dir, dentry, data); 3313 3314 nfs4_free_createdata(data); 3315 out: 3316 return status; 3317 } 3318 3319 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3320 struct page *page, unsigned int len, struct iattr *sattr) 3321 { 3322 struct nfs4_exception exception = { }; 3323 int err; 3324 do { 3325 err = nfs4_handle_exception(NFS_SERVER(dir), 3326 _nfs4_proc_symlink(dir, dentry, page, 3327 len, sattr), 3328 &exception); 3329 } while (exception.retry); 3330 return err; 3331 } 3332 3333 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3334 struct iattr *sattr) 3335 { 3336 struct nfs4_createdata *data; 3337 int status = -ENOMEM; 3338 3339 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 3340 if (data == NULL) 3341 goto out; 3342 3343 status = nfs4_do_create(dir, dentry, data); 3344 3345 nfs4_free_createdata(data); 3346 out: 3347 return status; 3348 } 3349 3350 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3351 struct iattr *sattr) 3352 { 3353 struct nfs4_exception exception = { }; 3354 int err; 3355 3356 sattr->ia_mode &= ~current_umask(); 3357 do { 
3358 err = nfs4_handle_exception(NFS_SERVER(dir), 3359 _nfs4_proc_mkdir(dir, dentry, sattr), 3360 &exception); 3361 } while (exception.retry); 3362 return err; 3363 } 3364 3365 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3366 u64 cookie, struct page **pages, unsigned int count, int plus) 3367 { 3368 struct inode *dir = dentry->d_inode; 3369 struct nfs4_readdir_arg args = { 3370 .fh = NFS_FH(dir), 3371 .pages = pages, 3372 .pgbase = 0, 3373 .count = count, 3374 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask, 3375 .plus = plus, 3376 }; 3377 struct nfs4_readdir_res res; 3378 struct rpc_message msg = { 3379 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 3380 .rpc_argp = &args, 3381 .rpc_resp = &res, 3382 .rpc_cred = cred, 3383 }; 3384 int status; 3385 3386 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__, 3387 dentry->d_parent->d_name.name, 3388 dentry->d_name.name, 3389 (unsigned long long)cookie); 3390 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 3391 res.pgbase = args.pgbase; 3392 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 3393 if (status >= 0) { 3394 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 3395 status += args.pgbase; 3396 } 3397 3398 nfs_invalidate_atime(dir); 3399 3400 dprintk("%s: returns %d\n", __func__, status); 3401 return status; 3402 } 3403 3404 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3405 u64 cookie, struct page **pages, unsigned int count, int plus) 3406 { 3407 struct nfs4_exception exception = { }; 3408 int err; 3409 do { 3410 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), 3411 _nfs4_proc_readdir(dentry, cred, cookie, 3412 pages, count, plus), 3413 &exception); 3414 } while (exception.retry); 3415 return err; 3416 } 3417 3418 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3419 struct iattr *sattr, dev_t rdev) 3420 { 3421 struct nfs4_createdata *data; 3422 int mode = sattr->ia_mode; 3423 int status = -ENOMEM; 3424 3425 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 3426 if (data == NULL) 3427 goto out; 3428 3429 if (S_ISFIFO(mode)) 3430 data->arg.ftype = NF4FIFO; 3431 else if (S_ISBLK(mode)) { 3432 data->arg.ftype = NF4BLK; 3433 data->arg.u.device.specdata1 = MAJOR(rdev); 3434 data->arg.u.device.specdata2 = MINOR(rdev); 3435 } 3436 else if (S_ISCHR(mode)) { 3437 data->arg.ftype = NF4CHR; 3438 data->arg.u.device.specdata1 = MAJOR(rdev); 3439 data->arg.u.device.specdata2 = MINOR(rdev); 3440 } else if (!S_ISSOCK(mode)) { 3441 status = -EINVAL; 3442 goto out_free; 3443 } 3444 3445 status = nfs4_do_create(dir, dentry, data); 3446 out_free: 3447 nfs4_free_createdata(data); 3448 out: 3449 return status; 3450 } 3451 3452 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3453 struct iattr *sattr, dev_t rdev) 3454 { 3455 struct nfs4_exception exception = { }; 3456 int err; 3457 3458 sattr->ia_mode &= ~current_umask(); 3459 do { 3460 err = nfs4_handle_exception(NFS_SERVER(dir), 3461 _nfs4_proc_mknod(dir, dentry, sattr, rdev), 3462 &exception); 3463 } while (exception.retry); 3464 return err; 3465 } 3466 3467 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 3468 struct nfs_fsstat *fsstat) 3469 { 3470 struct nfs4_statfs_arg args = { 3471 .fh = fhandle, 3472 .bitmask = server->attr_bitmask, 3473 }; 3474 struct nfs4_statfs_res res = { 3475 .fsstat = fsstat, 3476 }; 3477 struct rpc_message msg = { 
3478 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 3479 .rpc_argp = &args, 3480 .rpc_resp = &res, 3481 }; 3482 3483 nfs_fattr_init(fsstat->fattr); 3484 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3485 } 3486 3487 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 3488 { 3489 struct nfs4_exception exception = { }; 3490 int err; 3491 do { 3492 err = nfs4_handle_exception(server, 3493 _nfs4_proc_statfs(server, fhandle, fsstat), 3494 &exception); 3495 } while (exception.retry); 3496 return err; 3497 } 3498 3499 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 3500 struct nfs_fsinfo *fsinfo) 3501 { 3502 struct nfs4_fsinfo_arg args = { 3503 .fh = fhandle, 3504 .bitmask = server->attr_bitmask, 3505 }; 3506 struct nfs4_fsinfo_res res = { 3507 .fsinfo = fsinfo, 3508 }; 3509 struct rpc_message msg = { 3510 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 3511 .rpc_argp = &args, 3512 .rpc_resp = &res, 3513 }; 3514 3515 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3516 } 3517 3518 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 3519 { 3520 struct nfs4_exception exception = { }; 3521 unsigned long now = jiffies; 3522 int err; 3523 3524 do { 3525 err = _nfs4_do_fsinfo(server, fhandle, fsinfo); 3526 if (err == 0) { 3527 struct nfs_client *clp = server->nfs_client; 3528 3529 spin_lock(&clp->cl_lock); 3530 clp->cl_lease_time = fsinfo->lease_time * HZ; 3531 clp->cl_last_renewal = now; 3532 spin_unlock(&clp->cl_lock); 3533 break; 3534 } 3535 err = nfs4_handle_exception(server, err, &exception); 3536 } while (exception.retry); 3537 return err; 3538 } 3539 3540 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 3541 { 3542 int error; 3543 3544 nfs_fattr_init(fsinfo->fattr); 3545 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 3546 if (error == 0) { 3547 /* block layout checks this! 
*/ 3548 server->pnfs_blksize = fsinfo->blksize; 3549 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 3550 } 3551 3552 return error; 3553 } 3554 3555 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 3556 struct nfs_pathconf *pathconf) 3557 { 3558 struct nfs4_pathconf_arg args = { 3559 .fh = fhandle, 3560 .bitmask = server->attr_bitmask, 3561 }; 3562 struct nfs4_pathconf_res res = { 3563 .pathconf = pathconf, 3564 }; 3565 struct rpc_message msg = { 3566 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 3567 .rpc_argp = &args, 3568 .rpc_resp = &res, 3569 }; 3570 3571 /* None of the pathconf attributes are mandatory to implement */ 3572 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 3573 memset(pathconf, 0, sizeof(*pathconf)); 3574 return 0; 3575 } 3576 3577 nfs_fattr_init(pathconf->fattr); 3578 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3579 } 3580 3581 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 3582 struct nfs_pathconf *pathconf) 3583 { 3584 struct nfs4_exception exception = { }; 3585 int err; 3586 3587 do { 3588 err = nfs4_handle_exception(server, 3589 _nfs4_proc_pathconf(server, fhandle, pathconf), 3590 &exception); 3591 } while (exception.retry); 3592 return err; 3593 } 3594 3595 int nfs4_set_rw_stateid(nfs4_stateid *stateid, 3596 const struct nfs_open_context *ctx, 3597 const struct nfs_lock_context *l_ctx, 3598 fmode_t fmode) 3599 { 3600 const struct nfs_lockowner *lockowner = NULL; 3601 3602 if (l_ctx != NULL) 3603 lockowner = &l_ctx->lockowner; 3604 return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner); 3605 } 3606 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid); 3607 3608 static bool nfs4_stateid_is_current(nfs4_stateid *stateid, 3609 const struct nfs_open_context *ctx, 3610 const struct nfs_lock_context *l_ctx, 3611 fmode_t fmode) 3612 { 3613 nfs4_stateid current_stateid; 3614 3615 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode)) 3616 return false; 3617 return nfs4_stateid_match(stateid, &current_stateid); 3618 } 3619 3620 static bool nfs4_error_stateid_expired(int err) 3621 { 3622 switch (err) { 3623 case -NFS4ERR_DELEG_REVOKED: 3624 case -NFS4ERR_ADMIN_REVOKED: 3625 case -NFS4ERR_BAD_STATEID: 3626 case -NFS4ERR_STALE_STATEID: 3627 case -NFS4ERR_OLD_STATEID: 3628 case -NFS4ERR_OPENMODE: 3629 case -NFS4ERR_EXPIRED: 3630 return true; 3631 } 3632 return false; 3633 } 3634 3635 void __nfs4_read_done_cb(struct nfs_read_data *data) 3636 { 3637 nfs_invalidate_atime(data->header->inode); 3638 } 3639 3640 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data) 3641 { 3642 struct nfs_server *server = NFS_SERVER(data->header->inode); 3643 3644 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) { 3645 rpc_restart_call_prepare(task); 3646 return -EAGAIN; 3647 } 3648 3649 __nfs4_read_done_cb(data); 3650 if (task->tk_status > 0) 3651 renew_lease(server, data->timestamp); 3652 return 0; 3653 } 3654 3655 static bool nfs4_read_stateid_changed(struct rpc_task *task, 3656 struct nfs_readargs *args) 3657 { 3658 3659 if (!nfs4_error_stateid_expired(task->tk_status) || 3660 nfs4_stateid_is_current(&args->stateid, 3661 args->context, 3662 args->lock_context, 3663 FMODE_READ)) 3664 return false; 3665 rpc_restart_call_prepare(task); 3666 return true; 3667 } 3668 3669 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data) 3670 { 3671 3672 dprintk("--> %s\n", __func__); 3673 3674 if
(!nfs4_sequence_done(task, &data->res.seq_res)) 3675 return -EAGAIN; 3676 if (nfs4_read_stateid_changed(task, &data->args)) 3677 return -EAGAIN; 3678 return data->read_done_cb ? data->read_done_cb(task, data) : 3679 nfs4_read_done_cb(task, data); 3680 } 3681 3682 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg) 3683 { 3684 data->timestamp = jiffies; 3685 data->read_done_cb = nfs4_read_done_cb; 3686 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 3687 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 3688 } 3689 3690 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data) 3691 { 3692 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode), 3693 &data->args.seq_args, 3694 &data->res.seq_res, 3695 task)) 3696 return; 3697 nfs4_set_rw_stateid(&data->args.stateid, data->args.context, 3698 data->args.lock_context, FMODE_READ); 3699 } 3700 3701 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data) 3702 { 3703 struct inode *inode = data->header->inode; 3704 3705 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) { 3706 rpc_restart_call_prepare(task); 3707 return -EAGAIN; 3708 } 3709 if (task->tk_status >= 0) { 3710 renew_lease(NFS_SERVER(inode), data->timestamp); 3711 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 3712 } 3713 return 0; 3714 } 3715 3716 static bool nfs4_write_stateid_changed(struct rpc_task *task, 3717 struct nfs_writeargs *args) 3718 { 3719 3720 if (!nfs4_error_stateid_expired(task->tk_status) || 3721 nfs4_stateid_is_current(&args->stateid, 3722 args->context, 3723 args->lock_context, 3724 FMODE_WRITE)) 3725 return false; 3726 rpc_restart_call_prepare(task); 3727 return true; 3728 } 3729 3730 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data) 3731 { 3732 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3733 return -EAGAIN; 3734 if (nfs4_write_stateid_changed(task, &data->args)) 3735 return -EAGAIN; 3736 return data->write_done_cb ? 
data->write_done_cb(task, data) : 3737 nfs4_write_done_cb(task, data); 3738 } 3739 3740 static 3741 bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data) 3742 { 3743 const struct nfs_pgio_header *hdr = data->header; 3744 3745 /* Don't request attributes for pNFS or O_DIRECT writes */ 3746 if (data->ds_clp != NULL || hdr->dreq != NULL) 3747 return false; 3748 /* Otherwise, request attributes if and only if we don't hold 3749 * a delegation 3750 */ 3751 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 3752 } 3753 3754 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg) 3755 { 3756 struct nfs_server *server = NFS_SERVER(data->header->inode); 3757 3758 if (!nfs4_write_need_cache_consistency_data(data)) { 3759 data->args.bitmask = NULL; 3760 data->res.fattr = NULL; 3761 } else 3762 data->args.bitmask = server->cache_consistency_bitmask; 3763 3764 if (!data->write_done_cb) 3765 data->write_done_cb = nfs4_write_done_cb; 3766 data->res.server = server; 3767 data->timestamp = jiffies; 3768 3769 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 3770 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3771 } 3772 3773 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data) 3774 { 3775 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode), 3776 &data->args.seq_args, 3777 &data->res.seq_res, 3778 task)) 3779 return; 3780 nfs4_set_rw_stateid(&data->args.stateid, data->args.context, 3781 data->args.lock_context, FMODE_WRITE); 3782 } 3783 3784 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 3785 { 3786 nfs4_setup_sequence(NFS_SERVER(data->inode), 3787 &data->args.seq_args, 3788 &data->res.seq_res, 3789 task); 3790 } 3791 3792 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 3793 { 3794 struct inode *inode = data->inode; 3795 3796 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) { 3797 rpc_restart_call_prepare(task); 3798 return -EAGAIN; 3799 } 3800 return 0; 3801 } 3802 3803 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 3804 { 3805 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3806 return -EAGAIN; 3807 return data->commit_done_cb(task, data); 3808 } 3809 3810 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 3811 { 3812 struct nfs_server *server = NFS_SERVER(data->inode); 3813 3814 if (data->commit_done_cb == NULL) 3815 data->commit_done_cb = nfs4_commit_done_cb; 3816 data->res.server = server; 3817 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 3818 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3819 } 3820 3821 struct nfs4_renewdata { 3822 struct nfs_client *client; 3823 unsigned long timestamp; 3824 }; 3825 3826 /* 3827 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 3828 * standalone procedure for queueing an asynchronous RENEW. 
3829 */ 3830 static void nfs4_renew_release(void *calldata) 3831 { 3832 struct nfs4_renewdata *data = calldata; 3833 struct nfs_client *clp = data->client; 3834 3835 if (atomic_read(&clp->cl_count) > 1) 3836 nfs4_schedule_state_renewal(clp); 3837 nfs_put_client(clp); 3838 kfree(data); 3839 } 3840 3841 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 3842 { 3843 struct nfs4_renewdata *data = calldata; 3844 struct nfs_client *clp = data->client; 3845 unsigned long timestamp = data->timestamp; 3846 3847 if (task->tk_status < 0) { 3848 /* Unless we're shutting down, schedule state recovery! */ 3849 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 3850 return; 3851 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 3852 nfs4_schedule_lease_recovery(clp); 3853 return; 3854 } 3855 nfs4_schedule_path_down_recovery(clp); 3856 } 3857 do_renew_lease(clp, timestamp); 3858 } 3859 3860 static const struct rpc_call_ops nfs4_renew_ops = { 3861 .rpc_call_done = nfs4_renew_done, 3862 .rpc_release = nfs4_renew_release, 3863 }; 3864 3865 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 3866 { 3867 struct rpc_message msg = { 3868 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3869 .rpc_argp = clp, 3870 .rpc_cred = cred, 3871 }; 3872 struct nfs4_renewdata *data; 3873 3874 if (renew_flags == 0) 3875 return 0; 3876 if (!atomic_inc_not_zero(&clp->cl_count)) 3877 return -EIO; 3878 data = kmalloc(sizeof(*data), GFP_NOFS); 3879 if (data == NULL) 3880 return -ENOMEM; 3881 data->client = clp; 3882 data->timestamp = jiffies; 3883 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT, 3884 &nfs4_renew_ops, data); 3885 } 3886 3887 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 3888 { 3889 struct rpc_message msg = { 3890 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3891 .rpc_argp = clp, 3892 .rpc_cred = cred, 3893 }; 3894 unsigned long now = jiffies; 3895 int status; 3896 3897 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 3898 if (status < 0) 3899 return status; 3900 do_renew_lease(clp, now); 3901 return 0; 3902 } 3903 3904 static inline int nfs4_server_supports_acls(struct nfs_server *server) 3905 { 3906 return (server->caps & NFS_CAP_ACLS) 3907 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3908 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL); 3909 } 3910 3911 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 3912 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 3913 * the stack. 
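 * (Strictly speaking, the callers below put an array of NFS4ACL_MAXPAGES
 *  page pointers on the stack, i.e. sizeof(struct page *) *
 *  DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) bytes.)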
3914 */ 3915 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 3916 3917 static int buf_to_pages_noslab(const void *buf, size_t buflen, 3918 struct page **pages, unsigned int *pgbase) 3919 { 3920 struct page *newpage, **spages; 3921 int rc = 0; 3922 size_t len; 3923 spages = pages; 3924 3925 do { 3926 len = min_t(size_t, PAGE_SIZE, buflen); 3927 newpage = alloc_page(GFP_KERNEL); 3928 3929 if (newpage == NULL) 3930 goto unwind; 3931 memcpy(page_address(newpage), buf, len); 3932 buf += len; 3933 buflen -= len; 3934 *pages++ = newpage; 3935 rc++; 3936 } while (buflen != 0); 3937 3938 return rc; 3939 3940 unwind: 3941 for(; rc > 0; rc--) 3942 __free_page(spages[rc-1]); 3943 return -ENOMEM; 3944 } 3945 3946 struct nfs4_cached_acl { 3947 int cached; 3948 size_t len; 3949 char data[0]; 3950 }; 3951 3952 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 3953 { 3954 struct nfs_inode *nfsi = NFS_I(inode); 3955 3956 spin_lock(&inode->i_lock); 3957 kfree(nfsi->nfs4_acl); 3958 nfsi->nfs4_acl = acl; 3959 spin_unlock(&inode->i_lock); 3960 } 3961 3962 static void nfs4_zap_acl_attr(struct inode *inode) 3963 { 3964 nfs4_set_cached_acl(inode, NULL); 3965 } 3966 3967 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 3968 { 3969 struct nfs_inode *nfsi = NFS_I(inode); 3970 struct nfs4_cached_acl *acl; 3971 int ret = -ENOENT; 3972 3973 spin_lock(&inode->i_lock); 3974 acl = nfsi->nfs4_acl; 3975 if (acl == NULL) 3976 goto out; 3977 if (buf == NULL) /* user is just asking for length */ 3978 goto out_len; 3979 if (acl->cached == 0) 3980 goto out; 3981 ret = -ERANGE; /* see getxattr(2) man page */ 3982 if (acl->len > buflen) 3983 goto out; 3984 memcpy(buf, acl->data, acl->len); 3985 out_len: 3986 ret = acl->len; 3987 out: 3988 spin_unlock(&inode->i_lock); 3989 return ret; 3990 } 3991 3992 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 3993 { 3994 struct nfs4_cached_acl *acl; 3995 size_t buflen = sizeof(*acl) + acl_len; 3996 3997 if (buflen <= PAGE_SIZE) { 3998 acl = kmalloc(buflen, GFP_KERNEL); 3999 if (acl == NULL) 4000 goto out; 4001 acl->cached = 1; 4002 _copy_from_pages(acl->data, pages, pgbase, acl_len); 4003 } else { 4004 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 4005 if (acl == NULL) 4006 goto out; 4007 acl->cached = 0; 4008 } 4009 acl->len = acl_len; 4010 out: 4011 nfs4_set_cached_acl(inode, acl); 4012 } 4013 4014 /* 4015 * The getxattr API returns the required buffer length when called with a 4016 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 4017 * the required buf. On a NULL buf, we send a page of data to the server 4018 * guessing that the ACL request can be serviced by a page. If so, we cache 4019 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 4020 * the cache. If not so, we throw away the page, and cache the required 4021 * length. The next getxattr call will then produce another round trip to 4022 * the server, this time with the input buf of the required size. 
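 * For example, a getxattr(..., NULL, 0) probe reaches
 * __nfs4_get_acl_uncached() with buflen == 0 and still sends a one-page
 * GETACL.  If the reply fits, the ACL is cached (up to one page) and its
 * length returned; if the server marks the reply as truncated
 * (NFS4_ACL_TRUNC), only the length is returned and the follow-up call with
 * a large enough buffer triggers the second round trip.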
4023 */ 4024 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4025 { 4026 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 4027 struct nfs_getaclargs args = { 4028 .fh = NFS_FH(inode), 4029 .acl_pages = pages, 4030 .acl_len = buflen, 4031 }; 4032 struct nfs_getaclres res = { 4033 .acl_len = buflen, 4034 }; 4035 struct rpc_message msg = { 4036 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 4037 .rpc_argp = &args, 4038 .rpc_resp = &res, 4039 }; 4040 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4041 int ret = -ENOMEM, i; 4042 4043 /* As long as we're doing a round trip to the server anyway, 4044 * let's be prepared for a page of acl data. */ 4045 if (npages == 0) 4046 npages = 1; 4047 if (npages > ARRAY_SIZE(pages)) 4048 return -ERANGE; 4049 4050 for (i = 0; i < npages; i++) { 4051 pages[i] = alloc_page(GFP_KERNEL); 4052 if (!pages[i]) 4053 goto out_free; 4054 } 4055 4056 /* for decoding across pages */ 4057 res.acl_scratch = alloc_page(GFP_KERNEL); 4058 if (!res.acl_scratch) 4059 goto out_free; 4060 4061 args.acl_len = npages * PAGE_SIZE; 4062 args.acl_pgbase = 0; 4063 4064 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 4065 __func__, buf, buflen, npages, args.acl_len); 4066 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 4067 &msg, &args.seq_args, &res.seq_res, 0); 4068 if (ret) 4069 goto out_free; 4070 4071 /* Handle the case where the passed-in buffer is too short */ 4072 if (res.acl_flags & NFS4_ACL_TRUNC) { 4073 /* Did the user only issue a request for the acl length? */ 4074 if (buf == NULL) 4075 goto out_ok; 4076 ret = -ERANGE; 4077 goto out_free; 4078 } 4079 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 4080 if (buf) { 4081 if (res.acl_len > buflen) { 4082 ret = -ERANGE; 4083 goto out_free; 4084 } 4085 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 4086 } 4087 out_ok: 4088 ret = res.acl_len; 4089 out_free: 4090 for (i = 0; i < npages; i++) 4091 if (pages[i]) 4092 __free_page(pages[i]); 4093 if (res.acl_scratch) 4094 __free_page(res.acl_scratch); 4095 return ret; 4096 } 4097 4098 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 4099 { 4100 struct nfs4_exception exception = { }; 4101 ssize_t ret; 4102 do { 4103 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 4104 if (ret >= 0) 4105 break; 4106 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 4107 } while (exception.retry); 4108 return ret; 4109 } 4110 4111 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 4112 { 4113 struct nfs_server *server = NFS_SERVER(inode); 4114 int ret; 4115 4116 if (!nfs4_server_supports_acls(server)) 4117 return -EOPNOTSUPP; 4118 ret = nfs_revalidate_inode(server, inode); 4119 if (ret < 0) 4120 return ret; 4121 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 4122 nfs_zap_acl_cache(inode); 4123 ret = nfs4_read_cached_acl(inode, buf, buflen); 4124 if (ret != -ENOENT) 4125 /* -ENOENT is returned if there is no ACL or if there is an ACL 4126 * but no cached acl data, just the acl length */ 4127 return ret; 4128 return nfs4_get_acl_uncached(inode, buf, buflen); 4129 } 4130 4131 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4132 { 4133 struct nfs_server *server = NFS_SERVER(inode); 4134 struct page *pages[NFS4ACL_MAXPAGES]; 4135 struct nfs_setaclargs arg = { 4136 .fh = NFS_FH(inode), 4137 .acl_pages = pages, 4138 .acl_len = buflen, 4139 }; 4140 struct 
nfs_setaclres res; 4141 struct rpc_message msg = { 4142 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 4143 .rpc_argp = &arg, 4144 .rpc_resp = &res, 4145 }; 4146 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4147 int ret, i; 4148 4149 if (!nfs4_server_supports_acls(server)) 4150 return -EOPNOTSUPP; 4151 if (npages > ARRAY_SIZE(pages)) 4152 return -ERANGE; 4153 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 4154 if (i < 0) 4155 return i; 4156 nfs4_inode_return_delegation(inode); 4157 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4158 4159 /* 4160 * Free each page after tx, so the only ref left is 4161 * held by the network stack 4162 */ 4163 for (; i > 0; i--) 4164 put_page(pages[i-1]); 4165 4166 /* 4167 * Acl update can result in inode attribute update. 4168 * so mark the attribute cache invalid. 4169 */ 4170 spin_lock(&inode->i_lock); 4171 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 4172 spin_unlock(&inode->i_lock); 4173 nfs_access_zap_cache(inode); 4174 nfs_zap_acl_cache(inode); 4175 return ret; 4176 } 4177 4178 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4179 { 4180 struct nfs4_exception exception = { }; 4181 int err; 4182 do { 4183 err = nfs4_handle_exception(NFS_SERVER(inode), 4184 __nfs4_proc_set_acl(inode, buf, buflen), 4185 &exception); 4186 } while (exception.retry); 4187 return err; 4188 } 4189 4190 static int 4191 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state) 4192 { 4193 struct nfs_client *clp = server->nfs_client; 4194 4195 if (task->tk_status >= 0) 4196 return 0; 4197 switch(task->tk_status) { 4198 case -NFS4ERR_DELEG_REVOKED: 4199 case -NFS4ERR_ADMIN_REVOKED: 4200 case -NFS4ERR_BAD_STATEID: 4201 if (state == NULL) 4202 break; 4203 nfs_remove_bad_delegation(state->inode); 4204 case -NFS4ERR_OPENMODE: 4205 if (state == NULL) 4206 break; 4207 if (nfs4_schedule_stateid_recovery(server, state) < 0) 4208 goto stateid_invalid; 4209 goto wait_on_recovery; 4210 case -NFS4ERR_EXPIRED: 4211 if (state != NULL) { 4212 if (nfs4_schedule_stateid_recovery(server, state) < 0) 4213 goto stateid_invalid; 4214 } 4215 case -NFS4ERR_STALE_STATEID: 4216 case -NFS4ERR_STALE_CLIENTID: 4217 nfs4_schedule_lease_recovery(clp); 4218 goto wait_on_recovery; 4219 #if defined(CONFIG_NFS_V4_1) 4220 case -NFS4ERR_BADSESSION: 4221 case -NFS4ERR_BADSLOT: 4222 case -NFS4ERR_BAD_HIGH_SLOT: 4223 case -NFS4ERR_DEADSESSION: 4224 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 4225 case -NFS4ERR_SEQ_FALSE_RETRY: 4226 case -NFS4ERR_SEQ_MISORDERED: 4227 dprintk("%s ERROR %d, Reset session\n", __func__, 4228 task->tk_status); 4229 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); 4230 task->tk_status = 0; 4231 return -EAGAIN; 4232 #endif /* CONFIG_NFS_V4_1 */ 4233 case -NFS4ERR_DELAY: 4234 nfs_inc_server_stats(server, NFSIOS_DELAY); 4235 case -NFS4ERR_GRACE: 4236 rpc_delay(task, NFS4_POLL_RETRY_MAX); 4237 task->tk_status = 0; 4238 return -EAGAIN; 4239 case -NFS4ERR_RETRY_UNCACHED_REP: 4240 case -NFS4ERR_OLD_STATEID: 4241 task->tk_status = 0; 4242 return -EAGAIN; 4243 } 4244 task->tk_status = nfs4_map_errors(task->tk_status); 4245 return 0; 4246 stateid_invalid: 4247 task->tk_status = -EIO; 4248 return 0; 4249 wait_on_recovery: 4250 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 4251 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 4252 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 4253 task->tk_status = 0; 4254 return 
-EAGAIN; 4255 } 4256 4257 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 4258 nfs4_verifier *bootverf) 4259 { 4260 __be32 verf[2]; 4261 4262 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 4263 /* An impossible timestamp guarantees this value 4264 * will never match a generated boot time. */ 4265 verf[0] = 0; 4266 verf[1] = (__be32)(NSEC_PER_SEC + 1); 4267 } else { 4268 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 4269 verf[0] = (__be32)nn->boot_time.tv_sec; 4270 verf[1] = (__be32)nn->boot_time.tv_nsec; 4271 } 4272 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 4273 } 4274 4275 static unsigned int 4276 nfs4_init_nonuniform_client_string(const struct nfs_client *clp, 4277 char *buf, size_t len) 4278 { 4279 unsigned int result; 4280 4281 rcu_read_lock(); 4282 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s", 4283 clp->cl_ipaddr, 4284 rpc_peeraddr2str(clp->cl_rpcclient, 4285 RPC_DISPLAY_ADDR), 4286 rpc_peeraddr2str(clp->cl_rpcclient, 4287 RPC_DISPLAY_PROTO)); 4288 rcu_read_unlock(); 4289 return result; 4290 } 4291 4292 static unsigned int 4293 nfs4_init_uniform_client_string(const struct nfs_client *clp, 4294 char *buf, size_t len) 4295 { 4296 char *nodename = clp->cl_rpcclient->cl_nodename; 4297 4298 if (nfs4_client_id_uniquifier[0] != '\0') 4299 nodename = nfs4_client_id_uniquifier; 4300 return scnprintf(buf, len, "Linux NFSv%u.%u %s", 4301 clp->rpc_ops->version, clp->cl_minorversion, 4302 nodename); 4303 } 4304 4305 /** 4306 * nfs4_proc_setclientid - Negotiate client ID 4307 * @clp: state data structure 4308 * @program: RPC program for NFSv4 callback service 4309 * @port: IP port number for NFS4 callback service 4310 * @cred: RPC credential to use for this call 4311 * @res: where to place the result 4312 * 4313 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
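 *
 * The nfs_client_id4 string is generated by
 * nfs4_init_uniform_client_string() when NFS_CS_MIGRATION is set on this
 * client, and by nfs4_init_nonuniform_client_string() (the traditional,
 * IP-address based form) otherwise.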
4314 */ 4315 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 4316 unsigned short port, struct rpc_cred *cred, 4317 struct nfs4_setclientid_res *res) 4318 { 4319 nfs4_verifier sc_verifier; 4320 struct nfs4_setclientid setclientid = { 4321 .sc_verifier = &sc_verifier, 4322 .sc_prog = program, 4323 .sc_cb_ident = clp->cl_cb_ident, 4324 }; 4325 struct rpc_message msg = { 4326 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 4327 .rpc_argp = &setclientid, 4328 .rpc_resp = res, 4329 .rpc_cred = cred, 4330 }; 4331 int status; 4332 4333 /* nfs_client_id4 */ 4334 nfs4_init_boot_verifier(clp, &sc_verifier); 4335 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 4336 setclientid.sc_name_len = 4337 nfs4_init_uniform_client_string(clp, 4338 setclientid.sc_name, 4339 sizeof(setclientid.sc_name)); 4340 else 4341 setclientid.sc_name_len = 4342 nfs4_init_nonuniform_client_string(clp, 4343 setclientid.sc_name, 4344 sizeof(setclientid.sc_name)); 4345 /* cb_client4 */ 4346 rcu_read_lock(); 4347 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid, 4348 sizeof(setclientid.sc_netid), 4349 rpc_peeraddr2str(clp->cl_rpcclient, 4350 RPC_DISPLAY_NETID)); 4351 rcu_read_unlock(); 4352 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 4353 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 4354 clp->cl_ipaddr, port >> 8, port & 255); 4355 4356 dprintk("NFS call setclientid auth=%s, '%.*s'\n", 4357 clp->cl_rpcclient->cl_auth->au_ops->au_name, 4358 setclientid.sc_name_len, setclientid.sc_name); 4359 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4360 dprintk("NFS reply setclientid: %d\n", status); 4361 return status; 4362 } 4363 4364 /** 4365 * nfs4_proc_setclientid_confirm - Confirm client ID 4366 * @clp: state data structure 4367 * @res: result of a previous SETCLIENTID 4368 * @cred: RPC credential to use for this call 4369 * 4370 * Returns zero, a negative errno, or a negative NFS4ERR status code. 
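 *
 * Like nfs4_proc_setclientid(), this is a synchronous call issued with
 * RPC_TASK_TIMEOUT; the clientid and confirmation verifier being confirmed
 * are the ones returned by the earlier SETCLIENTID call.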
4371 */ 4372 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 4373 struct nfs4_setclientid_res *arg, 4374 struct rpc_cred *cred) 4375 { 4376 struct rpc_message msg = { 4377 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 4378 .rpc_argp = arg, 4379 .rpc_cred = cred, 4380 }; 4381 int status; 4382 4383 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 4384 clp->cl_rpcclient->cl_auth->au_ops->au_name, 4385 clp->cl_clientid); 4386 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4387 dprintk("NFS reply setclientid_confirm: %d\n", status); 4388 return status; 4389 } 4390 4391 struct nfs4_delegreturndata { 4392 struct nfs4_delegreturnargs args; 4393 struct nfs4_delegreturnres res; 4394 struct nfs_fh fh; 4395 nfs4_stateid stateid; 4396 unsigned long timestamp; 4397 struct nfs_fattr fattr; 4398 int rpc_status; 4399 }; 4400 4401 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 4402 { 4403 struct nfs4_delegreturndata *data = calldata; 4404 4405 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4406 return; 4407 4408 switch (task->tk_status) { 4409 case -NFS4ERR_STALE_STATEID: 4410 case -NFS4ERR_EXPIRED: 4411 case 0: 4412 renew_lease(data->res.server, data->timestamp); 4413 break; 4414 default: 4415 if (nfs4_async_handle_error(task, data->res.server, NULL) == 4416 -EAGAIN) { 4417 rpc_restart_call_prepare(task); 4418 return; 4419 } 4420 } 4421 data->rpc_status = task->tk_status; 4422 } 4423 4424 static void nfs4_delegreturn_release(void *calldata) 4425 { 4426 kfree(calldata); 4427 } 4428 4429 #if defined(CONFIG_NFS_V4_1) 4430 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 4431 { 4432 struct nfs4_delegreturndata *d_data; 4433 4434 d_data = (struct nfs4_delegreturndata *)data; 4435 4436 nfs4_setup_sequence(d_data->res.server, 4437 &d_data->args.seq_args, 4438 &d_data->res.seq_res, 4439 task); 4440 } 4441 #endif /* CONFIG_NFS_V4_1 */ 4442 4443 static const struct rpc_call_ops nfs4_delegreturn_ops = { 4444 #if defined(CONFIG_NFS_V4_1) 4445 .rpc_call_prepare = nfs4_delegreturn_prepare, 4446 #endif /* CONFIG_NFS_V4_1 */ 4447 .rpc_call_done = nfs4_delegreturn_done, 4448 .rpc_release = nfs4_delegreturn_release, 4449 }; 4450 4451 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 4452 { 4453 struct nfs4_delegreturndata *data; 4454 struct nfs_server *server = NFS_SERVER(inode); 4455 struct rpc_task *task; 4456 struct rpc_message msg = { 4457 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 4458 .rpc_cred = cred, 4459 }; 4460 struct rpc_task_setup task_setup_data = { 4461 .rpc_client = server->client, 4462 .rpc_message = &msg, 4463 .callback_ops = &nfs4_delegreturn_ops, 4464 .flags = RPC_TASK_ASYNC, 4465 }; 4466 int status = 0; 4467 4468 data = kzalloc(sizeof(*data), GFP_NOFS); 4469 if (data == NULL) 4470 return -ENOMEM; 4471 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4472 data->args.fhandle = &data->fh; 4473 data->args.stateid = &data->stateid; 4474 data->args.bitmask = server->cache_consistency_bitmask; 4475 nfs_copy_fh(&data->fh, NFS_FH(inode)); 4476 nfs4_stateid_copy(&data->stateid, stateid); 4477 data->res.fattr = &data->fattr; 4478 data->res.server = server; 4479 nfs_fattr_init(data->res.fattr); 4480 data->timestamp = jiffies; 4481 data->rpc_status = 0; 4482 4483 task_setup_data.callback_data = data; 4484 msg.rpc_argp = &data->args; 4485 msg.rpc_resp = &data->res; 4486 task = 
rpc_run_task(&task_setup_data); 4487 if (IS_ERR(task)) 4488 return PTR_ERR(task); 4489 if (!issync) 4490 goto out; 4491 status = nfs4_wait_for_completion_rpc_task(task); 4492 if (status != 0) 4493 goto out; 4494 status = data->rpc_status; 4495 if (status == 0) 4496 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 4497 else 4498 nfs_refresh_inode(inode, &data->fattr); 4499 out: 4500 rpc_put_task(task); 4501 return status; 4502 } 4503 4504 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 4505 { 4506 struct nfs_server *server = NFS_SERVER(inode); 4507 struct nfs4_exception exception = { }; 4508 int err; 4509 do { 4510 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 4511 switch (err) { 4512 case -NFS4ERR_STALE_STATEID: 4513 case -NFS4ERR_EXPIRED: 4514 case 0: 4515 return 0; 4516 } 4517 err = nfs4_handle_exception(server, err, &exception); 4518 } while (exception.retry); 4519 return err; 4520 } 4521 4522 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 4523 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 4524 4525 /* 4526 * sleep, with exponential backoff, and retry the LOCK operation. 4527 */ 4528 static unsigned long 4529 nfs4_set_lock_task_retry(unsigned long timeout) 4530 { 4531 freezable_schedule_timeout_killable(timeout); 4532 timeout <<= 1; 4533 if (timeout > NFS4_LOCK_MAXTIMEOUT) 4534 return NFS4_LOCK_MAXTIMEOUT; 4535 return timeout; 4536 } 4537 4538 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4539 { 4540 struct inode *inode = state->inode; 4541 struct nfs_server *server = NFS_SERVER(inode); 4542 struct nfs_client *clp = server->nfs_client; 4543 struct nfs_lockt_args arg = { 4544 .fh = NFS_FH(inode), 4545 .fl = request, 4546 }; 4547 struct nfs_lockt_res res = { 4548 .denied = request, 4549 }; 4550 struct rpc_message msg = { 4551 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 4552 .rpc_argp = &arg, 4553 .rpc_resp = &res, 4554 .rpc_cred = state->owner->so_cred, 4555 }; 4556 struct nfs4_lock_state *lsp; 4557 int status; 4558 4559 arg.lock_owner.clientid = clp->cl_clientid; 4560 status = nfs4_set_lock_state(state, request); 4561 if (status != 0) 4562 goto out; 4563 lsp = request->fl_u.nfs4_fl.owner; 4564 arg.lock_owner.id = lsp->ls_seqid.owner_id; 4565 arg.lock_owner.s_dev = server->s_dev; 4566 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4567 switch (status) { 4568 case 0: 4569 request->fl_type = F_UNLCK; 4570 break; 4571 case -NFS4ERR_DENIED: 4572 status = 0; 4573 } 4574 request->fl_ops->fl_release_private(request); 4575 out: 4576 return status; 4577 } 4578 4579 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4580 { 4581 struct nfs4_exception exception = { }; 4582 int err; 4583 4584 do { 4585 err = nfs4_handle_exception(NFS_SERVER(state->inode), 4586 _nfs4_proc_getlk(state, cmd, request), 4587 &exception); 4588 } while (exception.retry); 4589 return err; 4590 } 4591 4592 static int do_vfs_lock(struct file *file, struct file_lock *fl) 4593 { 4594 int res = 0; 4595 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 4596 case FL_POSIX: 4597 res = posix_lock_file_wait(file, fl); 4598 break; 4599 case FL_FLOCK: 4600 res = flock_lock_file_wait(file, fl); 4601 break; 4602 default: 4603 BUG(); 4604 } 4605 return res; 4606 } 4607 4608 struct nfs4_unlockdata { 4609 struct nfs_locku_args arg; 4610 struct nfs_locku_res res; 4611 struct nfs4_lock_state *lsp; 4612 struct nfs_open_context *ctx; 4613 struct file_lock 
fl; 4614 const struct nfs_server *server; 4615 unsigned long timestamp; 4616 }; 4617 4618 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 4619 struct nfs_open_context *ctx, 4620 struct nfs4_lock_state *lsp, 4621 struct nfs_seqid *seqid) 4622 { 4623 struct nfs4_unlockdata *p; 4624 struct inode *inode = lsp->ls_state->inode; 4625 4626 p = kzalloc(sizeof(*p), GFP_NOFS); 4627 if (p == NULL) 4628 return NULL; 4629 p->arg.fh = NFS_FH(inode); 4630 p->arg.fl = &p->fl; 4631 p->arg.seqid = seqid; 4632 p->res.seqid = seqid; 4633 p->arg.stateid = &lsp->ls_stateid; 4634 p->lsp = lsp; 4635 atomic_inc(&lsp->ls_count); 4636 /* Ensure we don't close file until we're done freeing locks! */ 4637 p->ctx = get_nfs_open_context(ctx); 4638 memcpy(&p->fl, fl, sizeof(p->fl)); 4639 p->server = NFS_SERVER(inode); 4640 return p; 4641 } 4642 4643 static void nfs4_locku_release_calldata(void *data) 4644 { 4645 struct nfs4_unlockdata *calldata = data; 4646 nfs_free_seqid(calldata->arg.seqid); 4647 nfs4_put_lock_state(calldata->lsp); 4648 put_nfs_open_context(calldata->ctx); 4649 kfree(calldata); 4650 } 4651 4652 static void nfs4_locku_done(struct rpc_task *task, void *data) 4653 { 4654 struct nfs4_unlockdata *calldata = data; 4655 4656 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 4657 return; 4658 switch (task->tk_status) { 4659 case 0: 4660 nfs4_stateid_copy(&calldata->lsp->ls_stateid, 4661 &calldata->res.stateid); 4662 renew_lease(calldata->server, calldata->timestamp); 4663 break; 4664 case -NFS4ERR_BAD_STATEID: 4665 case -NFS4ERR_OLD_STATEID: 4666 case -NFS4ERR_STALE_STATEID: 4667 case -NFS4ERR_EXPIRED: 4668 break; 4669 default: 4670 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) 4671 rpc_restart_call_prepare(task); 4672 } 4673 nfs_release_seqid(calldata->arg.seqid); 4674 } 4675 4676 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 4677 { 4678 struct nfs4_unlockdata *calldata = data; 4679 4680 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 4681 goto out_wait; 4682 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 4683 /* Note: exit _without_ running nfs4_locku_done */ 4684 goto out_no_action; 4685 } 4686 calldata->timestamp = jiffies; 4687 if (nfs4_setup_sequence(calldata->server, 4688 &calldata->arg.seq_args, 4689 &calldata->res.seq_res, 4690 task) != 0) 4691 nfs_release_seqid(calldata->arg.seqid); 4692 return; 4693 out_no_action: 4694 task->tk_action = NULL; 4695 out_wait: 4696 nfs4_sequence_done(task, &calldata->res.seq_res); 4697 } 4698 4699 static const struct rpc_call_ops nfs4_locku_ops = { 4700 .rpc_call_prepare = nfs4_locku_prepare, 4701 .rpc_call_done = nfs4_locku_done, 4702 .rpc_release = nfs4_locku_release_calldata, 4703 }; 4704 4705 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 4706 struct nfs_open_context *ctx, 4707 struct nfs4_lock_state *lsp, 4708 struct nfs_seqid *seqid) 4709 { 4710 struct nfs4_unlockdata *data; 4711 struct rpc_message msg = { 4712 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 4713 .rpc_cred = ctx->cred, 4714 }; 4715 struct rpc_task_setup task_setup_data = { 4716 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 4717 .rpc_message = &msg, 4718 .callback_ops = &nfs4_locku_ops, 4719 .workqueue = nfsiod_workqueue, 4720 .flags = RPC_TASK_ASYNC, 4721 }; 4722 4723 /* Ensure this is an unlock - when canceling a lock, the 4724 * canceled lock is passed in, and it won't be an unlock. 
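 * (In the cancellation path the file_lock passed in is the nfs4_lockdata's
 *  private copy, so forcing F_UNLCK below does not disturb the caller's
 *  original lock request.)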
4725 */ 4726 fl->fl_type = F_UNLCK; 4727 4728 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 4729 if (data == NULL) { 4730 nfs_free_seqid(seqid); 4731 return ERR_PTR(-ENOMEM); 4732 } 4733 4734 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 4735 msg.rpc_argp = &data->arg; 4736 msg.rpc_resp = &data->res; 4737 task_setup_data.callback_data = data; 4738 return rpc_run_task(&task_setup_data); 4739 } 4740 4741 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 4742 { 4743 struct inode *inode = state->inode; 4744 struct nfs4_state_owner *sp = state->owner; 4745 struct nfs_inode *nfsi = NFS_I(inode); 4746 struct nfs_seqid *seqid; 4747 struct nfs4_lock_state *lsp; 4748 struct rpc_task *task; 4749 int status = 0; 4750 unsigned char fl_flags = request->fl_flags; 4751 4752 status = nfs4_set_lock_state(state, request); 4753 /* Unlock _before_ we do the RPC call */ 4754 request->fl_flags |= FL_EXISTS; 4755 /* Exclude nfs_delegation_claim_locks() */ 4756 mutex_lock(&sp->so_delegreturn_mutex); 4757 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 4758 down_read(&nfsi->rwsem); 4759 if (do_vfs_lock(request->fl_file, request) == -ENOENT) { 4760 up_read(&nfsi->rwsem); 4761 mutex_unlock(&sp->so_delegreturn_mutex); 4762 goto out; 4763 } 4764 up_read(&nfsi->rwsem); 4765 mutex_unlock(&sp->so_delegreturn_mutex); 4766 if (status != 0) 4767 goto out; 4768 /* Is this a delegated lock? */ 4769 lsp = request->fl_u.nfs4_fl.owner; 4770 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) 4771 goto out; 4772 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 4773 status = -ENOMEM; 4774 if (seqid == NULL) 4775 goto out; 4776 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 4777 status = PTR_ERR(task); 4778 if (IS_ERR(task)) 4779 goto out; 4780 status = nfs4_wait_for_completion_rpc_task(task); 4781 rpc_put_task(task); 4782 out: 4783 request->fl_flags = fl_flags; 4784 return status; 4785 } 4786 4787 struct nfs4_lockdata { 4788 struct nfs_lock_args arg; 4789 struct nfs_lock_res res; 4790 struct nfs4_lock_state *lsp; 4791 struct nfs_open_context *ctx; 4792 struct file_lock fl; 4793 unsigned long timestamp; 4794 int rpc_status; 4795 int cancelled; 4796 struct nfs_server *server; 4797 }; 4798 4799 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 4800 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 4801 gfp_t gfp_mask) 4802 { 4803 struct nfs4_lockdata *p; 4804 struct inode *inode = lsp->ls_state->inode; 4805 struct nfs_server *server = NFS_SERVER(inode); 4806 4807 p = kzalloc(sizeof(*p), gfp_mask); 4808 if (p == NULL) 4809 return NULL; 4810 4811 p->arg.fh = NFS_FH(inode); 4812 p->arg.fl = &p->fl; 4813 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 4814 if (p->arg.open_seqid == NULL) 4815 goto out_free; 4816 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask); 4817 if (p->arg.lock_seqid == NULL) 4818 goto out_free_seqid; 4819 p->arg.lock_stateid = &lsp->ls_stateid; 4820 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 4821 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 4822 p->arg.lock_owner.s_dev = server->s_dev; 4823 p->res.lock_seqid = p->arg.lock_seqid; 4824 p->lsp = lsp; 4825 p->server = server; 4826 atomic_inc(&lsp->ls_count); 4827 p->ctx = get_nfs_open_context(ctx); 4828 memcpy(&p->fl, fl, sizeof(p->fl)); 4829 return p; 4830 out_free_seqid: 4831 nfs_free_seqid(p->arg.open_seqid); 4832 out_free: 4833 kfree(p); 4834 return NULL; 
4835 } 4836 4837 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 4838 { 4839 struct nfs4_lockdata *data = calldata; 4840 struct nfs4_state *state = data->lsp->ls_state; 4841 4842 dprintk("%s: begin!\n", __func__); 4843 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 4844 goto out_wait; 4845 /* Do we need to do an open_to_lock_owner? */ 4846 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) { 4847 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 4848 goto out_release_lock_seqid; 4849 } 4850 data->arg.open_stateid = &state->open_stateid; 4851 data->arg.new_lock_owner = 1; 4852 data->res.open_seqid = data->arg.open_seqid; 4853 } else 4854 data->arg.new_lock_owner = 0; 4855 if (!nfs4_valid_open_stateid(state)) { 4856 data->rpc_status = -EBADF; 4857 task->tk_action = NULL; 4858 goto out_release_open_seqid; 4859 } 4860 data->timestamp = jiffies; 4861 if (nfs4_setup_sequence(data->server, 4862 &data->arg.seq_args, 4863 &data->res.seq_res, 4864 task) == 0) 4865 return; 4866 out_release_open_seqid: 4867 nfs_release_seqid(data->arg.open_seqid); 4868 out_release_lock_seqid: 4869 nfs_release_seqid(data->arg.lock_seqid); 4870 out_wait: 4871 nfs4_sequence_done(task, &data->res.seq_res); 4872 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 4873 } 4874 4875 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 4876 { 4877 struct nfs4_lockdata *data = calldata; 4878 4879 dprintk("%s: begin!\n", __func__); 4880 4881 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4882 return; 4883 4884 data->rpc_status = task->tk_status; 4885 if (data->arg.new_lock_owner != 0) { 4886 if (data->rpc_status == 0) 4887 nfs_confirm_seqid(&data->lsp->ls_seqid, 0); 4888 else 4889 goto out; 4890 } 4891 if (data->rpc_status == 0) { 4892 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid); 4893 set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags); 4894 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); 4895 } 4896 out: 4897 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 4898 } 4899 4900 static void nfs4_lock_release(void *calldata) 4901 { 4902 struct nfs4_lockdata *data = calldata; 4903 4904 dprintk("%s: begin!\n", __func__); 4905 nfs_free_seqid(data->arg.open_seqid); 4906 if (data->cancelled != 0) { 4907 struct rpc_task *task; 4908 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 4909 data->arg.lock_seqid); 4910 if (!IS_ERR(task)) 4911 rpc_put_task_async(task); 4912 dprintk("%s: cancelling lock!\n", __func__); 4913 } else 4914 nfs_free_seqid(data->arg.lock_seqid); 4915 nfs4_put_lock_state(data->lsp); 4916 put_nfs_open_context(data->ctx); 4917 kfree(data); 4918 dprintk("%s: done!\n", __func__); 4919 } 4920 4921 static const struct rpc_call_ops nfs4_lock_ops = { 4922 .rpc_call_prepare = nfs4_lock_prepare, 4923 .rpc_call_done = nfs4_lock_done, 4924 .rpc_release = nfs4_lock_release, 4925 }; 4926 4927 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 4928 { 4929 switch (error) { 4930 case -NFS4ERR_ADMIN_REVOKED: 4931 case -NFS4ERR_BAD_STATEID: 4932 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4933 if (new_lock_owner != 0 || 4934 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 4935 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 4936 break; 4937 case -NFS4ERR_STALE_STATEID: 4938 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4939 case -NFS4ERR_EXPIRED: 4940 nfs4_schedule_lease_recovery(server->nfs_client); 4941 }; 4942 } 
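/*
 * Set up and run a single LOCK request.  @recovery_type distinguishes a new
 * lock (NFS_LOCK_NEW) from reboot reclaim (NFS_LOCK_RECLAIM) and
 * expired-lease recovery (NFS_LOCK_EXPIRED); the recovery variants mark the
 * sequence call as privileged so it is not held back by state recovery.  If
 * the wait for completion is interrupted, the request is flagged as
 * cancelled and any lock that was granted is undone from
 * nfs4_lock_release().
 */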
4943 4944 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 4945 { 4946 struct nfs4_lockdata *data; 4947 struct rpc_task *task; 4948 struct rpc_message msg = { 4949 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 4950 .rpc_cred = state->owner->so_cred, 4951 }; 4952 struct rpc_task_setup task_setup_data = { 4953 .rpc_client = NFS_CLIENT(state->inode), 4954 .rpc_message = &msg, 4955 .callback_ops = &nfs4_lock_ops, 4956 .workqueue = nfsiod_workqueue, 4957 .flags = RPC_TASK_ASYNC, 4958 }; 4959 int ret; 4960 4961 dprintk("%s: begin!\n", __func__); 4962 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 4963 fl->fl_u.nfs4_fl.owner, 4964 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 4965 if (data == NULL) 4966 return -ENOMEM; 4967 if (IS_SETLKW(cmd)) 4968 data->arg.block = 1; 4969 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 4970 msg.rpc_argp = &data->arg; 4971 msg.rpc_resp = &data->res; 4972 task_setup_data.callback_data = data; 4973 if (recovery_type > NFS_LOCK_NEW) { 4974 if (recovery_type == NFS_LOCK_RECLAIM) 4975 data->arg.reclaim = NFS_LOCK_RECLAIM; 4976 nfs4_set_sequence_privileged(&data->arg.seq_args); 4977 } 4978 task = rpc_run_task(&task_setup_data); 4979 if (IS_ERR(task)) 4980 return PTR_ERR(task); 4981 ret = nfs4_wait_for_completion_rpc_task(task); 4982 if (ret == 0) { 4983 ret = data->rpc_status; 4984 if (ret) 4985 nfs4_handle_setlk_error(data->server, data->lsp, 4986 data->arg.new_lock_owner, ret); 4987 } else 4988 data->cancelled = 1; 4989 rpc_put_task(task); 4990 dprintk("%s: done, ret = %d!\n", __func__, ret); 4991 return ret; 4992 } 4993 4994 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 4995 { 4996 struct nfs_server *server = NFS_SERVER(state->inode); 4997 struct nfs4_exception exception = { 4998 .inode = state->inode, 4999 }; 5000 int err; 5001 5002 do { 5003 /* Cache the lock if possible... */ 5004 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5005 return 0; 5006 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 5007 if (err != -NFS4ERR_DELAY) 5008 break; 5009 nfs4_handle_exception(server, err, &exception); 5010 } while (exception.retry); 5011 return err; 5012 } 5013 5014 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 5015 { 5016 struct nfs_server *server = NFS_SERVER(state->inode); 5017 struct nfs4_exception exception = { 5018 .inode = state->inode, 5019 }; 5020 int err; 5021 5022 err = nfs4_set_lock_state(state, request); 5023 if (err != 0) 5024 return err; 5025 do { 5026 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 5027 return 0; 5028 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 5029 switch (err) { 5030 default: 5031 goto out; 5032 case -NFS4ERR_GRACE: 5033 case -NFS4ERR_DELAY: 5034 nfs4_handle_exception(server, err, &exception); 5035 err = 0; 5036 } 5037 } while (exception.retry); 5038 out: 5039 return err; 5040 } 5041 5042 #if defined(CONFIG_NFS_V4_1) 5043 /** 5044 * nfs41_check_expired_locks - possibly free a lock stateid 5045 * 5046 * @state: NFSv4 state for an inode 5047 * 5048 * Returns NFS_OK if recovery for this stateid is now finished. 5049 * Otherwise a negative NFS4ERR value is returned. 
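 *
 * Any lock stateid that fails TEST_STATEID has its NFS_LOCK_INITIALIZED flag
 * cleared (and, unless the server reported it as unrecognised, is released
 * with FREE_STATEID) so that it can be re-established by lock recovery.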
5050 */ 5051 static int nfs41_check_expired_locks(struct nfs4_state *state) 5052 { 5053 int status, ret = -NFS4ERR_BAD_STATEID; 5054 struct nfs4_lock_state *lsp; 5055 struct nfs_server *server = NFS_SERVER(state->inode); 5056 5057 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 5058 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 5059 status = nfs41_test_stateid(server, &lsp->ls_stateid); 5060 if (status != NFS_OK) { 5061 /* Free the stateid unless the server 5062 * informs us the stateid is unrecognized. */ 5063 if (status != -NFS4ERR_BAD_STATEID) 5064 nfs41_free_stateid(server, 5065 &lsp->ls_stateid); 5066 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 5067 ret = status; 5068 } 5069 } 5070 }; 5071 5072 return ret; 5073 } 5074 5075 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 5076 { 5077 int status = NFS_OK; 5078 5079 if (test_bit(LK_STATE_IN_USE, &state->flags)) 5080 status = nfs41_check_expired_locks(state); 5081 if (status != NFS_OK) 5082 status = nfs4_lock_expired(state, request); 5083 return status; 5084 } 5085 #endif 5086 5087 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5088 { 5089 struct nfs4_state_owner *sp = state->owner; 5090 struct nfs_inode *nfsi = NFS_I(state->inode); 5091 unsigned char fl_flags = request->fl_flags; 5092 unsigned int seq; 5093 int status = -ENOLCK; 5094 5095 if ((fl_flags & FL_POSIX) && 5096 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 5097 goto out; 5098 /* Is this a delegated open? */ 5099 status = nfs4_set_lock_state(state, request); 5100 if (status != 0) 5101 goto out; 5102 request->fl_flags |= FL_ACCESS; 5103 status = do_vfs_lock(request->fl_file, request); 5104 if (status < 0) 5105 goto out; 5106 down_read(&nfsi->rwsem); 5107 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 5108 /* Yes: cache locks! */ 5109 /* ...but avoid races with delegation recall... */ 5110 request->fl_flags = fl_flags & ~FL_SLEEP; 5111 status = do_vfs_lock(request->fl_file, request); 5112 goto out_unlock; 5113 } 5114 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 5115 up_read(&nfsi->rwsem); 5116 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 5117 if (status != 0) 5118 goto out; 5119 down_read(&nfsi->rwsem); 5120 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) { 5121 status = -NFS4ERR_DELAY; 5122 goto out_unlock; 5123 } 5124 /* Note: we always want to sleep here! 
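 * The server has already granted the lock at this point, so the local
 * do_vfs_lock() call below is expected to succeed; if it fails, the VFS
 * lock state no longer matches what the server handed out, hence the
 * warning.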
*/ 5125 request->fl_flags = fl_flags | FL_SLEEP; 5126 if (do_vfs_lock(request->fl_file, request) < 0) 5127 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock " 5128 "manager!\n", __func__); 5129 out_unlock: 5130 up_read(&nfsi->rwsem); 5131 out: 5132 request->fl_flags = fl_flags; 5133 return status; 5134 } 5135 5136 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 5137 { 5138 struct nfs4_exception exception = { 5139 .state = state, 5140 .inode = state->inode, 5141 }; 5142 int err; 5143 5144 do { 5145 err = _nfs4_proc_setlk(state, cmd, request); 5146 if (err == -NFS4ERR_DENIED) 5147 err = -EAGAIN; 5148 err = nfs4_handle_exception(NFS_SERVER(state->inode), 5149 err, &exception); 5150 } while (exception.retry); 5151 return err; 5152 } 5153 5154 static int 5155 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 5156 { 5157 struct nfs_open_context *ctx; 5158 struct nfs4_state *state; 5159 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 5160 int status; 5161 5162 /* verify open state */ 5163 ctx = nfs_file_open_context(filp); 5164 state = ctx->state; 5165 5166 if (request->fl_start < 0 || request->fl_end < 0) 5167 return -EINVAL; 5168 5169 if (IS_GETLK(cmd)) { 5170 if (state != NULL) 5171 return nfs4_proc_getlk(state, F_GETLK, request); 5172 return 0; 5173 } 5174 5175 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 5176 return -EINVAL; 5177 5178 if (request->fl_type == F_UNLCK) { 5179 if (state != NULL) 5180 return nfs4_proc_unlck(state, cmd, request); 5181 return 0; 5182 } 5183 5184 if (state == NULL) 5185 return -ENOLCK; 5186 /* 5187 * Don't rely on the VFS having checked the file open mode, 5188 * since it won't do this for flock() locks. 5189 */ 5190 switch (request->fl_type) { 5191 case F_RDLCK: 5192 if (!(filp->f_mode & FMODE_READ)) 5193 return -EBADF; 5194 break; 5195 case F_WRLCK: 5196 if (!(filp->f_mode & FMODE_WRITE)) 5197 return -EBADF; 5198 } 5199 5200 do { 5201 status = nfs4_proc_setlk(state, cmd, request); 5202 if ((status != -EAGAIN) || IS_SETLK(cmd)) 5203 break; 5204 timeout = nfs4_set_lock_task_retry(timeout); 5205 status = -ERESTARTSYS; 5206 if (signalled()) 5207 break; 5208 } while(status < 0); 5209 return status; 5210 } 5211 5212 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid) 5213 { 5214 struct nfs_server *server = NFS_SERVER(state->inode); 5215 int err; 5216 5217 err = nfs4_set_lock_state(state, fl); 5218 if (err != 0) 5219 return err; 5220 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 5221 return nfs4_handle_delegation_recall_error(server, state, stateid, err); 5222 } 5223 5224 struct nfs_release_lockowner_data { 5225 struct nfs4_lock_state *lsp; 5226 struct nfs_server *server; 5227 struct nfs_release_lockowner_args args; 5228 }; 5229 5230 static void nfs4_release_lockowner_release(void *calldata) 5231 { 5232 struct nfs_release_lockowner_data *data = calldata; 5233 nfs4_free_lock_state(data->server, data->lsp); 5234 kfree(calldata); 5235 } 5236 5237 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 5238 .rpc_release = nfs4_release_lockowner_release, 5239 }; 5240 5241 static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp) 5242 { 5243 struct nfs_release_lockowner_data *data; 5244 struct rpc_message msg = { 5245 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 5246 }; 5247 5248 if (server->nfs_client->cl_mvops->minor_version != 0) 5249 return -EINVAL; 5250 data = kmalloc(sizeof(*data), 
GFP_NOFS); 5251 if (!data) 5252 return -ENOMEM; 5253 data->lsp = lsp; 5254 data->server = server; 5255 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 5256 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 5257 data->args.lock_owner.s_dev = server->s_dev; 5258 msg.rpc_argp = &data->args; 5259 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 5260 return 0; 5261 } 5262 5263 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 5264 5265 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, 5266 const void *buf, size_t buflen, 5267 int flags, int type) 5268 { 5269 if (strcmp(key, "") != 0) 5270 return -EINVAL; 5271 5272 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen); 5273 } 5274 5275 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, 5276 void *buf, size_t buflen, int type) 5277 { 5278 if (strcmp(key, "") != 0) 5279 return -EINVAL; 5280 5281 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen); 5282 } 5283 5284 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, 5285 size_t list_len, const char *name, 5286 size_t name_len, int type) 5287 { 5288 size_t len = sizeof(XATTR_NAME_NFSV4_ACL); 5289 5290 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) 5291 return 0; 5292 5293 if (list && len <= list_len) 5294 memcpy(list, XATTR_NAME_NFSV4_ACL, len); 5295 return len; 5296 } 5297 5298 /* 5299 * nfs_fhget will use either the mounted_on_fileid or the fileid 5300 */ 5301 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 5302 { 5303 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 5304 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 5305 (fattr->valid & NFS_ATTR_FATTR_FSID) && 5306 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 5307 return; 5308 5309 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 5310 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 5311 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 5312 fattr->nlink = 2; 5313 } 5314 5315 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 5316 const struct qstr *name, 5317 struct nfs4_fs_locations *fs_locations, 5318 struct page *page) 5319 { 5320 struct nfs_server *server = NFS_SERVER(dir); 5321 u32 bitmask[2] = { 5322 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 5323 }; 5324 struct nfs4_fs_locations_arg args = { 5325 .dir_fh = NFS_FH(dir), 5326 .name = name, 5327 .page = page, 5328 .bitmask = bitmask, 5329 }; 5330 struct nfs4_fs_locations_res res = { 5331 .fs_locations = fs_locations, 5332 }; 5333 struct rpc_message msg = { 5334 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 5335 .rpc_argp = &args, 5336 .rpc_resp = &res, 5337 }; 5338 int status; 5339 5340 dprintk("%s: start\n", __func__); 5341 5342 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 5343 * is not supported */ 5344 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 5345 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 5346 else 5347 bitmask[0] |= FATTR4_WORD0_FILEID; 5348 5349 nfs_fattr_init(&fs_locations->fattr); 5350 fs_locations->server = server; 5351 fs_locations->nlocations = 0; 5352 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 5353 dprintk("%s: returned status = %d\n", __func__, status); 5354 return status; 5355 } 5356 5357 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 5358 const struct qstr *name, 5359 struct nfs4_fs_locations *fs_locations, 5360 struct page *page) 5361 { 5362 struct 
nfs4_exception exception = { }; 5363 int err; 5364 do { 5365 err = nfs4_handle_exception(NFS_SERVER(dir), 5366 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page), 5367 &exception); 5368 } while (exception.retry); 5369 return err; 5370 } 5371 5372 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) 5373 { 5374 int status; 5375 struct nfs4_secinfo_arg args = { 5376 .dir_fh = NFS_FH(dir), 5377 .name = name, 5378 }; 5379 struct nfs4_secinfo_res res = { 5380 .flavors = flavors, 5381 }; 5382 struct rpc_message msg = { 5383 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 5384 .rpc_argp = &args, 5385 .rpc_resp = &res, 5386 }; 5387 5388 dprintk("NFS call secinfo %s\n", name->name); 5389 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 5390 dprintk("NFS reply secinfo: %d\n", status); 5391 return status; 5392 } 5393 5394 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 5395 struct nfs4_secinfo_flavors *flavors) 5396 { 5397 struct nfs4_exception exception = { }; 5398 int err; 5399 do { 5400 err = nfs4_handle_exception(NFS_SERVER(dir), 5401 _nfs4_proc_secinfo(dir, name, flavors), 5402 &exception); 5403 } while (exception.retry); 5404 return err; 5405 } 5406 5407 #ifdef CONFIG_NFS_V4_1 5408 /* 5409 * Check the exchange flags returned by the server for invalid flags, having 5410 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 5411 * DS flags set. 5412 */ 5413 static int nfs4_check_cl_exchange_flags(u32 flags) 5414 { 5415 if (flags & ~EXCHGID4_FLAG_MASK_R) 5416 goto out_inval; 5417 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 5418 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 5419 goto out_inval; 5420 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 5421 goto out_inval; 5422 return NFS_OK; 5423 out_inval: 5424 return -NFS4ERR_INVAL; 5425 } 5426 5427 static bool 5428 nfs41_same_server_scope(struct nfs41_server_scope *a, 5429 struct nfs41_server_scope *b) 5430 { 5431 if (a->server_scope_sz == b->server_scope_sz && 5432 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 5433 return true; 5434 5435 return false; 5436 } 5437 5438 /* 5439 * nfs4_proc_bind_conn_to_session() 5440 * 5441 * The 4.1 client currently uses the same TCP connection for the 5442 * fore and backchannel. 
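 * The reply is therefore expected to report direction NFS4_CDFS4_BOTH and a
 * non-RDMA mode; any other combination, or a session ID that does not match
 * clp->cl_session, is treated as -EIO.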
5443 */ 5444 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 5445 { 5446 int status; 5447 struct nfs41_bind_conn_to_session_res res; 5448 struct rpc_message msg = { 5449 .rpc_proc = 5450 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 5451 .rpc_argp = clp, 5452 .rpc_resp = &res, 5453 .rpc_cred = cred, 5454 }; 5455 5456 dprintk("--> %s\n", __func__); 5457 5458 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); 5459 if (unlikely(res.session == NULL)) { 5460 status = -ENOMEM; 5461 goto out; 5462 } 5463 5464 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5465 if (status == 0) { 5466 if (memcmp(res.session->sess_id.data, 5467 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 5468 dprintk("NFS: %s: Session ID mismatch\n", __func__); 5469 status = -EIO; 5470 goto out_session; 5471 } 5472 if (res.dir != NFS4_CDFS4_BOTH) { 5473 dprintk("NFS: %s: Unexpected direction from server\n", 5474 __func__); 5475 status = -EIO; 5476 goto out_session; 5477 } 5478 if (res.use_conn_in_rdma_mode) { 5479 dprintk("NFS: %s: Server returned RDMA mode = true\n", 5480 __func__); 5481 status = -EIO; 5482 goto out_session; 5483 } 5484 } 5485 out_session: 5486 kfree(res.session); 5487 out: 5488 dprintk("<-- %s status= %d\n", __func__, status); 5489 return status; 5490 } 5491 5492 /* 5493 * nfs4_proc_exchange_id() 5494 * 5495 * Returns zero, a negative errno, or a negative NFS4ERR status code. 5496 * 5497 * Since the clientid has expired, all compounds using sessions 5498 * associated with the stale clientid will be returning 5499 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 5500 * be in some phase of session reset. 5501 */ 5502 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 5503 { 5504 nfs4_verifier verifier; 5505 struct nfs41_exchange_id_args args = { 5506 .verifier = &verifier, 5507 .client = clp, 5508 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, 5509 }; 5510 struct nfs41_exchange_id_res res = { 5511 0 5512 }; 5513 int status; 5514 struct rpc_message msg = { 5515 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 5516 .rpc_argp = &args, 5517 .rpc_resp = &res, 5518 .rpc_cred = cred, 5519 }; 5520 5521 nfs4_init_boot_verifier(clp, &verifier); 5522 args.id_len = nfs4_init_uniform_client_string(clp, args.id, 5523 sizeof(args.id)); 5524 dprintk("NFS call exchange_id auth=%s, '%.*s'\n", 5525 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5526 args.id_len, args.id); 5527 5528 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 5529 GFP_NOFS); 5530 if (unlikely(res.server_owner == NULL)) { 5531 status = -ENOMEM; 5532 goto out; 5533 } 5534 5535 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 5536 GFP_NOFS); 5537 if (unlikely(res.server_scope == NULL)) { 5538 status = -ENOMEM; 5539 goto out_server_owner; 5540 } 5541 5542 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 5543 if (unlikely(res.impl_id == NULL)) { 5544 status = -ENOMEM; 5545 goto out_server_scope; 5546 } 5547 5548 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5549 if (status == 0) 5550 status = nfs4_check_cl_exchange_flags(res.flags); 5551 5552 if (status == 0) { 5553 clp->cl_clientid = res.clientid; 5554 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R); 5555 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) 5556 clp->cl_seqid = res.seqid; 5557 5558 kfree(clp->cl_serverowner); 5559 clp->cl_serverowner = res.server_owner; 5560 res.server_owner = NULL; 5561 5562 /* use the 
most recent implementation id */ 5563 kfree(clp->cl_implid); 5564 clp->cl_implid = res.impl_id; 5565 5566 if (clp->cl_serverscope != NULL && 5567 !nfs41_same_server_scope(clp->cl_serverscope, 5568 res.server_scope)) { 5569 dprintk("%s: server_scope mismatch detected\n", 5570 __func__); 5571 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 5572 kfree(clp->cl_serverscope); 5573 clp->cl_serverscope = NULL; 5574 } 5575 5576 if (clp->cl_serverscope == NULL) { 5577 clp->cl_serverscope = res.server_scope; 5578 goto out; 5579 } 5580 } else 5581 kfree(res.impl_id); 5582 5583 out_server_owner: 5584 kfree(res.server_owner); 5585 out_server_scope: 5586 kfree(res.server_scope); 5587 out: 5588 if (clp->cl_implid != NULL) 5589 dprintk("NFS reply exchange_id: Server Implementation ID: " 5590 "domain: %s, name: %s, date: %llu,%u\n", 5591 clp->cl_implid->domain, clp->cl_implid->name, 5592 clp->cl_implid->date.seconds, 5593 clp->cl_implid->date.nseconds); 5594 dprintk("NFS reply exchange_id: %d\n", status); 5595 return status; 5596 } 5597 5598 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 5599 struct rpc_cred *cred) 5600 { 5601 struct rpc_message msg = { 5602 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 5603 .rpc_argp = clp, 5604 .rpc_cred = cred, 5605 }; 5606 int status; 5607 5608 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5609 if (status) 5610 dprintk("NFS: Got error %d from the server %s on " 5611 "DESTROY_CLIENTID.", status, clp->cl_hostname); 5612 return status; 5613 } 5614 5615 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 5616 struct rpc_cred *cred) 5617 { 5618 unsigned int loop; 5619 int ret; 5620 5621 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 5622 ret = _nfs4_proc_destroy_clientid(clp, cred); 5623 switch (ret) { 5624 case -NFS4ERR_DELAY: 5625 case -NFS4ERR_CLIENTID_BUSY: 5626 ssleep(1); 5627 break; 5628 default: 5629 return ret; 5630 } 5631 } 5632 return 0; 5633 } 5634 5635 int nfs4_destroy_clientid(struct nfs_client *clp) 5636 { 5637 struct rpc_cred *cred; 5638 int ret = 0; 5639 5640 if (clp->cl_mvops->minor_version < 1) 5641 goto out; 5642 if (clp->cl_exchange_flags == 0) 5643 goto out; 5644 if (clp->cl_preserve_clid) 5645 goto out; 5646 cred = nfs4_get_exchange_id_cred(clp); 5647 ret = nfs4_proc_destroy_clientid(clp, cred); 5648 if (cred) 5649 put_rpccred(cred); 5650 switch (ret) { 5651 case 0: 5652 case -NFS4ERR_STALE_CLIENTID: 5653 clp->cl_exchange_flags = 0; 5654 } 5655 out: 5656 return ret; 5657 } 5658 5659 struct nfs4_get_lease_time_data { 5660 struct nfs4_get_lease_time_args *args; 5661 struct nfs4_get_lease_time_res *res; 5662 struct nfs_client *clp; 5663 }; 5664 5665 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 5666 void *calldata) 5667 { 5668 struct nfs4_get_lease_time_data *data = 5669 (struct nfs4_get_lease_time_data *)calldata; 5670 5671 dprintk("--> %s\n", __func__); 5672 /* just setup sequence, do not trigger session recovery 5673 since we're invoked within one */ 5674 nfs41_setup_sequence(data->clp->cl_session, 5675 &data->args->la_seq_args, 5676 &data->res->lr_seq_res, 5677 task); 5678 dprintk("<-- %s\n", __func__); 5679 } 5680 5681 /* 5682 * Called from nfs4_state_manager thread for session setup, so don't recover 5683 * from sequence operation or clientid errors. 
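 * NFS4ERR_DELAY and NFS4ERR_GRACE are handled locally by delaying and
 * resending the request rather than by kicking off full state recovery.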
5684 */ 5685 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 5686 { 5687 struct nfs4_get_lease_time_data *data = 5688 (struct nfs4_get_lease_time_data *)calldata; 5689 5690 dprintk("--> %s\n", __func__); 5691 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 5692 return; 5693 switch (task->tk_status) { 5694 case -NFS4ERR_DELAY: 5695 case -NFS4ERR_GRACE: 5696 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 5697 rpc_delay(task, NFS4_POLL_RETRY_MIN); 5698 task->tk_status = 0; 5699 /* fall through */ 5700 case -NFS4ERR_RETRY_UNCACHED_REP: 5701 rpc_restart_call_prepare(task); 5702 return; 5703 } 5704 dprintk("<-- %s\n", __func__); 5705 } 5706 5707 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 5708 .rpc_call_prepare = nfs4_get_lease_time_prepare, 5709 .rpc_call_done = nfs4_get_lease_time_done, 5710 }; 5711 5712 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 5713 { 5714 struct rpc_task *task; 5715 struct nfs4_get_lease_time_args args; 5716 struct nfs4_get_lease_time_res res = { 5717 .lr_fsinfo = fsinfo, 5718 }; 5719 struct nfs4_get_lease_time_data data = { 5720 .args = &args, 5721 .res = &res, 5722 .clp = clp, 5723 }; 5724 struct rpc_message msg = { 5725 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 5726 .rpc_argp = &args, 5727 .rpc_resp = &res, 5728 }; 5729 struct rpc_task_setup task_setup = { 5730 .rpc_client = clp->cl_rpcclient, 5731 .rpc_message = &msg, 5732 .callback_ops = &nfs4_get_lease_time_ops, 5733 .callback_data = &data, 5734 .flags = RPC_TASK_TIMEOUT, 5735 }; 5736 int status; 5737 5738 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 5739 nfs4_set_sequence_privileged(&args.la_seq_args); 5740 dprintk("--> %s\n", __func__); 5741 task = rpc_run_task(&task_setup); 5742 5743 if (IS_ERR(task)) 5744 status = PTR_ERR(task); 5745 else { 5746 status = task->tk_status; 5747 rpc_put_task(task); 5748 } 5749 dprintk("<-- %s return %d\n", __func__, status); 5750 5751 return status; 5752 } 5753 5754 /* 5755 * Initialize the values to be used by the client in CREATE_SESSION 5756 * If nfs4_init_session set the fore channel request and response sizes, 5757 * use them. 5758 * 5759 * Set the back channel max_resp_sz_cached to zero to force the client to 5760 * always set csa_cachethis to FALSE because the current implementation 5761 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 
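 * When no target sizes were recorded, both fore channel sizes fall back to
 * NFS_MAX_FILE_IO_SIZE, and the fore channel max_reqs is taken from
 * max_session_slots.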
5762 */ 5763 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) 5764 { 5765 struct nfs4_session *session = args->client->cl_session; 5766 unsigned int mxrqst_sz = session->fc_target_max_rqst_sz, 5767 mxresp_sz = session->fc_target_max_resp_sz; 5768 5769 if (mxrqst_sz == 0) 5770 mxrqst_sz = NFS_MAX_FILE_IO_SIZE; 5771 if (mxresp_sz == 0) 5772 mxresp_sz = NFS_MAX_FILE_IO_SIZE; 5773 /* Fore channel attributes */ 5774 args->fc_attrs.max_rqst_sz = mxrqst_sz; 5775 args->fc_attrs.max_resp_sz = mxresp_sz; 5776 args->fc_attrs.max_ops = NFS4_MAX_OPS; 5777 args->fc_attrs.max_reqs = max_session_slots; 5778 5779 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 5780 "max_ops=%u max_reqs=%u\n", 5781 __func__, 5782 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 5783 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 5784 5785 /* Back channel attributes */ 5786 args->bc_attrs.max_rqst_sz = PAGE_SIZE; 5787 args->bc_attrs.max_resp_sz = PAGE_SIZE; 5788 args->bc_attrs.max_resp_sz_cached = 0; 5789 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 5790 args->bc_attrs.max_reqs = 1; 5791 5792 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 5793 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 5794 __func__, 5795 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 5796 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 5797 args->bc_attrs.max_reqs); 5798 } 5799 5800 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) 5801 { 5802 struct nfs4_channel_attrs *sent = &args->fc_attrs; 5803 struct nfs4_channel_attrs *rcvd = &session->fc_attrs; 5804 5805 if (rcvd->max_resp_sz > sent->max_resp_sz) 5806 return -EINVAL; 5807 /* 5808 * Our requested max_ops is the minimum we need; we're not 5809 * prepared to break up compounds into smaller pieces than that. 
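 * (For instance, an ordinary NFSv4.1 open is sent by this client as one
 * compound of roughly SEQUENCE, PUTFH, OPEN, GETFH, ACCESS and GETATTR;
 * the op count is illustrative, but it is the sort of compound that
 * NFS4_MAX_OPS has to cover.)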
5810 * So, no point even trying to continue if the server won't 5811 * cooperate: 5812 */ 5813 if (rcvd->max_ops < sent->max_ops) 5814 return -EINVAL; 5815 if (rcvd->max_reqs == 0) 5816 return -EINVAL; 5817 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 5818 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 5819 return 0; 5820 } 5821 5822 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) 5823 { 5824 struct nfs4_channel_attrs *sent = &args->bc_attrs; 5825 struct nfs4_channel_attrs *rcvd = &session->bc_attrs; 5826 5827 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 5828 return -EINVAL; 5829 if (rcvd->max_resp_sz < sent->max_resp_sz) 5830 return -EINVAL; 5831 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 5832 return -EINVAL; 5833 /* These would render the backchannel useless: */ 5834 if (rcvd->max_ops != sent->max_ops) 5835 return -EINVAL; 5836 if (rcvd->max_reqs != sent->max_reqs) 5837 return -EINVAL; 5838 return 0; 5839 } 5840 5841 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 5842 struct nfs4_session *session) 5843 { 5844 int ret; 5845 5846 ret = nfs4_verify_fore_channel_attrs(args, session); 5847 if (ret) 5848 return ret; 5849 return nfs4_verify_back_channel_attrs(args, session); 5850 } 5851 5852 static int _nfs4_proc_create_session(struct nfs_client *clp, 5853 struct rpc_cred *cred) 5854 { 5855 struct nfs4_session *session = clp->cl_session; 5856 struct nfs41_create_session_args args = { 5857 .client = clp, 5858 .cb_program = NFS4_CALLBACK, 5859 }; 5860 struct nfs41_create_session_res res = { 5861 .client = clp, 5862 }; 5863 struct rpc_message msg = { 5864 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 5865 .rpc_argp = &args, 5866 .rpc_resp = &res, 5867 .rpc_cred = cred, 5868 }; 5869 int status; 5870 5871 nfs4_init_channel_attrs(&args); 5872 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 5873 5874 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5875 5876 if (!status) { 5877 /* Verify the session's negotiated channel_attrs values */ 5878 status = nfs4_verify_channel_attrs(&args, session); 5879 /* Increment the clientid slot sequence id */ 5880 clp->cl_seqid++; 5881 } 5882 5883 return status; 5884 } 5885 5886 /* 5887 * Issues a CREATE_SESSION operation to the server. 5888 * It is the responsibility of the caller to verify the session is 5889 * expired before calling this routine. 5890 */ 5891 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred) 5892 { 5893 int status; 5894 unsigned *ptr; 5895 struct nfs4_session *session = clp->cl_session; 5896 5897 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 5898 5899 status = _nfs4_proc_create_session(clp, cred); 5900 if (status) 5901 goto out; 5902 5903 /* Init or reset the session slot tables */ 5904 status = nfs4_setup_session_slot_tables(session); 5905 dprintk("slot table setup returned %d\n", status); 5906 if (status) 5907 goto out; 5908 5909 ptr = (unsigned *)&session->sess_id.data[0]; 5910 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 5911 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 5912 out: 5913 dprintk("<-- %s\n", __func__); 5914 return status; 5915 } 5916 5917 /* 5918 * Issue the over-the-wire RPC DESTROY_SESSION. 5919 * The caller must serialize access to this routine. 
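 *
 * A rough sketch of the expected teardown ordering on client shutdown
 * (illustrative only; the real callers live in the client setup/teardown
 * code, not in this file):
 *
 *	cred = nfs4_get_exchange_id_cred(clp);
 *	if (clp->cl_session)
 *		nfs4_proc_destroy_session(clp->cl_session, cred);
 *	nfs4_destroy_clientid(clp);
 *	if (cred)
 *		put_rpccred(cred);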
5920 */ 5921 int nfs4_proc_destroy_session(struct nfs4_session *session, 5922 struct rpc_cred *cred) 5923 { 5924 struct rpc_message msg = { 5925 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 5926 .rpc_argp = session, 5927 .rpc_cred = cred, 5928 }; 5929 int status = 0; 5930 5931 dprintk("--> nfs4_proc_destroy_session\n"); 5932 5933 /* session is still being setup */ 5934 if (session->clp->cl_cons_state != NFS_CS_READY) 5935 return status; 5936 5937 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5938 5939 if (status) 5940 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 5941 "Session has been destroyed regardless...\n", status); 5942 5943 dprintk("<-- nfs4_proc_destroy_session\n"); 5944 return status; 5945 } 5946 5947 /* 5948 * Renew the cl_session lease. 5949 */ 5950 struct nfs4_sequence_data { 5951 struct nfs_client *clp; 5952 struct nfs4_sequence_args args; 5953 struct nfs4_sequence_res res; 5954 }; 5955 5956 static void nfs41_sequence_release(void *data) 5957 { 5958 struct nfs4_sequence_data *calldata = data; 5959 struct nfs_client *clp = calldata->clp; 5960 5961 if (atomic_read(&clp->cl_count) > 1) 5962 nfs4_schedule_state_renewal(clp); 5963 nfs_put_client(clp); 5964 kfree(calldata); 5965 } 5966 5967 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 5968 { 5969 switch(task->tk_status) { 5970 case -NFS4ERR_DELAY: 5971 rpc_delay(task, NFS4_POLL_RETRY_MAX); 5972 return -EAGAIN; 5973 default: 5974 nfs4_schedule_lease_recovery(clp); 5975 } 5976 return 0; 5977 } 5978 5979 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 5980 { 5981 struct nfs4_sequence_data *calldata = data; 5982 struct nfs_client *clp = calldata->clp; 5983 5984 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 5985 return; 5986 5987 if (task->tk_status < 0) { 5988 dprintk("%s ERROR %d\n", __func__, task->tk_status); 5989 if (atomic_read(&clp->cl_count) == 1) 5990 goto out; 5991 5992 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 5993 rpc_restart_call_prepare(task); 5994 return; 5995 } 5996 } 5997 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 5998 out: 5999 dprintk("<-- %s\n", __func__); 6000 } 6001 6002 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 6003 { 6004 struct nfs4_sequence_data *calldata = data; 6005 struct nfs_client *clp = calldata->clp; 6006 struct nfs4_sequence_args *args; 6007 struct nfs4_sequence_res *res; 6008 6009 args = task->tk_msg.rpc_argp; 6010 res = task->tk_msg.rpc_resp; 6011 6012 nfs41_setup_sequence(clp->cl_session, args, res, task); 6013 } 6014 6015 static const struct rpc_call_ops nfs41_sequence_ops = { 6016 .rpc_call_done = nfs41_sequence_call_done, 6017 .rpc_call_prepare = nfs41_sequence_prepare, 6018 .rpc_release = nfs41_sequence_release, 6019 }; 6020 6021 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 6022 struct rpc_cred *cred, 6023 bool is_privileged) 6024 { 6025 struct nfs4_sequence_data *calldata; 6026 struct rpc_message msg = { 6027 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 6028 .rpc_cred = cred, 6029 }; 6030 struct rpc_task_setup task_setup_data = { 6031 .rpc_client = clp->cl_rpcclient, 6032 .rpc_message = &msg, 6033 .callback_ops = &nfs41_sequence_ops, 6034 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT, 6035 }; 6036 6037 if (!atomic_inc_not_zero(&clp->cl_count)) 6038 return ERR_PTR(-EIO); 6039 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 6040 if (calldata == NULL) { 6041 
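		/* allocation failed: drop the cl_count reference taken just above */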
nfs_put_client(clp); 6042 return ERR_PTR(-ENOMEM); 6043 } 6044 nfs41_init_sequence(&calldata->args, &calldata->res, 0); 6045 if (is_privileged) 6046 nfs4_set_sequence_privileged(&calldata->args); 6047 msg.rpc_argp = &calldata->args; 6048 msg.rpc_resp = &calldata->res; 6049 calldata->clp = clp; 6050 task_setup_data.callback_data = calldata; 6051 6052 return rpc_run_task(&task_setup_data); 6053 } 6054 6055 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 6056 { 6057 struct rpc_task *task; 6058 int ret = 0; 6059 6060 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 6061 return 0; 6062 task = _nfs41_proc_sequence(clp, cred, false); 6063 if (IS_ERR(task)) 6064 ret = PTR_ERR(task); 6065 else 6066 rpc_put_task_async(task); 6067 dprintk("<-- %s status=%d\n", __func__, ret); 6068 return ret; 6069 } 6070 6071 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 6072 { 6073 struct rpc_task *task; 6074 int ret; 6075 6076 task = _nfs41_proc_sequence(clp, cred, true); 6077 if (IS_ERR(task)) { 6078 ret = PTR_ERR(task); 6079 goto out; 6080 } 6081 ret = rpc_wait_for_completion_task(task); 6082 if (!ret) { 6083 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; 6084 6085 if (task->tk_status == 0) 6086 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 6087 ret = task->tk_status; 6088 } 6089 rpc_put_task(task); 6090 out: 6091 dprintk("<-- %s status=%d\n", __func__, ret); 6092 return ret; 6093 } 6094 6095 struct nfs4_reclaim_complete_data { 6096 struct nfs_client *clp; 6097 struct nfs41_reclaim_complete_args arg; 6098 struct nfs41_reclaim_complete_res res; 6099 }; 6100 6101 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 6102 { 6103 struct nfs4_reclaim_complete_data *calldata = data; 6104 6105 nfs41_setup_sequence(calldata->clp->cl_session, 6106 &calldata->arg.seq_args, 6107 &calldata->res.seq_res, 6108 task); 6109 } 6110 6111 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 6112 { 6113 switch(task->tk_status) { 6114 case 0: 6115 case -NFS4ERR_COMPLETE_ALREADY: 6116 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 6117 break; 6118 case -NFS4ERR_DELAY: 6119 rpc_delay(task, NFS4_POLL_RETRY_MAX); 6120 /* fall through */ 6121 case -NFS4ERR_RETRY_UNCACHED_REP: 6122 return -EAGAIN; 6123 default: 6124 nfs4_schedule_lease_recovery(clp); 6125 } 6126 return 0; 6127 } 6128 6129 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 6130 { 6131 struct nfs4_reclaim_complete_data *calldata = data; 6132 struct nfs_client *clp = calldata->clp; 6133 struct nfs4_sequence_res *res = &calldata->res.seq_res; 6134 6135 dprintk("--> %s\n", __func__); 6136 if (!nfs41_sequence_done(task, res)) 6137 return; 6138 6139 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 6140 rpc_restart_call_prepare(task); 6141 return; 6142 } 6143 dprintk("<-- %s\n", __func__); 6144 } 6145 6146 static void nfs4_free_reclaim_complete_data(void *data) 6147 { 6148 struct nfs4_reclaim_complete_data *calldata = data; 6149 6150 kfree(calldata); 6151 } 6152 6153 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 6154 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 6155 .rpc_call_done = nfs4_reclaim_complete_done, 6156 .rpc_release = nfs4_free_reclaim_complete_data, 6157 }; 6158 6159 /* 6160 * Issue a global reclaim complete. 
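 *
 * "Global" means rca_one_fs is false (arg.one_fs is set to 0 below), i.e.
 * the client has finished reclaiming state for the whole clientid rather
 * than for a single file system.  This routine is wired up as the
 * ->reclaim_complete hook of nfs41_reboot_recovery_ops further down, and
 * the state manager invokes that hook once reboot recovery has finished,
 * roughly (a sketch of the caller, not code from this file):
 *
 *	const struct nfs4_state_recovery_ops *ops =
 *		clp->cl_mvops->reboot_recovery_ops;
 *
 *	if (ops->reclaim_complete)
 *		(void)ops->reclaim_complete(clp);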
6161 */ 6162 static int nfs41_proc_reclaim_complete(struct nfs_client *clp) 6163 { 6164 struct nfs4_reclaim_complete_data *calldata; 6165 struct rpc_task *task; 6166 struct rpc_message msg = { 6167 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 6168 }; 6169 struct rpc_task_setup task_setup_data = { 6170 .rpc_client = clp->cl_rpcclient, 6171 .rpc_message = &msg, 6172 .callback_ops = &nfs4_reclaim_complete_call_ops, 6173 .flags = RPC_TASK_ASYNC, 6174 }; 6175 int status = -ENOMEM; 6176 6177 dprintk("--> %s\n", __func__); 6178 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 6179 if (calldata == NULL) 6180 goto out; 6181 calldata->clp = clp; 6182 calldata->arg.one_fs = 0; 6183 6184 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0); 6185 nfs4_set_sequence_privileged(&calldata->arg.seq_args); 6186 msg.rpc_argp = &calldata->arg; 6187 msg.rpc_resp = &calldata->res; 6188 task_setup_data.callback_data = calldata; 6189 task = rpc_run_task(&task_setup_data); 6190 if (IS_ERR(task)) { 6191 status = PTR_ERR(task); 6192 goto out; 6193 } 6194 status = nfs4_wait_for_completion_rpc_task(task); 6195 if (status == 0) 6196 status = task->tk_status; 6197 rpc_put_task(task); 6198 return 0; 6199 out: 6200 dprintk("<-- %s status=%d\n", __func__, status); 6201 return status; 6202 } 6203 6204 static void 6205 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 6206 { 6207 struct nfs4_layoutget *lgp = calldata; 6208 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 6209 struct nfs4_session *session = nfs4_get_session(server); 6210 6211 dprintk("--> %s\n", __func__); 6212 /* Note there is a race here, where a CB_LAYOUTRECALL can come in 6213 * right now covering the LAYOUTGET we are about to send. 6214 * However, that is not so catastrophic, and there seems 6215 * to be no way to prevent it completely. 6216 */ 6217 if (nfs41_setup_sequence(session, &lgp->args.seq_args, 6218 &lgp->res.seq_res, task)) 6219 return; 6220 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid, 6221 NFS_I(lgp->args.inode)->layout, 6222 lgp->args.ctx->state)) { 6223 rpc_exit(task, NFS4_OK); 6224 } 6225 } 6226 6227 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 6228 { 6229 struct nfs4_layoutget *lgp = calldata; 6230 struct inode *inode = lgp->args.inode; 6231 struct nfs_server *server = NFS_SERVER(inode); 6232 struct pnfs_layout_hdr *lo; 6233 struct nfs4_state *state = NULL; 6234 unsigned long timeo, giveup; 6235 6236 dprintk("--> %s\n", __func__); 6237 6238 if (!nfs41_sequence_done(task, &lgp->res.seq_res)) 6239 goto out; 6240 6241 switch (task->tk_status) { 6242 case 0: 6243 goto out; 6244 case -NFS4ERR_LAYOUTTRYLATER: 6245 case -NFS4ERR_RECALLCONFLICT: 6246 timeo = rpc_get_timeout(task->tk_client); 6247 giveup = lgp->args.timestamp + timeo; 6248 if (time_after(giveup, jiffies)) 6249 task->tk_status = -NFS4ERR_DELAY; 6250 break; 6251 case -NFS4ERR_EXPIRED: 6252 case -NFS4ERR_BAD_STATEID: 6253 spin_lock(&inode->i_lock); 6254 lo = NFS_I(inode)->layout; 6255 if (!lo || list_empty(&lo->plh_segs)) { 6256 spin_unlock(&inode->i_lock); 6257 /* If the open stateid was bad, then recover it. */ 6258 state = lgp->args.ctx->state; 6259 } else { 6260 LIST_HEAD(head); 6261 6262 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL); 6263 spin_unlock(&inode->i_lock); 6264 /* Mark the bad layout state as invalid, then 6265 * retry using the open stateid.
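			 * The segments collected in 'head' under i_lock are
			 * freed below, once the lock has been dropped.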
*/ 6266 pnfs_free_lseg_list(&head); 6267 } 6268 } 6269 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) 6270 rpc_restart_call_prepare(task); 6271 out: 6272 dprintk("<-- %s\n", __func__); 6273 } 6274 6275 static size_t max_response_pages(struct nfs_server *server) 6276 { 6277 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 6278 return nfs_page_array_len(0, max_resp_sz); 6279 } 6280 6281 static void nfs4_free_pages(struct page **pages, size_t size) 6282 { 6283 int i; 6284 6285 if (!pages) 6286 return; 6287 6288 for (i = 0; i < size; i++) { 6289 if (!pages[i]) 6290 break; 6291 __free_page(pages[i]); 6292 } 6293 kfree(pages); 6294 } 6295 6296 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) 6297 { 6298 struct page **pages; 6299 int i; 6300 6301 pages = kcalloc(size, sizeof(struct page *), gfp_flags); 6302 if (!pages) { 6303 dprintk("%s: can't alloc array of %zu pages\n", __func__, size); 6304 return NULL; 6305 } 6306 6307 for (i = 0; i < size; i++) { 6308 pages[i] = alloc_page(gfp_flags); 6309 if (!pages[i]) { 6310 dprintk("%s: failed to allocate page\n", __func__); 6311 nfs4_free_pages(pages, size); 6312 return NULL; 6313 } 6314 } 6315 6316 return pages; 6317 } 6318 6319 static void nfs4_layoutget_release(void *calldata) 6320 { 6321 struct nfs4_layoutget *lgp = calldata; 6322 struct inode *inode = lgp->args.inode; 6323 struct nfs_server *server = NFS_SERVER(inode); 6324 size_t max_pages = max_response_pages(server); 6325 6326 dprintk("--> %s\n", __func__); 6327 nfs4_free_pages(lgp->args.layout.pages, max_pages); 6328 pnfs_put_layout_hdr(NFS_I(inode)->layout); 6329 put_nfs_open_context(lgp->args.ctx); 6330 kfree(calldata); 6331 dprintk("<-- %s\n", __func__); 6332 } 6333 6334 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 6335 .rpc_call_prepare = nfs4_layoutget_prepare, 6336 .rpc_call_done = nfs4_layoutget_done, 6337 .rpc_release = nfs4_layoutget_release, 6338 }; 6339 6340 struct pnfs_layout_segment * 6341 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) 6342 { 6343 struct inode *inode = lgp->args.inode; 6344 struct nfs_server *server = NFS_SERVER(inode); 6345 size_t max_pages = max_response_pages(server); 6346 struct rpc_task *task; 6347 struct rpc_message msg = { 6348 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 6349 .rpc_argp = &lgp->args, 6350 .rpc_resp = &lgp->res, 6351 }; 6352 struct rpc_task_setup task_setup_data = { 6353 .rpc_client = server->client, 6354 .rpc_message = &msg, 6355 .callback_ops = &nfs4_layoutget_call_ops, 6356 .callback_data = lgp, 6357 .flags = RPC_TASK_ASYNC, 6358 }; 6359 struct pnfs_layout_segment *lseg = NULL; 6360 int status = 0; 6361 6362 dprintk("--> %s\n", __func__); 6363 6364 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); 6365 if (!lgp->args.layout.pages) { 6366 nfs4_layoutget_release(lgp); 6367 return ERR_PTR(-ENOMEM); 6368 } 6369 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 6370 lgp->args.timestamp = jiffies; 6371 6372 lgp->res.layoutp = &lgp->args.layout; 6373 lgp->res.seq_res.sr_slot = NULL; 6374 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 6375 6376 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ 6377 pnfs_get_layout_hdr(NFS_I(inode)->layout); 6378 6379 task = rpc_run_task(&task_setup_data); 6380 if (IS_ERR(task)) 6381 return ERR_CAST(task); 6382 status = nfs4_wait_for_completion_rpc_task(task); 6383 if (status == 0) 6384 status = task->tk_status; 6385 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit 
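	 * before the request was ever transmitted, so there is no layout body
	 * to hand to pnfs_layout_process().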
*/ 6386 if (status == 0 && lgp->res.layoutp->len) 6387 lseg = pnfs_layout_process(lgp); 6388 rpc_put_task(task); 6389 dprintk("<-- %s status=%d\n", __func__, status); 6390 if (status) 6391 return ERR_PTR(status); 6392 return lseg; 6393 } 6394 6395 static void 6396 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 6397 { 6398 struct nfs4_layoutreturn *lrp = calldata; 6399 6400 dprintk("--> %s\n", __func__); 6401 nfs41_setup_sequence(lrp->clp->cl_session, 6402 &lrp->args.seq_args, 6403 &lrp->res.seq_res, 6404 task); 6405 } 6406 6407 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 6408 { 6409 struct nfs4_layoutreturn *lrp = calldata; 6410 struct nfs_server *server; 6411 6412 dprintk("--> %s\n", __func__); 6413 6414 if (!nfs41_sequence_done(task, &lrp->res.seq_res)) 6415 return; 6416 6417 server = NFS_SERVER(lrp->args.inode); 6418 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { 6419 rpc_restart_call_prepare(task); 6420 return; 6421 } 6422 dprintk("<-- %s\n", __func__); 6423 } 6424 6425 static void nfs4_layoutreturn_release(void *calldata) 6426 { 6427 struct nfs4_layoutreturn *lrp = calldata; 6428 struct pnfs_layout_hdr *lo = lrp->args.layout; 6429 6430 dprintk("--> %s\n", __func__); 6431 spin_lock(&lo->plh_inode->i_lock); 6432 if (lrp->res.lrs_present) 6433 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 6434 lo->plh_block_lgets--; 6435 spin_unlock(&lo->plh_inode->i_lock); 6436 pnfs_put_layout_hdr(lrp->args.layout); 6437 kfree(calldata); 6438 dprintk("<-- %s\n", __func__); 6439 } 6440 6441 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 6442 .rpc_call_prepare = nfs4_layoutreturn_prepare, 6443 .rpc_call_done = nfs4_layoutreturn_done, 6444 .rpc_release = nfs4_layoutreturn_release, 6445 }; 6446 6447 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp) 6448 { 6449 struct rpc_task *task; 6450 struct rpc_message msg = { 6451 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 6452 .rpc_argp = &lrp->args, 6453 .rpc_resp = &lrp->res, 6454 }; 6455 struct rpc_task_setup task_setup_data = { 6456 .rpc_client = lrp->clp->cl_rpcclient, 6457 .rpc_message = &msg, 6458 .callback_ops = &nfs4_layoutreturn_call_ops, 6459 .callback_data = lrp, 6460 }; 6461 int status; 6462 6463 dprintk("--> %s\n", __func__); 6464 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); 6465 task = rpc_run_task(&task_setup_data); 6466 if (IS_ERR(task)) 6467 return PTR_ERR(task); 6468 status = task->tk_status; 6469 dprintk("<-- %s status=%d\n", __func__, status); 6470 rpc_put_task(task); 6471 return status; 6472 } 6473 6474 /* 6475 * Retrieve the list of Data Server devices from the MDS. 
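 *
 * nfs4_proc_getdevicelist() below is exported for pNFS layout drivers.  A
 * driver would call it against the file handle of the MDS mount, along
 * these lines (hypothetical caller: "mntfh" and the dev_id[] walk are
 * assumptions; only num_devs is referenced in this file):
 *
 *	struct pnfs_devicelist *devlist;
 *
 *	devlist = kzalloc(sizeof(*devlist), GFP_KERNEL);
 *	if (devlist && nfs4_proc_getdevicelist(server, mntfh, devlist) == 0)
 *		... look up each of devlist->dev_id[0 .. num_devs - 1] ...
 *	kfree(devlist);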
6476 */ 6477 static int _nfs4_getdevicelist(struct nfs_server *server, 6478 const struct nfs_fh *fh, 6479 struct pnfs_devicelist *devlist) 6480 { 6481 struct nfs4_getdevicelist_args args = { 6482 .fh = fh, 6483 .layoutclass = server->pnfs_curr_ld->id, 6484 }; 6485 struct nfs4_getdevicelist_res res = { 6486 .devlist = devlist, 6487 }; 6488 struct rpc_message msg = { 6489 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST], 6490 .rpc_argp = &args, 6491 .rpc_resp = &res, 6492 }; 6493 int status; 6494 6495 dprintk("--> %s\n", __func__); 6496 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 6497 &res.seq_res, 0); 6498 dprintk("<-- %s status=%d\n", __func__, status); 6499 return status; 6500 } 6501 6502 int nfs4_proc_getdevicelist(struct nfs_server *server, 6503 const struct nfs_fh *fh, 6504 struct pnfs_devicelist *devlist) 6505 { 6506 struct nfs4_exception exception = { }; 6507 int err; 6508 6509 do { 6510 err = nfs4_handle_exception(server, 6511 _nfs4_getdevicelist(server, fh, devlist), 6512 &exception); 6513 } while (exception.retry); 6514 6515 dprintk("%s: err=%d, num_devs=%u\n", __func__, 6516 err, devlist->num_devs); 6517 6518 return err; 6519 } 6520 EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist); 6521 6522 static int 6523 _nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev) 6524 { 6525 struct nfs4_getdeviceinfo_args args = { 6526 .pdev = pdev, 6527 }; 6528 struct nfs4_getdeviceinfo_res res = { 6529 .pdev = pdev, 6530 }; 6531 struct rpc_message msg = { 6532 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 6533 .rpc_argp = &args, 6534 .rpc_resp = &res, 6535 }; 6536 int status; 6537 6538 dprintk("--> %s\n", __func__); 6539 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 6540 dprintk("<-- %s status=%d\n", __func__, status); 6541 6542 return status; 6543 } 6544 6545 int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev) 6546 { 6547 struct nfs4_exception exception = { }; 6548 int err; 6549 6550 do { 6551 err = nfs4_handle_exception(server, 6552 _nfs4_proc_getdeviceinfo(server, pdev), 6553 &exception); 6554 } while (exception.retry); 6555 return err; 6556 } 6557 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 6558 6559 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 6560 { 6561 struct nfs4_layoutcommit_data *data = calldata; 6562 struct nfs_server *server = NFS_SERVER(data->args.inode); 6563 struct nfs4_session *session = nfs4_get_session(server); 6564 6565 nfs41_setup_sequence(session, 6566 &data->args.seq_args, 6567 &data->res.seq_res, 6568 task); 6569 } 6570 6571 static void 6572 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 6573 { 6574 struct nfs4_layoutcommit_data *data = calldata; 6575 struct nfs_server *server = NFS_SERVER(data->args.inode); 6576 6577 if (!nfs41_sequence_done(task, &data->res.seq_res)) 6578 return; 6579 6580 switch (task->tk_status) { /* Just ignore these failures */ 6581 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 6582 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 6583 case -NFS4ERR_BADLAYOUT: /* no layout */ 6584 case -NFS4ERR_GRACE: /* loca_reclaim always false */ 6585 task->tk_status = 0; 6586 break; 6587 case 0: 6588 nfs_post_op_update_inode_force_wcc(data->args.inode, 6589 data->res.fattr); 6590 break; 6591 default: 6592 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { 6593 rpc_restart_call_prepare(task); 6594 return; 6595 } 6596 } 6597 } 6598 6599 static void
nfs4_layoutcommit_release(void *calldata) 6600 { 6601 struct nfs4_layoutcommit_data *data = calldata; 6602 6603 pnfs_cleanup_layoutcommit(data); 6604 put_rpccred(data->cred); 6605 kfree(data); 6606 } 6607 6608 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 6609 .rpc_call_prepare = nfs4_layoutcommit_prepare, 6610 .rpc_call_done = nfs4_layoutcommit_done, 6611 .rpc_release = nfs4_layoutcommit_release, 6612 }; 6613 6614 int 6615 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 6616 { 6617 struct rpc_message msg = { 6618 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 6619 .rpc_argp = &data->args, 6620 .rpc_resp = &data->res, 6621 .rpc_cred = data->cred, 6622 }; 6623 struct rpc_task_setup task_setup_data = { 6624 .task = &data->task, 6625 .rpc_client = NFS_CLIENT(data->args.inode), 6626 .rpc_message = &msg, 6627 .callback_ops = &nfs4_layoutcommit_ops, 6628 .callback_data = data, 6629 .flags = RPC_TASK_ASYNC, 6630 }; 6631 struct rpc_task *task; 6632 int status = 0; 6633 6634 dprintk("NFS: %4d initiating layoutcommit call. sync %d " 6635 "lbw: %llu inode %lu\n", 6636 data->task.tk_pid, sync, 6637 data->args.lastbytewritten, 6638 data->args.inode->i_ino); 6639 6640 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 6641 task = rpc_run_task(&task_setup_data); 6642 if (IS_ERR(task)) 6643 return PTR_ERR(task); 6644 if (sync == false) 6645 goto out; 6646 status = nfs4_wait_for_completion_rpc_task(task); 6647 if (status != 0) 6648 goto out; 6649 status = task->tk_status; 6650 out: 6651 dprintk("%s: status %d\n", __func__, status); 6652 rpc_put_task(task); 6653 return status; 6654 } 6655 6656 static int 6657 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 6658 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 6659 { 6660 struct nfs41_secinfo_no_name_args args = { 6661 .style = SECINFO_STYLE_CURRENT_FH, 6662 }; 6663 struct nfs4_secinfo_res res = { 6664 .flavors = flavors, 6665 }; 6666 struct rpc_message msg = { 6667 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 6668 .rpc_argp = &args, 6669 .rpc_resp = &res, 6670 }; 6671 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 6672 } 6673 6674 static int 6675 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 6676 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 6677 { 6678 struct nfs4_exception exception = { }; 6679 int err; 6680 do { 6681 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 6682 switch (err) { 6683 case 0: 6684 case -NFS4ERR_WRONGSEC: 6685 case -NFS4ERR_NOTSUPP: 6686 goto out; 6687 default: 6688 err = nfs4_handle_exception(server, err, &exception); 6689 } 6690 } while (exception.retry); 6691 out: 6692 return err; 6693 } 6694 6695 static int 6696 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 6697 struct nfs_fsinfo *info) 6698 { 6699 int err; 6700 struct page *page; 6701 rpc_authflavor_t flavor; 6702 struct nfs4_secinfo_flavors *flavors; 6703 6704 page = alloc_page(GFP_KERNEL); 6705 if (!page) { 6706 err = -ENOMEM; 6707 goto out; 6708 } 6709 6710 flavors = page_address(page); 6711 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 6712 6713 /* 6714 * Fall back on "guess and check" method if 6715 * the server doesn't support SECINFO_NO_NAME 6716 */ 6717 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) { 6718 err = nfs4_find_root_sec(server, fhandle, info); 6719 goto out_freepage; 6720 } 6721 if 
(err) 6722 goto out_freepage; 6723 6724 flavor = nfs_find_best_sec(flavors); 6725 if (err == 0) 6726 err = nfs4_lookup_root_sec(server, fhandle, info, flavor); 6727 6728 out_freepage: 6729 put_page(page); 6730 if (err == -EACCES) 6731 return -EPERM; 6732 out: 6733 return err; 6734 } 6735 6736 static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6737 { 6738 int status; 6739 struct nfs41_test_stateid_args args = { 6740 .stateid = stateid, 6741 }; 6742 struct nfs41_test_stateid_res res; 6743 struct rpc_message msg = { 6744 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 6745 .rpc_argp = &args, 6746 .rpc_resp = &res, 6747 }; 6748 6749 dprintk("NFS call test_stateid %p\n", stateid); 6750 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); 6751 nfs4_set_sequence_privileged(&args.seq_args); 6752 status = nfs4_call_sync_sequence(server->client, server, &msg, 6753 &args.seq_args, &res.seq_res); 6754 if (status != NFS_OK) { 6755 dprintk("NFS reply test_stateid: failed, %d\n", status); 6756 return status; 6757 } 6758 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 6759 return -res.status; 6760 } 6761 6762 /** 6763 * nfs41_test_stateid - perform a TEST_STATEID operation 6764 * 6765 * @server: server / transport on which to perform the operation 6766 * @stateid: state ID to test 6767 * 6768 * Returns NFS_OK if the server recognizes that "stateid" is valid. 6769 * Otherwise a negative NFS4ERR value is returned if the operation 6770 * failed or the state ID is not currently valid. 6771 */ 6772 static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6773 { 6774 struct nfs4_exception exception = { }; 6775 int err; 6776 do { 6777 err = _nfs41_test_stateid(server, stateid); 6778 if (err != -NFS4ERR_DELAY) 6779 break; 6780 nfs4_handle_exception(server, err, &exception); 6781 } while (exception.retry); 6782 return err; 6783 } 6784 6785 struct nfs_free_stateid_data { 6786 struct nfs_server *server; 6787 struct nfs41_free_stateid_args args; 6788 struct nfs41_free_stateid_res res; 6789 }; 6790 6791 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata) 6792 { 6793 struct nfs_free_stateid_data *data = calldata; 6794 nfs41_setup_sequence(nfs4_get_session(data->server), 6795 &data->args.seq_args, 6796 &data->res.seq_res, 6797 task); 6798 } 6799 6800 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata) 6801 { 6802 struct nfs_free_stateid_data *data = calldata; 6803 6804 nfs41_sequence_done(task, &data->res.seq_res); 6805 6806 switch (task->tk_status) { 6807 case -NFS4ERR_DELAY: 6808 if (nfs4_async_handle_error(task, data->server, NULL) == -EAGAIN) 6809 rpc_restart_call_prepare(task); 6810 } 6811 } 6812 6813 static void nfs41_free_stateid_release(void *calldata) 6814 { 6815 kfree(calldata); 6816 } 6817 6818 const struct rpc_call_ops nfs41_free_stateid_ops = { 6819 .rpc_call_prepare = nfs41_free_stateid_prepare, 6820 .rpc_call_done = nfs41_free_stateid_done, 6821 .rpc_release = nfs41_free_stateid_release, 6822 }; 6823 6824 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server, 6825 nfs4_stateid *stateid, 6826 bool privileged) 6827 { 6828 struct rpc_message msg = { 6829 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 6830 }; 6831 struct rpc_task_setup task_setup = { 6832 .rpc_client = server->client, 6833 .rpc_message = &msg, 6834 .callback_ops = &nfs41_free_stateid_ops, 6835 .flags = RPC_TASK_ASYNC, 6836 }; 6837 struct nfs_free_stateid_data *data; 6838 6839 dprintk("NFS 
call free_stateid %p\n", stateid); 6840 data = kmalloc(sizeof(*data), GFP_NOFS); 6841 if (!data) 6842 return ERR_PTR(-ENOMEM); 6843 data->server = server; 6844 nfs4_stateid_copy(&data->args.stateid, stateid); 6845 6846 task_setup.callback_data = data; 6847 6848 msg.rpc_argp = &data->args; 6849 msg.rpc_resp = &data->res; 6850 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 6851 if (privileged) 6852 nfs4_set_sequence_privileged(&data->args.seq_args); 6853 6854 return rpc_run_task(&task_setup); 6855 } 6856 6857 /** 6858 * nfs41_free_stateid - perform a FREE_STATEID operation 6859 * 6860 * @server: server / transport on which to perform the operation 6861 * @stateid: state ID to release 6862 * 6863 * Returns NFS_OK if the server freed "stateid". Otherwise a 6864 * negative NFS4ERR value is returned. 6865 */ 6866 static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6867 { 6868 struct rpc_task *task; 6869 int ret; 6870 6871 task = _nfs41_free_stateid(server, stateid, true); 6872 if (IS_ERR(task)) 6873 return PTR_ERR(task); 6874 ret = rpc_wait_for_completion_task(task); 6875 if (!ret) 6876 ret = task->tk_status; 6877 rpc_put_task(task); 6878 return ret; 6879 } 6880 6881 static int nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp) 6882 { 6883 struct rpc_task *task; 6884 6885 task = _nfs41_free_stateid(server, &lsp->ls_stateid, false); 6886 nfs4_free_lock_state(server, lsp); 6887 if (IS_ERR(task)) 6888 return PTR_ERR(task); 6889 rpc_put_task(task); 6890 return 0; 6891 } 6892 6893 static bool nfs41_match_stateid(const nfs4_stateid *s1, 6894 const nfs4_stateid *s2) 6895 { 6896 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 6897 return false; 6898 6899 if (s1->seqid == s2->seqid) 6900 return true; 6901 if (s1->seqid == 0 || s2->seqid == 0) 6902 return true; 6903 6904 return false; 6905 } 6906 6907 #endif /* CONFIG_NFS_V4_1 */ 6908 6909 static bool nfs4_match_stateid(const nfs4_stateid *s1, 6910 const nfs4_stateid *s2) 6911 { 6912 return nfs4_stateid_match(s1, s2); 6913 } 6914 6915 6916 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 6917 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 6918 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 6919 .recover_open = nfs4_open_reclaim, 6920 .recover_lock = nfs4_lock_reclaim, 6921 .establish_clid = nfs4_init_clientid, 6922 .get_clid_cred = nfs4_get_setclientid_cred, 6923 .detect_trunking = nfs40_discover_server_trunking, 6924 }; 6925 6926 #if defined(CONFIG_NFS_V4_1) 6927 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 6928 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 6929 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 6930 .recover_open = nfs4_open_reclaim, 6931 .recover_lock = nfs4_lock_reclaim, 6932 .establish_clid = nfs41_init_clientid, 6933 .get_clid_cred = nfs4_get_exchange_id_cred, 6934 .reclaim_complete = nfs41_proc_reclaim_complete, 6935 .detect_trunking = nfs41_discover_server_trunking, 6936 }; 6937 #endif /* CONFIG_NFS_V4_1 */ 6938 6939 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 6940 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 6941 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 6942 .recover_open = nfs4_open_expired, 6943 .recover_lock = nfs4_lock_expired, 6944 .establish_clid = nfs4_init_clientid, 6945 .get_clid_cred = nfs4_get_setclientid_cred, 6946 }; 6947 6948 #if defined(CONFIG_NFS_V4_1) 6949 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 6950 .owner_flag_bit = 
NFS_OWNER_RECLAIM_NOGRACE, 6951 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 6952 .recover_open = nfs41_open_expired, 6953 .recover_lock = nfs41_lock_expired, 6954 .establish_clid = nfs41_init_clientid, 6955 .get_clid_cred = nfs4_get_exchange_id_cred, 6956 }; 6957 #endif /* CONFIG_NFS_V4_1 */ 6958 6959 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 6960 .sched_state_renewal = nfs4_proc_async_renew, 6961 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, 6962 .renew_lease = nfs4_proc_renew, 6963 }; 6964 6965 #if defined(CONFIG_NFS_V4_1) 6966 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 6967 .sched_state_renewal = nfs41_proc_async_sequence, 6968 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, 6969 .renew_lease = nfs4_proc_sequence, 6970 }; 6971 #endif 6972 6973 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 6974 .minor_version = 0, 6975 .init_caps = NFS_CAP_READDIRPLUS 6976 | NFS_CAP_ATOMIC_OPEN 6977 | NFS_CAP_CHANGE_ATTR 6978 | NFS_CAP_POSIX_LOCK, 6979 .call_sync = _nfs4_call_sync, 6980 .match_stateid = nfs4_match_stateid, 6981 .find_root_sec = nfs4_find_root_sec, 6982 .free_lock_state = nfs4_release_lockowner, 6983 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 6984 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 6985 .state_renewal_ops = &nfs40_state_renewal_ops, 6986 }; 6987 6988 #if defined(CONFIG_NFS_V4_1) 6989 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 6990 .minor_version = 1, 6991 .init_caps = NFS_CAP_READDIRPLUS 6992 | NFS_CAP_ATOMIC_OPEN 6993 | NFS_CAP_CHANGE_ATTR 6994 | NFS_CAP_POSIX_LOCK 6995 | NFS_CAP_STATEID_NFSV41 6996 | NFS_CAP_ATOMIC_OPEN_V1, 6997 .call_sync = nfs4_call_sync_sequence, 6998 .match_stateid = nfs41_match_stateid, 6999 .find_root_sec = nfs41_find_root_sec, 7000 .free_lock_state = nfs41_free_lock_state, 7001 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 7002 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 7003 .state_renewal_ops = &nfs41_state_renewal_ops, 7004 }; 7005 #endif 7006 7007 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 7008 [0] = &nfs_v4_0_minor_ops, 7009 #if defined(CONFIG_NFS_V4_1) 7010 [1] = &nfs_v4_1_minor_ops, 7011 #endif 7012 }; 7013 7014 const struct inode_operations nfs4_dir_inode_operations = { 7015 .create = nfs_create, 7016 .lookup = nfs_lookup, 7017 .atomic_open = nfs_atomic_open, 7018 .link = nfs_link, 7019 .unlink = nfs_unlink, 7020 .symlink = nfs_symlink, 7021 .mkdir = nfs_mkdir, 7022 .rmdir = nfs_rmdir, 7023 .mknod = nfs_mknod, 7024 .rename = nfs_rename, 7025 .permission = nfs_permission, 7026 .getattr = nfs_getattr, 7027 .setattr = nfs_setattr, 7028 .getxattr = generic_getxattr, 7029 .setxattr = generic_setxattr, 7030 .listxattr = generic_listxattr, 7031 .removexattr = generic_removexattr, 7032 }; 7033 7034 static const struct inode_operations nfs4_file_inode_operations = { 7035 .permission = nfs_permission, 7036 .getattr = nfs_getattr, 7037 .setattr = nfs_setattr, 7038 .getxattr = generic_getxattr, 7039 .setxattr = generic_setxattr, 7040 .listxattr = generic_listxattr, 7041 .removexattr = generic_removexattr, 7042 }; 7043 7044 const struct nfs_rpc_ops nfs_v4_clientops = { 7045 .version = 4, /* protocol version */ 7046 .dentry_ops = &nfs4_dentry_operations, 7047 .dir_inode_ops = &nfs4_dir_inode_operations, 7048 .file_inode_ops = &nfs4_file_inode_operations, 7049 .file_ops = &nfs4_file_operations, 7050 .getroot = nfs4_proc_get_root, 7051 .submount = nfs4_submount, 7052 .try_mount 
= nfs4_try_mount, 7053 .getattr = nfs4_proc_getattr, 7054 .setattr = nfs4_proc_setattr, 7055 .lookup = nfs4_proc_lookup, 7056 .access = nfs4_proc_access, 7057 .readlink = nfs4_proc_readlink, 7058 .create = nfs4_proc_create, 7059 .remove = nfs4_proc_remove, 7060 .unlink_setup = nfs4_proc_unlink_setup, 7061 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 7062 .unlink_done = nfs4_proc_unlink_done, 7063 .rename = nfs4_proc_rename, 7064 .rename_setup = nfs4_proc_rename_setup, 7065 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 7066 .rename_done = nfs4_proc_rename_done, 7067 .link = nfs4_proc_link, 7068 .symlink = nfs4_proc_symlink, 7069 .mkdir = nfs4_proc_mkdir, 7070 .rmdir = nfs4_proc_remove, 7071 .readdir = nfs4_proc_readdir, 7072 .mknod = nfs4_proc_mknod, 7073 .statfs = nfs4_proc_statfs, 7074 .fsinfo = nfs4_proc_fsinfo, 7075 .pathconf = nfs4_proc_pathconf, 7076 .set_capabilities = nfs4_server_capabilities, 7077 .decode_dirent = nfs4_decode_dirent, 7078 .read_setup = nfs4_proc_read_setup, 7079 .read_pageio_init = pnfs_pageio_init_read, 7080 .read_rpc_prepare = nfs4_proc_read_rpc_prepare, 7081 .read_done = nfs4_read_done, 7082 .write_setup = nfs4_proc_write_setup, 7083 .write_pageio_init = pnfs_pageio_init_write, 7084 .write_rpc_prepare = nfs4_proc_write_rpc_prepare, 7085 .write_done = nfs4_write_done, 7086 .commit_setup = nfs4_proc_commit_setup, 7087 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 7088 .commit_done = nfs4_commit_done, 7089 .lock = nfs4_proc_lock, 7090 .clear_acl_cache = nfs4_zap_acl_attr, 7091 .close_context = nfs4_close_context, 7092 .open_context = nfs4_atomic_open, 7093 .have_delegation = nfs4_have_delegation, 7094 .return_delegation = nfs4_inode_return_delegation, 7095 .alloc_client = nfs4_alloc_client, 7096 .init_client = nfs4_init_client, 7097 .free_client = nfs4_free_client, 7098 .create_server = nfs4_create_server, 7099 .clone_server = nfs_clone_server, 7100 }; 7101 7102 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 7103 .prefix = XATTR_NAME_NFSV4_ACL, 7104 .list = nfs4_xattr_list_nfs4_acl, 7105 .get = nfs4_xattr_get_nfs4_acl, 7106 .set = nfs4_xattr_set_nfs4_acl, 7107 }; 7108 7109 const struct xattr_handler *nfs4_xattr_handlers[] = { 7110 &nfs4_xattr_nfs4_acl_handler, 7111 NULL 7112 }; 7113 7114 /* 7115 * Local variables: 7116 * c-basic-offset: 8 7117 * End: 7118 */ 7119
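/*
 * How the nfs_v4_minor_ops[] table above is consumed: the client setup code
 * is assumed to index it by the mount's negotiated minor version (a sketch
 * under that assumption; only the cl_mvops dereferences are visible in this
 * file):
 *
 *	if (minorversion >= ARRAY_SIZE(nfs_v4_minor_ops) ||
 *	    nfs_v4_minor_ops[minorversion] == NULL)
 *		return -EPROTONOSUPPORT;
 *	clp->cl_mvops = nfs_v4_minor_ops[minorversion];
 *
 * Everything above that differs between NFSv4.0 and NFSv4.1 (synchronous
 * call wrappers, stateid matching, root security negotiation, lock state
 * release, state recovery and lease renewal) is then reached through
 * clp->cl_mvops rather than through explicit version checks.
 */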