/*
 *  fs/nfs/nfs4proc.c
 *
 *  Client-side procedure declarations for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *  Andy Adamson   <andros@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/nfs_idmap.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"

#define NFSDBG_FACILITY		NFSDBG_PROC

#define NFS4_POLL_RETRY_MIN	(HZ/10)
#define NFS4_POLL_RETRY_MAX	(15*HZ)

#define NFS4_MAX_LOOP_ON_RECOVER (10)

struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
			    struct nfs_fattr *fattr, struct iattr *sattr,
			    struct nfs4_state *state);
#ifdef CONFIG_NFS_V4_1
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
#endif
/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
	if (err >= -1000)
		return err;
	switch (err) {
	case -NFS4ERR_RESOURCE:
		return -EREMOTEIO;
	case -NFS4ERR_WRONGSEC:
		return -EPERM;
	case -NFS4ERR_BADOWNER:
	case -NFS4ERR_BADNAME:
		return -EINVAL;
	case -NFS4ERR_SHARE_DENIED:
		return -EACCES;
	case -NFS4ERR_MINOR_VERS_MISMATCH:
		return -EPROTONOSUPPORT;
	case -NFS4ERR_ACCESS:
		return -EACCES;
	default:
		dprintk("%s could not handle NFSv4 error %d\n",
				__func__, -err);
		break;
	}
	return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY,
	FATTR4_WORD2_MDSTHRESHOLD
};

static const u32 nfs4_open_noattr_bitmap[3] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[2] = {
	FATTR4_WORD0_FILES_AVAIL
	| FATTR4_WORD0_FILES_FREE
	| FATTR4_WORD0_FILES_TOTAL,
	FATTR4_WORD1_SPACE_AVAIL
	| FATTR4_WORD1_SPACE_FREE
	| FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[2] = {
	FATTR4_WORD0_MAXLINK
	| FATTR4_WORD0_MAXNAME,
	0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
			| FATTR4_WORD0_MAXREAD
			| FATTR4_WORD0_MAXWRITE
			| FATTR4_WORD0_LEASE_TIME,
			FATTR4_WORD1_TIME_DELTA
			| FATTR4_WORD1_FS_LAYOUT_TYPES,
			FATTR4_WORD2_LAYOUT_BLKSIZE
};

const u32 nfs4_fs_locations_bitmap[2] = {
	FATTR4_WORD0_TYPE
	| FATTR4_WORD0_CHANGE
	| FATTR4_WORD0_SIZE
	| FATTR4_WORD0_FSID
	| FATTR4_WORD0_FILEID
	| FATTR4_WORD0_FS_LOCATIONS,
	FATTR4_WORD1_MODE
	| FATTR4_WORD1_NUMLINKS
	| FATTR4_WORD1_OWNER
	| FATTR4_WORD1_OWNER_GROUP
	| FATTR4_WORD1_RAWDEV
	| FATTR4_WORD1_SPACE_USED
	| FATTR4_WORD1_TIME_ACCESS
	| FATTR4_WORD1_TIME_METADATA
	| FATTR4_WORD1_TIME_MODIFY
	| FATTR4_WORD1_MOUNTED_ON_FILEID
};

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
		struct nfs4_readdir_arg *readdir)
{
	__be32 *start, *p;

	BUG_ON(readdir->count < 80);
	if (cookie > 2) {
		readdir->cookie = cookie;
		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
		return;
	}

	readdir->cookie = 0;
	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
	if (cookie == 2)
		return;

	/*
	 * NFSv4 servers do not return entries for '.' and '..'
	 * Therefore, we fake these entries here.  We let '.'
	 * have cookie 0 and '..' have cookie 1.  Note that
	 * when talking to the server, we always send cookie 0
	 * instead of 1 or 2.
	 */
	start = p = kmap_atomic(*readdir->pages);

	if (cookie == 0) {
		*p++ = xdr_one;				/* next */
		*p++ = xdr_zero;			/* cookie, first word */
		*p++ = xdr_one;				/* cookie, second word */
		*p++ = xdr_one;				/* entry len */
		memcpy(p, ".\0\0\0", 4);		/* entry */
		p++;
		*p++ = xdr_one;				/* bitmap length */
		*p++ = htonl(FATTR4_WORD0_FILEID);	/* bitmap */
		*p++ = htonl(8);			/* attribute buffer length */
		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
	}

	*p++ = xdr_one;					/* next */
	*p++ = xdr_zero;				/* cookie, first word */
	*p++ = xdr_two;					/* cookie, second word */
	*p++ = xdr_two;					/* entry len */
	memcpy(p, "..\0\0", 4);				/* entry */
	p++;
	*p++ = xdr_one;					/* bitmap length */
	*p++ = htonl(FATTR4_WORD0_FILEID);		/* bitmap */
	*p++ = htonl(8);				/* attribute buffer length */
	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));

	readdir->pgbase = (char *)p - (char *)start;
	readdir->count -= readdir->pgbase;
	kunmap_atomic(start);
}

static int nfs4_wait_clnt_recover(struct nfs_client *clp)
{
	int res;

	might_sleep();

	res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
			nfs_wait_bit_killable, TASK_KILLABLE);
	if (res)
		return res;

	if (clp->cl_cons_state < 0)
		return clp->cl_cons_state;
	return 0;
}

static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
{
	int res = 0;

	might_sleep();

	if (*timeout <= 0)
		*timeout = NFS4_POLL_RETRY_MIN;
	if (*timeout > NFS4_POLL_RETRY_MAX)
		*timeout = NFS4_POLL_RETRY_MAX;
	freezable_schedule_timeout_killable(*timeout);
	if (fatal_signal_pending(current))
		res = -ERESTARTSYS;
	*timeout <<= 1;
	return res;
}

/* This is the error handling routine for processes that are allowed
 * to sleep.
 */
static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state *state = exception->state;
	struct inode *inode = exception->inode;
	int ret = errorcode;

	exception->retry = 0;
	switch (errorcode) {
		case 0:
			return 0;
		case -NFS4ERR_OPENMODE:
			if (inode && nfs4_have_delegation(inode, FMODE_READ)) {
				nfs4_inode_return_delegation(inode);
				exception->retry = 1;
				return 0;
			}
			if (state == NULL)
				break;
			nfs4_schedule_stateid_recovery(server, state);
			goto wait_on_recovery;
		case -NFS4ERR_DELEG_REVOKED:
		case -NFS4ERR_ADMIN_REVOKED:
		case -NFS4ERR_BAD_STATEID:
			if (state == NULL)
				break;
			nfs_remove_bad_delegation(state->inode);
			nfs4_schedule_stateid_recovery(server, state);
			goto wait_on_recovery;
		case -NFS4ERR_EXPIRED:
			if (state != NULL)
				nfs4_schedule_stateid_recovery(server, state);
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_STALE_CLIENTID:
			nfs4_schedule_lease_recovery(clp);
			goto wait_on_recovery;
#if defined(CONFIG_NFS_V4_1)
		case -NFS4ERR_BADSESSION:
		case -NFS4ERR_BADSLOT:
		case -NFS4ERR_BAD_HIGH_SLOT:
		case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
		case -NFS4ERR_DEADSESSION:
		case -NFS4ERR_SEQ_FALSE_RETRY:
		case -NFS4ERR_SEQ_MISORDERED:
			dprintk("%s ERROR: %d Reset session\n", __func__,
				errorcode);
			nfs4_schedule_session_recovery(clp->cl_session, errorcode);
			goto wait_on_recovery;
#endif /* defined(CONFIG_NFS_V4_1) */
		case -NFS4ERR_FILE_OPEN:
			if (exception->timeout > HZ) {
				/* We have retried a decent amount, time to
				 * fail
				 */
				ret = -EBUSY;
				break;
			}
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
		case -EKEYEXPIRED:
			ret = nfs4_delay(server->client, &exception->timeout);
			if (ret != 0)
				break;
		case -NFS4ERR_RETRY_UNCACHED_REP:
		case -NFS4ERR_OLD_STATEID:
			exception->retry = 1;
			break;
		case -NFS4ERR_BADOWNER:
			/* The following works around a Linux server bug! */
		case -NFS4ERR_BADNAME:
			if (server->caps & NFS_CAP_UIDGID_NOMAP) {
				server->caps &= ~NFS_CAP_UIDGID_NOMAP;
				exception->retry = 1;
				printk(KERN_WARNING "NFS: v4 server %s "
						"does not accept raw "
						"uid/gids. "
						"Reenabling the idmapper.\n",
						server->nfs_client->cl_hostname);
			}
	}
	/* We failed to handle the error */
	return nfs4_map_errors(ret);
wait_on_recovery:
	ret = nfs4_wait_clnt_recover(clp);
	if (ret == 0)
		exception->retry = 1;
	return ret;
}


static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
{
	spin_lock(&clp->cl_lock);
	if (time_before(clp->cl_last_renewal, timestamp))
		clp->cl_last_renewal = timestamp;
	spin_unlock(&clp->cl_lock);
}

static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
{
	do_renew_lease(server->nfs_client, timestamp);
}

#if defined(CONFIG_NFS_V4_1)

/*
 * nfs4_free_slot - free a slot and efficiently update slot table.
 *
 * Freeing a slot is trivially done by clearing its respective bit
 * in the bitmap.
 * If the freed slotid equals highest_used_slotid we want to update it
 * so that the server would be able to size down the slot table if needed,
 * otherwise we know that the highest_used_slotid is still in use.
 * When updating highest_used_slotid there may be "holes" in the bitmap
 * so we need to scan down from highest_used_slotid to 0 looking for the now
 * highest slotid in use.
 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
 *
 * Must be called while holding tbl->slot_tbl_lock
 */
static void
nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
{
	BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
	/* clear used bit in bitmap */
	__clear_bit(slotid, tbl->used_slots);

	/* update highest_used_slotid when it is freed */
	if (slotid == tbl->highest_used_slotid) {
		slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
		if (slotid < tbl->max_slots)
			tbl->highest_used_slotid = slotid;
		else
			tbl->highest_used_slotid = NFS4_NO_SLOT;
	}
	dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
		slotid, tbl->highest_used_slotid);
}

bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	return true;
}

/*
 * Signal state manager thread if session fore channel is drained
 */
static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
{
	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
		rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
				nfs4_set_task_privileged, NULL);
		return;
	}

	if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
		return;

	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
	complete(&ses->fc_slot_table.complete);
}

/*
 * Signal state manager thread if session back channel is drained
 */
void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
{
	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
	    ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
		return;
	dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
	complete(&ses->bc_slot_table.complete);
}

static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot_table *tbl;

	tbl = &res->sr_session->fc_slot_table;
	if (!res->sr_slot) {
		/* just wake up the next waiter since
		 * we may not have consumed a slot after all */
		dprintk("%s: No slot\n", __func__);
		return;
	}

	spin_lock(&tbl->slot_tbl_lock);
	nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
	nfs4_check_drain_fc_complete(res->sr_session);
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
}

static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
{
	unsigned long timestamp;
	struct nfs_client *clp;

	/*
	 * sr_status remains 1 if an RPC level error occurred. The server
	 * may or may not have processed the sequence operation.
	 * Proceed as if the server received and processed the sequence
	 * operation.
	 */
	if (res->sr_status == 1)
		res->sr_status = NFS_OK;

	/* don't increment the sequence number if the task wasn't sent */
	if (!RPC_WAS_SENT(task))
		goto out;

	/* Check the SEQUENCE operation status */
	switch (res->sr_status) {
	case 0:
		/* Update the slot's sequence and clientid lease timer */
		++res->sr_slot->seq_nr;
		timestamp = res->sr_renewal_time;
		clp = res->sr_session->clp;
		do_renew_lease(clp, timestamp);
		/* Check sequence flags */
		if (res->sr_status_flags != 0)
			nfs4_schedule_lease_recovery(clp);
		break;
	case -NFS4ERR_DELAY:
		/* The server detected a resend of the RPC call and
		 * returned NFS4ERR_DELAY as per Section 2.10.6.2
		 * of RFC5661.
		 */
		dprintk("%s: slot=%td seq=%d: Operation in progress\n",
			__func__,
			res->sr_slot - res->sr_session->fc_slot_table.slots,
			res->sr_slot->seq_nr);
		goto out_retry;
	default:
		/* Just update the slot sequence no. */
		++res->sr_slot->seq_nr;
	}
out:
	/* The session may be reset by one of the error handlers. */
	dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
	nfs41_sequence_free_slot(res);
	return 1;
out_retry:
	if (!rpc_restart_call(task))
		goto out;
	rpc_delay(task, NFS4_POLL_RETRY_MAX);
	return 0;
}

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_session == NULL)
		return 1;
	return nfs41_sequence_done(task, res);
}

/*
 * nfs4_find_slot - efficiently look for a free slot
 *
 * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and respectively set up the sequence operation args.
 * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
 *
 * Note: must be called while holding the slot_tbl_lock.
 */
static u32
nfs4_find_slot(struct nfs4_slot_table *tbl)
{
	u32 slotid;
	u32 ret_id = NFS4_NO_SLOT;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slots);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
	if (slotid >= tbl->max_slots)
		goto out;
	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
			tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	ret_id = slotid;
out:
	dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
	return ret_id;
}

static void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
	args->sa_session = NULL;
	args->sa_cache_this = 0;
	if (cache_reply)
		args->sa_cache_this = 1;
	res->sr_session = NULL;
	res->sr_slot = NULL;
}

int nfs41_setup_sequence(struct nfs4_session *session,
				struct nfs4_sequence_args *args,
				struct nfs4_sequence_res *res,
				struct rpc_task *task)
{
	struct nfs4_slot *slot;
	struct nfs4_slot_table *tbl;
	u32 slotid;

	dprintk("--> %s\n", __func__);
	/* slot already allocated? */
	if (res->sr_slot != NULL)
		return 0;

	tbl = &session->fc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		/* The state manager will wait until the slot table is empty */
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s session is draining\n", __func__);
		return -EAGAIN;
	}

	if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
	    !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("%s enforce FIFO order\n", __func__);
		return -EAGAIN;
	}

	slotid = nfs4_find_slot(tbl);
	if (slotid == NFS4_NO_SLOT) {
		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
		spin_unlock(&tbl->slot_tbl_lock);
		dprintk("<-- %s: no free slots\n", __func__);
		return -EAGAIN;
	}
	spin_unlock(&tbl->slot_tbl_lock);

	rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
	slot = tbl->slots + slotid;
	args->sa_session = session;
	args->sa_slotid = slotid;

	dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);

	res->sr_session = session;
	res->sr_slot = slot;
	res->sr_renewal_time = jiffies;
	res->sr_status_flags = 0;
	/*
	 * sr_status is only set in decode_sequence, and so will remain
	 * set to 1 if an rpc level failure occurs.
	 */
	res->sr_status = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs41_setup_sequence);

int nfs4_setup_sequence(const struct nfs_server *server,
			struct nfs4_sequence_args *args,
			struct nfs4_sequence_res *res,
			struct rpc_task *task)
{
	struct nfs4_session *session = nfs4_get_session(server);
	int ret = 0;

	if (session == NULL)
		goto out;

	dprintk("--> %s clp %p session %p sr_slot %td\n",
		__func__, session->clp, session, res->sr_slot ?
			res->sr_slot - session->fc_slot_table.slots : -1);

	ret = nfs41_setup_sequence(session, args, res, task);
out:
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}

struct nfs41_call_sync_data {
	const struct nfs_server *seq_server;
	struct nfs4_sequence_args *seq_args;
	struct nfs4_sequence_res *seq_res;
};

static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);

	if (nfs4_setup_sequence(data->seq_server, data->seq_args,
				data->seq_res, task))
		return;
	rpc_call_start(task);
}

static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs41_call_sync_prepare(task, calldata);
}

static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs41_call_sync_data *data = calldata;

	nfs41_sequence_done(task, data->seq_res);
}

static const struct rpc_call_ops nfs41_call_sync_ops = {
	.rpc_call_prepare = nfs41_call_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
	.rpc_call_prepare = nfs41_call_priv_sync_prepare,
	.rpc_call_done = nfs41_call_sync_done,
};

static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
				   struct nfs_server *server,
				   struct rpc_message *msg,
				   struct nfs4_sequence_args *args,
				   struct nfs4_sequence_res *res,
				   int privileged)
{
	int ret;
	struct rpc_task *task;
	struct nfs41_call_sync_data data = {
		.seq_server = server,
		.seq_args = args,
		.seq_res = res,
	};
	struct rpc_task_setup task_setup = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &nfs41_call_sync_ops,
		.callback_data = &data
	};

	if (privileged)
		task_setup.callback_ops = &nfs41_call_priv_sync_ops;
	task = rpc_run_task(&task_setup);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else {
		ret = task->tk_status;
		rpc_put_task(task);
	}
	return ret;
}

int _nfs4_call_sync_session(struct rpc_clnt *clnt,
			    struct nfs_server *server,
			    struct rpc_message *msg,
			    struct nfs4_sequence_args *args,
			    struct nfs4_sequence_res *res,
			    int cache_reply)
{
	nfs41_init_sequence(args, res, cache_reply);
	return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
}

#else
static inline
void nfs41_init_sequence(struct nfs4_sequence_args *args,
		struct nfs4_sequence_res *res, int cache_reply)
{
}

static int nfs4_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	return 1;
}
#endif /* CONFIG_NFS_V4_1 */

int _nfs4_call_sync(struct rpc_clnt *clnt,
		    struct nfs_server *server,
		    struct rpc_message *msg,
		    struct nfs4_sequence_args *args,
		    struct nfs4_sequence_res *res,
		    int cache_reply)
{
	nfs41_init_sequence(args, res, cache_reply);
	return rpc_call_sync(clnt, msg, 0);
}

static inline
int nfs4_call_sync(struct rpc_clnt *clnt,
		   struct nfs_server *server,
		   struct rpc_message *msg,
		   struct nfs4_sequence_args *args,
		   struct nfs4_sequence_res *res,
		   int cache_reply)
{
	return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
						args, res, cache_reply);
}

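/*
 * Typical usage of nfs4_call_sync(): a per-operation proc routine embeds
 * nfs4_sequence_args/nfs4_sequence_res inside its own argument and result
 * structures and hands both to the helper, which fills in the SEQUENCE
 * machinery on NFSv4.1 and falls back to a plain rpc_call_sync() on v4.0.
 * A rough usage sketch along the lines of the GETATTR path (the
 * nfs4_getattr_arg/nfs4_getattr_res types are assumed from the XDR layer,
 * not defined in this file):
 *
 *	struct nfs4_getattr_arg args = {
 *		.fh = fhandle,
 *		.bitmask = server->attr_bitmask,
 *	};
 *	struct nfs4_getattr_res res = {
 *		.fattr = fattr,
 *		.server = server,
 *	};
 *	struct rpc_message msg = {
 *		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
 *		.rpc_argp = &args,
 *		.rpc_resp = &res,
 *	};
 *
 *	status = nfs4_call_sync(server->client, server, &msg,
 *				&args.seq_args, &res.seq_res, 0);
 */
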
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
	struct nfs_inode *nfsi = NFS_I(dir);

	spin_lock(&dir->i_lock);
	nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
	if (!cinfo->atomic || cinfo->before != dir->i_version)
		nfs_force_lookup_revalidate(dir);
	dir->i_version = cinfo->after;
	spin_unlock(&dir->i_lock);
}

struct nfs4_opendata {
	struct kref kref;
	struct nfs_openargs o_arg;
	struct nfs_openres o_res;
	struct nfs_open_confirmargs c_arg;
	struct nfs_open_confirmres c_res;
	struct nfs4_string owner_name;
	struct nfs4_string group_name;
	struct nfs_fattr f_attr;
	struct dentry *dir;
	struct dentry *dentry;
	struct nfs4_state_owner *owner;
	struct nfs4_state *state;
	struct iattr attrs;
	unsigned long timestamp;
	unsigned int rpc_done : 1;
	int rpc_status;
	int cancelled;
};


static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
	p->o_res.f_attr = &p->f_attr;
	p->o_res.seqid = p->o_arg.seqid;
	p->c_res.seqid = p->c_arg.seqid;
	p->o_res.server = p->o_arg.server;
	p->o_res.access_request = p->o_arg.access;
	nfs_fattr_init(&p->f_attr);
	nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
}

static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
		struct nfs4_state_owner *sp, fmode_t fmode, int flags,
		const struct iattr *attrs,
		gfp_t gfp_mask)
{
	struct dentry *parent = dget_parent(dentry);
	struct inode *dir = parent->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *p;

	p = kzalloc(sizeof(*p), gfp_mask);
	if (p == NULL)
		goto err;
	p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
	if (p->o_arg.seqid == NULL)
		goto err_free;
	nfs_sb_active(dentry->d_sb);
	p->dentry = dget(dentry);
	p->dir = parent;
	p->owner = sp;
	atomic_inc(&sp->so_count);
	p->o_arg.fh = NFS_FH(dir);
	p->o_arg.open_flags = flags;
	p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
	/* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
	 * will return permission denied for all bits until close */
	if (!(flags & O_EXCL)) {
		/* ask server to check for all possible rights as results
		 * are cached */
		p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
				  NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
	}
	p->o_arg.clientid = server->nfs_client->cl_clientid;
	p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
	p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
	p->o_arg.name = &dentry->d_name;
	p->o_arg.server = server;
	p->o_arg.bitmask = server->attr_bitmask;
	p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
	p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
	if (attrs != NULL && attrs->ia_valid != 0) {
		__be32 verf[2];

		p->o_arg.u.attrs = &p->attrs;
		memcpy(&p->attrs, attrs, sizeof(p->attrs));

		verf[0] = jiffies;
		verf[1] = current->pid;
		memcpy(p->o_arg.u.verifier.data, verf,
				sizeof(p->o_arg.u.verifier.data));
	}
	p->c_arg.fh = &p->o_res.fh;
	p->c_arg.stateid = &p->o_res.stateid;
	p->c_arg.seqid = p->o_arg.seqid;
	nfs4_init_opendata_res(p);
	kref_init(&p->kref);
	return p;
err_free:
	kfree(p);
err:
	dput(parent);
	return NULL;
}

static void nfs4_opendata_free(struct kref *kref)
{
	struct nfs4_opendata *p = container_of(kref,
			struct nfs4_opendata, kref);
	struct super_block *sb = p->dentry->d_sb;

	nfs_free_seqid(p->o_arg.seqid);
	if (p->state != NULL)
		nfs4_put_open_state(p->state);
	nfs4_put_state_owner(p->owner);
	dput(p->dir);
	dput(p->dentry);
	nfs_sb_deactive(sb);
	nfs_fattr_free_names(&p->f_attr);
	kfree(p);
}

static void nfs4_opendata_put(struct nfs4_opendata *p)
{
	if (p != NULL)
		kref_put(&p->kref, nfs4_opendata_free);
}

static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
{
	int ret;

	ret = rpc_wait_for_completion_task(task);
	return ret;
}

static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
{
	int ret = 0;

	if (open_mode & (O_EXCL|O_TRUNC))
		goto out;
	switch (mode & (FMODE_READ|FMODE_WRITE)) {
		case FMODE_READ:
			ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
				&& state->n_rdonly != 0;
			break;
		case FMODE_WRITE:
			ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
				&& state->n_wronly != 0;
			break;
		case FMODE_READ|FMODE_WRITE:
			ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
				&& state->n_rdwr != 0;
	}
out:
	return ret;
}

static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
{
	if (delegation == NULL)
		return 0;
	if ((delegation->type & fmode) != fmode)
		return 0;
	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
		return 0;
	nfs_mark_delegation_referenced(delegation);
	return 1;
}

static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
{
	switch (fmode) {
		case FMODE_WRITE:
			state->n_wronly++;
			break;
		case FMODE_READ:
			state->n_rdonly++;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr++;
	}
	nfs4_state_set_mode_locked(state, state->state | fmode);
}

static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		nfs4_stateid_copy(&state->stateid, stateid);
	nfs4_stateid_copy(&state->open_stateid, stateid);
	switch (fmode) {
		case FMODE_READ:
			set_bit(NFS_O_RDONLY_STATE, &state->flags);
			break;
		case FMODE_WRITE:
			set_bit(NFS_O_WRONLY_STATE, &state->flags);
			break;
		case FMODE_READ|FMODE_WRITE:
			set_bit(NFS_O_RDWR_STATE, &state->flags);
	}
}

static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
{
	write_seqlock(&state->seqlock);
	nfs_set_open_stateid_locked(state, stateid, fmode);
	write_sequnlock(&state->seqlock);
}

static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
{
	/*
	 * Protect the call to nfs4_state_set_mode_locked and
	 * serialise the stateid update
	 */
	write_seqlock(&state->seqlock);
	if (deleg_stateid != NULL) {
		nfs4_stateid_copy(&state->stateid, deleg_stateid);
		set_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	if (open_stateid != NULL)
		nfs_set_open_stateid_locked(state, open_stateid, fmode);
	write_sequnlock(&state->seqlock);
	spin_lock(&state->owner->so_lock);
	update_open_stateflags(state, fmode);
	spin_unlock(&state->owner->so_lock);
}

static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *deleg_cur;
	int ret = 0;

	fmode &= (FMODE_READ|FMODE_WRITE);

	rcu_read_lock();
	deleg_cur = rcu_dereference(nfsi->delegation);
	if (deleg_cur == NULL)
		goto no_delegation;

	spin_lock(&deleg_cur->lock);
	if (nfsi->delegation != deleg_cur ||
	    (deleg_cur->type & fmode) != fmode)
		goto no_delegation_unlock;

	if (delegation == NULL)
		delegation = &deleg_cur->stateid;
	else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
		goto no_delegation_unlock;

	nfs_mark_delegation_referenced(deleg_cur);
	__update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
	ret = 1;
no_delegation_unlock:
	spin_unlock(&deleg_cur->lock);
no_delegation:
	rcu_read_unlock();

	if (!ret && open_stateid != NULL) {
		__update_open_stateid(state, open_stateid, NULL, fmode);
		ret = 1;
	}

	return ret;
}


static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
{
	struct nfs_delegation *delegation;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(inode)->delegation);
	if (delegation == NULL || (delegation->type & fmode) == fmode) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	nfs4_inode_return_delegation(inode);
}

static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
{
	struct nfs4_state *state = opendata->state;
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_delegation *delegation;
	int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
	fmode_t fmode = opendata->o_arg.fmode;
	nfs4_stateid stateid;
	int ret = -EAGAIN;

	for (;;) {
		if (can_open_cached(state, fmode, open_mode)) {
			spin_lock(&state->owner->so_lock);
			if (can_open_cached(state, fmode, open_mode)) {
				update_open_stateflags(state, fmode);
				spin_unlock(&state->owner->so_lock);
				goto out_return_state;
			}
			spin_unlock(&state->owner->so_lock);
		}
		rcu_read_lock();
		delegation = rcu_dereference(nfsi->delegation);
		if (!can_open_delegated(delegation, fmode)) {
			rcu_read_unlock();
			break;
		}
		/* Save the delegation */
		nfs4_stateid_copy(&stateid, &delegation->stateid);
		rcu_read_unlock();
		ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
		if (ret != 0)
			goto out;
		ret = -EAGAIN;

		/* Try to update the stateid using the delegation */
		if (update_open_stateid(state, NULL, &stateid, fmode))
			goto out_return_state;
	}
out:
	return ERR_PTR(ret);
out_return_state:
	atomic_inc(&state->count);
	return state;
}

static void
nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
{
	struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
	struct nfs_delegation *delegation;
	int delegation_flags = 0;

	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation)
		delegation_flags = delegation->flags;
	rcu_read_unlock();
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
		pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
				   "returning a delegation for "
				   "OPEN(CLAIM_DELEGATE_CUR)\n",
				   clp->cl_hostname);
	} else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
		nfs_inode_set_delegation(state->inode,
					 data->owner->so_cred,
					 &data->o_res);
	else
		nfs_inode_reclaim_delegation(state->inode,
					     data->owner->so_cred,
					     &data->o_res);
}

/*
 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
 * and update the nfs4_state.
 */
static struct nfs4_state *
_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode = data->state->inode;
	struct nfs4_state *state = data->state;
	int ret;

	if (!data->rpc_done) {
		ret = data->rpc_status;
		goto err;
	}

	ret = -ESTALE;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR_TYPE) ||
	    !(data->f_attr.valid & NFS_ATTR_FATTR_FILEID) ||
	    !(data->f_attr.valid & NFS_ATTR_FATTR_CHANGE))
		goto err;

	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err;

	ret = nfs_refresh_inode(inode, &data->f_attr);
	if (ret)
		goto err;

	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
	update_open_stateid(state, &data->o_res.stateid, NULL,
			    data->o_arg.fmode);

	return state;
err:
	return ERR_PTR(ret);

}

static struct nfs4_state *
_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	struct inode *inode;
	struct nfs4_state *state = NULL;
	int ret;

	if (!data->rpc_done) {
		state = nfs4_try_open_cached(data);
		goto out;
	}

	ret = -EAGAIN;
	if (!(data->f_attr.valid & NFS_ATTR_FATTR))
		goto err;
	inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
	ret = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto err;
	ret = -ENOMEM;
	state = nfs4_get_open_state(inode, data->owner);
	if (state == NULL)
		goto err_put_inode;
	if (data->o_res.delegation_type != 0)
		nfs4_opendata_check_deleg(data, state);
	update_open_stateid(state, &data->o_res.stateid, NULL,
			    data->o_arg.fmode);
	iput(inode);
out:
	return state;
err_put_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static struct nfs4_state *
nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
{
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
		return _nfs4_opendata_reclaim_to_nfs4_state(data);
	return _nfs4_opendata_to_nfs4_state(data);
}

static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	struct nfs_open_context *ctx;

	spin_lock(&state->inode->i_lock);
	list_for_each_entry(ctx, &nfsi->open_files, list) {
		if (ctx->state != state)
			continue;
		get_nfs_open_context(ctx);
		spin_unlock(&state->inode->i_lock);
		return ctx;
	}
	spin_unlock(&state->inode->i_lock);
	return ERR_PTR(-ENOENT);
}

static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs4_opendata *opendata;

	opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
	if (opendata == NULL)
		return ERR_PTR(-ENOMEM);
	opendata->state = state;
	atomic_inc(&state->count);
	return opendata;
}

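/*
 * The recovery and proc routines that follow generally share one calling
 * convention: a "_nfs4_..." worker performs a single attempt, and a wrapper
 * feeds any NFSv4 error to nfs4_handle_exception() and retries while
 * exception.retry is set (see nfs4_do_open_reclaim and nfs4_do_open_expired
 * below).  A rough sketch of that wrapper pattern, with _nfs4_do_something()
 * standing in as a hypothetical worker:
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = nfs4_handle_exception(server,
 *				_nfs4_do_something(server, ...),
 *				&exception);
 *	} while (exception.retry);
 *	return err;
 */
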
static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
{
	struct nfs4_state *newstate;
	int ret;

	opendata->o_arg.open_flags = 0;
	opendata->o_arg.fmode = fmode;
	memset(&opendata->o_res, 0, sizeof(opendata->o_res));
	memset(&opendata->c_res, 0, sizeof(opendata->c_res));
	nfs4_init_opendata_res(opendata);
	ret = _nfs4_recover_proc_open(opendata);
	if (ret != 0)
		return ret;
	newstate = nfs4_opendata_to_nfs4_state(opendata);
	if (IS_ERR(newstate))
		return PTR_ERR(newstate);
	nfs4_close_state(newstate, fmode);
	*res = newstate;
	return 0;
}

static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
{
	struct nfs4_state *newstate;
	int ret;

	/* memory barrier prior to reading state->n_* */
	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	smp_rmb();
	if (state->n_rdwr != 0) {
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_wronly != 0) {
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	if (state->n_rdonly != 0) {
		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
		if (ret != 0)
			return ret;
		if (newstate != state)
			return -ESTALE;
	}
	/*
	 * We may have performed cached opens for all three recoveries.
	 * Check if we need to update the current stateid.
	 */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
	    !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
		write_seqlock(&state->seqlock);
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
			nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
	}
	return 0;
}

/*
 * OPEN_RECLAIM:
 * 	reclaim state on the server after a reboot.
 */
static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_delegation *delegation;
	struct nfs4_opendata *opendata;
	fmode_t delegation_type = 0;
	int status;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
	opendata->o_arg.fh = NFS_FH(state->inode);
	rcu_read_lock();
	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
	if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
		delegation_type = delegation->type;
	rcu_read_unlock();
	opendata->o_arg.u.delegation_type = delegation_type;
	status = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return status;
}

static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_do_open_reclaim(ctx, state);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = nfs4_do_open_reclaim(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}

static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs4_opendata *opendata;
	int ret;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
	ret = nfs4_open_recover(opendata, state);
	nfs4_opendata_put(opendata);
	return ret;
}

int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
	struct nfs4_exception exception = { };
	struct nfs_server *server = NFS_SERVER(state->inode);
	int err;
	do {
		err = _nfs4_open_delegation_recall(ctx, state, stateid);
		switch (err) {
			case 0:
			case -ENOENT:
			case -ESTALE:
				goto out;
			case -NFS4ERR_BADSESSION:
			case -NFS4ERR_BADSLOT:
			case -NFS4ERR_BAD_HIGH_SLOT:
			case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
			case -NFS4ERR_DEADSESSION:
				nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
				goto out;
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_STALE_STATEID:
			case -NFS4ERR_EXPIRED:
				/* Don't recall a delegation if it was lost */
				nfs4_schedule_lease_recovery(server->nfs_client);
				goto out;
			case -ERESTARTSYS:
				/*
				 * The show must go on: exit, but mark the
				 * stateid as needing recovery.
				 */
			case -NFS4ERR_DELEG_REVOKED:
			case -NFS4ERR_ADMIN_REVOKED:
			case -NFS4ERR_BAD_STATEID:
				nfs_inode_find_state_and_recover(state->inode,
						stateid);
				nfs4_schedule_stateid_recovery(server, state);
			case -EKEYEXPIRED:
				/*
				 * User RPCSEC_GSS context has expired.
				 * We cannot recover this stateid now, so
				 * skip it and allow recovery thread to
				 * proceed.
				 */
			case -ENOMEM:
				err = 0;
				goto out;
		}
		err = nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
out:
	return err;
}

static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;
	if (data->rpc_status == 0) {
		nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
		nfs_confirm_seqid(&data->owner->so_seqid, 0);
		renew_lease(data->o_res.server, data->timestamp);
		data->rpc_done = 1;
	}
}

static void nfs4_open_confirm_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
		goto out_free;
	/* In case of error, no cleanup! */
	if (!data->rpc_done)
		goto out_free;
	state = nfs4_opendata_to_nfs4_state(data);
	if (!IS_ERR(state))
		nfs4_close_state(state, data->o_arg.fmode);
out_free:
	nfs4_opendata_put(data);
}

static const struct rpc_call_ops nfs4_open_confirm_ops = {
	.rpc_call_done = nfs4_open_confirm_done,
	.rpc_release = nfs4_open_confirm_release,
};

/*
 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
 */
static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
{
	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
		.rpc_argp = &data->c_arg,
		.rpc_resp = &data->c_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_confirm_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->timestamp = jiffies;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		data->cancelled = 1;
		smp_wmb();
	} else
		status = data->rpc_status;
	rpc_put_task(task);
	return status;
}

static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state_owner *sp = data->owner;

	if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
		return;
	/*
	 * Check if we still need to send an OPEN call, or if we can use
	 * a delegation instead.
	 */
	if (data->state != NULL) {
		struct nfs_delegation *delegation;

		if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
			goto out_no_action;
		rcu_read_lock();
		delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
		if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
		    can_open_delegated(delegation, data->o_arg.fmode))
			goto unlock_no_action;
		rcu_read_unlock();
	}
	/* Update client id. */
	data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
	if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
		data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
		nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
	}
	data->timestamp = jiffies;
	if (nfs4_setup_sequence(data->o_arg.server,
				&data->o_arg.seq_args,
				&data->o_res.seq_res,
				task) != 0)
		nfs_release_seqid(data->o_arg.seqid);
	else
		rpc_call_start(task);
	return;
unlock_no_action:
	rcu_read_unlock();
out_no_action:
	task->tk_action = NULL;

}

static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
{
	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	nfs4_open_prepare(task, calldata);
}

static void nfs4_open_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_opendata *data = calldata;

	data->rpc_status = task->tk_status;

	if (!nfs4_sequence_done(task, &data->o_res.seq_res))
		return;

	if (task->tk_status == 0) {
		if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
			switch (data->o_res.f_attr->mode & S_IFMT) {
			case S_IFREG:
				break;
			case S_IFLNK:
				data->rpc_status = -ELOOP;
				break;
			case S_IFDIR:
				data->rpc_status = -EISDIR;
				break;
			default:
				data->rpc_status = -ENOTDIR;
			}
		}
		renew_lease(data->o_res.server, data->timestamp);
		if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
			nfs_confirm_seqid(&data->owner->so_seqid, 0);
	}
	data->rpc_done = 1;
}

static void nfs4_open_release(void *calldata)
{
	struct nfs4_opendata *data = calldata;
	struct nfs4_state *state = NULL;

	/* If this request hasn't been cancelled, do nothing */
	if (data->cancelled == 0)
		goto out_free;
	/* In case of error, no cleanup! */
	if (data->rpc_status != 0 || !data->rpc_done)
		goto out_free;
	/* In case we need an open_confirm, no cleanup! */
	if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
		goto out_free;
	state = nfs4_opendata_to_nfs4_state(data);
	if (!IS_ERR(state))
		nfs4_close_state(state, data->o_arg.fmode);
out_free:
	nfs4_opendata_put(data);
}

static const struct rpc_call_ops nfs4_open_ops = {
	.rpc_call_prepare = nfs4_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,
};

static const struct rpc_call_ops nfs4_recover_open_ops = {
	.rpc_call_prepare = nfs4_recover_open_prepare,
	.rpc_call_done = nfs4_open_done,
	.rpc_release = nfs4_open_release,
};

static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_openargs *o_arg = &data->o_arg;
	struct nfs_openres *o_res = &data->o_res;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
		.rpc_argp = o_arg,
		.rpc_resp = o_res,
		.rpc_cred = data->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_open_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int status;

	nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
	kref_get(&data->kref);
	data->rpc_done = 0;
	data->rpc_status = 0;
	data->cancelled = 0;
	if (isrecover)
		task_setup_data.callback_ops = &nfs4_recover_open_ops;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0) {
		data->cancelled = 1;
		smp_wmb();
	} else
		status = data->rpc_status;
	rpc_put_task(task);

	return status;
}

static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_openres *o_res = &data->o_res;
	int status;

	status = nfs4_run_open_task(data, 1);
	if (status != 0 || !data->rpc_done)
		return status;

	nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);

	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
		status = _nfs4_proc_open_confirm(data);
		if (status != 0)
			return status;
	}

	return status;
}

static int nfs4_opendata_access(struct rpc_cred *cred,
				struct nfs4_opendata *opendata,
				struct nfs4_state *state, fmode_t fmode)
{
	struct nfs_access_entry cache;
	u32 mask;

	/* access call failed or for some reason the server doesn't
	 * support any access modes -- defer access call until later */
	if (opendata->o_res.access_supported == 0)
		return 0;

	mask = 0;
	/* don't check MAY_WRITE - a newly created file may not have
	 * write mode bits, but POSIX allows the creating process to write */
	if (fmode & FMODE_READ)
		mask |= MAY_READ;
	if (fmode & FMODE_EXEC)
		mask |= MAY_EXEC;

	cache.cred = cred;
	cache.jiffies = jiffies;
	nfs_access_set_mask(&cache, opendata->o_res.access_result);
	nfs_access_add_cache(state->inode, &cache);

	if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
		return 0;

	/* even though OPEN succeeded, access is denied. Close the file */
	nfs4_close_state(state, fmode);
	return -EACCES;
}

/*
 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
 */
static int _nfs4_proc_open(struct nfs4_opendata *data)
{
	struct inode *dir = data->dir->d_inode;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs_openargs *o_arg = &data->o_arg;
	struct nfs_openres *o_res = &data->o_res;
	int status;

	status = nfs4_run_open_task(data, 0);
	if (!data->rpc_done)
		return status;
	if (status != 0) {
		if (status == -NFS4ERR_BADNAME &&
		    !(o_arg->open_flags & O_CREAT))
			return -ENOENT;
		return status;
	}

	nfs_fattr_map_and_free_names(server, &data->f_attr);

	if (o_arg->open_flags & O_CREAT)
		update_changeattr(dir, &o_res->cinfo);
	if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
		server->caps &= ~NFS_CAP_POSIX_LOCK;
	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
		status = _nfs4_proc_open_confirm(data);
		if (status != 0)
			return status;
	}
	if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
		_nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
	return 0;
}

static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
{
	unsigned int loop;
	int ret;

	for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
		ret = nfs4_wait_clnt_recover(clp);
		if (ret != 0)
			break;
		if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
		    !test_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state))
			break;
		nfs4_schedule_state_manager(clp);
		ret = -EIO;
	}
	return ret;
}

static int nfs4_recover_expired_lease(struct nfs_server *server)
{
	return nfs4_client_recover_expired_lease(server->nfs_client);
}

/*
 * OPEN_EXPIRED:
 * 	reclaim state on the server after a network partition.
 * 	Assumes caller holds the appropriate lock
 */
static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs4_opendata *opendata;
	int ret;

	opendata = nfs4_open_recoverdata_alloc(ctx, state);
	if (IS_ERR(opendata))
		return PTR_ERR(opendata);
	ret = nfs4_open_recover(opendata, state);
	if (ret == -ESTALE)
		d_drop(ctx->dentry);
	nfs4_opendata_put(opendata);
	return ret;
}

static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = { };
	int err;

	do {
		err = _nfs4_open_expired(ctx, state);
		switch (err) {
		default:
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			nfs4_handle_exception(server, err, &exception);
			err = 0;
		}
	} while (exception.retry);
out:
	return err;
}

static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	struct nfs_open_context *ctx;
	int ret;

	ctx = nfs4_state_find_open_context(state);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = nfs4_do_open_expired(ctx, state);
	put_nfs_open_context(ctx);
	return ret;
}

#if defined(CONFIG_NFS_V4_1)
static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	nfs4_stateid *stateid = &state->stateid;
	int status;

	/* If a state reset has been done, test_stateid is unneeded */
	if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
		return;

	status = nfs41_test_stateid(server, stateid);
	if (status != NFS_OK) {
		/* Free the stateid unless the server explicitly
		 * informs us the stateid is unrecognized. */
		if (status != -NFS4ERR_BAD_STATEID)
			nfs41_free_stateid(server, stateid);
		nfs_remove_bad_delegation(state->inode);

		write_seqlock(&state->seqlock);
		nfs4_stateid_copy(&state->stateid, &state->open_stateid);
		write_sequnlock(&state->seqlock);
		clear_bit(NFS_DELEGATED_STATE, &state->flags);
	}
}

/**
 * nfs41_check_open_stateid - possibly free an open stateid
 *
 * @state: NFSv4 state for an inode
 *
 * Returns NFS_OK if recovery for this stateid is now finished.
 * Otherwise a negative NFS4ERR value is returned.
 */
static int nfs41_check_open_stateid(struct nfs4_state *state)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	nfs4_stateid *stateid = &state->open_stateid;
	int status;

	/* If a state reset has been done, test_stateid is unneeded */
	if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
	    (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
	    (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
		return -NFS4ERR_BAD_STATEID;

	status = nfs41_test_stateid(server, stateid);
	if (status != NFS_OK) {
		/* Free the stateid unless the server explicitly
		 * informs us the stateid is unrecognized. */
		if (status != -NFS4ERR_BAD_STATEID)
			nfs41_free_stateid(server, stateid);

		clear_bit(NFS_O_RDONLY_STATE, &state->flags);
		clear_bit(NFS_O_WRONLY_STATE, &state->flags);
		clear_bit(NFS_O_RDWR_STATE, &state->flags);
	}
	return status;
}

static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	int status;

	nfs41_clear_delegation_stateid(state);
	status = nfs41_check_open_stateid(state);
	if (status != NFS_OK)
		status = nfs4_open_expired(sp, state);
	return status;
}
#endif

/*
 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
 * fields corresponding to attributes that were used to store the verifier.
 * Make sure we clobber those fields in the later setattr call
 */
static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
{
	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
	    !(sattr->ia_valid & ATTR_ATIME_SET))
		sattr->ia_valid |= ATTR_ATIME;

	if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
	    !(sattr->ia_valid & ATTR_MTIME_SET))
		sattr->ia_valid |= ATTR_MTIME;
}

/*
 * Returns a referenced nfs4_state
 */
static int _nfs4_do_open(struct inode *dir,
			 struct dentry *dentry,
			 fmode_t fmode,
			 int flags,
			 struct iattr *sattr,
			 struct rpc_cred *cred,
			 struct nfs4_state **res,
			 struct nfs4_threshold **ctx_th)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state = NULL;
	struct nfs_server *server = NFS_SERVER(dir);
	struct nfs4_opendata *opendata;
	int status;

	/* Protect against reboot recovery conflicts */
	status = -ENOMEM;
	sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
	if (sp == NULL) {
		dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
		goto out_err;
	}
	status = nfs4_recover_expired_lease(server);
	if (status != 0)
		goto err_put_state_owner;
	if (dentry->d_inode != NULL)
		nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
	status = -ENOMEM;
	opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
	if (opendata == NULL)
		goto err_put_state_owner;

	if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
		opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
		if (!opendata->f_attr.mdsthreshold)
			goto err_opendata_put;
		opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
	}
	if (dentry->d_inode != NULL)
		opendata->state = nfs4_get_open_state(dentry->d_inode, sp);

	status = _nfs4_proc_open(opendata);
	if (status != 0)
		goto err_opendata_put;

	state = nfs4_opendata_to_nfs4_state(opendata);
	status = PTR_ERR(state);
	if (IS_ERR(state))
		goto err_opendata_put;
	if (server->caps & NFS_CAP_POSIX_LOCK)
		set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);

	status = nfs4_opendata_access(cred, opendata, state, fmode);
	if (status != 0)
		goto err_opendata_put;

	if (opendata->o_arg.open_flags & O_EXCL) {
		nfs4_exclusive_attrset(opendata, sattr);

		nfs_fattr_init(opendata->o_res.f_attr);
		status = nfs4_do_setattr(state->inode, cred,
				opendata->o_res.f_attr, sattr,
				state);
		if (status == 0)
			nfs_setattr_update_inode(state->inode, sattr);
		nfs_post_op_update_inode(state->inode,
opendata->o_res.f_attr); 2026 } 2027 2028 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) 2029 *ctx_th = opendata->f_attr.mdsthreshold; 2030 else 2031 kfree(opendata->f_attr.mdsthreshold); 2032 opendata->f_attr.mdsthreshold = NULL; 2033 2034 nfs4_opendata_put(opendata); 2035 nfs4_put_state_owner(sp); 2036 *res = state; 2037 return 0; 2038 err_opendata_put: 2039 kfree(opendata->f_attr.mdsthreshold); 2040 nfs4_opendata_put(opendata); 2041 err_put_state_owner: 2042 nfs4_put_state_owner(sp); 2043 out_err: 2044 *res = NULL; 2045 return status; 2046 } 2047 2048 2049 static struct nfs4_state *nfs4_do_open(struct inode *dir, 2050 struct dentry *dentry, 2051 fmode_t fmode, 2052 int flags, 2053 struct iattr *sattr, 2054 struct rpc_cred *cred, 2055 struct nfs4_threshold **ctx_th) 2056 { 2057 struct nfs4_exception exception = { }; 2058 struct nfs4_state *res; 2059 int status; 2060 2061 fmode &= FMODE_READ|FMODE_WRITE|FMODE_EXEC; 2062 do { 2063 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, 2064 &res, ctx_th); 2065 if (status == 0) 2066 break; 2067 /* NOTE: BAD_SEQID means the server and client disagree about the 2068 * book-keeping w.r.t. state-changing operations 2069 * (OPEN/CLOSE/LOCK/LOCKU...) 2070 * It is actually a sign of a bug on the client or on the server. 2071 * 2072 * If we receive a BAD_SEQID error in the particular case of 2073 * doing an OPEN, we assume that nfs_increment_open_seqid() will 2074 * have unhashed the old state_owner for us, and that we can 2075 * therefore safely retry using a new one. We should still warn 2076 * the user though... 2077 */ 2078 if (status == -NFS4ERR_BAD_SEQID) { 2079 pr_warn_ratelimited("NFS: v4 server %s " 2080 " returned a bad sequence-id error!\n", 2081 NFS_SERVER(dir)->nfs_client->cl_hostname); 2082 exception.retry = 1; 2083 continue; 2084 } 2085 /* 2086 * BAD_STATEID on OPEN means that the server cancelled our 2087 * state before it received the OPEN_CONFIRM. 2088 * Recover by retrying the request as per the discussion 2089 * on Page 181 of RFC3530. 
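 * Retrying is safe here: no open state has been established yet, so the
 * loop below simply issues a brand new OPEN.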
2090 */ 2091 if (status == -NFS4ERR_BAD_STATEID) { 2092 exception.retry = 1; 2093 continue; 2094 } 2095 if (status == -EAGAIN) { 2096 /* We must have found a delegation */ 2097 exception.retry = 1; 2098 continue; 2099 } 2100 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), 2101 status, &exception)); 2102 } while (exception.retry); 2103 return res; 2104 } 2105 2106 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2107 struct nfs_fattr *fattr, struct iattr *sattr, 2108 struct nfs4_state *state) 2109 { 2110 struct nfs_server *server = NFS_SERVER(inode); 2111 struct nfs_setattrargs arg = { 2112 .fh = NFS_FH(inode), 2113 .iap = sattr, 2114 .server = server, 2115 .bitmask = server->attr_bitmask, 2116 }; 2117 struct nfs_setattrres res = { 2118 .fattr = fattr, 2119 .server = server, 2120 }; 2121 struct rpc_message msg = { 2122 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2123 .rpc_argp = &arg, 2124 .rpc_resp = &res, 2125 .rpc_cred = cred, 2126 }; 2127 unsigned long timestamp = jiffies; 2128 int status; 2129 2130 nfs_fattr_init(fattr); 2131 2132 if (state != NULL) { 2133 struct nfs_lockowner lockowner = { 2134 .l_owner = current->files, 2135 .l_pid = current->tgid, 2136 }; 2137 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2138 &lockowner); 2139 } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode, 2140 FMODE_WRITE)) { 2141 /* Use that stateid */ 2142 } else 2143 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2144 2145 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2146 if (status == 0 && state != NULL) 2147 renew_lease(server, timestamp); 2148 return status; 2149 } 2150 2151 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2152 struct nfs_fattr *fattr, struct iattr *sattr, 2153 struct nfs4_state *state) 2154 { 2155 struct nfs_server *server = NFS_SERVER(inode); 2156 struct nfs4_exception exception = { 2157 .state = state, 2158 .inode = inode, 2159 }; 2160 int err; 2161 do { 2162 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state); 2163 switch (err) { 2164 case -NFS4ERR_OPENMODE: 2165 if (state && !(state->state & FMODE_WRITE)) { 2166 err = -EBADF; 2167 if (sattr->ia_valid & ATTR_OPEN) 2168 err = -EACCES; 2169 goto out; 2170 } 2171 } 2172 err = nfs4_handle_exception(server, err, &exception); 2173 } while (exception.retry); 2174 out: 2175 return err; 2176 } 2177 2178 struct nfs4_closedata { 2179 struct inode *inode; 2180 struct nfs4_state *state; 2181 struct nfs_closeargs arg; 2182 struct nfs_closeres res; 2183 struct nfs_fattr fattr; 2184 unsigned long timestamp; 2185 bool roc; 2186 u32 roc_barrier; 2187 }; 2188 2189 static void nfs4_free_closedata(void *data) 2190 { 2191 struct nfs4_closedata *calldata = data; 2192 struct nfs4_state_owner *sp = calldata->state->owner; 2193 struct super_block *sb = calldata->state->inode->i_sb; 2194 2195 if (calldata->roc) 2196 pnfs_roc_release(calldata->state->inode); 2197 nfs4_put_open_state(calldata->state); 2198 nfs_free_seqid(calldata->arg.seqid); 2199 nfs4_put_state_owner(sp); 2200 nfs_sb_deactive_async(sb); 2201 kfree(calldata); 2202 } 2203 2204 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state, 2205 fmode_t fmode) 2206 { 2207 spin_lock(&state->owner->so_lock); 2208 if (!(fmode & FMODE_READ)) 2209 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2210 if (!(fmode & FMODE_WRITE)) 2211 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2212 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2213 spin_unlock(&state->owner->so_lock); 2214 } 
2215 2216 static void nfs4_close_done(struct rpc_task *task, void *data) 2217 { 2218 struct nfs4_closedata *calldata = data; 2219 struct nfs4_state *state = calldata->state; 2220 struct nfs_server *server = NFS_SERVER(calldata->inode); 2221 2222 dprintk("%s: begin!\n", __func__); 2223 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2224 return; 2225 /* hmm. we are done with the inode, and in the process of freeing 2226 * the state_owner. we keep this around to process errors 2227 */ 2228 switch (task->tk_status) { 2229 case 0: 2230 if (calldata->roc) 2231 pnfs_roc_set_barrier(state->inode, 2232 calldata->roc_barrier); 2233 nfs_set_open_stateid(state, &calldata->res.stateid, 0); 2234 renew_lease(server, calldata->timestamp); 2235 nfs4_close_clear_stateid_flags(state, 2236 calldata->arg.fmode); 2237 break; 2238 case -NFS4ERR_STALE_STATEID: 2239 case -NFS4ERR_OLD_STATEID: 2240 case -NFS4ERR_BAD_STATEID: 2241 case -NFS4ERR_EXPIRED: 2242 if (calldata->arg.fmode == 0) 2243 break; 2244 default: 2245 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) 2246 rpc_restart_call_prepare(task); 2247 } 2248 nfs_release_seqid(calldata->arg.seqid); 2249 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2250 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2251 } 2252 2253 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2254 { 2255 struct nfs4_closedata *calldata = data; 2256 struct nfs4_state *state = calldata->state; 2257 struct inode *inode = calldata->inode; 2258 int call_close = 0; 2259 2260 dprintk("%s: begin!\n", __func__); 2261 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2262 return; 2263 2264 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2265 calldata->arg.fmode = FMODE_READ|FMODE_WRITE; 2266 spin_lock(&state->owner->so_lock); 2267 /* Calculate the change in open mode */ 2268 if (state->n_rdwr == 0) { 2269 if (state->n_rdonly == 0) { 2270 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); 2271 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2272 calldata->arg.fmode &= ~FMODE_READ; 2273 } 2274 if (state->n_wronly == 0) { 2275 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); 2276 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2277 calldata->arg.fmode &= ~FMODE_WRITE; 2278 } 2279 } 2280 spin_unlock(&state->owner->so_lock); 2281 2282 if (!call_close) { 2283 /* Note: exit _without_ calling nfs4_close_done */ 2284 task->tk_action = NULL; 2285 goto out; 2286 } 2287 2288 if (calldata->arg.fmode == 0) { 2289 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2290 if (calldata->roc && 2291 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) 2292 goto out; 2293 } 2294 2295 nfs_fattr_init(calldata->res.fattr); 2296 calldata->timestamp = jiffies; 2297 if (nfs4_setup_sequence(NFS_SERVER(inode), 2298 &calldata->arg.seq_args, 2299 &calldata->res.seq_res, 2300 task) != 0) 2301 nfs_release_seqid(calldata->arg.seqid); 2302 else 2303 rpc_call_start(task); 2304 out: 2305 dprintk("%s: done!\n", __func__); 2306 } 2307 2308 static const struct rpc_call_ops nfs4_close_ops = { 2309 .rpc_call_prepare = nfs4_close_prepare, 2310 .rpc_call_done = nfs4_close_done, 2311 .rpc_release = nfs4_free_closedata, 2312 }; 2313 2314 /* 2315 * It is possible for data to be read/written from a mem-mapped file 2316 * after the sys_close call (which hits the vfs layer as a flush). 2317 * This means that we can't safely call nfsv4 close on a file until 2318 * the inode is cleared. 
This in turn means that we are not good 2319 * NFSv4 citizens - we do not indicate to the server to update the file's 2320 * share state even when we are done with one of the three share 2321 * stateid's in the inode. 2322 * 2323 * NOTE: Caller must be holding the sp->so_owner semaphore! 2324 */ 2325 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 2326 { 2327 struct nfs_server *server = NFS_SERVER(state->inode); 2328 struct nfs4_closedata *calldata; 2329 struct nfs4_state_owner *sp = state->owner; 2330 struct rpc_task *task; 2331 struct rpc_message msg = { 2332 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2333 .rpc_cred = state->owner->so_cred, 2334 }; 2335 struct rpc_task_setup task_setup_data = { 2336 .rpc_client = server->client, 2337 .rpc_message = &msg, 2338 .callback_ops = &nfs4_close_ops, 2339 .workqueue = nfsiod_workqueue, 2340 .flags = RPC_TASK_ASYNC, 2341 }; 2342 int status = -ENOMEM; 2343 2344 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2345 if (calldata == NULL) 2346 goto out; 2347 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2348 calldata->inode = state->inode; 2349 calldata->state = state; 2350 calldata->arg.fh = NFS_FH(state->inode); 2351 calldata->arg.stateid = &state->open_stateid; 2352 /* Serialization for the sequence id */ 2353 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask); 2354 if (calldata->arg.seqid == NULL) 2355 goto out_free_calldata; 2356 calldata->arg.fmode = 0; 2357 calldata->arg.bitmask = server->cache_consistency_bitmask; 2358 calldata->res.fattr = &calldata->fattr; 2359 calldata->res.seqid = calldata->arg.seqid; 2360 calldata->res.server = server; 2361 calldata->roc = pnfs_roc(state->inode); 2362 nfs_sb_active(calldata->inode->i_sb); 2363 2364 msg.rpc_argp = &calldata->arg; 2365 msg.rpc_resp = &calldata->res; 2366 task_setup_data.callback_data = calldata; 2367 task = rpc_run_task(&task_setup_data); 2368 if (IS_ERR(task)) 2369 return PTR_ERR(task); 2370 status = 0; 2371 if (wait) 2372 status = rpc_wait_for_completion_task(task); 2373 rpc_put_task(task); 2374 return status; 2375 out_free_calldata: 2376 kfree(calldata); 2377 out: 2378 nfs4_put_open_state(state); 2379 nfs4_put_state_owner(sp); 2380 return status; 2381 } 2382 2383 static struct inode * 2384 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) 2385 { 2386 struct nfs4_state *state; 2387 2388 /* Protect against concurrent sillydeletes */ 2389 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, 2390 ctx->cred, &ctx->mdsthreshold); 2391 if (IS_ERR(state)) 2392 return ERR_CAST(state); 2393 ctx->state = state; 2394 return igrab(state->inode); 2395 } 2396 2397 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 2398 { 2399 if (ctx->state == NULL) 2400 return; 2401 if (is_sync) 2402 nfs4_close_sync(ctx->state, ctx->mode); 2403 else 2404 nfs4_close_state(ctx->state, ctx->mode); 2405 } 2406 2407 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2408 { 2409 struct nfs4_server_caps_arg args = { 2410 .fhandle = fhandle, 2411 }; 2412 struct nfs4_server_caps_res res = {}; 2413 struct rpc_message msg = { 2414 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 2415 .rpc_argp = &args, 2416 .rpc_resp = &res, 2417 }; 2418 int status; 2419 2420 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2421 if (status == 0) { 2422 memcpy(server->attr_bitmask, 
res.attr_bitmask, sizeof(server->attr_bitmask)); 2423 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 2424 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 2425 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 2426 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 2427 NFS_CAP_CTIME|NFS_CAP_MTIME); 2428 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL) 2429 server->caps |= NFS_CAP_ACLS; 2430 if (res.has_links != 0) 2431 server->caps |= NFS_CAP_HARDLINKS; 2432 if (res.has_symlinks != 0) 2433 server->caps |= NFS_CAP_SYMLINKS; 2434 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 2435 server->caps |= NFS_CAP_FILEID; 2436 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 2437 server->caps |= NFS_CAP_MODE; 2438 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 2439 server->caps |= NFS_CAP_NLINK; 2440 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 2441 server->caps |= NFS_CAP_OWNER; 2442 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 2443 server->caps |= NFS_CAP_OWNER_GROUP; 2444 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 2445 server->caps |= NFS_CAP_ATIME; 2446 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 2447 server->caps |= NFS_CAP_CTIME; 2448 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 2449 server->caps |= NFS_CAP_MTIME; 2450 2451 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 2452 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 2453 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 2454 server->acl_bitmask = res.acl_bitmask; 2455 server->fh_expire_type = res.fh_expire_type; 2456 } 2457 2458 return status; 2459 } 2460 2461 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2462 { 2463 struct nfs4_exception exception = { }; 2464 int err; 2465 do { 2466 err = nfs4_handle_exception(server, 2467 _nfs4_server_capabilities(server, fhandle), 2468 &exception); 2469 } while (exception.retry); 2470 return err; 2471 } 2472 2473 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2474 struct nfs_fsinfo *info) 2475 { 2476 struct nfs4_lookup_root_arg args = { 2477 .bitmask = nfs4_fattr_bitmap, 2478 }; 2479 struct nfs4_lookup_res res = { 2480 .server = server, 2481 .fattr = info->fattr, 2482 .fh = fhandle, 2483 }; 2484 struct rpc_message msg = { 2485 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 2486 .rpc_argp = &args, 2487 .rpc_resp = &res, 2488 }; 2489 2490 nfs_fattr_init(info->fattr); 2491 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2492 } 2493 2494 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2495 struct nfs_fsinfo *info) 2496 { 2497 struct nfs4_exception exception = { }; 2498 int err; 2499 do { 2500 err = _nfs4_lookup_root(server, fhandle, info); 2501 switch (err) { 2502 case 0: 2503 case -NFS4ERR_WRONGSEC: 2504 goto out; 2505 default: 2506 err = nfs4_handle_exception(server, err, &exception); 2507 } 2508 } while (exception.retry); 2509 out: 2510 return err; 2511 } 2512 2513 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 2514 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 2515 { 2516 struct rpc_auth *auth; 2517 int ret; 2518 2519 auth = rpcauth_create(flavor, server->client); 2520 if (IS_ERR(auth)) { 2521 ret = -EIO; 2522 goto out; 2523 } 2524 ret = nfs4_lookup_root(server, fhandle, info); 2525 out: 2526 return ret; 2527 } 2528 2529 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 
2530 struct nfs_fsinfo *info) 2531 { 2532 int i, len, status = 0; 2533 rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS]; 2534 2535 len = rpcauth_list_flavors(flav_array, ARRAY_SIZE(flav_array)); 2536 BUG_ON(len < 0); 2537 2538 for (i = 0; i < len; i++) { 2539 /* AUTH_UNIX is the default flavor if none was specified, 2540 * thus has already been tried. */ 2541 if (flav_array[i] == RPC_AUTH_UNIX) 2542 continue; 2543 2544 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]); 2545 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 2546 continue; 2547 break; 2548 } 2549 /* 2550 * -EACCESS could mean that the user doesn't have correct permissions 2551 * to access the mount. It could also mean that we tried to mount 2552 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 2553 * existing mount programs don't handle -EACCES very well so it should 2554 * be mapped to -EPERM instead. 2555 */ 2556 if (status == -EACCES) 2557 status = -EPERM; 2558 return status; 2559 } 2560 2561 /* 2562 * get the file handle for the "/" directory on the server 2563 */ 2564 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 2565 struct nfs_fsinfo *info) 2566 { 2567 int minor_version = server->nfs_client->cl_minorversion; 2568 int status = nfs4_lookup_root(server, fhandle, info); 2569 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR)) 2570 /* 2571 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM 2572 * by nfs4_map_errors() as this function exits. 2573 */ 2574 status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info); 2575 if (status == 0) 2576 status = nfs4_server_capabilities(server, fhandle); 2577 if (status == 0) 2578 status = nfs4_do_fsinfo(server, fhandle, info); 2579 return nfs4_map_errors(status); 2580 } 2581 2582 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 2583 struct nfs_fsinfo *info) 2584 { 2585 int error; 2586 struct nfs_fattr *fattr = info->fattr; 2587 2588 error = nfs4_server_capabilities(server, mntfh); 2589 if (error < 0) { 2590 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 2591 return error; 2592 } 2593 2594 error = nfs4_proc_getattr(server, mntfh, fattr); 2595 if (error < 0) { 2596 dprintk("nfs4_get_root: getattr error = %d\n", -error); 2597 return error; 2598 } 2599 2600 if (fattr->valid & NFS_ATTR_FATTR_FSID && 2601 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 2602 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 2603 2604 return error; 2605 } 2606 2607 /* 2608 * Get locations and (maybe) other attributes of a referral. 
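 * (A referral is the server's way of saying that the object lives in a
 * different filesystem: the LOOKUP fails with NFS4ERR_MOVED and the
 * fs_locations attribute tells us where to go.)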
2609 * Note that we'll actually follow the referral later when 2610 * we detect fsid mismatch in inode revalidation 2611 */ 2612 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 2613 const struct qstr *name, struct nfs_fattr *fattr, 2614 struct nfs_fh *fhandle) 2615 { 2616 int status = -ENOMEM; 2617 struct page *page = NULL; 2618 struct nfs4_fs_locations *locations = NULL; 2619 2620 page = alloc_page(GFP_KERNEL); 2621 if (page == NULL) 2622 goto out; 2623 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 2624 if (locations == NULL) 2625 goto out; 2626 2627 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 2628 if (status != 0) 2629 goto out; 2630 /* Make sure server returned a different fsid for the referral */ 2631 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 2632 dprintk("%s: server did not return a different fsid for" 2633 " a referral at %s\n", __func__, name->name); 2634 status = -EIO; 2635 goto out; 2636 } 2637 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 2638 nfs_fixup_referral_attributes(&locations->fattr); 2639 2640 /* replace the lookup nfs_fattr with the locations nfs_fattr */ 2641 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 2642 memset(fhandle, 0, sizeof(struct nfs_fh)); 2643 out: 2644 if (page) 2645 __free_page(page); 2646 kfree(locations); 2647 return status; 2648 } 2649 2650 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2651 { 2652 struct nfs4_getattr_arg args = { 2653 .fh = fhandle, 2654 .bitmask = server->attr_bitmask, 2655 }; 2656 struct nfs4_getattr_res res = { 2657 .fattr = fattr, 2658 .server = server, 2659 }; 2660 struct rpc_message msg = { 2661 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 2662 .rpc_argp = &args, 2663 .rpc_resp = &res, 2664 }; 2665 2666 nfs_fattr_init(fattr); 2667 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2668 } 2669 2670 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2671 { 2672 struct nfs4_exception exception = { }; 2673 int err; 2674 do { 2675 err = nfs4_handle_exception(server, 2676 _nfs4_proc_getattr(server, fhandle, fattr), 2677 &exception); 2678 } while (exception.retry); 2679 return err; 2680 } 2681 2682 /* 2683 * The file is not closed if it is opened due to the a request to change 2684 * the size of the file. The open call will not be needed once the 2685 * VFS layer lookup-intents are implemented. 2686 * 2687 * Close is called when the inode is destroyed. 2688 * If we haven't opened the file for O_WRONLY, we 2689 * need to in the size_change case to obtain a stateid. 2690 * 2691 * Got race? 2692 * Because OPEN is always done by name in nfsv4, it is 2693 * possible that we opened a different file by the same 2694 * name. We can recognize this race condition, but we 2695 * can't do anything about it besides returning an error. 2696 * 2697 * This will be fixed with VFS changes (lookup-intent). 
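 * (In the meantime nfs4_proc_setattr() reuses the credential and open
 * state of an ATTR_FILE caller when one is supplied; otherwise
 * _nfs4_do_setattr() falls back to a delegation stateid or the zero
 * stateid.)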
2698 */ 2699 static int 2700 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 2701 struct iattr *sattr) 2702 { 2703 struct inode *inode = dentry->d_inode; 2704 struct rpc_cred *cred = NULL; 2705 struct nfs4_state *state = NULL; 2706 int status; 2707 2708 if (pnfs_ld_layoutret_on_setattr(inode)) 2709 pnfs_return_layout(inode); 2710 2711 nfs_fattr_init(fattr); 2712 2713 /* Deal with open(O_TRUNC) */ 2714 if (sattr->ia_valid & ATTR_OPEN) 2715 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); 2716 2717 /* Optimization: if the end result is no change, don't RPC */ 2718 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0) 2719 return 0; 2720 2721 /* Search for an existing open(O_WRITE) file */ 2722 if (sattr->ia_valid & ATTR_FILE) { 2723 struct nfs_open_context *ctx; 2724 2725 ctx = nfs_file_open_context(sattr->ia_file); 2726 if (ctx) { 2727 cred = ctx->cred; 2728 state = ctx->state; 2729 } 2730 } 2731 2732 status = nfs4_do_setattr(inode, cred, fattr, sattr, state); 2733 if (status == 0) 2734 nfs_setattr_update_inode(inode, sattr); 2735 return status; 2736 } 2737 2738 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 2739 const struct qstr *name, struct nfs_fh *fhandle, 2740 struct nfs_fattr *fattr) 2741 { 2742 struct nfs_server *server = NFS_SERVER(dir); 2743 int status; 2744 struct nfs4_lookup_arg args = { 2745 .bitmask = server->attr_bitmask, 2746 .dir_fh = NFS_FH(dir), 2747 .name = name, 2748 }; 2749 struct nfs4_lookup_res res = { 2750 .server = server, 2751 .fattr = fattr, 2752 .fh = fhandle, 2753 }; 2754 struct rpc_message msg = { 2755 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 2756 .rpc_argp = &args, 2757 .rpc_resp = &res, 2758 }; 2759 2760 nfs_fattr_init(fattr); 2761 2762 dprintk("NFS call lookup %s\n", name->name); 2763 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 2764 dprintk("NFS reply lookup: %d\n", status); 2765 return status; 2766 } 2767 2768 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 2769 { 2770 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 2771 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 2772 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 2773 fattr->nlink = 2; 2774 } 2775 2776 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 2777 struct qstr *name, struct nfs_fh *fhandle, 2778 struct nfs_fattr *fattr) 2779 { 2780 struct nfs4_exception exception = { }; 2781 struct rpc_clnt *client = *clnt; 2782 int err; 2783 do { 2784 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr); 2785 switch (err) { 2786 case -NFS4ERR_BADNAME: 2787 err = -ENOENT; 2788 goto out; 2789 case -NFS4ERR_MOVED: 2790 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 2791 goto out; 2792 case -NFS4ERR_WRONGSEC: 2793 err = -EPERM; 2794 if (client != *clnt) 2795 goto out; 2796 2797 client = nfs4_create_sec_client(client, dir, name); 2798 if (IS_ERR(client)) 2799 return PTR_ERR(client); 2800 2801 exception.retry = 1; 2802 break; 2803 default: 2804 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 2805 } 2806 } while (exception.retry); 2807 2808 out: 2809 if (err == 0) 2810 *clnt = client; 2811 else if (client != *clnt) 2812 rpc_shutdown_client(client); 2813 2814 return err; 2815 } 2816 2817 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 2818 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2819 { 2820 int status; 2821 struct rpc_clnt *client = NFS_CLIENT(dir); 2822 2823 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, 
fattr); 2824 if (client != NFS_CLIENT(dir)) { 2825 rpc_shutdown_client(client); 2826 nfs_fixup_secinfo_attributes(fattr); 2827 } 2828 return status; 2829 } 2830 2831 struct rpc_clnt * 2832 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 2833 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2834 { 2835 int status; 2836 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir)); 2837 2838 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr); 2839 if (status < 0) { 2840 rpc_shutdown_client(client); 2841 return ERR_PTR(status); 2842 } 2843 return client; 2844 } 2845 2846 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2847 { 2848 struct nfs_server *server = NFS_SERVER(inode); 2849 struct nfs4_accessargs args = { 2850 .fh = NFS_FH(inode), 2851 .bitmask = server->cache_consistency_bitmask, 2852 }; 2853 struct nfs4_accessres res = { 2854 .server = server, 2855 }; 2856 struct rpc_message msg = { 2857 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 2858 .rpc_argp = &args, 2859 .rpc_resp = &res, 2860 .rpc_cred = entry->cred, 2861 }; 2862 int mode = entry->mask; 2863 int status; 2864 2865 /* 2866 * Determine which access bits we want to ask for... 2867 */ 2868 if (mode & MAY_READ) 2869 args.access |= NFS4_ACCESS_READ; 2870 if (S_ISDIR(inode->i_mode)) { 2871 if (mode & MAY_WRITE) 2872 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 2873 if (mode & MAY_EXEC) 2874 args.access |= NFS4_ACCESS_LOOKUP; 2875 } else { 2876 if (mode & MAY_WRITE) 2877 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 2878 if (mode & MAY_EXEC) 2879 args.access |= NFS4_ACCESS_EXECUTE; 2880 } 2881 2882 res.fattr = nfs_alloc_fattr(); 2883 if (res.fattr == NULL) 2884 return -ENOMEM; 2885 2886 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2887 if (!status) { 2888 nfs_access_set_mask(entry, res.access); 2889 nfs_refresh_inode(inode, res.fattr); 2890 } 2891 nfs_free_fattr(res.fattr); 2892 return status; 2893 } 2894 2895 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2896 { 2897 struct nfs4_exception exception = { }; 2898 int err; 2899 do { 2900 err = nfs4_handle_exception(NFS_SERVER(inode), 2901 _nfs4_proc_access(inode, entry), 2902 &exception); 2903 } while (exception.retry); 2904 return err; 2905 } 2906 2907 /* 2908 * TODO: For the time being, we don't try to get any attributes 2909 * along with any of the zero-copy operations READ, READDIR, 2910 * READLINK, WRITE. 2911 * 2912 * In the case of the first three, we want to put the GETATTR 2913 * after the read-type operation -- this is because it is hard 2914 * to predict the length of a GETATTR response in v4, and thus 2915 * align the READ data correctly. This means that the GETATTR 2916 * may end up partially falling into the page cache, and we should 2917 * shift it into the 'tail' of the xdr_buf before processing. 2918 * To do this efficiently, we need to know the total length 2919 * of data received, which doesn't seem to be available outside 2920 * of the RPC layer. 2921 * 2922 * In the case of WRITE, we also want to put the GETATTR after 2923 * the operation -- in this case because we want to make sure 2924 * we get the post-operation mtime and size. 2925 * 2926 * Both of these changes to the XDR layer would in fact be quite 2927 * minor, but I decided to leave them for a subsequent patch. 
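 * (Note that putting the GETATTR before the READ would not help: its
 * unpredictable reply length is precisely what shifts the READ data and
 * causes the alignment problem described above.)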
2928 */ 2929 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 2930 unsigned int pgbase, unsigned int pglen) 2931 { 2932 struct nfs4_readlink args = { 2933 .fh = NFS_FH(inode), 2934 .pgbase = pgbase, 2935 .pglen = pglen, 2936 .pages = &page, 2937 }; 2938 struct nfs4_readlink_res res; 2939 struct rpc_message msg = { 2940 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 2941 .rpc_argp = &args, 2942 .rpc_resp = &res, 2943 }; 2944 2945 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 2946 } 2947 2948 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 2949 unsigned int pgbase, unsigned int pglen) 2950 { 2951 struct nfs4_exception exception = { }; 2952 int err; 2953 do { 2954 err = nfs4_handle_exception(NFS_SERVER(inode), 2955 _nfs4_proc_readlink(inode, page, pgbase, pglen), 2956 &exception); 2957 } while (exception.retry); 2958 return err; 2959 } 2960 2961 /* 2962 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 2963 */ 2964 static int 2965 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 2966 int flags) 2967 { 2968 struct nfs_open_context *ctx; 2969 struct nfs4_state *state; 2970 int status = 0; 2971 2972 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 2973 if (IS_ERR(ctx)) 2974 return PTR_ERR(ctx); 2975 2976 sattr->ia_mode &= ~current_umask(); 2977 state = nfs4_do_open(dir, dentry, ctx->mode, 2978 flags, sattr, ctx->cred, 2979 &ctx->mdsthreshold); 2980 d_drop(dentry); 2981 if (IS_ERR(state)) { 2982 status = PTR_ERR(state); 2983 goto out; 2984 } 2985 d_add(dentry, igrab(state->inode)); 2986 nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); 2987 ctx->state = state; 2988 out: 2989 put_nfs_open_context(ctx); 2990 return status; 2991 } 2992 2993 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 2994 { 2995 struct nfs_server *server = NFS_SERVER(dir); 2996 struct nfs_removeargs args = { 2997 .fh = NFS_FH(dir), 2998 .name = *name, 2999 }; 3000 struct nfs_removeres res = { 3001 .server = server, 3002 }; 3003 struct rpc_message msg = { 3004 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 3005 .rpc_argp = &args, 3006 .rpc_resp = &res, 3007 }; 3008 int status; 3009 3010 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 3011 if (status == 0) 3012 update_changeattr(dir, &res.cinfo); 3013 return status; 3014 } 3015 3016 static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 3017 { 3018 struct nfs4_exception exception = { }; 3019 int err; 3020 do { 3021 err = nfs4_handle_exception(NFS_SERVER(dir), 3022 _nfs4_proc_remove(dir, name), 3023 &exception); 3024 } while (exception.retry); 3025 return err; 3026 } 3027 3028 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) 3029 { 3030 struct nfs_server *server = NFS_SERVER(dir); 3031 struct nfs_removeargs *args = msg->rpc_argp; 3032 struct nfs_removeres *res = msg->rpc_resp; 3033 3034 res->server = server; 3035 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 3036 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1); 3037 } 3038 3039 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 3040 { 3041 if (nfs4_setup_sequence(NFS_SERVER(data->dir), 3042 &data->args.seq_args, 3043 &data->res.seq_res, 3044 task)) 3045 return; 3046 rpc_call_start(task); 3047 } 3048 3049 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 3050 { 3051 struct 
nfs_removeres *res = task->tk_msg.rpc_resp; 3052 3053 if (!nfs4_sequence_done(task, &res->seq_res)) 3054 return 0; 3055 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 3056 return 0; 3057 update_changeattr(dir, &res->cinfo); 3058 return 1; 3059 } 3060 3061 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 3062 { 3063 struct nfs_server *server = NFS_SERVER(dir); 3064 struct nfs_renameargs *arg = msg->rpc_argp; 3065 struct nfs_renameres *res = msg->rpc_resp; 3066 3067 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 3068 res->server = server; 3069 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1); 3070 } 3071 3072 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 3073 { 3074 if (nfs4_setup_sequence(NFS_SERVER(data->old_dir), 3075 &data->args.seq_args, 3076 &data->res.seq_res, 3077 task)) 3078 return; 3079 rpc_call_start(task); 3080 } 3081 3082 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 3083 struct inode *new_dir) 3084 { 3085 struct nfs_renameres *res = task->tk_msg.rpc_resp; 3086 3087 if (!nfs4_sequence_done(task, &res->seq_res)) 3088 return 0; 3089 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 3090 return 0; 3091 3092 update_changeattr(old_dir, &res->old_cinfo); 3093 update_changeattr(new_dir, &res->new_cinfo); 3094 return 1; 3095 } 3096 3097 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, 3098 struct inode *new_dir, struct qstr *new_name) 3099 { 3100 struct nfs_server *server = NFS_SERVER(old_dir); 3101 struct nfs_renameargs arg = { 3102 .old_dir = NFS_FH(old_dir), 3103 .new_dir = NFS_FH(new_dir), 3104 .old_name = old_name, 3105 .new_name = new_name, 3106 }; 3107 struct nfs_renameres res = { 3108 .server = server, 3109 }; 3110 struct rpc_message msg = { 3111 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME], 3112 .rpc_argp = &arg, 3113 .rpc_resp = &res, 3114 }; 3115 int status = -ENOMEM; 3116 3117 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3118 if (!status) { 3119 update_changeattr(old_dir, &res.old_cinfo); 3120 update_changeattr(new_dir, &res.new_cinfo); 3121 } 3122 return status; 3123 } 3124 3125 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, 3126 struct inode *new_dir, struct qstr *new_name) 3127 { 3128 struct nfs4_exception exception = { }; 3129 int err; 3130 do { 3131 err = nfs4_handle_exception(NFS_SERVER(old_dir), 3132 _nfs4_proc_rename(old_dir, old_name, 3133 new_dir, new_name), 3134 &exception); 3135 } while (exception.retry); 3136 return err; 3137 } 3138 3139 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3140 { 3141 struct nfs_server *server = NFS_SERVER(inode); 3142 struct nfs4_link_arg arg = { 3143 .fh = NFS_FH(inode), 3144 .dir_fh = NFS_FH(dir), 3145 .name = name, 3146 .bitmask = server->attr_bitmask, 3147 }; 3148 struct nfs4_link_res res = { 3149 .server = server, 3150 }; 3151 struct rpc_message msg = { 3152 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3153 .rpc_argp = &arg, 3154 .rpc_resp = &res, 3155 }; 3156 int status = -ENOMEM; 3157 3158 res.fattr = nfs_alloc_fattr(); 3159 if (res.fattr == NULL) 3160 goto out; 3161 3162 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3163 if (!status) { 3164 update_changeattr(dir, &res.cinfo); 3165 nfs_post_op_update_inode(inode, res.fattr); 3166 } 3167 out: 3168 nfs_free_fattr(res.fattr); 3169 return 
status; 3170 } 3171 3172 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3173 { 3174 struct nfs4_exception exception = { }; 3175 int err; 3176 do { 3177 err = nfs4_handle_exception(NFS_SERVER(inode), 3178 _nfs4_proc_link(inode, dir, name), 3179 &exception); 3180 } while (exception.retry); 3181 return err; 3182 } 3183 3184 struct nfs4_createdata { 3185 struct rpc_message msg; 3186 struct nfs4_create_arg arg; 3187 struct nfs4_create_res res; 3188 struct nfs_fh fh; 3189 struct nfs_fattr fattr; 3190 }; 3191 3192 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3193 struct qstr *name, struct iattr *sattr, u32 ftype) 3194 { 3195 struct nfs4_createdata *data; 3196 3197 data = kzalloc(sizeof(*data), GFP_KERNEL); 3198 if (data != NULL) { 3199 struct nfs_server *server = NFS_SERVER(dir); 3200 3201 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3202 data->msg.rpc_argp = &data->arg; 3203 data->msg.rpc_resp = &data->res; 3204 data->arg.dir_fh = NFS_FH(dir); 3205 data->arg.server = server; 3206 data->arg.name = name; 3207 data->arg.attrs = sattr; 3208 data->arg.ftype = ftype; 3209 data->arg.bitmask = server->attr_bitmask; 3210 data->res.server = server; 3211 data->res.fh = &data->fh; 3212 data->res.fattr = &data->fattr; 3213 nfs_fattr_init(data->res.fattr); 3214 } 3215 return data; 3216 } 3217 3218 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3219 { 3220 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3221 &data->arg.seq_args, &data->res.seq_res, 1); 3222 if (status == 0) { 3223 update_changeattr(dir, &data->res.dir_cinfo); 3224 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 3225 } 3226 return status; 3227 } 3228 3229 static void nfs4_free_createdata(struct nfs4_createdata *data) 3230 { 3231 kfree(data); 3232 } 3233 3234 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3235 struct page *page, unsigned int len, struct iattr *sattr) 3236 { 3237 struct nfs4_createdata *data; 3238 int status = -ENAMETOOLONG; 3239 3240 if (len > NFS4_MAXPATHLEN) 3241 goto out; 3242 3243 status = -ENOMEM; 3244 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3245 if (data == NULL) 3246 goto out; 3247 3248 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3249 data->arg.u.symlink.pages = &page; 3250 data->arg.u.symlink.len = len; 3251 3252 status = nfs4_do_create(dir, dentry, data); 3253 3254 nfs4_free_createdata(data); 3255 out: 3256 return status; 3257 } 3258 3259 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3260 struct page *page, unsigned int len, struct iattr *sattr) 3261 { 3262 struct nfs4_exception exception = { }; 3263 int err; 3264 do { 3265 err = nfs4_handle_exception(NFS_SERVER(dir), 3266 _nfs4_proc_symlink(dir, dentry, page, 3267 len, sattr), 3268 &exception); 3269 } while (exception.retry); 3270 return err; 3271 } 3272 3273 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3274 struct iattr *sattr) 3275 { 3276 struct nfs4_createdata *data; 3277 int status = -ENOMEM; 3278 3279 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 3280 if (data == NULL) 3281 goto out; 3282 3283 status = nfs4_do_create(dir, dentry, data); 3284 3285 nfs4_free_createdata(data); 3286 out: 3287 return status; 3288 } 3289 3290 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3291 struct iattr *sattr) 3292 { 3293 struct nfs4_exception 
exception = { }; 3294 int err; 3295 3296 sattr->ia_mode &= ~current_umask(); 3297 do { 3298 err = nfs4_handle_exception(NFS_SERVER(dir), 3299 _nfs4_proc_mkdir(dir, dentry, sattr), 3300 &exception); 3301 } while (exception.retry); 3302 return err; 3303 } 3304 3305 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3306 u64 cookie, struct page **pages, unsigned int count, int plus) 3307 { 3308 struct inode *dir = dentry->d_inode; 3309 struct nfs4_readdir_arg args = { 3310 .fh = NFS_FH(dir), 3311 .pages = pages, 3312 .pgbase = 0, 3313 .count = count, 3314 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask, 3315 .plus = plus, 3316 }; 3317 struct nfs4_readdir_res res; 3318 struct rpc_message msg = { 3319 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 3320 .rpc_argp = &args, 3321 .rpc_resp = &res, 3322 .rpc_cred = cred, 3323 }; 3324 int status; 3325 3326 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__, 3327 dentry->d_parent->d_name.name, 3328 dentry->d_name.name, 3329 (unsigned long long)cookie); 3330 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 3331 res.pgbase = args.pgbase; 3332 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 3333 if (status >= 0) { 3334 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 3335 status += args.pgbase; 3336 } 3337 3338 nfs_invalidate_atime(dir); 3339 3340 dprintk("%s: returns %d\n", __func__, status); 3341 return status; 3342 } 3343 3344 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3345 u64 cookie, struct page **pages, unsigned int count, int plus) 3346 { 3347 struct nfs4_exception exception = { }; 3348 int err; 3349 do { 3350 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), 3351 _nfs4_proc_readdir(dentry, cred, cookie, 3352 pages, count, plus), 3353 &exception); 3354 } while (exception.retry); 3355 return err; 3356 } 3357 3358 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3359 struct iattr *sattr, dev_t rdev) 3360 { 3361 struct nfs4_createdata *data; 3362 int mode = sattr->ia_mode; 3363 int status = -ENOMEM; 3364 3365 BUG_ON(!(sattr->ia_valid & ATTR_MODE)); 3366 BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode)); 3367 3368 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 3369 if (data == NULL) 3370 goto out; 3371 3372 if (S_ISFIFO(mode)) 3373 data->arg.ftype = NF4FIFO; 3374 else if (S_ISBLK(mode)) { 3375 data->arg.ftype = NF4BLK; 3376 data->arg.u.device.specdata1 = MAJOR(rdev); 3377 data->arg.u.device.specdata2 = MINOR(rdev); 3378 } 3379 else if (S_ISCHR(mode)) { 3380 data->arg.ftype = NF4CHR; 3381 data->arg.u.device.specdata1 = MAJOR(rdev); 3382 data->arg.u.device.specdata2 = MINOR(rdev); 3383 } 3384 3385 status = nfs4_do_create(dir, dentry, data); 3386 3387 nfs4_free_createdata(data); 3388 out: 3389 return status; 3390 } 3391 3392 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3393 struct iattr *sattr, dev_t rdev) 3394 { 3395 struct nfs4_exception exception = { }; 3396 int err; 3397 3398 sattr->ia_mode &= ~current_umask(); 3399 do { 3400 err = nfs4_handle_exception(NFS_SERVER(dir), 3401 _nfs4_proc_mknod(dir, dentry, sattr, rdev), 3402 &exception); 3403 } while (exception.retry); 3404 return err; 3405 } 3406 3407 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 3408 struct nfs_fsstat *fsstat) 3409 { 3410 struct nfs4_statfs_arg args = { 3411 .fh = fhandle, 3412 
.bitmask = server->attr_bitmask, 3413 }; 3414 struct nfs4_statfs_res res = { 3415 .fsstat = fsstat, 3416 }; 3417 struct rpc_message msg = { 3418 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 3419 .rpc_argp = &args, 3420 .rpc_resp = &res, 3421 }; 3422 3423 nfs_fattr_init(fsstat->fattr); 3424 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3425 } 3426 3427 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 3428 { 3429 struct nfs4_exception exception = { }; 3430 int err; 3431 do { 3432 err = nfs4_handle_exception(server, 3433 _nfs4_proc_statfs(server, fhandle, fsstat), 3434 &exception); 3435 } while (exception.retry); 3436 return err; 3437 } 3438 3439 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 3440 struct nfs_fsinfo *fsinfo) 3441 { 3442 struct nfs4_fsinfo_arg args = { 3443 .fh = fhandle, 3444 .bitmask = server->attr_bitmask, 3445 }; 3446 struct nfs4_fsinfo_res res = { 3447 .fsinfo = fsinfo, 3448 }; 3449 struct rpc_message msg = { 3450 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 3451 .rpc_argp = &args, 3452 .rpc_resp = &res, 3453 }; 3454 3455 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3456 } 3457 3458 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 3459 { 3460 struct nfs4_exception exception = { }; 3461 int err; 3462 3463 do { 3464 err = nfs4_handle_exception(server, 3465 _nfs4_do_fsinfo(server, fhandle, fsinfo), 3466 &exception); 3467 } while (exception.retry); 3468 return err; 3469 } 3470 3471 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 3472 { 3473 int error; 3474 3475 nfs_fattr_init(fsinfo->fattr); 3476 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 3477 if (error == 0) { 3478 /* block layout checks this! 
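 * (the blocklayout driver needs server->pnfs_blksize at mount time,
 * so record it before set_pnfs_layoutdriver() probes for a driver)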
*/ 3479 server->pnfs_blksize = fsinfo->blksize; 3480 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 3481 } 3482 3483 return error; 3484 } 3485 3486 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 3487 struct nfs_pathconf *pathconf) 3488 { 3489 struct nfs4_pathconf_arg args = { 3490 .fh = fhandle, 3491 .bitmask = server->attr_bitmask, 3492 }; 3493 struct nfs4_pathconf_res res = { 3494 .pathconf = pathconf, 3495 }; 3496 struct rpc_message msg = { 3497 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 3498 .rpc_argp = &args, 3499 .rpc_resp = &res, 3500 }; 3501 3502 /* None of the pathconf attributes are mandatory to implement */ 3503 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 3504 memset(pathconf, 0, sizeof(*pathconf)); 3505 return 0; 3506 } 3507 3508 nfs_fattr_init(pathconf->fattr); 3509 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3510 } 3511 3512 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 3513 struct nfs_pathconf *pathconf) 3514 { 3515 struct nfs4_exception exception = { }; 3516 int err; 3517 3518 do { 3519 err = nfs4_handle_exception(server, 3520 _nfs4_proc_pathconf(server, fhandle, pathconf), 3521 &exception); 3522 } while (exception.retry); 3523 return err; 3524 } 3525 3526 void __nfs4_read_done_cb(struct nfs_read_data *data) 3527 { 3528 nfs_invalidate_atime(data->header->inode); 3529 } 3530 3531 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data) 3532 { 3533 struct nfs_server *server = NFS_SERVER(data->header->inode); 3534 3535 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) { 3536 rpc_restart_call_prepare(task); 3537 return -EAGAIN; 3538 } 3539 3540 __nfs4_read_done_cb(data); 3541 if (task->tk_status > 0) 3542 renew_lease(server, data->timestamp); 3543 return 0; 3544 } 3545 3546 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data) 3547 { 3548 3549 dprintk("--> %s\n", __func__); 3550 3551 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3552 return -EAGAIN; 3553 3554 return data->read_done_cb ? data->read_done_cb(task, data) : 3555 nfs4_read_done_cb(task, data); 3556 } 3557 3558 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg) 3559 { 3560 data->timestamp = jiffies; 3561 data->read_done_cb = nfs4_read_done_cb; 3562 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 3563 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 3564 } 3565 3566 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data) 3567 { 3568 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode), 3569 &data->args.seq_args, 3570 &data->res.seq_res, 3571 task)) 3572 return; 3573 rpc_call_start(task); 3574 } 3575 3576 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data) 3577 { 3578 struct inode *inode = data->header->inode; 3579 3580 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) { 3581 rpc_restart_call_prepare(task); 3582 return -EAGAIN; 3583 } 3584 if (task->tk_status >= 0) { 3585 renew_lease(NFS_SERVER(inode), data->timestamp); 3586 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 3587 } 3588 return 0; 3589 } 3590 3591 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data) 3592 { 3593 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3594 return -EAGAIN; 3595 return data->write_done_cb ? 
data->write_done_cb(task, data) : 3596 nfs4_write_done_cb(task, data); 3597 } 3598 3599 static 3600 bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data) 3601 { 3602 const struct nfs_pgio_header *hdr = data->header; 3603 3604 /* Don't request attributes for pNFS or O_DIRECT writes */ 3605 if (data->ds_clp != NULL || hdr->dreq != NULL) 3606 return false; 3607 /* Otherwise, request attributes if and only if we don't hold 3608 * a delegation 3609 */ 3610 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 3611 } 3612 3613 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg) 3614 { 3615 struct nfs_server *server = NFS_SERVER(data->header->inode); 3616 3617 if (!nfs4_write_need_cache_consistency_data(data)) { 3618 data->args.bitmask = NULL; 3619 data->res.fattr = NULL; 3620 } else 3621 data->args.bitmask = server->cache_consistency_bitmask; 3622 3623 if (!data->write_done_cb) 3624 data->write_done_cb = nfs4_write_done_cb; 3625 data->res.server = server; 3626 data->timestamp = jiffies; 3627 3628 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 3629 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3630 } 3631 3632 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data) 3633 { 3634 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode), 3635 &data->args.seq_args, 3636 &data->res.seq_res, 3637 task)) 3638 return; 3639 rpc_call_start(task); 3640 } 3641 3642 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 3643 { 3644 if (nfs4_setup_sequence(NFS_SERVER(data->inode), 3645 &data->args.seq_args, 3646 &data->res.seq_res, 3647 task)) 3648 return; 3649 rpc_call_start(task); 3650 } 3651 3652 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 3653 { 3654 struct inode *inode = data->inode; 3655 3656 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) { 3657 rpc_restart_call_prepare(task); 3658 return -EAGAIN; 3659 } 3660 return 0; 3661 } 3662 3663 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 3664 { 3665 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3666 return -EAGAIN; 3667 return data->commit_done_cb(task, data); 3668 } 3669 3670 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 3671 { 3672 struct nfs_server *server = NFS_SERVER(data->inode); 3673 3674 if (data->commit_done_cb == NULL) 3675 data->commit_done_cb = nfs4_commit_done_cb; 3676 data->res.server = server; 3677 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 3678 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3679 } 3680 3681 struct nfs4_renewdata { 3682 struct nfs_client *client; 3683 unsigned long timestamp; 3684 }; 3685 3686 /* 3687 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 3688 * standalone procedure for queueing an asynchronous RENEW. 
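 * It is normally driven from the lease renewal timer; the rpc_release
 * callback (nfs4_renew_release) schedules the next renewal and drops the
 * nfs_client reference taken before the call was queued.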
3689 */ 3690 static void nfs4_renew_release(void *calldata) 3691 { 3692 struct nfs4_renewdata *data = calldata; 3693 struct nfs_client *clp = data->client; 3694 3695 if (atomic_read(&clp->cl_count) > 1) 3696 nfs4_schedule_state_renewal(clp); 3697 nfs_put_client(clp); 3698 kfree(data); 3699 } 3700 3701 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 3702 { 3703 struct nfs4_renewdata *data = calldata; 3704 struct nfs_client *clp = data->client; 3705 unsigned long timestamp = data->timestamp; 3706 3707 if (task->tk_status < 0) { 3708 /* Unless we're shutting down, schedule state recovery! */ 3709 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 3710 return; 3711 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 3712 nfs4_schedule_lease_recovery(clp); 3713 return; 3714 } 3715 nfs4_schedule_path_down_recovery(clp); 3716 } 3717 do_renew_lease(clp, timestamp); 3718 } 3719 3720 static const struct rpc_call_ops nfs4_renew_ops = { 3721 .rpc_call_done = nfs4_renew_done, 3722 .rpc_release = nfs4_renew_release, 3723 }; 3724 3725 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 3726 { 3727 struct rpc_message msg = { 3728 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3729 .rpc_argp = clp, 3730 .rpc_cred = cred, 3731 }; 3732 struct nfs4_renewdata *data; 3733 3734 if (renew_flags == 0) 3735 return 0; 3736 if (!atomic_inc_not_zero(&clp->cl_count)) 3737 return -EIO; 3738 data = kmalloc(sizeof(*data), GFP_NOFS); 3739 if (data == NULL) 3740 return -ENOMEM; 3741 data->client = clp; 3742 data->timestamp = jiffies; 3743 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, 3744 &nfs4_renew_ops, data); 3745 } 3746 3747 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 3748 { 3749 struct rpc_message msg = { 3750 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3751 .rpc_argp = clp, 3752 .rpc_cred = cred, 3753 }; 3754 unsigned long now = jiffies; 3755 int status; 3756 3757 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); 3758 if (status < 0) 3759 return status; 3760 do_renew_lease(clp, now); 3761 return 0; 3762 } 3763 3764 static inline int nfs4_server_supports_acls(struct nfs_server *server) 3765 { 3766 return (server->caps & NFS_CAP_ACLS) 3767 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3768 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL); 3769 } 3770 3771 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 3772 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 3773 * the stack. 
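 * With XATTR_SIZE_MAX = 65536 and 4K pages that works out to 16 page
 * pointers, i.e. 128 bytes of stack on a 64-bit build.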
3774 */ 3775 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 3776 3777 static int buf_to_pages_noslab(const void *buf, size_t buflen, 3778 struct page **pages, unsigned int *pgbase) 3779 { 3780 struct page *newpage, **spages; 3781 int rc = 0; 3782 size_t len; 3783 spages = pages; 3784 3785 do { 3786 len = min_t(size_t, PAGE_SIZE, buflen); 3787 newpage = alloc_page(GFP_KERNEL); 3788 3789 if (newpage == NULL) 3790 goto unwind; 3791 memcpy(page_address(newpage), buf, len); 3792 buf += len; 3793 buflen -= len; 3794 *pages++ = newpage; 3795 rc++; 3796 } while (buflen != 0); 3797 3798 return rc; 3799 3800 unwind: 3801 for(; rc > 0; rc--) 3802 __free_page(spages[rc-1]); 3803 return -ENOMEM; 3804 } 3805 3806 struct nfs4_cached_acl { 3807 int cached; 3808 size_t len; 3809 char data[0]; 3810 }; 3811 3812 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 3813 { 3814 struct nfs_inode *nfsi = NFS_I(inode); 3815 3816 spin_lock(&inode->i_lock); 3817 kfree(nfsi->nfs4_acl); 3818 nfsi->nfs4_acl = acl; 3819 spin_unlock(&inode->i_lock); 3820 } 3821 3822 static void nfs4_zap_acl_attr(struct inode *inode) 3823 { 3824 nfs4_set_cached_acl(inode, NULL); 3825 } 3826 3827 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 3828 { 3829 struct nfs_inode *nfsi = NFS_I(inode); 3830 struct nfs4_cached_acl *acl; 3831 int ret = -ENOENT; 3832 3833 spin_lock(&inode->i_lock); 3834 acl = nfsi->nfs4_acl; 3835 if (acl == NULL) 3836 goto out; 3837 if (buf == NULL) /* user is just asking for length */ 3838 goto out_len; 3839 if (acl->cached == 0) 3840 goto out; 3841 ret = -ERANGE; /* see getxattr(2) man page */ 3842 if (acl->len > buflen) 3843 goto out; 3844 memcpy(buf, acl->data, acl->len); 3845 out_len: 3846 ret = acl->len; 3847 out: 3848 spin_unlock(&inode->i_lock); 3849 return ret; 3850 } 3851 3852 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 3853 { 3854 struct nfs4_cached_acl *acl; 3855 size_t buflen = sizeof(*acl) + acl_len; 3856 3857 if (buflen <= PAGE_SIZE) { 3858 acl = kmalloc(buflen, GFP_KERNEL); 3859 if (acl == NULL) 3860 goto out; 3861 acl->cached = 1; 3862 _copy_from_pages(acl->data, pages, pgbase, acl_len); 3863 } else { 3864 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 3865 if (acl == NULL) 3866 goto out; 3867 acl->cached = 0; 3868 } 3869 acl->len = acl_len; 3870 out: 3871 nfs4_set_cached_acl(inode, acl); 3872 } 3873 3874 /* 3875 * The getxattr API returns the required buffer length when called with a 3876 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 3877 * the required buf. On a NULL buf, we send a page of data to the server 3878 * guessing that the ACL request can be serviced by a page. If so, we cache 3879 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 3880 * the cache. If not so, we throw away the page, and cache the required 3881 * length. The next getxattr call will then produce another round trip to 3882 * the server, this time with the input buf of the required size. 
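 *
 * From user space the sequence typically looks like this (illustrative
 * sketch only, not code from this file):
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	len = getxattr(path, "system.nfs4_acl", buf, len);
 *
 * so the second call can normally be satisfied from the cache primed by
 * the first, provided the ACL fits within a single page.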
3883 */ 3884 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 3885 { 3886 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 3887 struct nfs_getaclargs args = { 3888 .fh = NFS_FH(inode), 3889 .acl_pages = pages, 3890 .acl_len = buflen, 3891 }; 3892 struct nfs_getaclres res = { 3893 .acl_len = buflen, 3894 }; 3895 struct rpc_message msg = { 3896 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 3897 .rpc_argp = &args, 3898 .rpc_resp = &res, 3899 }; 3900 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 3901 int ret = -ENOMEM, i; 3902 3903 /* As long as we're doing a round trip to the server anyway, 3904 * let's be prepared for a page of acl data. */ 3905 if (npages == 0) 3906 npages = 1; 3907 if (npages > ARRAY_SIZE(pages)) 3908 return -ERANGE; 3909 3910 for (i = 0; i < npages; i++) { 3911 pages[i] = alloc_page(GFP_KERNEL); 3912 if (!pages[i]) 3913 goto out_free; 3914 } 3915 3916 /* for decoding across pages */ 3917 res.acl_scratch = alloc_page(GFP_KERNEL); 3918 if (!res.acl_scratch) 3919 goto out_free; 3920 3921 args.acl_len = npages * PAGE_SIZE; 3922 args.acl_pgbase = 0; 3923 3924 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 3925 __func__, buf, buflen, npages, args.acl_len); 3926 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 3927 &msg, &args.seq_args, &res.seq_res, 0); 3928 if (ret) 3929 goto out_free; 3930 3931 /* Handle the case where the passed-in buffer is too short */ 3932 if (res.acl_flags & NFS4_ACL_TRUNC) { 3933 /* Did the user only issue a request for the acl length? */ 3934 if (buf == NULL) 3935 goto out_ok; 3936 ret = -ERANGE; 3937 goto out_free; 3938 } 3939 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 3940 if (buf) 3941 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 3942 out_ok: 3943 ret = res.acl_len; 3944 out_free: 3945 for (i = 0; i < npages; i++) 3946 if (pages[i]) 3947 __free_page(pages[i]); 3948 if (res.acl_scratch) 3949 __free_page(res.acl_scratch); 3950 return ret; 3951 } 3952 3953 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 3954 { 3955 struct nfs4_exception exception = { }; 3956 ssize_t ret; 3957 do { 3958 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 3959 if (ret >= 0) 3960 break; 3961 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 3962 } while (exception.retry); 3963 return ret; 3964 } 3965 3966 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 3967 { 3968 struct nfs_server *server = NFS_SERVER(inode); 3969 int ret; 3970 3971 if (!nfs4_server_supports_acls(server)) 3972 return -EOPNOTSUPP; 3973 ret = nfs_revalidate_inode(server, inode); 3974 if (ret < 0) 3975 return ret; 3976 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 3977 nfs_zap_acl_cache(inode); 3978 ret = nfs4_read_cached_acl(inode, buf, buflen); 3979 if (ret != -ENOENT) 3980 /* -ENOENT is returned if there is no ACL or if there is an ACL 3981 * but no cached acl data, just the acl length */ 3982 return ret; 3983 return nfs4_get_acl_uncached(inode, buf, buflen); 3984 } 3985 3986 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 3987 { 3988 struct nfs_server *server = NFS_SERVER(inode); 3989 struct page *pages[NFS4ACL_MAXPAGES]; 3990 struct nfs_setaclargs arg = { 3991 .fh = NFS_FH(inode), 3992 .acl_pages = pages, 3993 .acl_len = buflen, 3994 }; 3995 struct nfs_setaclres res; 3996 struct rpc_message msg = { 3997 .rpc_proc = 
&nfs4_procedures[NFSPROC4_CLNT_SETACL], 3998 .rpc_argp = &arg, 3999 .rpc_resp = &res, 4000 }; 4001 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 4002 int ret, i; 4003 4004 if (!nfs4_server_supports_acls(server)) 4005 return -EOPNOTSUPP; 4006 if (npages > ARRAY_SIZE(pages)) 4007 return -ERANGE; 4008 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 4009 if (i < 0) 4010 return i; 4011 nfs4_inode_return_delegation(inode); 4012 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4013 4014 /* 4015 * Free each page after tx, so the only ref left is 4016 * held by the network stack 4017 */ 4018 for (; i > 0; i--) 4019 put_page(pages[i-1]); 4020 4021 /* 4022 * Acl update can result in inode attribute update. 4023 * so mark the attribute cache invalid. 4024 */ 4025 spin_lock(&inode->i_lock); 4026 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 4027 spin_unlock(&inode->i_lock); 4028 nfs_access_zap_cache(inode); 4029 nfs_zap_acl_cache(inode); 4030 return ret; 4031 } 4032 4033 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 4034 { 4035 struct nfs4_exception exception = { }; 4036 int err; 4037 do { 4038 err = nfs4_handle_exception(NFS_SERVER(inode), 4039 __nfs4_proc_set_acl(inode, buf, buflen), 4040 &exception); 4041 } while (exception.retry); 4042 return err; 4043 } 4044 4045 static int 4046 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state) 4047 { 4048 struct nfs_client *clp = server->nfs_client; 4049 4050 if (task->tk_status >= 0) 4051 return 0; 4052 switch(task->tk_status) { 4053 case -NFS4ERR_DELEG_REVOKED: 4054 case -NFS4ERR_ADMIN_REVOKED: 4055 case -NFS4ERR_BAD_STATEID: 4056 if (state == NULL) 4057 break; 4058 nfs_remove_bad_delegation(state->inode); 4059 case -NFS4ERR_OPENMODE: 4060 if (state == NULL) 4061 break; 4062 nfs4_schedule_stateid_recovery(server, state); 4063 goto wait_on_recovery; 4064 case -NFS4ERR_EXPIRED: 4065 if (state != NULL) 4066 nfs4_schedule_stateid_recovery(server, state); 4067 case -NFS4ERR_STALE_STATEID: 4068 case -NFS4ERR_STALE_CLIENTID: 4069 nfs4_schedule_lease_recovery(clp); 4070 goto wait_on_recovery; 4071 #if defined(CONFIG_NFS_V4_1) 4072 case -NFS4ERR_BADSESSION: 4073 case -NFS4ERR_BADSLOT: 4074 case -NFS4ERR_BAD_HIGH_SLOT: 4075 case -NFS4ERR_DEADSESSION: 4076 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 4077 case -NFS4ERR_SEQ_FALSE_RETRY: 4078 case -NFS4ERR_SEQ_MISORDERED: 4079 dprintk("%s ERROR %d, Reset session\n", __func__, 4080 task->tk_status); 4081 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); 4082 task->tk_status = 0; 4083 return -EAGAIN; 4084 #endif /* CONFIG_NFS_V4_1 */ 4085 case -NFS4ERR_DELAY: 4086 nfs_inc_server_stats(server, NFSIOS_DELAY); 4087 case -NFS4ERR_GRACE: 4088 case -EKEYEXPIRED: 4089 rpc_delay(task, NFS4_POLL_RETRY_MAX); 4090 task->tk_status = 0; 4091 return -EAGAIN; 4092 case -NFS4ERR_RETRY_UNCACHED_REP: 4093 case -NFS4ERR_OLD_STATEID: 4094 task->tk_status = 0; 4095 return -EAGAIN; 4096 } 4097 task->tk_status = nfs4_map_errors(task->tk_status); 4098 return 0; 4099 wait_on_recovery: 4100 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 4101 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 4102 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 4103 task->tk_status = 0; 4104 return -EAGAIN; 4105 } 4106 4107 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 4108 nfs4_verifier *bootverf) 4109 { 4110 __be32 verf[2]; 4111 4112 if 
(test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 4113 /* An impossible timestamp guarantees this value 4114 * will never match a generated boot time. */ 4115 verf[0] = 0; 4116 verf[1] = (__be32)(NSEC_PER_SEC + 1); 4117 } else { 4118 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 4119 verf[0] = (__be32)nn->boot_time.tv_sec; 4120 verf[1] = (__be32)nn->boot_time.tv_nsec; 4121 } 4122 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 4123 } 4124 4125 static unsigned int 4126 nfs4_init_nonuniform_client_string(const struct nfs_client *clp, 4127 char *buf, size_t len) 4128 { 4129 unsigned int result; 4130 4131 rcu_read_lock(); 4132 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s", 4133 clp->cl_ipaddr, 4134 rpc_peeraddr2str(clp->cl_rpcclient, 4135 RPC_DISPLAY_ADDR), 4136 rpc_peeraddr2str(clp->cl_rpcclient, 4137 RPC_DISPLAY_PROTO)); 4138 rcu_read_unlock(); 4139 return result; 4140 } 4141 4142 static unsigned int 4143 nfs4_init_uniform_client_string(const struct nfs_client *clp, 4144 char *buf, size_t len) 4145 { 4146 char *nodename = clp->cl_rpcclient->cl_nodename; 4147 4148 if (nfs4_client_id_uniquifier[0] != '\0') 4149 nodename = nfs4_client_id_uniquifier; 4150 return scnprintf(buf, len, "Linux NFSv%u.%u %s", 4151 clp->rpc_ops->version, clp->cl_minorversion, 4152 nodename); 4153 } 4154 4155 /** 4156 * nfs4_proc_setclientid - Negotiate client ID 4157 * @clp: state data structure 4158 * @program: RPC program for NFSv4 callback service 4159 * @port: IP port number for NFS4 callback service 4160 * @cred: RPC credential to use for this call 4161 * @res: where to place the result 4162 * 4163 * Returns zero, a negative errno, or a negative NFS4ERR status code. 4164 */ 4165 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 4166 unsigned short port, struct rpc_cred *cred, 4167 struct nfs4_setclientid_res *res) 4168 { 4169 nfs4_verifier sc_verifier; 4170 struct nfs4_setclientid setclientid = { 4171 .sc_verifier = &sc_verifier, 4172 .sc_prog = program, 4173 .sc_cb_ident = clp->cl_cb_ident, 4174 }; 4175 struct rpc_message msg = { 4176 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 4177 .rpc_argp = &setclientid, 4178 .rpc_resp = res, 4179 .rpc_cred = cred, 4180 }; 4181 int status; 4182 4183 /* nfs_client_id4 */ 4184 nfs4_init_boot_verifier(clp, &sc_verifier); 4185 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 4186 setclientid.sc_name_len = 4187 nfs4_init_uniform_client_string(clp, 4188 setclientid.sc_name, 4189 sizeof(setclientid.sc_name)); 4190 else 4191 setclientid.sc_name_len = 4192 nfs4_init_nonuniform_client_string(clp, 4193 setclientid.sc_name, 4194 sizeof(setclientid.sc_name)); 4195 /* cb_client4 */ 4196 rcu_read_lock(); 4197 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid, 4198 sizeof(setclientid.sc_netid), 4199 rpc_peeraddr2str(clp->cl_rpcclient, 4200 RPC_DISPLAY_NETID)); 4201 rcu_read_unlock(); 4202 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 4203 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 4204 clp->cl_ipaddr, port >> 8, port & 255); 4205 4206 dprintk("NFS call setclientid auth=%s, '%.*s'\n", 4207 clp->cl_rpcclient->cl_auth->au_ops->au_name, 4208 setclientid.sc_name_len, setclientid.sc_name); 4209 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4210 dprintk("NFS reply setclientid: %d\n", status); 4211 return status; 4212 } 4213 4214 /** 4215 * nfs4_proc_setclientid_confirm - Confirm client ID 4216 * @clp: state data structure 4217 * @res: result of a previous SETCLIENTID 4218 * @cred: RPC credential to use for 
this call 4219 * 4220 * Returns zero, a negative errno, or a negative NFS4ERR status code. 4221 */ 4222 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 4223 struct nfs4_setclientid_res *arg, 4224 struct rpc_cred *cred) 4225 { 4226 struct nfs_fsinfo fsinfo; 4227 struct rpc_message msg = { 4228 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 4229 .rpc_argp = arg, 4230 .rpc_resp = &fsinfo, 4231 .rpc_cred = cred, 4232 }; 4233 unsigned long now; 4234 int status; 4235 4236 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 4237 clp->cl_rpcclient->cl_auth->au_ops->au_name, 4238 clp->cl_clientid); 4239 now = jiffies; 4240 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4241 if (status == 0) { 4242 spin_lock(&clp->cl_lock); 4243 clp->cl_lease_time = fsinfo.lease_time * HZ; 4244 clp->cl_last_renewal = now; 4245 spin_unlock(&clp->cl_lock); 4246 } 4247 dprintk("NFS reply setclientid_confirm: %d\n", status); 4248 return status; 4249 } 4250 4251 struct nfs4_delegreturndata { 4252 struct nfs4_delegreturnargs args; 4253 struct nfs4_delegreturnres res; 4254 struct nfs_fh fh; 4255 nfs4_stateid stateid; 4256 unsigned long timestamp; 4257 struct nfs_fattr fattr; 4258 int rpc_status; 4259 }; 4260 4261 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 4262 { 4263 struct nfs4_delegreturndata *data = calldata; 4264 4265 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4266 return; 4267 4268 switch (task->tk_status) { 4269 case -NFS4ERR_STALE_STATEID: 4270 case -NFS4ERR_EXPIRED: 4271 case 0: 4272 renew_lease(data->res.server, data->timestamp); 4273 break; 4274 default: 4275 if (nfs4_async_handle_error(task, data->res.server, NULL) == 4276 -EAGAIN) { 4277 rpc_restart_call_prepare(task); 4278 return; 4279 } 4280 } 4281 data->rpc_status = task->tk_status; 4282 } 4283 4284 static void nfs4_delegreturn_release(void *calldata) 4285 { 4286 kfree(calldata); 4287 } 4288 4289 #if defined(CONFIG_NFS_V4_1) 4290 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 4291 { 4292 struct nfs4_delegreturndata *d_data; 4293 4294 d_data = (struct nfs4_delegreturndata *)data; 4295 4296 if (nfs4_setup_sequence(d_data->res.server, 4297 &d_data->args.seq_args, 4298 &d_data->res.seq_res, task)) 4299 return; 4300 rpc_call_start(task); 4301 } 4302 #endif /* CONFIG_NFS_V4_1 */ 4303 4304 static const struct rpc_call_ops nfs4_delegreturn_ops = { 4305 #if defined(CONFIG_NFS_V4_1) 4306 .rpc_call_prepare = nfs4_delegreturn_prepare, 4307 #endif /* CONFIG_NFS_V4_1 */ 4308 .rpc_call_done = nfs4_delegreturn_done, 4309 .rpc_release = nfs4_delegreturn_release, 4310 }; 4311 4312 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 4313 { 4314 struct nfs4_delegreturndata *data; 4315 struct nfs_server *server = NFS_SERVER(inode); 4316 struct rpc_task *task; 4317 struct rpc_message msg = { 4318 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 4319 .rpc_cred = cred, 4320 }; 4321 struct rpc_task_setup task_setup_data = { 4322 .rpc_client = server->client, 4323 .rpc_message = &msg, 4324 .callback_ops = &nfs4_delegreturn_ops, 4325 .flags = RPC_TASK_ASYNC, 4326 }; 4327 int status = 0; 4328 4329 data = kzalloc(sizeof(*data), GFP_NOFS); 4330 if (data == NULL) 4331 return -ENOMEM; 4332 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4333 data->args.fhandle = &data->fh; 4334 data->args.stateid = &data->stateid; 4335 data->args.bitmask = server->cache_consistency_bitmask; 
4336 nfs_copy_fh(&data->fh, NFS_FH(inode)); 4337 nfs4_stateid_copy(&data->stateid, stateid); 4338 data->res.fattr = &data->fattr; 4339 data->res.server = server; 4340 nfs_fattr_init(data->res.fattr); 4341 data->timestamp = jiffies; 4342 data->rpc_status = 0; 4343 4344 task_setup_data.callback_data = data; 4345 msg.rpc_argp = &data->args; 4346 msg.rpc_resp = &data->res; 4347 task = rpc_run_task(&task_setup_data); 4348 if (IS_ERR(task)) 4349 return PTR_ERR(task); 4350 if (!issync) 4351 goto out; 4352 status = nfs4_wait_for_completion_rpc_task(task); 4353 if (status != 0) 4354 goto out; 4355 status = data->rpc_status; 4356 if (status == 0) 4357 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 4358 else 4359 nfs_refresh_inode(inode, &data->fattr); 4360 out: 4361 rpc_put_task(task); 4362 return status; 4363 } 4364 4365 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 4366 { 4367 struct nfs_server *server = NFS_SERVER(inode); 4368 struct nfs4_exception exception = { }; 4369 int err; 4370 do { 4371 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 4372 switch (err) { 4373 case -NFS4ERR_STALE_STATEID: 4374 case -NFS4ERR_EXPIRED: 4375 case 0: 4376 return 0; 4377 } 4378 err = nfs4_handle_exception(server, err, &exception); 4379 } while (exception.retry); 4380 return err; 4381 } 4382 4383 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 4384 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 4385 4386 /* 4387 * sleep, with exponential backoff, and retry the LOCK operation. 4388 */ 4389 static unsigned long 4390 nfs4_set_lock_task_retry(unsigned long timeout) 4391 { 4392 freezable_schedule_timeout_killable(timeout); 4393 timeout <<= 1; 4394 if (timeout > NFS4_LOCK_MAXTIMEOUT) 4395 return NFS4_LOCK_MAXTIMEOUT; 4396 return timeout; 4397 } 4398 4399 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4400 { 4401 struct inode *inode = state->inode; 4402 struct nfs_server *server = NFS_SERVER(inode); 4403 struct nfs_client *clp = server->nfs_client; 4404 struct nfs_lockt_args arg = { 4405 .fh = NFS_FH(inode), 4406 .fl = request, 4407 }; 4408 struct nfs_lockt_res res = { 4409 .denied = request, 4410 }; 4411 struct rpc_message msg = { 4412 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 4413 .rpc_argp = &arg, 4414 .rpc_resp = &res, 4415 .rpc_cred = state->owner->so_cred, 4416 }; 4417 struct nfs4_lock_state *lsp; 4418 int status; 4419 4420 arg.lock_owner.clientid = clp->cl_clientid; 4421 status = nfs4_set_lock_state(state, request); 4422 if (status != 0) 4423 goto out; 4424 lsp = request->fl_u.nfs4_fl.owner; 4425 arg.lock_owner.id = lsp->ls_seqid.owner_id; 4426 arg.lock_owner.s_dev = server->s_dev; 4427 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4428 switch (status) { 4429 case 0: 4430 request->fl_type = F_UNLCK; 4431 break; 4432 case -NFS4ERR_DENIED: 4433 status = 0; 4434 } 4435 request->fl_ops->fl_release_private(request); 4436 out: 4437 return status; 4438 } 4439 4440 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4441 { 4442 struct nfs4_exception exception = { }; 4443 int err; 4444 4445 do { 4446 err = nfs4_handle_exception(NFS_SERVER(state->inode), 4447 _nfs4_proc_getlk(state, cmd, request), 4448 &exception); 4449 } while (exception.retry); 4450 return err; 4451 } 4452 4453 static int do_vfs_lock(struct file *file, struct file_lock *fl) 4454 { 4455 int res = 0; 4456 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 4457 case 
FL_POSIX: 4458 res = posix_lock_file_wait(file, fl); 4459 break; 4460 case FL_FLOCK: 4461 res = flock_lock_file_wait(file, fl); 4462 break; 4463 default: 4464 BUG(); 4465 } 4466 return res; 4467 } 4468 4469 struct nfs4_unlockdata { 4470 struct nfs_locku_args arg; 4471 struct nfs_locku_res res; 4472 struct nfs4_lock_state *lsp; 4473 struct nfs_open_context *ctx; 4474 struct file_lock fl; 4475 const struct nfs_server *server; 4476 unsigned long timestamp; 4477 }; 4478 4479 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 4480 struct nfs_open_context *ctx, 4481 struct nfs4_lock_state *lsp, 4482 struct nfs_seqid *seqid) 4483 { 4484 struct nfs4_unlockdata *p; 4485 struct inode *inode = lsp->ls_state->inode; 4486 4487 p = kzalloc(sizeof(*p), GFP_NOFS); 4488 if (p == NULL) 4489 return NULL; 4490 p->arg.fh = NFS_FH(inode); 4491 p->arg.fl = &p->fl; 4492 p->arg.seqid = seqid; 4493 p->res.seqid = seqid; 4494 p->arg.stateid = &lsp->ls_stateid; 4495 p->lsp = lsp; 4496 atomic_inc(&lsp->ls_count); 4497 /* Ensure we don't close file until we're done freeing locks! */ 4498 p->ctx = get_nfs_open_context(ctx); 4499 memcpy(&p->fl, fl, sizeof(p->fl)); 4500 p->server = NFS_SERVER(inode); 4501 return p; 4502 } 4503 4504 static void nfs4_locku_release_calldata(void *data) 4505 { 4506 struct nfs4_unlockdata *calldata = data; 4507 nfs_free_seqid(calldata->arg.seqid); 4508 nfs4_put_lock_state(calldata->lsp); 4509 put_nfs_open_context(calldata->ctx); 4510 kfree(calldata); 4511 } 4512 4513 static void nfs4_locku_done(struct rpc_task *task, void *data) 4514 { 4515 struct nfs4_unlockdata *calldata = data; 4516 4517 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 4518 return; 4519 switch (task->tk_status) { 4520 case 0: 4521 nfs4_stateid_copy(&calldata->lsp->ls_stateid, 4522 &calldata->res.stateid); 4523 renew_lease(calldata->server, calldata->timestamp); 4524 break; 4525 case -NFS4ERR_BAD_STATEID: 4526 case -NFS4ERR_OLD_STATEID: 4527 case -NFS4ERR_STALE_STATEID: 4528 case -NFS4ERR_EXPIRED: 4529 break; 4530 default: 4531 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) 4532 rpc_restart_call_prepare(task); 4533 } 4534 nfs_release_seqid(calldata->arg.seqid); 4535 } 4536 4537 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 4538 { 4539 struct nfs4_unlockdata *calldata = data; 4540 4541 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 4542 return; 4543 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 4544 /* Note: exit _without_ running nfs4_locku_done */ 4545 task->tk_action = NULL; 4546 return; 4547 } 4548 calldata->timestamp = jiffies; 4549 if (nfs4_setup_sequence(calldata->server, 4550 &calldata->arg.seq_args, 4551 &calldata->res.seq_res, 4552 task) != 0) 4553 nfs_release_seqid(calldata->arg.seqid); 4554 else 4555 rpc_call_start(task); 4556 } 4557 4558 static const struct rpc_call_ops nfs4_locku_ops = { 4559 .rpc_call_prepare = nfs4_locku_prepare, 4560 .rpc_call_done = nfs4_locku_done, 4561 .rpc_release = nfs4_locku_release_calldata, 4562 }; 4563 4564 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 4565 struct nfs_open_context *ctx, 4566 struct nfs4_lock_state *lsp, 4567 struct nfs_seqid *seqid) 4568 { 4569 struct nfs4_unlockdata *data; 4570 struct rpc_message msg = { 4571 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 4572 .rpc_cred = ctx->cred, 4573 }; 4574 struct rpc_task_setup task_setup_data = { 4575 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 4576 .rpc_message = &msg, 4577 .callback_ops = 
&nfs4_locku_ops, 4578 .workqueue = nfsiod_workqueue, 4579 .flags = RPC_TASK_ASYNC, 4580 }; 4581 4582 /* Ensure this is an unlock - when canceling a lock, the 4583 * canceled lock is passed in, and it won't be an unlock. 4584 */ 4585 fl->fl_type = F_UNLCK; 4586 4587 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 4588 if (data == NULL) { 4589 nfs_free_seqid(seqid); 4590 return ERR_PTR(-ENOMEM); 4591 } 4592 4593 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 4594 msg.rpc_argp = &data->arg; 4595 msg.rpc_resp = &data->res; 4596 task_setup_data.callback_data = data; 4597 return rpc_run_task(&task_setup_data); 4598 } 4599 4600 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 4601 { 4602 struct nfs_inode *nfsi = NFS_I(state->inode); 4603 struct nfs_seqid *seqid; 4604 struct nfs4_lock_state *lsp; 4605 struct rpc_task *task; 4606 int status = 0; 4607 unsigned char fl_flags = request->fl_flags; 4608 4609 status = nfs4_set_lock_state(state, request); 4610 /* Unlock _before_ we do the RPC call */ 4611 request->fl_flags |= FL_EXISTS; 4612 down_read(&nfsi->rwsem); 4613 if (do_vfs_lock(request->fl_file, request) == -ENOENT) { 4614 up_read(&nfsi->rwsem); 4615 goto out; 4616 } 4617 up_read(&nfsi->rwsem); 4618 if (status != 0) 4619 goto out; 4620 /* Is this a delegated lock? */ 4621 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) 4622 goto out; 4623 lsp = request->fl_u.nfs4_fl.owner; 4624 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 4625 status = -ENOMEM; 4626 if (seqid == NULL) 4627 goto out; 4628 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 4629 status = PTR_ERR(task); 4630 if (IS_ERR(task)) 4631 goto out; 4632 status = nfs4_wait_for_completion_rpc_task(task); 4633 rpc_put_task(task); 4634 out: 4635 request->fl_flags = fl_flags; 4636 return status; 4637 } 4638 4639 struct nfs4_lockdata { 4640 struct nfs_lock_args arg; 4641 struct nfs_lock_res res; 4642 struct nfs4_lock_state *lsp; 4643 struct nfs_open_context *ctx; 4644 struct file_lock fl; 4645 unsigned long timestamp; 4646 int rpc_status; 4647 int cancelled; 4648 struct nfs_server *server; 4649 }; 4650 4651 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 4652 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 4653 gfp_t gfp_mask) 4654 { 4655 struct nfs4_lockdata *p; 4656 struct inode *inode = lsp->ls_state->inode; 4657 struct nfs_server *server = NFS_SERVER(inode); 4658 4659 p = kzalloc(sizeof(*p), gfp_mask); 4660 if (p == NULL) 4661 return NULL; 4662 4663 p->arg.fh = NFS_FH(inode); 4664 p->arg.fl = &p->fl; 4665 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 4666 if (p->arg.open_seqid == NULL) 4667 goto out_free; 4668 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask); 4669 if (p->arg.lock_seqid == NULL) 4670 goto out_free_seqid; 4671 p->arg.lock_stateid = &lsp->ls_stateid; 4672 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 4673 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 4674 p->arg.lock_owner.s_dev = server->s_dev; 4675 p->res.lock_seqid = p->arg.lock_seqid; 4676 p->lsp = lsp; 4677 p->server = server; 4678 atomic_inc(&lsp->ls_count); 4679 p->ctx = get_nfs_open_context(ctx); 4680 memcpy(&p->fl, fl, sizeof(p->fl)); 4681 return p; 4682 out_free_seqid: 4683 nfs_free_seqid(p->arg.open_seqid); 4684 out_free: 4685 kfree(p); 4686 return NULL; 4687 } 4688 4689 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 4690 { 4691 struct nfs4_lockdata 
*data = calldata; 4692 struct nfs4_state *state = data->lsp->ls_state; 4693 4694 dprintk("%s: begin!\n", __func__); 4695 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 4696 return; 4697 /* Do we need to do an open_to_lock_owner? */ 4698 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) { 4699 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) 4700 goto out_release_lock_seqid; 4701 data->arg.open_stateid = &state->stateid; 4702 data->arg.new_lock_owner = 1; 4703 data->res.open_seqid = data->arg.open_seqid; 4704 } else 4705 data->arg.new_lock_owner = 0; 4706 data->timestamp = jiffies; 4707 if (nfs4_setup_sequence(data->server, 4708 &data->arg.seq_args, 4709 &data->res.seq_res, 4710 task) == 0) { 4711 rpc_call_start(task); 4712 return; 4713 } 4714 nfs_release_seqid(data->arg.open_seqid); 4715 out_release_lock_seqid: 4716 nfs_release_seqid(data->arg.lock_seqid); 4717 dprintk("%s: done!, ret = %d\n", __func__, task->tk_status); 4718 } 4719 4720 static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata) 4721 { 4722 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 4723 nfs4_lock_prepare(task, calldata); 4724 } 4725 4726 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 4727 { 4728 struct nfs4_lockdata *data = calldata; 4729 4730 dprintk("%s: begin!\n", __func__); 4731 4732 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4733 return; 4734 4735 data->rpc_status = task->tk_status; 4736 if (data->arg.new_lock_owner != 0) { 4737 if (data->rpc_status == 0) 4738 nfs_confirm_seqid(&data->lsp->ls_seqid, 0); 4739 else 4740 goto out; 4741 } 4742 if (data->rpc_status == 0) { 4743 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid); 4744 set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags); 4745 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); 4746 } 4747 out: 4748 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 4749 } 4750 4751 static void nfs4_lock_release(void *calldata) 4752 { 4753 struct nfs4_lockdata *data = calldata; 4754 4755 dprintk("%s: begin!\n", __func__); 4756 nfs_free_seqid(data->arg.open_seqid); 4757 if (data->cancelled != 0) { 4758 struct rpc_task *task; 4759 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 4760 data->arg.lock_seqid); 4761 if (!IS_ERR(task)) 4762 rpc_put_task_async(task); 4763 dprintk("%s: cancelling lock!\n", __func__); 4764 } else 4765 nfs_free_seqid(data->arg.lock_seqid); 4766 nfs4_put_lock_state(data->lsp); 4767 put_nfs_open_context(data->ctx); 4768 kfree(data); 4769 dprintk("%s: done!\n", __func__); 4770 } 4771 4772 static const struct rpc_call_ops nfs4_lock_ops = { 4773 .rpc_call_prepare = nfs4_lock_prepare, 4774 .rpc_call_done = nfs4_lock_done, 4775 .rpc_release = nfs4_lock_release, 4776 }; 4777 4778 static const struct rpc_call_ops nfs4_recover_lock_ops = { 4779 .rpc_call_prepare = nfs4_recover_lock_prepare, 4780 .rpc_call_done = nfs4_lock_done, 4781 .rpc_release = nfs4_lock_release, 4782 }; 4783 4784 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 4785 { 4786 switch (error) { 4787 case -NFS4ERR_ADMIN_REVOKED: 4788 case -NFS4ERR_BAD_STATEID: 4789 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4790 if (new_lock_owner != 0 || 4791 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 4792 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 4793 break; 4794 case -NFS4ERR_STALE_STATEID: 4795 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4796 case -NFS4ERR_EXPIRED: 4797 
		nfs4_schedule_lease_recovery(server->nfs_client);
	}
}

static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
	struct nfs4_lockdata *data;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
		.rpc_cred = state->owner->so_cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(state->inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_lock_ops,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC,
	};
	int ret;

	dprintk("%s: begin!\n", __func__);
	data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
			fl->fl_u.nfs4_fl.owner,
			recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
	if (data == NULL)
		return -ENOMEM;
	if (IS_SETLKW(cmd))
		data->arg.block = 1;
	if (recovery_type > NFS_LOCK_NEW) {
		if (recovery_type == NFS_LOCK_RECLAIM)
			data->arg.reclaim = NFS_LOCK_RECLAIM;
		task_setup_data.callback_ops = &nfs4_recover_lock_ops;
	}
	nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
	msg.rpc_argp = &data->arg;
	msg.rpc_resp = &data->res;
	task_setup_data.callback_data = data;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	ret = nfs4_wait_for_completion_rpc_task(task);
	if (ret == 0) {
		ret = data->rpc_status;
		if (ret)
			nfs4_handle_setlk_error(data->server, data->lsp,
					data->arg.new_lock_owner, ret);
	} else
		data->cancelled = 1;
	rpc_put_task(task);
	dprintk("%s: done, ret = %d!\n", __func__, ret);
	return ret;
}

static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = {
		.inode = state->inode,
	};
	int err;

	do {
		/* Cache the lock if possible... */
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
	struct nfs_server *server = NFS_SERVER(state->inode);
	struct nfs4_exception exception = {
		.inode = state->inode,
	};
	int err;

	err = nfs4_set_lock_state(state, request);
	if (err != 0)
		return err;
	do {
		if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
			return 0;
		err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
		switch (err) {
		default:
			goto out;
		case -NFS4ERR_GRACE:
		case -NFS4ERR_DELAY:
			nfs4_handle_exception(server, err, &exception);
			err = 0;
		}
	} while (exception.retry);
out:
	return err;
}

#if defined(CONFIG_NFS_V4_1)
/**
 * nfs41_check_expired_locks - possibly free a lock stateid
 *
 * @state: NFSv4 state for an inode
 *
 * Returns NFS_OK if recovery for this stateid is now finished.
 * Otherwise a negative NFS4ERR value is returned.
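 *
 * Each lock stateid still flagged NFS_LOCK_INITIALIZED is probed with
 * TEST_STATEID; a stateid the server no longer accepts is released via
 * FREE_STATEID (unless the server reports it as unknown) and its
 * NFS_LOCK_INITIALIZED bit is cleared so the lock can be re-established
 * by the caller.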
 */
static int nfs41_check_expired_locks(struct nfs4_state *state)
{
	int status, ret = -NFS4ERR_BAD_STATEID;
	struct nfs4_lock_state *lsp;
	struct nfs_server *server = NFS_SERVER(state->inode);

	list_for_each_entry(lsp, &state->lock_states, ls_locks) {
		if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
			status = nfs41_test_stateid(server, &lsp->ls_stateid);
			if (status != NFS_OK) {
				/* Free the stateid unless the server
				 * informs us the stateid is unrecognized. */
				if (status != -NFS4ERR_BAD_STATEID)
					nfs41_free_stateid(server,
							&lsp->ls_stateid);
				clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
				ret = status;
			}
		}
	}

	return ret;
}

static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
{
	int status = NFS_OK;

	if (test_bit(LK_STATE_IN_USE, &state->flags))
		status = nfs41_check_expired_locks(state);
	if (status != NFS_OK)
		status = nfs4_lock_expired(state, request);
	return status;
}
#endif

static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
	struct nfs_inode *nfsi = NFS_I(state->inode);
	unsigned char fl_flags = request->fl_flags;
	int status = -ENOLCK;

	if ((fl_flags & FL_POSIX) &&
			!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
		goto out;
	/* Is this a delegated open? */
	status = nfs4_set_lock_state(state, request);
	if (status != 0)
		goto out;
	request->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(request->fl_file, request);
	if (status < 0)
		goto out;
	down_read(&nfsi->rwsem);
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		/* Yes: cache locks! */
		/* ...but avoid races with delegation recall... */
		request->fl_flags = fl_flags & ~FL_SLEEP;
		status = do_vfs_lock(request->fl_file, request);
		goto out_unlock;
	}
	status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
	if (status != 0)
		goto out_unlock;
	/* Note: we always want to sleep here!
*/ 4973 request->fl_flags = fl_flags | FL_SLEEP; 4974 if (do_vfs_lock(request->fl_file, request) < 0) 4975 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock " 4976 "manager!\n", __func__); 4977 out_unlock: 4978 up_read(&nfsi->rwsem); 4979 out: 4980 request->fl_flags = fl_flags; 4981 return status; 4982 } 4983 4984 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4985 { 4986 struct nfs4_exception exception = { 4987 .state = state, 4988 .inode = state->inode, 4989 }; 4990 int err; 4991 4992 do { 4993 err = _nfs4_proc_setlk(state, cmd, request); 4994 if (err == -NFS4ERR_DENIED) 4995 err = -EAGAIN; 4996 err = nfs4_handle_exception(NFS_SERVER(state->inode), 4997 err, &exception); 4998 } while (exception.retry); 4999 return err; 5000 } 5001 5002 static int 5003 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 5004 { 5005 struct nfs_open_context *ctx; 5006 struct nfs4_state *state; 5007 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 5008 int status; 5009 5010 /* verify open state */ 5011 ctx = nfs_file_open_context(filp); 5012 state = ctx->state; 5013 5014 if (request->fl_start < 0 || request->fl_end < 0) 5015 return -EINVAL; 5016 5017 if (IS_GETLK(cmd)) { 5018 if (state != NULL) 5019 return nfs4_proc_getlk(state, F_GETLK, request); 5020 return 0; 5021 } 5022 5023 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 5024 return -EINVAL; 5025 5026 if (request->fl_type == F_UNLCK) { 5027 if (state != NULL) 5028 return nfs4_proc_unlck(state, cmd, request); 5029 return 0; 5030 } 5031 5032 if (state == NULL) 5033 return -ENOLCK; 5034 /* 5035 * Don't rely on the VFS having checked the file open mode, 5036 * since it won't do this for flock() locks. 5037 */ 5038 switch (request->fl_type) { 5039 case F_RDLCK: 5040 if (!(filp->f_mode & FMODE_READ)) 5041 return -EBADF; 5042 break; 5043 case F_WRLCK: 5044 if (!(filp->f_mode & FMODE_WRITE)) 5045 return -EBADF; 5046 } 5047 5048 do { 5049 status = nfs4_proc_setlk(state, cmd, request); 5050 if ((status != -EAGAIN) || IS_SETLK(cmd)) 5051 break; 5052 timeout = nfs4_set_lock_task_retry(timeout); 5053 status = -ERESTARTSYS; 5054 if (signalled()) 5055 break; 5056 } while(status < 0); 5057 return status; 5058 } 5059 5060 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) 5061 { 5062 struct nfs_server *server = NFS_SERVER(state->inode); 5063 struct nfs4_exception exception = { }; 5064 int err; 5065 5066 err = nfs4_set_lock_state(state, fl); 5067 if (err != 0) 5068 goto out; 5069 do { 5070 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 5071 switch (err) { 5072 default: 5073 printk(KERN_ERR "NFS: %s: unhandled error " 5074 "%d.\n", __func__, err); 5075 case 0: 5076 case -ESTALE: 5077 goto out; 5078 case -NFS4ERR_EXPIRED: 5079 nfs4_schedule_stateid_recovery(server, state); 5080 case -NFS4ERR_STALE_CLIENTID: 5081 case -NFS4ERR_STALE_STATEID: 5082 nfs4_schedule_lease_recovery(server->nfs_client); 5083 goto out; 5084 case -NFS4ERR_BADSESSION: 5085 case -NFS4ERR_BADSLOT: 5086 case -NFS4ERR_BAD_HIGH_SLOT: 5087 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 5088 case -NFS4ERR_DEADSESSION: 5089 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); 5090 goto out; 5091 case -ERESTARTSYS: 5092 /* 5093 * The show must go on: exit, but mark the 5094 * stateid as needing recovery. 
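	 * (-ERESTARTSYS therefore shares the handling of the revoked and
	 * bad-stateid cases below: schedule stateid recovery and report
	 * success to the caller.)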
5095 */ 5096 case -NFS4ERR_DELEG_REVOKED: 5097 case -NFS4ERR_ADMIN_REVOKED: 5098 case -NFS4ERR_BAD_STATEID: 5099 case -NFS4ERR_OPENMODE: 5100 nfs4_schedule_stateid_recovery(server, state); 5101 err = 0; 5102 goto out; 5103 case -EKEYEXPIRED: 5104 /* 5105 * User RPCSEC_GSS context has expired. 5106 * We cannot recover this stateid now, so 5107 * skip it and allow recovery thread to 5108 * proceed. 5109 */ 5110 err = 0; 5111 goto out; 5112 case -ENOMEM: 5113 case -NFS4ERR_DENIED: 5114 /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 5115 err = 0; 5116 goto out; 5117 case -NFS4ERR_DELAY: 5118 break; 5119 } 5120 err = nfs4_handle_exception(server, err, &exception); 5121 } while (exception.retry); 5122 out: 5123 return err; 5124 } 5125 5126 struct nfs_release_lockowner_data { 5127 struct nfs4_lock_state *lsp; 5128 struct nfs_server *server; 5129 struct nfs_release_lockowner_args args; 5130 }; 5131 5132 static void nfs4_release_lockowner_release(void *calldata) 5133 { 5134 struct nfs_release_lockowner_data *data = calldata; 5135 nfs4_free_lock_state(data->server, data->lsp); 5136 kfree(calldata); 5137 } 5138 5139 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 5140 .rpc_release = nfs4_release_lockowner_release, 5141 }; 5142 5143 int nfs4_release_lockowner(struct nfs4_lock_state *lsp) 5144 { 5145 struct nfs_server *server = lsp->ls_state->owner->so_server; 5146 struct nfs_release_lockowner_data *data; 5147 struct rpc_message msg = { 5148 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 5149 }; 5150 5151 if (server->nfs_client->cl_mvops->minor_version != 0) 5152 return -EINVAL; 5153 data = kmalloc(sizeof(*data), GFP_NOFS); 5154 if (!data) 5155 return -ENOMEM; 5156 data->lsp = lsp; 5157 data->server = server; 5158 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 5159 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 5160 data->args.lock_owner.s_dev = server->s_dev; 5161 msg.rpc_argp = &data->args; 5162 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 5163 return 0; 5164 } 5165 5166 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 5167 5168 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, 5169 const void *buf, size_t buflen, 5170 int flags, int type) 5171 { 5172 if (strcmp(key, "") != 0) 5173 return -EINVAL; 5174 5175 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen); 5176 } 5177 5178 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, 5179 void *buf, size_t buflen, int type) 5180 { 5181 if (strcmp(key, "") != 0) 5182 return -EINVAL; 5183 5184 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen); 5185 } 5186 5187 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, 5188 size_t list_len, const char *name, 5189 size_t name_len, int type) 5190 { 5191 size_t len = sizeof(XATTR_NAME_NFSV4_ACL); 5192 5193 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) 5194 return 0; 5195 5196 if (list && len <= list_len) 5197 memcpy(list, XATTR_NAME_NFSV4_ACL, len); 5198 return len; 5199 } 5200 5201 /* 5202 * nfs_fhget will use either the mounted_on_fileid or the fileid 5203 */ 5204 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 5205 { 5206 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 5207 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 5208 (fattr->valid & NFS_ATTR_FATTR_FSID) && 5209 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 5210 return; 5211 5212 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 5213 NFS_ATTR_FATTR_NLINK 
| NFS_ATTR_FATTR_V4_REFERRAL; 5214 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 5215 fattr->nlink = 2; 5216 } 5217 5218 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 5219 const struct qstr *name, 5220 struct nfs4_fs_locations *fs_locations, 5221 struct page *page) 5222 { 5223 struct nfs_server *server = NFS_SERVER(dir); 5224 u32 bitmask[2] = { 5225 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 5226 }; 5227 struct nfs4_fs_locations_arg args = { 5228 .dir_fh = NFS_FH(dir), 5229 .name = name, 5230 .page = page, 5231 .bitmask = bitmask, 5232 }; 5233 struct nfs4_fs_locations_res res = { 5234 .fs_locations = fs_locations, 5235 }; 5236 struct rpc_message msg = { 5237 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 5238 .rpc_argp = &args, 5239 .rpc_resp = &res, 5240 }; 5241 int status; 5242 5243 dprintk("%s: start\n", __func__); 5244 5245 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 5246 * is not supported */ 5247 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 5248 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 5249 else 5250 bitmask[0] |= FATTR4_WORD0_FILEID; 5251 5252 nfs_fattr_init(&fs_locations->fattr); 5253 fs_locations->server = server; 5254 fs_locations->nlocations = 0; 5255 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 5256 dprintk("%s: returned status = %d\n", __func__, status); 5257 return status; 5258 } 5259 5260 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 5261 const struct qstr *name, 5262 struct nfs4_fs_locations *fs_locations, 5263 struct page *page) 5264 { 5265 struct nfs4_exception exception = { }; 5266 int err; 5267 do { 5268 err = nfs4_handle_exception(NFS_SERVER(dir), 5269 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page), 5270 &exception); 5271 } while (exception.retry); 5272 return err; 5273 } 5274 5275 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) 5276 { 5277 int status; 5278 struct nfs4_secinfo_arg args = { 5279 .dir_fh = NFS_FH(dir), 5280 .name = name, 5281 }; 5282 struct nfs4_secinfo_res res = { 5283 .flavors = flavors, 5284 }; 5285 struct rpc_message msg = { 5286 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 5287 .rpc_argp = &args, 5288 .rpc_resp = &res, 5289 }; 5290 5291 dprintk("NFS call secinfo %s\n", name->name); 5292 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 5293 dprintk("NFS reply secinfo: %d\n", status); 5294 return status; 5295 } 5296 5297 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 5298 struct nfs4_secinfo_flavors *flavors) 5299 { 5300 struct nfs4_exception exception = { }; 5301 int err; 5302 do { 5303 err = nfs4_handle_exception(NFS_SERVER(dir), 5304 _nfs4_proc_secinfo(dir, name, flavors), 5305 &exception); 5306 } while (exception.retry); 5307 return err; 5308 } 5309 5310 #ifdef CONFIG_NFS_V4_1 5311 /* 5312 * Check the exchange flags returned by the server for invalid flags, having 5313 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 5314 * DS flags set. 
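 * For example, EXCHGID4_FLAG_USE_PNFS_MDS on its own is accepted,
 * EXCHGID4_FLAG_USE_PNFS_MDS together with EXCHGID4_FLAG_USE_NON_PNFS is
 * rejected as contradictory, and a reply carrying none of the pNFS role
 * flags (NON_PNFS, PNFS_MDS or PNFS_DS) is rejected as well.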
5315 */ 5316 static int nfs4_check_cl_exchange_flags(u32 flags) 5317 { 5318 if (flags & ~EXCHGID4_FLAG_MASK_R) 5319 goto out_inval; 5320 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 5321 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 5322 goto out_inval; 5323 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 5324 goto out_inval; 5325 return NFS_OK; 5326 out_inval: 5327 return -NFS4ERR_INVAL; 5328 } 5329 5330 static bool 5331 nfs41_same_server_scope(struct nfs41_server_scope *a, 5332 struct nfs41_server_scope *b) 5333 { 5334 if (a->server_scope_sz == b->server_scope_sz && 5335 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 5336 return true; 5337 5338 return false; 5339 } 5340 5341 /* 5342 * nfs4_proc_bind_conn_to_session() 5343 * 5344 * The 4.1 client currently uses the same TCP connection for the 5345 * fore and backchannel. 5346 */ 5347 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 5348 { 5349 int status; 5350 struct nfs41_bind_conn_to_session_res res; 5351 struct rpc_message msg = { 5352 .rpc_proc = 5353 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 5354 .rpc_argp = clp, 5355 .rpc_resp = &res, 5356 .rpc_cred = cred, 5357 }; 5358 5359 dprintk("--> %s\n", __func__); 5360 BUG_ON(clp == NULL); 5361 5362 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); 5363 if (unlikely(res.session == NULL)) { 5364 status = -ENOMEM; 5365 goto out; 5366 } 5367 5368 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5369 if (status == 0) { 5370 if (memcmp(res.session->sess_id.data, 5371 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 5372 dprintk("NFS: %s: Session ID mismatch\n", __func__); 5373 status = -EIO; 5374 goto out_session; 5375 } 5376 if (res.dir != NFS4_CDFS4_BOTH) { 5377 dprintk("NFS: %s: Unexpected direction from server\n", 5378 __func__); 5379 status = -EIO; 5380 goto out_session; 5381 } 5382 if (res.use_conn_in_rdma_mode) { 5383 dprintk("NFS: %s: Server returned RDMA mode = true\n", 5384 __func__); 5385 status = -EIO; 5386 goto out_session; 5387 } 5388 } 5389 out_session: 5390 kfree(res.session); 5391 out: 5392 dprintk("<-- %s status= %d\n", __func__, status); 5393 return status; 5394 } 5395 5396 /* 5397 * nfs4_proc_exchange_id() 5398 * 5399 * Returns zero, a negative errno, or a negative NFS4ERR status code. 5400 * 5401 * Since the clientid has expired, all compounds using sessions 5402 * associated with the stale clientid will be returning 5403 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 5404 * be in some phase of session reset. 
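 *
 * On success the negotiated clientid, sequence id and exchange flags are
 * recorded in the nfs_client, along with the server owner, server scope
 * and implementation id returned by the server; a change of server scope
 * is flagged via NFS4CLNT_SERVER_SCOPE_MISMATCH for the state manager to
 * act on.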
5405 */ 5406 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 5407 { 5408 nfs4_verifier verifier; 5409 struct nfs41_exchange_id_args args = { 5410 .verifier = &verifier, 5411 .client = clp, 5412 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, 5413 }; 5414 struct nfs41_exchange_id_res res = { 5415 0 5416 }; 5417 int status; 5418 struct rpc_message msg = { 5419 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 5420 .rpc_argp = &args, 5421 .rpc_resp = &res, 5422 .rpc_cred = cred, 5423 }; 5424 5425 nfs4_init_boot_verifier(clp, &verifier); 5426 args.id_len = nfs4_init_uniform_client_string(clp, args.id, 5427 sizeof(args.id)); 5428 dprintk("NFS call exchange_id auth=%s, '%.*s'\n", 5429 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5430 args.id_len, args.id); 5431 5432 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 5433 GFP_NOFS); 5434 if (unlikely(res.server_owner == NULL)) { 5435 status = -ENOMEM; 5436 goto out; 5437 } 5438 5439 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 5440 GFP_NOFS); 5441 if (unlikely(res.server_scope == NULL)) { 5442 status = -ENOMEM; 5443 goto out_server_owner; 5444 } 5445 5446 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 5447 if (unlikely(res.impl_id == NULL)) { 5448 status = -ENOMEM; 5449 goto out_server_scope; 5450 } 5451 5452 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5453 if (status == 0) 5454 status = nfs4_check_cl_exchange_flags(res.flags); 5455 5456 if (status == 0) { 5457 clp->cl_clientid = res.clientid; 5458 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R); 5459 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) 5460 clp->cl_seqid = res.seqid; 5461 5462 kfree(clp->cl_serverowner); 5463 clp->cl_serverowner = res.server_owner; 5464 res.server_owner = NULL; 5465 5466 /* use the most recent implementation id */ 5467 kfree(clp->cl_implid); 5468 clp->cl_implid = res.impl_id; 5469 5470 if (clp->cl_serverscope != NULL && 5471 !nfs41_same_server_scope(clp->cl_serverscope, 5472 res.server_scope)) { 5473 dprintk("%s: server_scope mismatch detected\n", 5474 __func__); 5475 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 5476 kfree(clp->cl_serverscope); 5477 clp->cl_serverscope = NULL; 5478 } 5479 5480 if (clp->cl_serverscope == NULL) { 5481 clp->cl_serverscope = res.server_scope; 5482 goto out; 5483 } 5484 } else 5485 kfree(res.impl_id); 5486 5487 out_server_owner: 5488 kfree(res.server_owner); 5489 out_server_scope: 5490 kfree(res.server_scope); 5491 out: 5492 if (clp->cl_implid != NULL) 5493 dprintk("NFS reply exchange_id: Server Implementation ID: " 5494 "domain: %s, name: %s, date: %llu,%u\n", 5495 clp->cl_implid->domain, clp->cl_implid->name, 5496 clp->cl_implid->date.seconds, 5497 clp->cl_implid->date.nseconds); 5498 dprintk("NFS reply exchange_id: %d\n", status); 5499 return status; 5500 } 5501 5502 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 5503 struct rpc_cred *cred) 5504 { 5505 struct rpc_message msg = { 5506 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 5507 .rpc_argp = clp, 5508 .rpc_cred = cred, 5509 }; 5510 int status; 5511 5512 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5513 if (status) 5514 dprintk("NFS: Got error %d from the server %s on " 5515 "DESTROY_CLIENTID.", status, clp->cl_hostname); 5516 return status; 5517 } 5518 5519 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 5520 struct rpc_cred *cred) 5521 { 5522 unsigned int loop; 5523 int ret; 5524 5525 for (loop = 
NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 5526 ret = _nfs4_proc_destroy_clientid(clp, cred); 5527 switch (ret) { 5528 case -NFS4ERR_DELAY: 5529 case -NFS4ERR_CLIENTID_BUSY: 5530 ssleep(1); 5531 break; 5532 default: 5533 return ret; 5534 } 5535 } 5536 return 0; 5537 } 5538 5539 int nfs4_destroy_clientid(struct nfs_client *clp) 5540 { 5541 struct rpc_cred *cred; 5542 int ret = 0; 5543 5544 if (clp->cl_mvops->minor_version < 1) 5545 goto out; 5546 if (clp->cl_exchange_flags == 0) 5547 goto out; 5548 if (clp->cl_preserve_clid) 5549 goto out; 5550 cred = nfs4_get_exchange_id_cred(clp); 5551 ret = nfs4_proc_destroy_clientid(clp, cred); 5552 if (cred) 5553 put_rpccred(cred); 5554 switch (ret) { 5555 case 0: 5556 case -NFS4ERR_STALE_CLIENTID: 5557 clp->cl_exchange_flags = 0; 5558 } 5559 out: 5560 return ret; 5561 } 5562 5563 struct nfs4_get_lease_time_data { 5564 struct nfs4_get_lease_time_args *args; 5565 struct nfs4_get_lease_time_res *res; 5566 struct nfs_client *clp; 5567 }; 5568 5569 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 5570 void *calldata) 5571 { 5572 int ret; 5573 struct nfs4_get_lease_time_data *data = 5574 (struct nfs4_get_lease_time_data *)calldata; 5575 5576 dprintk("--> %s\n", __func__); 5577 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED); 5578 /* just setup sequence, do not trigger session recovery 5579 since we're invoked within one */ 5580 ret = nfs41_setup_sequence(data->clp->cl_session, 5581 &data->args->la_seq_args, 5582 &data->res->lr_seq_res, task); 5583 5584 BUG_ON(ret == -EAGAIN); 5585 rpc_call_start(task); 5586 dprintk("<-- %s\n", __func__); 5587 } 5588 5589 /* 5590 * Called from nfs4_state_manager thread for session setup, so don't recover 5591 * from sequence operation or clientid errors. 5592 */ 5593 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 5594 { 5595 struct nfs4_get_lease_time_data *data = 5596 (struct nfs4_get_lease_time_data *)calldata; 5597 5598 dprintk("--> %s\n", __func__); 5599 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 5600 return; 5601 switch (task->tk_status) { 5602 case -NFS4ERR_DELAY: 5603 case -NFS4ERR_GRACE: 5604 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 5605 rpc_delay(task, NFS4_POLL_RETRY_MIN); 5606 task->tk_status = 0; 5607 /* fall through */ 5608 case -NFS4ERR_RETRY_UNCACHED_REP: 5609 rpc_restart_call_prepare(task); 5610 return; 5611 } 5612 dprintk("<-- %s\n", __func__); 5613 } 5614 5615 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 5616 .rpc_call_prepare = nfs4_get_lease_time_prepare, 5617 .rpc_call_done = nfs4_get_lease_time_done, 5618 }; 5619 5620 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 5621 { 5622 struct rpc_task *task; 5623 struct nfs4_get_lease_time_args args; 5624 struct nfs4_get_lease_time_res res = { 5625 .lr_fsinfo = fsinfo, 5626 }; 5627 struct nfs4_get_lease_time_data data = { 5628 .args = &args, 5629 .res = &res, 5630 .clp = clp, 5631 }; 5632 struct rpc_message msg = { 5633 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 5634 .rpc_argp = &args, 5635 .rpc_resp = &res, 5636 }; 5637 struct rpc_task_setup task_setup = { 5638 .rpc_client = clp->cl_rpcclient, 5639 .rpc_message = &msg, 5640 .callback_ops = &nfs4_get_lease_time_ops, 5641 .callback_data = &data, 5642 .flags = RPC_TASK_TIMEOUT, 5643 }; 5644 int status; 5645 5646 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 5647 dprintk("--> %s\n", __func__); 5648 task = rpc_run_task(&task_setup); 5649 5650 if 
static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags)
{
	return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags);
}

static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl,
		struct nfs4_slot *new,
		u32 max_slots,
		u32 ivalue)
{
	struct nfs4_slot *old = NULL;
	u32 i;

	spin_lock(&tbl->slot_tbl_lock);
	if (new) {
		old = tbl->slots;
		tbl->slots = new;
		tbl->max_slots = max_slots;
	}
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	for (i = 0; i < tbl->max_slots; i++)
		tbl->slots[i].seq_nr = ivalue;
	spin_unlock(&tbl->slot_tbl_lock);
	kfree(old);
}

/*
 * (re)Initialise a slot table
 */
static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
				   u32 ivalue)
{
	struct nfs4_slot *new = NULL;
	int ret = -ENOMEM;

	dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
		max_reqs, tbl->max_slots);

	/* Does the newly negotiated max_reqs match the existing slot table? */
	if (max_reqs != tbl->max_slots) {
		new = nfs4_alloc_slots(max_reqs, GFP_NOFS);
		if (!new)
			goto out;
	}
	ret = 0;

	nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue);
	dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
		tbl, tbl->slots, tbl->max_slots);
out:
	dprintk("<-- %s: return %d\n", __func__, ret);
	return ret;
}

/* Destroy the slot table */
static void nfs4_destroy_slot_tables(struct nfs4_session *session)
{
	if (session->fc_slot_table.slots != NULL) {
		kfree(session->fc_slot_table.slots);
		session->fc_slot_table.slots = NULL;
	}
	if (session->bc_slot_table.slots != NULL) {
		kfree(session->bc_slot_table.slots);
		session->bc_slot_table.slots = NULL;
	}
	return;
}

/*
 * Initialize or reset the forechannel and backchannel tables
 */
static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
	struct nfs4_slot_table *tbl;
	int status;

	dprintk("--> %s\n", __func__);
	/* Fore channel */
	tbl = &ses->fc_slot_table;
	status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
	if (status) /* -ENOMEM */
		return status;
	/* Back channel */
	tbl = &ses->bc_slot_table;
	status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
	if (status && tbl->slots == NULL)
		/* Fore and back channel share a connection so get
		 * both slot tables or neither */
		nfs4_destroy_slot_tables(ses);
	return status;
}

struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;

	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
	if (!session)
		return NULL;

	tbl = &session->fc_slot_table;
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
	init_completion(&tbl->complete);

	tbl = &session->bc_slot_table;
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
	init_completion(&tbl->complete);

	session->session_state = 1<<NFS4_SESSION_INITING;

	session->clp = clp;
	return session;
}

void nfs4_destroy_session(struct nfs4_session *session)
{
	struct rpc_xprt *xprt;
	struct rpc_cred *cred;

	cred = nfs4_get_exchange_id_cred(session->clp);
	nfs4_proc_destroy_session(session, cred);
	if (cred)
		put_rpccred(cred);

	rcu_read_lock();
	xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
	rcu_read_unlock();
	dprintk("%s Destroy backchannel for xprt %p\n",
		__func__, xprt);
	xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
	nfs4_destroy_slot_tables(session);
	kfree(session);
}

/*
 * Initialize the values to be used by the client in CREATE_SESSION
 * If nfs4_init_session set the fore channel request and response sizes,
 * use them.
 *
 * Set the back channel max_resp_sz_cached to zero to force the client to
 * always set csa_cachethis to FALSE because the current implementation
 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
 */
static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
{
	struct nfs4_session *session = args->client->cl_session;
	unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
		     mxresp_sz = session->fc_attrs.max_resp_sz;

	if (mxrqst_sz == 0)
		mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
	if (mxresp_sz == 0)
		mxresp_sz = NFS_MAX_FILE_IO_SIZE;
	/* Fore channel attributes */
	args->fc_attrs.max_rqst_sz = mxrqst_sz;
	args->fc_attrs.max_resp_sz = mxresp_sz;
	args->fc_attrs.max_ops = NFS4_MAX_OPS;
	args->fc_attrs.max_reqs = max_session_slots;

	dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_ops=%u max_reqs=%u\n",
		__func__,
		args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
		args->fc_attrs.max_ops, args->fc_attrs.max_reqs);

	/* Back channel attributes */
	args->bc_attrs.max_rqst_sz = PAGE_SIZE;
	args->bc_attrs.max_resp_sz = PAGE_SIZE;
	args->bc_attrs.max_resp_sz_cached = 0;
	args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
	args->bc_attrs.max_reqs = 1;

	dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
		"max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
		__func__,
		args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
		args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
		args->bc_attrs.max_reqs);
}
static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
	struct nfs4_channel_attrs *sent = &args->fc_attrs;
	struct nfs4_channel_attrs *rcvd = &session->fc_attrs;

	if (rcvd->max_resp_sz > sent->max_resp_sz)
		return -EINVAL;
	/*
	 * Our requested max_ops is the minimum we need; we're not
	 * prepared to break up compounds into smaller pieces than that.
	 * So, no point even trying to continue if the server won't
	 * cooperate:
	 */
	if (rcvd->max_ops < sent->max_ops)
		return -EINVAL;
	if (rcvd->max_reqs == 0)
		return -EINVAL;
	if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
		rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
	return 0;
}

static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
{
	struct nfs4_channel_attrs *sent = &args->bc_attrs;
	struct nfs4_channel_attrs *rcvd = &session->bc_attrs;

	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
		return -EINVAL;
	if (rcvd->max_resp_sz < sent->max_resp_sz)
		return -EINVAL;
	if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
		return -EINVAL;
	/* These would render the backchannel useless: */
	if (rcvd->max_ops != sent->max_ops)
		return -EINVAL;
	if (rcvd->max_reqs != sent->max_reqs)
		return -EINVAL;
	return 0;
}

static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
				     struct nfs4_session *session)
{
	int ret;

	ret = nfs4_verify_fore_channel_attrs(args, session);
	if (ret)
		return ret;
	return nfs4_verify_back_channel_attrs(args, session);
}

static int _nfs4_proc_create_session(struct nfs_client *clp,
		struct rpc_cred *cred)
{
	struct nfs4_session *session = clp->cl_session;
	struct nfs41_create_session_args args = {
		.client = clp,
		.cb_program = NFS4_CALLBACK,
	};
	struct nfs41_create_session_res res = {
		.client = clp,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	int status;

	nfs4_init_channel_attrs(&args);
	args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);

	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);

	if (!status)
		/* Verify the session's negotiated channel_attrs values */
		status = nfs4_verify_channel_attrs(&args, session);
	if (!status) {
		/* Increment the clientid slot sequence id */
		clp->cl_seqid++;
	}

	return status;
}

/*
 * Issues a CREATE_SESSION operation to the server.
 * It is the responsibility of the caller to verify the session is
 * expired before calling this routine.
 */
int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
{
	int status;
	unsigned *ptr;
	struct nfs4_session *session = clp->cl_session;

	dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);

	status = _nfs4_proc_create_session(clp, cred);
	if (status)
		goto out;

	/* Init or reset the session slot tables */
	status = nfs4_setup_session_slot_tables(session);
	dprintk("slot table setup returned %d\n", status);
	if (status)
		goto out;

	ptr = (unsigned *)&session->sess_id.data[0];
	dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
		clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
out:
	dprintk("<-- %s\n", __func__);
	return status;
}

/*
 * Issue the over-the-wire RPC DESTROY_SESSION.
 * The caller must serialize access to this routine.
 */
int nfs4_proc_destroy_session(struct nfs4_session *session,
		struct rpc_cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
		.rpc_argp = session,
		.rpc_cred = cred,
	};
	int status = 0;

	dprintk("--> nfs4_proc_destroy_session\n");

	/* session is still being setup */
	if (session->clp->cl_cons_state != NFS_CS_READY)
		return status;

	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);

	if (status)
		dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
			"Session has been destroyed regardless...\n", status);

	dprintk("<-- nfs4_proc_destroy_session\n");
	return status;
}

/*
 * With sessions, the client is not marked ready until after a
 * successful EXCHANGE_ID and CREATE_SESSION.
 *
 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that other
 * versions of NFS can be tried.
 */
static int nfs41_check_session_ready(struct nfs_client *clp)
{
	int ret;

	if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
		ret = nfs4_client_recover_expired_lease(clp);
		if (ret)
			return ret;
	}
	if (clp->cl_cons_state < NFS_CS_READY)
		return -EPROTONOSUPPORT;
	smp_rmb();
	return 0;
}

int nfs4_init_session(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_session *session;
	unsigned int rsize, wsize;

	if (!nfs4_has_session(clp))
		return 0;

	session = clp->cl_session;
	spin_lock(&clp->cl_lock);
	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {

		rsize = server->rsize;
		if (rsize == 0)
			rsize = NFS_MAX_FILE_IO_SIZE;
		wsize = server->wsize;
		if (wsize == 0)
			wsize = NFS_MAX_FILE_IO_SIZE;

		session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
		session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
	}
	spin_unlock(&clp->cl_lock);

	return nfs41_check_session_ready(clp);
}

int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
{
	struct nfs4_session *session = clp->cl_session;
	int ret;

	spin_lock(&clp->cl_lock);
	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
		/*
		 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
		 * DS lease to be equal to the MDS lease.
		 */
		clp->cl_lease_time = lease_time;
		clp->cl_last_renewal = jiffies;
	}
	spin_unlock(&clp->cl_lock);

	ret = nfs41_check_session_ready(clp);
	if (ret)
		return ret;
	/* Test for the DS role */
	if (!is_ds_client(clp))
		return -ENODEV;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
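/*
 * NFSv4.1 uses the SEQUENCE operation for lease renewal.  The call ops
 * below drive an asynchronous SEQUENCE RPC on behalf of the state
 * renewal machinery and schedule lease recovery if the call fails.
 */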
/*
 * Renew the cl_session lease.
 */
struct nfs4_sequence_data {
	struct nfs_client *clp;
	struct nfs4_sequence_args args;
	struct nfs4_sequence_res res;
};

static void nfs41_sequence_release(void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;

	if (atomic_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(calldata);
}

static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch(task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		return -EAGAIN;
	default:
		nfs4_schedule_lease_recovery(clp);
	}
	return 0;
}

static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;

	if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
		return;

	if (task->tk_status < 0) {
		dprintk("%s ERROR %d\n", __func__, task->tk_status);
		if (atomic_read(&clp->cl_count) == 1)
			goto out;

		if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
	dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
out:
	dprintk("<-- %s\n", __func__);
}

static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_sequence_data *calldata = data;
	struct nfs_client *clp = calldata->clp;
	struct nfs4_sequence_args *args;
	struct nfs4_sequence_res *res;

	args = task->tk_msg.rpc_argp;
	res = task->tk_msg.rpc_resp;

	if (nfs41_setup_sequence(clp->cl_session, args, res, task))
		return;
	rpc_call_start(task);
}

static const struct rpc_call_ops nfs41_sequence_ops = {
	.rpc_call_done = nfs41_sequence_call_done,
	.rpc_call_prepare = nfs41_sequence_prepare,
	.rpc_release = nfs41_sequence_release,
};

static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct nfs4_sequence_data *calldata;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs41_sequence_ops,
		.flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
	};

	if (!atomic_inc_not_zero(&clp->cl_count))
		return ERR_PTR(-EIO);
	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL) {
		nfs_put_client(clp);
		return ERR_PTR(-ENOMEM);
	}
	nfs41_init_sequence(&calldata->args, &calldata->res, 0);
	msg.rpc_argp = &calldata->args;
	msg.rpc_resp = &calldata->res;
	calldata->clp = clp;
	task_setup_data.callback_data = calldata;

	return rpc_run_task(&task_setup_data);
}

static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
{
	struct rpc_task *task;
	int ret = 0;

	if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
		return 0;
	task = _nfs41_proc_sequence(clp, cred);
	if (IS_ERR(task))
		ret = PTR_ERR(task);
	else
		rpc_put_task_async(task);
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}
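/*
 * Synchronous SEQUENCE call, used as the ->renew_lease() operation.
 * Waits for the asynchronous task to complete, then feeds any status
 * flags from the reply into nfs41_handle_sequence_flag_errors().
 */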
static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct rpc_task *task;
	int ret;

	task = _nfs41_proc_sequence(clp, cred);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	ret = rpc_wait_for_completion_task(task);
	if (!ret) {
		struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;

		if (task->tk_status == 0)
			nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
		ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	dprintk("<-- %s status=%d\n", __func__, ret);
	return ret;
}

struct nfs4_reclaim_complete_data {
	struct nfs_client *clp;
	struct nfs41_reclaim_complete_args arg;
	struct nfs41_reclaim_complete_res res;
};

static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;

	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
	if (nfs41_setup_sequence(calldata->clp->cl_session,
				&calldata->arg.seq_args,
				&calldata->res.seq_res, task))
		return;

	rpc_call_start(task);
}

static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
{
	switch(task->tk_status) {
	case 0:
	case -NFS4ERR_COMPLETE_ALREADY:
	case -NFS4ERR_WRONG_CRED: /* What to do here? */
		break;
	case -NFS4ERR_DELAY:
		rpc_delay(task, NFS4_POLL_RETRY_MAX);
		/* fall through */
	case -NFS4ERR_RETRY_UNCACHED_REP:
		return -EAGAIN;
	default:
		nfs4_schedule_lease_recovery(clp);
	}
	return 0;
}

static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;
	struct nfs_client *clp = calldata->clp;
	struct nfs4_sequence_res *res = &calldata->res.seq_res;

	dprintk("--> %s\n", __func__);
	if (!nfs41_sequence_done(task, res))
		return;

	if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
	dprintk("<-- %s\n", __func__);
}

static void nfs4_free_reclaim_complete_data(void *data)
{
	struct nfs4_reclaim_complete_data *calldata = data;

	kfree(calldata);
}

static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
	.rpc_call_prepare = nfs4_reclaim_complete_prepare,
	.rpc_call_done = nfs4_reclaim_complete_done,
	.rpc_release = nfs4_free_reclaim_complete_data,
};

/*
 * Issue a global reclaim complete.
 */
static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
{
	struct nfs4_reclaim_complete_data *calldata;
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_reclaim_complete_call_ops,
		.flags = RPC_TASK_ASYNC,
	};
	int status = -ENOMEM;

	dprintk("--> %s\n", __func__);
	calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
	if (calldata == NULL)
		goto out;
	calldata->clp = clp;
	calldata->arg.one_fs = 0;

	nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
	msg.rpc_argp = &calldata->arg;
	msg.rpc_resp = &calldata->res;
	task_setup_data.callback_data = calldata;
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		status = PTR_ERR(task);
		goto out;
	}
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status == 0)
		status = task->tk_status;
	rpc_put_task(task);
	return 0;
out:
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}

static void
nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);

	dprintk("--> %s\n", __func__);
	/* Note that there is a race here, where a CB_LAYOUTRECALL can come in
	 * right now covering the LAYOUTGET we are about to send.
	 * However, that is not so catastrophic, and there seems
	 * to be no way to prevent it completely.
	 */
	if (nfs4_setup_sequence(server, &lgp->args.seq_args,
				&lgp->res.seq_res, task))
		return;
	if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
					  NFS_I(lgp->args.inode)->layout,
					  lgp->args.ctx->state)) {
		rpc_exit(task, NFS4_OK);
		return;
	}
	rpc_call_start(task);
}

static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct inode *inode = lgp->args.inode;
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layout_hdr *lo;
	struct nfs4_state *state = NULL;

	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &lgp->res.seq_res))
		goto out;

	switch (task->tk_status) {
	case 0:
		goto out;
	case -NFS4ERR_LAYOUTTRYLATER:
	case -NFS4ERR_RECALLCONFLICT:
		task->tk_status = -NFS4ERR_DELAY;
		break;
	case -NFS4ERR_EXPIRED:
	case -NFS4ERR_BAD_STATEID:
		spin_lock(&inode->i_lock);
		lo = NFS_I(inode)->layout;
		if (!lo || list_empty(&lo->plh_segs)) {
			spin_unlock(&inode->i_lock);
			/* If the open stateid was bad, then recover it. */
			state = lgp->args.ctx->state;
		} else {
			LIST_HEAD(head);

			pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
			spin_unlock(&inode->i_lock);
			/* Mark the bad layout state as invalid, then
			 * retry using the open stateid. */
			pnfs_free_lseg_list(&head);
		}
	}
	if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
		rpc_restart_call_prepare(task);
out:
	dprintk("<-- %s\n", __func__);
}

static size_t max_response_pages(struct nfs_server *server)
{
	u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	return nfs_page_array_len(0, max_resp_sz);
}

static void nfs4_free_pages(struct page **pages, size_t size)
{
	int i;

	if (!pages)
		return;

	for (i = 0; i < size; i++) {
		if (!pages[i])
			break;
		__free_page(pages[i]);
	}
	kfree(pages);
}

static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
{
	struct page **pages;
	int i;

	pages = kcalloc(size, sizeof(struct page *), gfp_flags);
	if (!pages) {
		dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
		return NULL;
	}

	for (i = 0; i < size; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i]) {
			dprintk("%s: failed to allocate page\n", __func__);
			nfs4_free_pages(pages, size);
			return NULL;
		}
	}

	return pages;
}

static void nfs4_layoutget_release(void *calldata)
{
	struct nfs4_layoutget *lgp = calldata;
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
	size_t max_pages = max_response_pages(server);

	dprintk("--> %s\n", __func__);
	nfs4_free_pages(lgp->args.layout.pages, max_pages);
	put_nfs_open_context(lgp->args.ctx);
	kfree(calldata);
	dprintk("<-- %s\n", __func__);
}

static const struct rpc_call_ops nfs4_layoutget_call_ops = {
	.rpc_call_prepare = nfs4_layoutget_prepare,
	.rpc_call_done = nfs4_layoutget_done,
	.rpc_release = nfs4_layoutget_release,
};

struct pnfs_layout_segment *
nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
{
	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
	size_t max_pages = max_response_pages(server);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
		.rpc_argp = &lgp->args,
		.rpc_resp = &lgp->res,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = server->client,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutget_call_ops,
		.callback_data = lgp,
		.flags = RPC_TASK_ASYNC,
	};
	struct pnfs_layout_segment *lseg = NULL;
	int status = 0;

	dprintk("--> %s\n", __func__);

	lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
	if (!lgp->args.layout.pages) {
		nfs4_layoutget_release(lgp);
		return ERR_PTR(-ENOMEM);
	}
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;

	lgp->res.layoutp = &lgp->args.layout;
	lgp->res.seq_res.sr_slot = NULL;
	nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return ERR_CAST(task);
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status == 0)
		status = task->tk_status;
	if (status == 0)
		lseg = pnfs_layout_process(lgp);
	rpc_put_task(task);
	dprintk("<-- %s status=%d\n", __func__, status);
	if (status)
		return ERR_PTR(status);
	return lseg;
}
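/*
 * LAYOUTRETURN.  The request is sent as a sequenced asynchronous RPC;
 * on completion the layout stateid is updated if the server reports
 * that it still holds layout state (lrs_present), and the reference
 * pinning the layout header is dropped in the release callback.
 */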
static void
nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;

	dprintk("--> %s\n", __func__);
	if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
				&lrp->res.seq_res, task))
		return;
	rpc_call_start(task);
}

static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;
	struct nfs_server *server;

	dprintk("--> %s\n", __func__);

	if (!nfs4_sequence_done(task, &lrp->res.seq_res))
		return;

	server = NFS_SERVER(lrp->args.inode);
	if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
	dprintk("<-- %s\n", __func__);
}

static void nfs4_layoutreturn_release(void *calldata)
{
	struct nfs4_layoutreturn *lrp = calldata;
	struct pnfs_layout_hdr *lo = lrp->args.layout;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (lrp->res.lrs_present)
		pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
	lo->plh_block_lgets--;
	spin_unlock(&lo->plh_inode->i_lock);
	pnfs_put_layout_hdr(lrp->args.layout);
	kfree(calldata);
	dprintk("<-- %s\n", __func__);
}

static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
	.rpc_call_prepare = nfs4_layoutreturn_prepare,
	.rpc_call_done = nfs4_layoutreturn_done,
	.rpc_release = nfs4_layoutreturn_release,
};

int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
		.rpc_argp = &lrp->args,
		.rpc_resp = &lrp->res,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = lrp->clp->cl_rpcclient,
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutreturn_call_ops,
		.callback_data = lrp,
	};
	int status;

	dprintk("--> %s\n", __func__);
	nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	dprintk("<-- %s status=%d\n", __func__, status);
	rpc_put_task(task);
	return status;
}
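/*
 * GETDEVICELIST and GETDEVICEINFO.  The exported wrappers below retry
 * the bare _nfs4_* calls through nfs4_handle_exception(), so callers
 * (typically pNFS layout drivers) do not have to deal with transient
 * session and state errors themselves.
 */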
/*
 * Retrieve the list of Data Server devices from the MDS.
 */
static int _nfs4_getdevicelist(struct nfs_server *server,
		const struct nfs_fh *fh,
		struct pnfs_devicelist *devlist)
{
	struct nfs4_getdevicelist_args args = {
		.fh = fh,
		.layoutclass = server->pnfs_curr_ld->id,
	};
	struct nfs4_getdevicelist_res res = {
		.devlist = devlist,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
				&res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);
	return status;
}

int nfs4_proc_getdevicelist(struct nfs_server *server,
		const struct nfs_fh *fh,
		struct pnfs_devicelist *devlist)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
				_nfs4_getdevicelist(server, fh, devlist),
				&exception);
	} while (exception.retry);

	dprintk("%s: err=%d, num_devs=%u\n", __func__,
		err, devlist->num_devs);

	return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);

static int
_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_getdeviceinfo_args args = {
		.pdev = pdev,
	};
	struct nfs4_getdeviceinfo_res res = {
		.pdev = pdev,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("--> %s\n", __func__);
	status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
	dprintk("<-- %s status=%d\n", __func__, status);

	return status;
}

int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
{
	struct nfs4_exception exception = { };
	int err;

	do {
		err = nfs4_handle_exception(server,
					_nfs4_proc_getdeviceinfo(server, pdev),
					&exception);
	} while (exception.retry);
	return err;
}
EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);

static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (nfs4_setup_sequence(server, &data->args.seq_args,
				&data->res.seq_res, task))
		return;
	rpc_call_start(task);
}

static void
nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct nfs_server *server = NFS_SERVER(data->args.inode);

	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return;

	switch (task->tk_status) { /* Just ignore these failures */
	case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
	case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
	case -NFS4ERR_BADLAYOUT: /* no layout */
	case -NFS4ERR_GRACE: /* loca_reclaim is always false */
		task->tk_status = 0;
		break;
	case 0:
		nfs_post_op_update_inode_force_wcc(data->args.inode,
						   data->res.fattr);
		break;
	default:
		if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
			rpc_restart_call_prepare(task);
			return;
		}
	}
}
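/*
 * Release callback for LAYOUTCOMMIT: drop the layout segment references
 * taken in pnfs_set_layoutcommit(), then clear NFS_INO_LAYOUTCOMMITTING
 * and wake up anyone waiting on that bit.
 */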
static void nfs4_layoutcommit_release(void *calldata)
{
	struct nfs4_layoutcommit_data *data = calldata;
	struct pnfs_layout_segment *lseg, *tmp;
	unsigned long *bitlock = &NFS_I(data->args.inode)->flags;

	pnfs_cleanup_layoutcommit(data);
	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
				       &lseg->pls_flags))
			pnfs_put_lseg(lseg);
	}

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);

	put_rpccred(data->cred);
	kfree(data);
}

static const struct rpc_call_ops nfs4_layoutcommit_ops = {
	.rpc_call_prepare = nfs4_layoutcommit_prepare,
	.rpc_call_done = nfs4_layoutcommit_done,
	.rpc_release = nfs4_layoutcommit_release,
};

int
nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = NFS_CLIENT(data->args.inode),
		.rpc_message = &msg,
		.callback_ops = &nfs4_layoutcommit_ops,
		.callback_data = data,
		.flags = RPC_TASK_ASYNC,
	};
	struct rpc_task *task;
	int status = 0;

	dprintk("NFS: %4d initiating layoutcommit call. sync %d "
		"lbw: %llu inode %lu\n",
		data->task.tk_pid, sync,
		data->args.lastbytewritten,
		data->args.inode->i_ino);

	nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (sync == false)
		goto out;
	status = nfs4_wait_for_completion_rpc_task(task);
	if (status != 0)
		goto out;
	status = task->tk_status;
out:
	dprintk("%s: status %d\n", __func__, status);
	rpc_put_task(task);
	return status;
}

static int
_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
	struct nfs41_secinfo_no_name_args args = {
		.style = SECINFO_STYLE_CURRENT_FH,
	};
	struct nfs4_secinfo_res res = {
		.flavors = flavors,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
}

static int
nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
		struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
		switch (err) {
		case 0:
		case -NFS4ERR_WRONGSEC:
		case -NFS4ERR_NOTSUPP:
			goto out;
		default:
			err = nfs4_handle_exception(server, err, &exception);
		}
	} while (exception.retry);
out:
	return err;
}
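/*
 * Choose a security flavor for the v4.1 root file handle.  SECINFO_NO_NAME
 * is tried first; if the server does not support it (WRONGSEC/NOTSUPP),
 * fall back to the NFSv4.0 style "guess and check" in nfs4_find_root_sec().
 */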
static int
nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
		    struct nfs_fsinfo *info)
{
	int err;
	struct page *page;
	rpc_authflavor_t flavor;
	struct nfs4_secinfo_flavors *flavors;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto out;
	}

	flavors = page_address(page);
	err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);

	/*
	 * Fall back on "guess and check" method if
	 * the server doesn't support SECINFO_NO_NAME
	 */
	if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
		err = nfs4_find_root_sec(server, fhandle, info);
		goto out_freepage;
	}
	if (err)
		goto out_freepage;

	flavor = nfs_find_best_sec(flavors);
	if (err == 0)
		err = nfs4_lookup_root_sec(server, fhandle, info, flavor);

out_freepage:
	put_page(page);
	if (err == -EACCES)
		return -EPERM;
out:
	return err;
}

static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
	int status;
	struct nfs41_test_stateid_args args = {
		.stateid = stateid,
	};
	struct nfs41_test_stateid_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};

	dprintk("NFS call test_stateid %p\n", stateid);
	nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
	status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
	if (status != NFS_OK) {
		dprintk("NFS reply test_stateid: failed, %d\n", status);
		return status;
	}
	dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
	return -res.status;
}

/**
 * nfs41_test_stateid - perform a TEST_STATEID operation
 *
 * @server: server / transport on which to perform the operation
 * @stateid: state ID to test
 *
 * Returns NFS_OK if the server recognizes that "stateid" is valid.
 * Otherwise a negative NFS4ERR value is returned if the operation
 * failed or the state ID is not currently valid.
 */
static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs41_test_stateid(server, stateid);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
	struct nfs41_free_stateid_args args = {
		.stateid = stateid,
	};
	struct nfs41_free_stateid_res res;
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	int status;

	dprintk("NFS call free_stateid %p\n", stateid);
	nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
	status = nfs4_call_sync_sequence(server->client, server, &msg,
					 &args.seq_args, &res.seq_res, 1);
	dprintk("NFS reply free_stateid: %d\n", status);
	return status;
}

/**
 * nfs41_free_stateid - perform a FREE_STATEID operation
 *
 * @server: server / transport on which to perform the operation
 * @stateid: state ID to release
 *
 * Returns NFS_OK if the server freed "stateid". Otherwise a
 * negative NFS4ERR value is returned.
 */
static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
{
	struct nfs4_exception exception = { };
	int err;
	do {
		err = _nfs4_free_stateid(server, stateid);
		if (err != -NFS4ERR_DELAY)
			break;
		nfs4_handle_exception(server, err, &exception);
	} while (exception.retry);
	return err;
}

static bool nfs41_match_stateid(const nfs4_stateid *s1,
		const nfs4_stateid *s2)
{
	if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
		return false;

	if (s1->seqid == s2->seqid)
		return true;
	if (s1->seqid == 0 || s2->seqid == 0)
		return true;

	return false;
}

#endif /* CONFIG_NFS_V4_1 */

static bool nfs4_match_stateid(const nfs4_stateid *s1,
		const nfs4_stateid *s2)
{
	return nfs4_stateid_match(s1, s2);
}


static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
	.recover_open = nfs4_open_reclaim,
	.recover_lock = nfs4_lock_reclaim,
	.establish_clid = nfs4_init_clientid,
	.get_clid_cred = nfs4_get_setclientid_cred,
	.detect_trunking = nfs40_discover_server_trunking,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
	.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
	.recover_open = nfs4_open_reclaim,
	.recover_lock = nfs4_lock_reclaim,
	.establish_clid = nfs41_init_clientid,
	.get_clid_cred = nfs4_get_exchange_id_cred,
	.reclaim_complete = nfs41_proc_reclaim_complete,
	.detect_trunking = nfs41_discover_server_trunking,
};
#endif /* CONFIG_NFS_V4_1 */

static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
	.recover_open = nfs4_open_expired,
	.recover_lock = nfs4_lock_expired,
	.establish_clid = nfs4_init_clientid,
	.get_clid_cred = nfs4_get_setclientid_cred,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
	.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
	.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
	.recover_open = nfs41_open_expired,
	.recover_lock = nfs41_lock_expired,
	.establish_clid = nfs41_init_clientid,
	.get_clid_cred = nfs4_get_exchange_id_cred,
};
#endif /* CONFIG_NFS_V4_1 */

static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
	.sched_state_renewal = nfs4_proc_async_renew,
	.get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
	.renew_lease = nfs4_proc_renew,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
	.sched_state_renewal = nfs41_proc_async_sequence,
	.get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
	.renew_lease = nfs4_proc_sequence,
};
#endif
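/*
 * Per-minor-version operation vectors: these select the synchronous call
 * path, stateid matching, root security negotiation and the reboot,
 * no-grace and lease-renewal handlers for NFSv4.0 and NFSv4.1.
 */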
static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
	.minor_version = 0,
	.call_sync = _nfs4_call_sync,
	.match_stateid = nfs4_match_stateid,
	.find_root_sec = nfs4_find_root_sec,
	.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
	.state_renewal_ops = &nfs40_state_renewal_ops,
};

#if defined(CONFIG_NFS_V4_1)
static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
	.minor_version = 1,
	.call_sync = _nfs4_call_sync_session,
	.match_stateid = nfs41_match_stateid,
	.find_root_sec = nfs41_find_root_sec,
	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
	.state_renewal_ops = &nfs41_state_renewal_ops,
};
#endif

const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
	[0] = &nfs_v4_0_minor_ops,
#if defined(CONFIG_NFS_V4_1)
	[1] = &nfs_v4_1_minor_ops,
#endif
};

const struct inode_operations nfs4_dir_inode_operations = {
	.create = nfs_create,
	.lookup = nfs_lookup,
	.atomic_open = nfs_atomic_open,
	.link = nfs_link,
	.unlink = nfs_unlink,
	.symlink = nfs_symlink,
	.mkdir = nfs_mkdir,
	.rmdir = nfs_rmdir,
	.mknod = nfs_mknod,
	.rename = nfs_rename,
	.permission = nfs_permission,
	.getattr = nfs_getattr,
	.setattr = nfs_setattr,
	.getxattr = generic_getxattr,
	.setxattr = generic_setxattr,
	.listxattr = generic_listxattr,
	.removexattr = generic_removexattr,
};

static const struct inode_operations nfs4_file_inode_operations = {
	.permission = nfs_permission,
	.getattr = nfs_getattr,
	.setattr = nfs_setattr,
	.getxattr = generic_getxattr,
	.setxattr = generic_setxattr,
	.listxattr = generic_listxattr,
	.removexattr = generic_removexattr,
};

const struct nfs_rpc_ops nfs_v4_clientops = {
	.version = 4,			/* protocol version */
	.dentry_ops = &nfs4_dentry_operations,
	.dir_inode_ops = &nfs4_dir_inode_operations,
	.file_inode_ops = &nfs4_file_inode_operations,
	.file_ops = &nfs4_file_operations,
	.getroot = nfs4_proc_get_root,
	.submount = nfs4_submount,
	.try_mount = nfs4_try_mount,
	.getattr = nfs4_proc_getattr,
	.setattr = nfs4_proc_setattr,
	.lookup = nfs4_proc_lookup,
	.access = nfs4_proc_access,
	.readlink = nfs4_proc_readlink,
	.create = nfs4_proc_create,
	.remove = nfs4_proc_remove,
	.unlink_setup = nfs4_proc_unlink_setup,
	.unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
	.unlink_done = nfs4_proc_unlink_done,
	.rename = nfs4_proc_rename,
	.rename_setup = nfs4_proc_rename_setup,
	.rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
	.rename_done = nfs4_proc_rename_done,
	.link = nfs4_proc_link,
	.symlink = nfs4_proc_symlink,
	.mkdir = nfs4_proc_mkdir,
	.rmdir = nfs4_proc_remove,
	.readdir = nfs4_proc_readdir,
	.mknod = nfs4_proc_mknod,
	.statfs = nfs4_proc_statfs,
	.fsinfo = nfs4_proc_fsinfo,
	.pathconf = nfs4_proc_pathconf,
	.set_capabilities = nfs4_server_capabilities,
	.decode_dirent = nfs4_decode_dirent,
	.read_setup = nfs4_proc_read_setup,
	.read_pageio_init = pnfs_pageio_init_read,
	.read_rpc_prepare = nfs4_proc_read_rpc_prepare,
	.read_done = nfs4_read_done,
	.write_setup = nfs4_proc_write_setup,
	.write_pageio_init = pnfs_pageio_init_write,
	.write_rpc_prepare = nfs4_proc_write_rpc_prepare,
	.write_done = nfs4_write_done,
	.commit_setup = nfs4_proc_commit_setup,
	.commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
	.commit_done = nfs4_commit_done,
	.lock = nfs4_proc_lock,
	.clear_acl_cache = nfs4_zap_acl_attr,
	.close_context = nfs4_close_context,
	.open_context = nfs4_atomic_open,
	.have_delegation = nfs4_have_delegation,
	.return_delegation = nfs4_inode_return_delegation,
	.alloc_client = nfs4_alloc_client,
	.init_client = nfs4_init_client,
	.free_client = nfs4_free_client,
	.create_server = nfs4_create_server,
	.clone_server = nfs_clone_server,
};

static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
	.prefix = XATTR_NAME_NFSV4_ACL,
	.list = nfs4_xattr_list_nfs4_acl,
	.get = nfs4_xattr_get_nfs4_acl,
	.set = nfs4_xattr_set_nfs4_acl,
};

const struct xattr_handler *nfs4_xattr_handlers[] = {
	&nfs4_xattr_nfs4_acl_handler,
	NULL
};

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */