1 /* 2 * fs/nfs/nfs4proc.c 3 * 4 * Client-side procedure declarations for NFSv4. 5 * 6 * Copyright (c) 2002 The Regents of the University of Michigan. 7 * All rights reserved. 8 * 9 * Kendrick Smith <kmsmith@umich.edu> 10 * Andy Adamson <andros@umich.edu> 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 #include <linux/mm.h> 39 #include <linux/delay.h> 40 #include <linux/errno.h> 41 #include <linux/string.h> 42 #include <linux/ratelimit.h> 43 #include <linux/printk.h> 44 #include <linux/slab.h> 45 #include <linux/sunrpc/clnt.h> 46 #include <linux/nfs.h> 47 #include <linux/nfs4.h> 48 #include <linux/nfs_fs.h> 49 #include <linux/nfs_page.h> 50 #include <linux/nfs_mount.h> 51 #include <linux/namei.h> 52 #include <linux/mount.h> 53 #include <linux/module.h> 54 #include <linux/nfs_idmap.h> 55 #include <linux/xattr.h> 56 #include <linux/utsname.h> 57 #include <linux/freezer.h> 58 59 #include "nfs4_fs.h" 60 #include "delegation.h" 61 #include "internal.h" 62 #include "iostat.h" 63 #include "callback.h" 64 #include "pnfs.h" 65 #include "netns.h" 66 #include "nfs4session.h" 67 #include "fscache.h" 68 69 #define NFSDBG_FACILITY NFSDBG_PROC 70 71 #define NFS4_POLL_RETRY_MIN (HZ/10) 72 #define NFS4_POLL_RETRY_MAX (15*HZ) 73 74 struct nfs4_opendata; 75 static int _nfs4_proc_open(struct nfs4_opendata *data); 76 static int _nfs4_recover_proc_open(struct nfs4_opendata *data); 77 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); 78 static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *); 79 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); 80 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *); 81 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr); 82 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 83 struct nfs_fattr *fattr, struct iattr *sattr, 84 struct nfs4_state *state); 85 #ifdef CONFIG_NFS_V4_1 86 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *); 87 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *); 88 #endif 89 /* Prevent leaks of NFSv4 errors into userland */ 90 static int nfs4_map_errors(int err) 91 { 92 if (err >= -1000) 93 return err; 94 switch (err) { 95 case -NFS4ERR_RESOURCE: 96 case -NFS4ERR_LAYOUTTRYLATER: 97 case -NFS4ERR_RECALLCONFLICT: 98 return -EREMOTEIO; 99 case -NFS4ERR_WRONGSEC: 100 return -EPERM; 101 case -NFS4ERR_BADOWNER: 102 case -NFS4ERR_BADNAME: 103 return -EINVAL; 104 case -NFS4ERR_SHARE_DENIED: 105 return -EACCES; 106 case -NFS4ERR_MINOR_VERS_MISMATCH: 107 return -EPROTONOSUPPORT; 108 case -NFS4ERR_ACCESS: 109 return -EACCES; 110 default: 111 dprintk("%s could not handle NFSv4 error %d\n", 112 __func__, -err); 113 break; 114 } 115 return -EIO; 116 } 117 118 /* 119 * This is our standard bitmap for GETATTR requests. 120 */ 121 const u32 nfs4_fattr_bitmap[3] = { 122 FATTR4_WORD0_TYPE 123 | FATTR4_WORD0_CHANGE 124 | FATTR4_WORD0_SIZE 125 | FATTR4_WORD0_FSID 126 | FATTR4_WORD0_FILEID, 127 FATTR4_WORD1_MODE 128 | FATTR4_WORD1_NUMLINKS 129 | FATTR4_WORD1_OWNER 130 | FATTR4_WORD1_OWNER_GROUP 131 | FATTR4_WORD1_RAWDEV 132 | FATTR4_WORD1_SPACE_USED 133 | FATTR4_WORD1_TIME_ACCESS 134 | FATTR4_WORD1_TIME_METADATA 135 | FATTR4_WORD1_TIME_MODIFY 136 }; 137 138 static const u32 nfs4_pnfs_open_bitmap[3] = { 139 FATTR4_WORD0_TYPE 140 | FATTR4_WORD0_CHANGE 141 | FATTR4_WORD0_SIZE 142 | FATTR4_WORD0_FSID 143 | FATTR4_WORD0_FILEID, 144 FATTR4_WORD1_MODE 145 | FATTR4_WORD1_NUMLINKS 146 | FATTR4_WORD1_OWNER 147 | FATTR4_WORD1_OWNER_GROUP 148 | FATTR4_WORD1_RAWDEV 149 | FATTR4_WORD1_SPACE_USED 150 | FATTR4_WORD1_TIME_ACCESS 151 | FATTR4_WORD1_TIME_METADATA 152 | FATTR4_WORD1_TIME_MODIFY, 153 FATTR4_WORD2_MDSTHRESHOLD 154 }; 155 156 static const u32 nfs4_open_noattr_bitmap[3] = { 157 FATTR4_WORD0_TYPE 158 | FATTR4_WORD0_CHANGE 159 | FATTR4_WORD0_FILEID, 160 }; 161 162 const u32 nfs4_statfs_bitmap[2] = { 163 FATTR4_WORD0_FILES_AVAIL 164 | FATTR4_WORD0_FILES_FREE 165 | FATTR4_WORD0_FILES_TOTAL, 166 FATTR4_WORD1_SPACE_AVAIL 167 | FATTR4_WORD1_SPACE_FREE 168 | FATTR4_WORD1_SPACE_TOTAL 169 }; 170 171 const u32 nfs4_pathconf_bitmap[2] = { 172 FATTR4_WORD0_MAXLINK 173 | FATTR4_WORD0_MAXNAME, 174 0 175 }; 176 177 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE 178 | FATTR4_WORD0_MAXREAD 179 | FATTR4_WORD0_MAXWRITE 180 | FATTR4_WORD0_LEASE_TIME, 181 FATTR4_WORD1_TIME_DELTA 182 | FATTR4_WORD1_FS_LAYOUT_TYPES, 183 FATTR4_WORD2_LAYOUT_BLKSIZE 184 }; 185 186 const u32 nfs4_fs_locations_bitmap[2] = { 187 FATTR4_WORD0_TYPE 188 | FATTR4_WORD0_CHANGE 189 | FATTR4_WORD0_SIZE 190 | FATTR4_WORD0_FSID 191 | FATTR4_WORD0_FILEID 192 | FATTR4_WORD0_FS_LOCATIONS, 193 FATTR4_WORD1_MODE 194 | FATTR4_WORD1_NUMLINKS 195 | FATTR4_WORD1_OWNER 196 | FATTR4_WORD1_OWNER_GROUP 197 | FATTR4_WORD1_RAWDEV 198 | FATTR4_WORD1_SPACE_USED 199 | FATTR4_WORD1_TIME_ACCESS 200 | FATTR4_WORD1_TIME_METADATA 201 | FATTR4_WORD1_TIME_MODIFY 202 | FATTR4_WORD1_MOUNTED_ON_FILEID 203 }; 204 205 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry, 206 struct nfs4_readdir_arg *readdir) 207 { 208 __be32 *start, *p; 209 210 if (cookie > 2) { 211 readdir->cookie = cookie; 212 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier)); 213 return; 214 } 215 216 readdir->cookie = 0; 217 memset(&readdir->verifier, 0, sizeof(readdir->verifier)); 218 if (cookie == 2) 219 return; 220 221 /* 222 * NFSv4 servers do not return entries for '.' and '..' 223 * Therefore, we fake these entries here. We let '.' 224 * have cookie 0 and '..' have cookie 1. Note that 225 * when talking to the server, we always send cookie 0 226 * instead of 1 or 2. 227 */ 228 start = p = kmap_atomic(*readdir->pages); 229 230 if (cookie == 0) { 231 *p++ = xdr_one; /* next */ 232 *p++ = xdr_zero; /* cookie, first word */ 233 *p++ = xdr_one; /* cookie, second word */ 234 *p++ = xdr_one; /* entry len */ 235 memcpy(p, ".\0\0\0", 4); /* entry */ 236 p++; 237 *p++ = xdr_one; /* bitmap length */ 238 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */ 239 *p++ = htonl(8); /* attribute buffer length */ 240 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode)); 241 } 242 243 *p++ = xdr_one; /* next */ 244 *p++ = xdr_zero; /* cookie, first word */ 245 *p++ = xdr_two; /* cookie, second word */ 246 *p++ = xdr_two; /* entry len */ 247 memcpy(p, "..\0\0", 4); /* entry */ 248 p++; 249 *p++ = xdr_one; /* bitmap length */ 250 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */ 251 *p++ = htonl(8); /* attribute buffer length */ 252 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode)); 253 254 readdir->pgbase = (char *)p - (char *)start; 255 readdir->count -= readdir->pgbase; 256 kunmap_atomic(start); 257 } 258 259 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) 260 { 261 int res = 0; 262 263 might_sleep(); 264 265 if (*timeout <= 0) 266 *timeout = NFS4_POLL_RETRY_MIN; 267 if (*timeout > NFS4_POLL_RETRY_MAX) 268 *timeout = NFS4_POLL_RETRY_MAX; 269 freezable_schedule_timeout_killable(*timeout); 270 if (fatal_signal_pending(current)) 271 res = -ERESTARTSYS; 272 *timeout <<= 1; 273 return res; 274 } 275 276 /* This is the error handling routine for processes that are allowed 277 * to sleep. 278 */ 279 static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) 280 { 281 struct nfs_client *clp = server->nfs_client; 282 struct nfs4_state *state = exception->state; 283 struct inode *inode = exception->inode; 284 int ret = errorcode; 285 286 exception->retry = 0; 287 switch(errorcode) { 288 case 0: 289 return 0; 290 case -NFS4ERR_OPENMODE: 291 if (inode && nfs4_have_delegation(inode, FMODE_READ)) { 292 nfs4_inode_return_delegation(inode); 293 exception->retry = 1; 294 return 0; 295 } 296 if (state == NULL) 297 break; 298 nfs4_schedule_stateid_recovery(server, state); 299 goto wait_on_recovery; 300 case -NFS4ERR_DELEG_REVOKED: 301 case -NFS4ERR_ADMIN_REVOKED: 302 case -NFS4ERR_BAD_STATEID: 303 if (state == NULL) 304 break; 305 nfs_remove_bad_delegation(state->inode); 306 nfs4_schedule_stateid_recovery(server, state); 307 goto wait_on_recovery; 308 case -NFS4ERR_EXPIRED: 309 if (state != NULL) 310 nfs4_schedule_stateid_recovery(server, state); 311 case -NFS4ERR_STALE_STATEID: 312 case -NFS4ERR_STALE_CLIENTID: 313 nfs4_schedule_lease_recovery(clp); 314 goto wait_on_recovery; 315 #if defined(CONFIG_NFS_V4_1) 316 case -NFS4ERR_BADSESSION: 317 case -NFS4ERR_BADSLOT: 318 case -NFS4ERR_BAD_HIGH_SLOT: 319 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 320 case -NFS4ERR_DEADSESSION: 321 case -NFS4ERR_SEQ_FALSE_RETRY: 322 case -NFS4ERR_SEQ_MISORDERED: 323 dprintk("%s ERROR: %d Reset session\n", __func__, 324 errorcode); 325 nfs4_schedule_session_recovery(clp->cl_session, errorcode); 326 goto wait_on_recovery; 327 #endif /* defined(CONFIG_NFS_V4_1) */ 328 case -NFS4ERR_FILE_OPEN: 329 if (exception->timeout > HZ) { 330 /* We have retried a decent amount, time to 331 * fail 332 */ 333 ret = -EBUSY; 334 break; 335 } 336 case -NFS4ERR_GRACE: 337 case -NFS4ERR_DELAY: 338 ret = nfs4_delay(server->client, &exception->timeout); 339 if (ret != 0) 340 break; 341 case -NFS4ERR_RETRY_UNCACHED_REP: 342 case -NFS4ERR_OLD_STATEID: 343 exception->retry = 1; 344 break; 345 case -NFS4ERR_BADOWNER: 346 /* The following works around a Linux server bug! */ 347 case -NFS4ERR_BADNAME: 348 if (server->caps & NFS_CAP_UIDGID_NOMAP) { 349 server->caps &= ~NFS_CAP_UIDGID_NOMAP; 350 exception->retry = 1; 351 printk(KERN_WARNING "NFS: v4 server %s " 352 "does not accept raw " 353 "uid/gids. " 354 "Reenabling the idmapper.\n", 355 server->nfs_client->cl_hostname); 356 } 357 } 358 /* We failed to handle the error */ 359 return nfs4_map_errors(ret); 360 wait_on_recovery: 361 ret = nfs4_wait_clnt_recover(clp); 362 if (ret == 0) 363 exception->retry = 1; 364 return ret; 365 } 366 367 368 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp) 369 { 370 spin_lock(&clp->cl_lock); 371 if (time_before(clp->cl_last_renewal,timestamp)) 372 clp->cl_last_renewal = timestamp; 373 spin_unlock(&clp->cl_lock); 374 } 375 376 static void renew_lease(const struct nfs_server *server, unsigned long timestamp) 377 { 378 do_renew_lease(server->nfs_client, timestamp); 379 } 380 381 #if defined(CONFIG_NFS_V4_1) 382 383 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) 384 { 385 struct nfs4_session *session; 386 struct nfs4_slot_table *tbl; 387 bool send_new_highest_used_slotid = false; 388 389 if (!res->sr_slot) { 390 /* just wake up the next guy waiting since 391 * we may have not consumed a slot after all */ 392 dprintk("%s: No slot\n", __func__); 393 return; 394 } 395 tbl = res->sr_slot->table; 396 session = tbl->session; 397 398 spin_lock(&tbl->slot_tbl_lock); 399 /* Be nice to the server: try to ensure that the last transmitted 400 * value for highest_user_slotid <= target_highest_slotid 401 */ 402 if (tbl->highest_used_slotid > tbl->target_highest_slotid) 403 send_new_highest_used_slotid = true; 404 405 if (nfs41_wake_and_assign_slot(tbl, res->sr_slot)) { 406 send_new_highest_used_slotid = false; 407 goto out_unlock; 408 } 409 nfs4_free_slot(tbl, res->sr_slot); 410 411 if (tbl->highest_used_slotid != NFS4_NO_SLOT) 412 send_new_highest_used_slotid = false; 413 out_unlock: 414 spin_unlock(&tbl->slot_tbl_lock); 415 res->sr_slot = NULL; 416 if (send_new_highest_used_slotid) 417 nfs41_server_notify_highest_slotid_update(session->clp); 418 } 419 420 static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) 421 { 422 struct nfs4_session *session; 423 struct nfs4_slot *slot; 424 struct nfs_client *clp; 425 bool interrupted = false; 426 int ret = 1; 427 428 /* don't increment the sequence number if the task wasn't sent */ 429 if (!RPC_WAS_SENT(task)) 430 goto out; 431 432 slot = res->sr_slot; 433 session = slot->table->session; 434 435 if (slot->interrupted) { 436 slot->interrupted = 0; 437 interrupted = true; 438 } 439 440 /* Check the SEQUENCE operation status */ 441 switch (res->sr_status) { 442 case 0: 443 /* Update the slot's sequence and clientid lease timer */ 444 ++slot->seq_nr; 445 clp = session->clp; 446 do_renew_lease(clp, res->sr_timestamp); 447 /* Check sequence flags */ 448 if (res->sr_status_flags != 0) 449 nfs4_schedule_lease_recovery(clp); 450 nfs41_update_target_slotid(slot->table, slot, res); 451 break; 452 case 1: 453 /* 454 * sr_status remains 1 if an RPC level error occurred. 455 * The server may or may not have processed the sequence 456 * operation.. 457 * Mark the slot as having hosted an interrupted RPC call. 458 */ 459 slot->interrupted = 1; 460 goto out; 461 case -NFS4ERR_DELAY: 462 /* The server detected a resend of the RPC call and 463 * returned NFS4ERR_DELAY as per Section 2.10.6.2 464 * of RFC5661. 465 */ 466 dprintk("%s: slot=%u seq=%u: Operation in progress\n", 467 __func__, 468 slot->slot_nr, 469 slot->seq_nr); 470 goto out_retry; 471 case -NFS4ERR_BADSLOT: 472 /* 473 * The slot id we used was probably retired. Try again 474 * using a different slot id. 475 */ 476 goto retry_nowait; 477 case -NFS4ERR_SEQ_MISORDERED: 478 /* 479 * Was the last operation on this sequence interrupted? 480 * If so, retry after bumping the sequence number. 481 */ 482 if (interrupted) { 483 ++slot->seq_nr; 484 goto retry_nowait; 485 } 486 /* 487 * Could this slot have been previously retired? 488 * If so, then the server may be expecting seq_nr = 1! 489 */ 490 if (slot->seq_nr != 1) { 491 slot->seq_nr = 1; 492 goto retry_nowait; 493 } 494 break; 495 case -NFS4ERR_SEQ_FALSE_RETRY: 496 ++slot->seq_nr; 497 goto retry_nowait; 498 default: 499 /* Just update the slot sequence no. */ 500 ++slot->seq_nr; 501 } 502 out: 503 /* The session may be reset by one of the error handlers. */ 504 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status); 505 nfs41_sequence_free_slot(res); 506 return ret; 507 retry_nowait: 508 if (rpc_restart_call_prepare(task)) { 509 task->tk_status = 0; 510 ret = 0; 511 } 512 goto out; 513 out_retry: 514 if (!rpc_restart_call(task)) 515 goto out; 516 rpc_delay(task, NFS4_POLL_RETRY_MAX); 517 return 0; 518 } 519 520 static int nfs4_sequence_done(struct rpc_task *task, 521 struct nfs4_sequence_res *res) 522 { 523 if (res->sr_slot == NULL) 524 return 1; 525 return nfs41_sequence_done(task, res); 526 } 527 528 static void nfs41_init_sequence(struct nfs4_sequence_args *args, 529 struct nfs4_sequence_res *res, int cache_reply) 530 { 531 args->sa_slot = NULL; 532 args->sa_cache_this = 0; 533 args->sa_privileged = 0; 534 if (cache_reply) 535 args->sa_cache_this = 1; 536 res->sr_slot = NULL; 537 } 538 539 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args) 540 { 541 args->sa_privileged = 1; 542 } 543 544 int nfs41_setup_sequence(struct nfs4_session *session, 545 struct nfs4_sequence_args *args, 546 struct nfs4_sequence_res *res, 547 struct rpc_task *task) 548 { 549 struct nfs4_slot *slot; 550 struct nfs4_slot_table *tbl; 551 552 dprintk("--> %s\n", __func__); 553 /* slot already allocated? */ 554 if (res->sr_slot != NULL) 555 goto out_success; 556 557 tbl = &session->fc_slot_table; 558 559 task->tk_timeout = 0; 560 561 spin_lock(&tbl->slot_tbl_lock); 562 if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) && 563 !args->sa_privileged) { 564 /* The state manager will wait until the slot table is empty */ 565 dprintk("%s session is draining\n", __func__); 566 goto out_sleep; 567 } 568 569 slot = nfs4_alloc_slot(tbl); 570 if (IS_ERR(slot)) { 571 /* If out of memory, try again in 1/4 second */ 572 if (slot == ERR_PTR(-ENOMEM)) 573 task->tk_timeout = HZ >> 2; 574 dprintk("<-- %s: no free slots\n", __func__); 575 goto out_sleep; 576 } 577 spin_unlock(&tbl->slot_tbl_lock); 578 579 args->sa_slot = slot; 580 581 dprintk("<-- %s slotid=%d seqid=%d\n", __func__, 582 slot->slot_nr, slot->seq_nr); 583 584 res->sr_slot = slot; 585 res->sr_timestamp = jiffies; 586 res->sr_status_flags = 0; 587 /* 588 * sr_status is only set in decode_sequence, and so will remain 589 * set to 1 if an rpc level failure occurs. 590 */ 591 res->sr_status = 1; 592 out_success: 593 rpc_call_start(task); 594 return 0; 595 out_sleep: 596 /* Privileged tasks are queued with top priority */ 597 if (args->sa_privileged) 598 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task, 599 NULL, RPC_PRIORITY_PRIVILEGED); 600 else 601 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL); 602 spin_unlock(&tbl->slot_tbl_lock); 603 return -EAGAIN; 604 } 605 EXPORT_SYMBOL_GPL(nfs41_setup_sequence); 606 607 int nfs4_setup_sequence(const struct nfs_server *server, 608 struct nfs4_sequence_args *args, 609 struct nfs4_sequence_res *res, 610 struct rpc_task *task) 611 { 612 struct nfs4_session *session = nfs4_get_session(server); 613 int ret = 0; 614 615 if (session == NULL) { 616 rpc_call_start(task); 617 goto out; 618 } 619 620 dprintk("--> %s clp %p session %p sr_slot %d\n", 621 __func__, session->clp, session, res->sr_slot ? 622 res->sr_slot->slot_nr : -1); 623 624 ret = nfs41_setup_sequence(session, args, res, task); 625 out: 626 dprintk("<-- %s status=%d\n", __func__, ret); 627 return ret; 628 } 629 630 struct nfs41_call_sync_data { 631 const struct nfs_server *seq_server; 632 struct nfs4_sequence_args *seq_args; 633 struct nfs4_sequence_res *seq_res; 634 }; 635 636 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata) 637 { 638 struct nfs41_call_sync_data *data = calldata; 639 struct nfs4_session *session = nfs4_get_session(data->seq_server); 640 641 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server); 642 643 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task); 644 } 645 646 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata) 647 { 648 struct nfs41_call_sync_data *data = calldata; 649 650 nfs41_sequence_done(task, data->seq_res); 651 } 652 653 static const struct rpc_call_ops nfs41_call_sync_ops = { 654 .rpc_call_prepare = nfs41_call_sync_prepare, 655 .rpc_call_done = nfs41_call_sync_done, 656 }; 657 658 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt, 659 struct nfs_server *server, 660 struct rpc_message *msg, 661 struct nfs4_sequence_args *args, 662 struct nfs4_sequence_res *res) 663 { 664 int ret; 665 struct rpc_task *task; 666 struct nfs41_call_sync_data data = { 667 .seq_server = server, 668 .seq_args = args, 669 .seq_res = res, 670 }; 671 struct rpc_task_setup task_setup = { 672 .rpc_client = clnt, 673 .rpc_message = msg, 674 .callback_ops = &nfs41_call_sync_ops, 675 .callback_data = &data 676 }; 677 678 task = rpc_run_task(&task_setup); 679 if (IS_ERR(task)) 680 ret = PTR_ERR(task); 681 else { 682 ret = task->tk_status; 683 rpc_put_task(task); 684 } 685 return ret; 686 } 687 688 #else 689 static 690 void nfs41_init_sequence(struct nfs4_sequence_args *args, 691 struct nfs4_sequence_res *res, int cache_reply) 692 { 693 } 694 695 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args) 696 { 697 } 698 699 700 static int nfs4_sequence_done(struct rpc_task *task, 701 struct nfs4_sequence_res *res) 702 { 703 return 1; 704 } 705 #endif /* CONFIG_NFS_V4_1 */ 706 707 static 708 int _nfs4_call_sync(struct rpc_clnt *clnt, 709 struct nfs_server *server, 710 struct rpc_message *msg, 711 struct nfs4_sequence_args *args, 712 struct nfs4_sequence_res *res) 713 { 714 return rpc_call_sync(clnt, msg, 0); 715 } 716 717 static 718 int nfs4_call_sync(struct rpc_clnt *clnt, 719 struct nfs_server *server, 720 struct rpc_message *msg, 721 struct nfs4_sequence_args *args, 722 struct nfs4_sequence_res *res, 723 int cache_reply) 724 { 725 nfs41_init_sequence(args, res, cache_reply); 726 return server->nfs_client->cl_mvops->call_sync(clnt, server, msg, 727 args, res); 728 } 729 730 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) 731 { 732 struct nfs_inode *nfsi = NFS_I(dir); 733 734 spin_lock(&dir->i_lock); 735 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 736 if (!cinfo->atomic || cinfo->before != dir->i_version) 737 nfs_force_lookup_revalidate(dir); 738 dir->i_version = cinfo->after; 739 nfs_fscache_invalidate(dir); 740 spin_unlock(&dir->i_lock); 741 } 742 743 struct nfs4_opendata { 744 struct kref kref; 745 struct nfs_openargs o_arg; 746 struct nfs_openres o_res; 747 struct nfs_open_confirmargs c_arg; 748 struct nfs_open_confirmres c_res; 749 struct nfs4_string owner_name; 750 struct nfs4_string group_name; 751 struct nfs_fattr f_attr; 752 struct dentry *dir; 753 struct dentry *dentry; 754 struct nfs4_state_owner *owner; 755 struct nfs4_state *state; 756 struct iattr attrs; 757 unsigned long timestamp; 758 unsigned int rpc_done : 1; 759 int rpc_status; 760 int cancelled; 761 }; 762 763 764 static void nfs4_init_opendata_res(struct nfs4_opendata *p) 765 { 766 p->o_res.f_attr = &p->f_attr; 767 p->o_res.seqid = p->o_arg.seqid; 768 p->c_res.seqid = p->c_arg.seqid; 769 p->o_res.server = p->o_arg.server; 770 p->o_res.access_request = p->o_arg.access; 771 nfs_fattr_init(&p->f_attr); 772 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name); 773 } 774 775 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, 776 struct nfs4_state_owner *sp, fmode_t fmode, int flags, 777 const struct iattr *attrs, 778 gfp_t gfp_mask) 779 { 780 struct dentry *parent = dget_parent(dentry); 781 struct inode *dir = parent->d_inode; 782 struct nfs_server *server = NFS_SERVER(dir); 783 struct nfs4_opendata *p; 784 785 p = kzalloc(sizeof(*p), gfp_mask); 786 if (p == NULL) 787 goto err; 788 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask); 789 if (p->o_arg.seqid == NULL) 790 goto err_free; 791 nfs_sb_active(dentry->d_sb); 792 p->dentry = dget(dentry); 793 p->dir = parent; 794 p->owner = sp; 795 atomic_inc(&sp->so_count); 796 p->o_arg.fh = NFS_FH(dir); 797 p->o_arg.open_flags = flags; 798 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); 799 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS 800 * will return permission denied for all bits until close */ 801 if (!(flags & O_EXCL)) { 802 /* ask server to check for all possible rights as results 803 * are cached */ 804 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY | 805 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE; 806 } 807 p->o_arg.clientid = server->nfs_client->cl_clientid; 808 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time); 809 p->o_arg.id.uniquifier = sp->so_seqid.owner_id; 810 p->o_arg.name = &dentry->d_name; 811 p->o_arg.server = server; 812 p->o_arg.bitmask = server->attr_bitmask; 813 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 814 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; 815 if (attrs != NULL && attrs->ia_valid != 0) { 816 __be32 verf[2]; 817 818 p->o_arg.u.attrs = &p->attrs; 819 memcpy(&p->attrs, attrs, sizeof(p->attrs)); 820 821 verf[0] = jiffies; 822 verf[1] = current->pid; 823 memcpy(p->o_arg.u.verifier.data, verf, 824 sizeof(p->o_arg.u.verifier.data)); 825 } 826 p->c_arg.fh = &p->o_res.fh; 827 p->c_arg.stateid = &p->o_res.stateid; 828 p->c_arg.seqid = p->o_arg.seqid; 829 nfs4_init_opendata_res(p); 830 kref_init(&p->kref); 831 return p; 832 err_free: 833 kfree(p); 834 err: 835 dput(parent); 836 return NULL; 837 } 838 839 static void nfs4_opendata_free(struct kref *kref) 840 { 841 struct nfs4_opendata *p = container_of(kref, 842 struct nfs4_opendata, kref); 843 struct super_block *sb = p->dentry->d_sb; 844 845 nfs_free_seqid(p->o_arg.seqid); 846 if (p->state != NULL) 847 nfs4_put_open_state(p->state); 848 nfs4_put_state_owner(p->owner); 849 dput(p->dir); 850 dput(p->dentry); 851 nfs_sb_deactive(sb); 852 nfs_fattr_free_names(&p->f_attr); 853 kfree(p); 854 } 855 856 static void nfs4_opendata_put(struct nfs4_opendata *p) 857 { 858 if (p != NULL) 859 kref_put(&p->kref, nfs4_opendata_free); 860 } 861 862 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) 863 { 864 int ret; 865 866 ret = rpc_wait_for_completion_task(task); 867 return ret; 868 } 869 870 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode) 871 { 872 int ret = 0; 873 874 if (open_mode & (O_EXCL|O_TRUNC)) 875 goto out; 876 switch (mode & (FMODE_READ|FMODE_WRITE)) { 877 case FMODE_READ: 878 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 879 && state->n_rdonly != 0; 880 break; 881 case FMODE_WRITE: 882 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 883 && state->n_wronly != 0; 884 break; 885 case FMODE_READ|FMODE_WRITE: 886 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 887 && state->n_rdwr != 0; 888 } 889 out: 890 return ret; 891 } 892 893 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) 894 { 895 if (delegation == NULL) 896 return 0; 897 if ((delegation->type & fmode) != fmode) 898 return 0; 899 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) 900 return 0; 901 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) 902 return 0; 903 nfs_mark_delegation_referenced(delegation); 904 return 1; 905 } 906 907 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode) 908 { 909 switch (fmode) { 910 case FMODE_WRITE: 911 state->n_wronly++; 912 break; 913 case FMODE_READ: 914 state->n_rdonly++; 915 break; 916 case FMODE_READ|FMODE_WRITE: 917 state->n_rdwr++; 918 } 919 nfs4_state_set_mode_locked(state, state->state | fmode); 920 } 921 922 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) 923 { 924 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 925 nfs4_stateid_copy(&state->stateid, stateid); 926 nfs4_stateid_copy(&state->open_stateid, stateid); 927 switch (fmode) { 928 case FMODE_READ: 929 set_bit(NFS_O_RDONLY_STATE, &state->flags); 930 break; 931 case FMODE_WRITE: 932 set_bit(NFS_O_WRONLY_STATE, &state->flags); 933 break; 934 case FMODE_READ|FMODE_WRITE: 935 set_bit(NFS_O_RDWR_STATE, &state->flags); 936 } 937 } 938 939 static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) 940 { 941 write_seqlock(&state->seqlock); 942 nfs_set_open_stateid_locked(state, stateid, fmode); 943 write_sequnlock(&state->seqlock); 944 } 945 946 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode) 947 { 948 /* 949 * Protect the call to nfs4_state_set_mode_locked and 950 * serialise the stateid update 951 */ 952 write_seqlock(&state->seqlock); 953 if (deleg_stateid != NULL) { 954 nfs4_stateid_copy(&state->stateid, deleg_stateid); 955 set_bit(NFS_DELEGATED_STATE, &state->flags); 956 } 957 if (open_stateid != NULL) 958 nfs_set_open_stateid_locked(state, open_stateid, fmode); 959 write_sequnlock(&state->seqlock); 960 spin_lock(&state->owner->so_lock); 961 update_open_stateflags(state, fmode); 962 spin_unlock(&state->owner->so_lock); 963 } 964 965 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode) 966 { 967 struct nfs_inode *nfsi = NFS_I(state->inode); 968 struct nfs_delegation *deleg_cur; 969 int ret = 0; 970 971 fmode &= (FMODE_READ|FMODE_WRITE); 972 973 rcu_read_lock(); 974 deleg_cur = rcu_dereference(nfsi->delegation); 975 if (deleg_cur == NULL) 976 goto no_delegation; 977 978 spin_lock(&deleg_cur->lock); 979 if (nfsi->delegation != deleg_cur || 980 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) || 981 (deleg_cur->type & fmode) != fmode) 982 goto no_delegation_unlock; 983 984 if (delegation == NULL) 985 delegation = &deleg_cur->stateid; 986 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation)) 987 goto no_delegation_unlock; 988 989 nfs_mark_delegation_referenced(deleg_cur); 990 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode); 991 ret = 1; 992 no_delegation_unlock: 993 spin_unlock(&deleg_cur->lock); 994 no_delegation: 995 rcu_read_unlock(); 996 997 if (!ret && open_stateid != NULL) { 998 __update_open_stateid(state, open_stateid, NULL, fmode); 999 ret = 1; 1000 } 1001 1002 return ret; 1003 } 1004 1005 1006 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) 1007 { 1008 struct nfs_delegation *delegation; 1009 1010 rcu_read_lock(); 1011 delegation = rcu_dereference(NFS_I(inode)->delegation); 1012 if (delegation == NULL || (delegation->type & fmode) == fmode) { 1013 rcu_read_unlock(); 1014 return; 1015 } 1016 rcu_read_unlock(); 1017 nfs4_inode_return_delegation(inode); 1018 } 1019 1020 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) 1021 { 1022 struct nfs4_state *state = opendata->state; 1023 struct nfs_inode *nfsi = NFS_I(state->inode); 1024 struct nfs_delegation *delegation; 1025 int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC); 1026 fmode_t fmode = opendata->o_arg.fmode; 1027 nfs4_stateid stateid; 1028 int ret = -EAGAIN; 1029 1030 for (;;) { 1031 if (can_open_cached(state, fmode, open_mode)) { 1032 spin_lock(&state->owner->so_lock); 1033 if (can_open_cached(state, fmode, open_mode)) { 1034 update_open_stateflags(state, fmode); 1035 spin_unlock(&state->owner->so_lock); 1036 goto out_return_state; 1037 } 1038 spin_unlock(&state->owner->so_lock); 1039 } 1040 rcu_read_lock(); 1041 delegation = rcu_dereference(nfsi->delegation); 1042 if (!can_open_delegated(delegation, fmode)) { 1043 rcu_read_unlock(); 1044 break; 1045 } 1046 /* Save the delegation */ 1047 nfs4_stateid_copy(&stateid, &delegation->stateid); 1048 rcu_read_unlock(); 1049 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); 1050 if (ret != 0) 1051 goto out; 1052 ret = -EAGAIN; 1053 1054 /* Try to update the stateid using the delegation */ 1055 if (update_open_stateid(state, NULL, &stateid, fmode)) 1056 goto out_return_state; 1057 } 1058 out: 1059 return ERR_PTR(ret); 1060 out_return_state: 1061 atomic_inc(&state->count); 1062 return state; 1063 } 1064 1065 static void 1066 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state) 1067 { 1068 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client; 1069 struct nfs_delegation *delegation; 1070 int delegation_flags = 0; 1071 1072 rcu_read_lock(); 1073 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 1074 if (delegation) 1075 delegation_flags = delegation->flags; 1076 rcu_read_unlock(); 1077 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) { 1078 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 1079 "returning a delegation for " 1080 "OPEN(CLAIM_DELEGATE_CUR)\n", 1081 clp->cl_hostname); 1082 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) 1083 nfs_inode_set_delegation(state->inode, 1084 data->owner->so_cred, 1085 &data->o_res); 1086 else 1087 nfs_inode_reclaim_delegation(state->inode, 1088 data->owner->so_cred, 1089 &data->o_res); 1090 } 1091 1092 /* 1093 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes 1094 * and update the nfs4_state. 1095 */ 1096 static struct nfs4_state * 1097 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) 1098 { 1099 struct inode *inode = data->state->inode; 1100 struct nfs4_state *state = data->state; 1101 int ret; 1102 1103 if (!data->rpc_done) { 1104 ret = data->rpc_status; 1105 goto err; 1106 } 1107 1108 ret = -ESTALE; 1109 if (!(data->f_attr.valid & NFS_ATTR_FATTR_TYPE) || 1110 !(data->f_attr.valid & NFS_ATTR_FATTR_FILEID) || 1111 !(data->f_attr.valid & NFS_ATTR_FATTR_CHANGE)) 1112 goto err; 1113 1114 ret = -ENOMEM; 1115 state = nfs4_get_open_state(inode, data->owner); 1116 if (state == NULL) 1117 goto err; 1118 1119 ret = nfs_refresh_inode(inode, &data->f_attr); 1120 if (ret) 1121 goto err; 1122 1123 if (data->o_res.delegation_type != 0) 1124 nfs4_opendata_check_deleg(data, state); 1125 update_open_stateid(state, &data->o_res.stateid, NULL, 1126 data->o_arg.fmode); 1127 1128 return state; 1129 err: 1130 return ERR_PTR(ret); 1131 1132 } 1133 1134 static struct nfs4_state * 1135 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1136 { 1137 struct inode *inode; 1138 struct nfs4_state *state = NULL; 1139 int ret; 1140 1141 if (!data->rpc_done) { 1142 state = nfs4_try_open_cached(data); 1143 goto out; 1144 } 1145 1146 ret = -EAGAIN; 1147 if (!(data->f_attr.valid & NFS_ATTR_FATTR)) 1148 goto err; 1149 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr); 1150 ret = PTR_ERR(inode); 1151 if (IS_ERR(inode)) 1152 goto err; 1153 ret = -ENOMEM; 1154 state = nfs4_get_open_state(inode, data->owner); 1155 if (state == NULL) 1156 goto err_put_inode; 1157 if (data->o_res.delegation_type != 0) 1158 nfs4_opendata_check_deleg(data, state); 1159 update_open_stateid(state, &data->o_res.stateid, NULL, 1160 data->o_arg.fmode); 1161 iput(inode); 1162 out: 1163 nfs_release_seqid(data->o_arg.seqid); 1164 return state; 1165 err_put_inode: 1166 iput(inode); 1167 err: 1168 return ERR_PTR(ret); 1169 } 1170 1171 static struct nfs4_state * 1172 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) 1173 { 1174 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) 1175 return _nfs4_opendata_reclaim_to_nfs4_state(data); 1176 return _nfs4_opendata_to_nfs4_state(data); 1177 } 1178 1179 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state) 1180 { 1181 struct nfs_inode *nfsi = NFS_I(state->inode); 1182 struct nfs_open_context *ctx; 1183 1184 spin_lock(&state->inode->i_lock); 1185 list_for_each_entry(ctx, &nfsi->open_files, list) { 1186 if (ctx->state != state) 1187 continue; 1188 get_nfs_open_context(ctx); 1189 spin_unlock(&state->inode->i_lock); 1190 return ctx; 1191 } 1192 spin_unlock(&state->inode->i_lock); 1193 return ERR_PTR(-ENOENT); 1194 } 1195 1196 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state) 1197 { 1198 struct nfs4_opendata *opendata; 1199 1200 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS); 1201 if (opendata == NULL) 1202 return ERR_PTR(-ENOMEM); 1203 opendata->state = state; 1204 atomic_inc(&state->count); 1205 return opendata; 1206 } 1207 1208 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res) 1209 { 1210 struct nfs4_state *newstate; 1211 int ret; 1212 1213 opendata->o_arg.open_flags = 0; 1214 opendata->o_arg.fmode = fmode; 1215 memset(&opendata->o_res, 0, sizeof(opendata->o_res)); 1216 memset(&opendata->c_res, 0, sizeof(opendata->c_res)); 1217 nfs4_init_opendata_res(opendata); 1218 ret = _nfs4_recover_proc_open(opendata); 1219 if (ret != 0) 1220 return ret; 1221 newstate = nfs4_opendata_to_nfs4_state(opendata); 1222 if (IS_ERR(newstate)) 1223 return PTR_ERR(newstate); 1224 nfs4_close_state(newstate, fmode); 1225 *res = newstate; 1226 return 0; 1227 } 1228 1229 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state) 1230 { 1231 struct nfs4_state *newstate; 1232 int ret; 1233 1234 /* memory barrier prior to reading state->n_* */ 1235 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1236 smp_rmb(); 1237 if (state->n_rdwr != 0) { 1238 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1239 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate); 1240 if (ret != 0) 1241 return ret; 1242 if (newstate != state) 1243 return -ESTALE; 1244 } 1245 if (state->n_wronly != 0) { 1246 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1247 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate); 1248 if (ret != 0) 1249 return ret; 1250 if (newstate != state) 1251 return -ESTALE; 1252 } 1253 if (state->n_rdonly != 0) { 1254 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1255 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate); 1256 if (ret != 0) 1257 return ret; 1258 if (newstate != state) 1259 return -ESTALE; 1260 } 1261 /* 1262 * We may have performed cached opens for all three recoveries. 1263 * Check if we need to update the current stateid. 1264 */ 1265 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 && 1266 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) { 1267 write_seqlock(&state->seqlock); 1268 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1269 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1270 write_sequnlock(&state->seqlock); 1271 } 1272 return 0; 1273 } 1274 1275 /* 1276 * OPEN_RECLAIM: 1277 * reclaim state on the server after a reboot. 1278 */ 1279 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 1280 { 1281 struct nfs_delegation *delegation; 1282 struct nfs4_opendata *opendata; 1283 fmode_t delegation_type = 0; 1284 int status; 1285 1286 opendata = nfs4_open_recoverdata_alloc(ctx, state); 1287 if (IS_ERR(opendata)) 1288 return PTR_ERR(opendata); 1289 opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS; 1290 opendata->o_arg.fh = NFS_FH(state->inode); 1291 rcu_read_lock(); 1292 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 1293 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) 1294 delegation_type = delegation->type; 1295 rcu_read_unlock(); 1296 opendata->o_arg.u.delegation_type = delegation_type; 1297 status = nfs4_open_recover(opendata, state); 1298 nfs4_opendata_put(opendata); 1299 return status; 1300 } 1301 1302 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) 1303 { 1304 struct nfs_server *server = NFS_SERVER(state->inode); 1305 struct nfs4_exception exception = { }; 1306 int err; 1307 do { 1308 err = _nfs4_do_open_reclaim(ctx, state); 1309 if (err != -NFS4ERR_DELAY) 1310 break; 1311 nfs4_handle_exception(server, err, &exception); 1312 } while (exception.retry); 1313 return err; 1314 } 1315 1316 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) 1317 { 1318 struct nfs_open_context *ctx; 1319 int ret; 1320 1321 ctx = nfs4_state_find_open_context(state); 1322 if (IS_ERR(ctx)) 1323 return PTR_ERR(ctx); 1324 ret = nfs4_do_open_reclaim(ctx, state); 1325 put_nfs_open_context(ctx); 1326 return ret; 1327 } 1328 1329 static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) 1330 { 1331 struct nfs4_opendata *opendata; 1332 int ret; 1333 1334 opendata = nfs4_open_recoverdata_alloc(ctx, state); 1335 if (IS_ERR(opendata)) 1336 return PTR_ERR(opendata); 1337 opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR; 1338 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 1339 ret = nfs4_open_recover(opendata, state); 1340 nfs4_opendata_put(opendata); 1341 return ret; 1342 } 1343 1344 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) 1345 { 1346 struct nfs4_exception exception = { }; 1347 struct nfs_server *server = NFS_SERVER(state->inode); 1348 int err; 1349 do { 1350 err = _nfs4_open_delegation_recall(ctx, state, stateid); 1351 switch (err) { 1352 case 0: 1353 case -ENOENT: 1354 case -ESTALE: 1355 goto out; 1356 case -NFS4ERR_BADSESSION: 1357 case -NFS4ERR_BADSLOT: 1358 case -NFS4ERR_BAD_HIGH_SLOT: 1359 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1360 case -NFS4ERR_DEADSESSION: 1361 set_bit(NFS_DELEGATED_STATE, &state->flags); 1362 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); 1363 err = -EAGAIN; 1364 goto out; 1365 case -NFS4ERR_STALE_CLIENTID: 1366 case -NFS4ERR_STALE_STATEID: 1367 set_bit(NFS_DELEGATED_STATE, &state->flags); 1368 case -NFS4ERR_EXPIRED: 1369 /* Don't recall a delegation if it was lost */ 1370 nfs4_schedule_lease_recovery(server->nfs_client); 1371 err = -EAGAIN; 1372 goto out; 1373 case -NFS4ERR_DELEG_REVOKED: 1374 case -NFS4ERR_ADMIN_REVOKED: 1375 case -NFS4ERR_BAD_STATEID: 1376 nfs_inode_find_state_and_recover(state->inode, 1377 stateid); 1378 nfs4_schedule_stateid_recovery(server, state); 1379 case -ENOMEM: 1380 err = 0; 1381 goto out; 1382 } 1383 set_bit(NFS_DELEGATED_STATE, &state->flags); 1384 err = nfs4_handle_exception(server, err, &exception); 1385 } while (exception.retry); 1386 out: 1387 return err; 1388 } 1389 1390 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) 1391 { 1392 struct nfs4_opendata *data = calldata; 1393 1394 data->rpc_status = task->tk_status; 1395 if (data->rpc_status == 0) { 1396 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid); 1397 nfs_confirm_seqid(&data->owner->so_seqid, 0); 1398 renew_lease(data->o_res.server, data->timestamp); 1399 data->rpc_done = 1; 1400 } 1401 } 1402 1403 static void nfs4_open_confirm_release(void *calldata) 1404 { 1405 struct nfs4_opendata *data = calldata; 1406 struct nfs4_state *state = NULL; 1407 1408 /* If this request hasn't been cancelled, do nothing */ 1409 if (data->cancelled == 0) 1410 goto out_free; 1411 /* In case of error, no cleanup! */ 1412 if (!data->rpc_done) 1413 goto out_free; 1414 state = nfs4_opendata_to_nfs4_state(data); 1415 if (!IS_ERR(state)) 1416 nfs4_close_state(state, data->o_arg.fmode); 1417 out_free: 1418 nfs4_opendata_put(data); 1419 } 1420 1421 static const struct rpc_call_ops nfs4_open_confirm_ops = { 1422 .rpc_call_done = nfs4_open_confirm_done, 1423 .rpc_release = nfs4_open_confirm_release, 1424 }; 1425 1426 /* 1427 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata 1428 */ 1429 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) 1430 { 1431 struct nfs_server *server = NFS_SERVER(data->dir->d_inode); 1432 struct rpc_task *task; 1433 struct rpc_message msg = { 1434 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM], 1435 .rpc_argp = &data->c_arg, 1436 .rpc_resp = &data->c_res, 1437 .rpc_cred = data->owner->so_cred, 1438 }; 1439 struct rpc_task_setup task_setup_data = { 1440 .rpc_client = server->client, 1441 .rpc_message = &msg, 1442 .callback_ops = &nfs4_open_confirm_ops, 1443 .callback_data = data, 1444 .workqueue = nfsiod_workqueue, 1445 .flags = RPC_TASK_ASYNC, 1446 }; 1447 int status; 1448 1449 kref_get(&data->kref); 1450 data->rpc_done = 0; 1451 data->rpc_status = 0; 1452 data->timestamp = jiffies; 1453 task = rpc_run_task(&task_setup_data); 1454 if (IS_ERR(task)) 1455 return PTR_ERR(task); 1456 status = nfs4_wait_for_completion_rpc_task(task); 1457 if (status != 0) { 1458 data->cancelled = 1; 1459 smp_wmb(); 1460 } else 1461 status = data->rpc_status; 1462 rpc_put_task(task); 1463 return status; 1464 } 1465 1466 static void nfs4_open_prepare(struct rpc_task *task, void *calldata) 1467 { 1468 struct nfs4_opendata *data = calldata; 1469 struct nfs4_state_owner *sp = data->owner; 1470 1471 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0) 1472 goto out_wait; 1473 /* 1474 * Check if we still need to send an OPEN call, or if we can use 1475 * a delegation instead. 1476 */ 1477 if (data->state != NULL) { 1478 struct nfs_delegation *delegation; 1479 1480 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) 1481 goto out_no_action; 1482 rcu_read_lock(); 1483 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); 1484 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR && 1485 can_open_delegated(delegation, data->o_arg.fmode)) 1486 goto unlock_no_action; 1487 rcu_read_unlock(); 1488 } 1489 /* Update client id. */ 1490 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid; 1491 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) { 1492 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; 1493 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; 1494 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh); 1495 } 1496 data->timestamp = jiffies; 1497 if (nfs4_setup_sequence(data->o_arg.server, 1498 &data->o_arg.seq_args, 1499 &data->o_res.seq_res, 1500 task) != 0) 1501 nfs_release_seqid(data->o_arg.seqid); 1502 return; 1503 unlock_no_action: 1504 rcu_read_unlock(); 1505 out_no_action: 1506 task->tk_action = NULL; 1507 out_wait: 1508 nfs4_sequence_done(task, &data->o_res.seq_res); 1509 } 1510 1511 static void nfs4_open_done(struct rpc_task *task, void *calldata) 1512 { 1513 struct nfs4_opendata *data = calldata; 1514 1515 data->rpc_status = task->tk_status; 1516 1517 if (!nfs4_sequence_done(task, &data->o_res.seq_res)) 1518 return; 1519 1520 if (task->tk_status == 0) { 1521 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) { 1522 switch (data->o_res.f_attr->mode & S_IFMT) { 1523 case S_IFREG: 1524 break; 1525 case S_IFLNK: 1526 data->rpc_status = -ELOOP; 1527 break; 1528 case S_IFDIR: 1529 data->rpc_status = -EISDIR; 1530 break; 1531 default: 1532 data->rpc_status = -ENOTDIR; 1533 } 1534 } 1535 renew_lease(data->o_res.server, data->timestamp); 1536 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)) 1537 nfs_confirm_seqid(&data->owner->so_seqid, 0); 1538 } 1539 data->rpc_done = 1; 1540 } 1541 1542 static void nfs4_open_release(void *calldata) 1543 { 1544 struct nfs4_opendata *data = calldata; 1545 struct nfs4_state *state = NULL; 1546 1547 /* If this request hasn't been cancelled, do nothing */ 1548 if (data->cancelled == 0) 1549 goto out_free; 1550 /* In case of error, no cleanup! */ 1551 if (data->rpc_status != 0 || !data->rpc_done) 1552 goto out_free; 1553 /* In case we need an open_confirm, no cleanup! */ 1554 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) 1555 goto out_free; 1556 state = nfs4_opendata_to_nfs4_state(data); 1557 if (!IS_ERR(state)) 1558 nfs4_close_state(state, data->o_arg.fmode); 1559 out_free: 1560 nfs4_opendata_put(data); 1561 } 1562 1563 static const struct rpc_call_ops nfs4_open_ops = { 1564 .rpc_call_prepare = nfs4_open_prepare, 1565 .rpc_call_done = nfs4_open_done, 1566 .rpc_release = nfs4_open_release, 1567 }; 1568 1569 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover) 1570 { 1571 struct inode *dir = data->dir->d_inode; 1572 struct nfs_server *server = NFS_SERVER(dir); 1573 struct nfs_openargs *o_arg = &data->o_arg; 1574 struct nfs_openres *o_res = &data->o_res; 1575 struct rpc_task *task; 1576 struct rpc_message msg = { 1577 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN], 1578 .rpc_argp = o_arg, 1579 .rpc_resp = o_res, 1580 .rpc_cred = data->owner->so_cred, 1581 }; 1582 struct rpc_task_setup task_setup_data = { 1583 .rpc_client = server->client, 1584 .rpc_message = &msg, 1585 .callback_ops = &nfs4_open_ops, 1586 .callback_data = data, 1587 .workqueue = nfsiod_workqueue, 1588 .flags = RPC_TASK_ASYNC, 1589 }; 1590 int status; 1591 1592 nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1); 1593 kref_get(&data->kref); 1594 data->rpc_done = 0; 1595 data->rpc_status = 0; 1596 data->cancelled = 0; 1597 if (isrecover) 1598 nfs4_set_sequence_privileged(&o_arg->seq_args); 1599 task = rpc_run_task(&task_setup_data); 1600 if (IS_ERR(task)) 1601 return PTR_ERR(task); 1602 status = nfs4_wait_for_completion_rpc_task(task); 1603 if (status != 0) { 1604 data->cancelled = 1; 1605 smp_wmb(); 1606 } else 1607 status = data->rpc_status; 1608 rpc_put_task(task); 1609 1610 return status; 1611 } 1612 1613 static int _nfs4_recover_proc_open(struct nfs4_opendata *data) 1614 { 1615 struct inode *dir = data->dir->d_inode; 1616 struct nfs_openres *o_res = &data->o_res; 1617 int status; 1618 1619 status = nfs4_run_open_task(data, 1); 1620 if (status != 0 || !data->rpc_done) 1621 return status; 1622 1623 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 1624 1625 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1626 status = _nfs4_proc_open_confirm(data); 1627 if (status != 0) 1628 return status; 1629 } 1630 1631 return status; 1632 } 1633 1634 static int nfs4_opendata_access(struct rpc_cred *cred, 1635 struct nfs4_opendata *opendata, 1636 struct nfs4_state *state, fmode_t fmode, 1637 int openflags) 1638 { 1639 struct nfs_access_entry cache; 1640 u32 mask; 1641 1642 /* access call failed or for some reason the server doesn't 1643 * support any access modes -- defer access call until later */ 1644 if (opendata->o_res.access_supported == 0) 1645 return 0; 1646 1647 mask = 0; 1648 /* don't check MAY_WRITE - a newly created file may not have 1649 * write mode bits, but POSIX allows the creating process to write. 1650 * use openflags to check for exec, because fmode won't 1651 * always have FMODE_EXEC set when file open for exec. */ 1652 if (openflags & __FMODE_EXEC) { 1653 /* ONLY check for exec rights */ 1654 mask = MAY_EXEC; 1655 } else if (fmode & FMODE_READ) 1656 mask = MAY_READ; 1657 1658 cache.cred = cred; 1659 cache.jiffies = jiffies; 1660 nfs_access_set_mask(&cache, opendata->o_res.access_result); 1661 nfs_access_add_cache(state->inode, &cache); 1662 1663 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) 1664 return 0; 1665 1666 /* even though OPEN succeeded, access is denied. Close the file */ 1667 nfs4_close_state(state, fmode); 1668 return -EACCES; 1669 } 1670 1671 /* 1672 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata 1673 */ 1674 static int _nfs4_proc_open(struct nfs4_opendata *data) 1675 { 1676 struct inode *dir = data->dir->d_inode; 1677 struct nfs_server *server = NFS_SERVER(dir); 1678 struct nfs_openargs *o_arg = &data->o_arg; 1679 struct nfs_openres *o_res = &data->o_res; 1680 int status; 1681 1682 status = nfs4_run_open_task(data, 0); 1683 if (!data->rpc_done) 1684 return status; 1685 if (status != 0) { 1686 if (status == -NFS4ERR_BADNAME && 1687 !(o_arg->open_flags & O_CREAT)) 1688 return -ENOENT; 1689 return status; 1690 } 1691 1692 nfs_fattr_map_and_free_names(server, &data->f_attr); 1693 1694 if (o_arg->open_flags & O_CREAT) 1695 update_changeattr(dir, &o_res->cinfo); 1696 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 1697 server->caps &= ~NFS_CAP_POSIX_LOCK; 1698 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1699 status = _nfs4_proc_open_confirm(data); 1700 if (status != 0) 1701 return status; 1702 } 1703 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) 1704 _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr); 1705 return 0; 1706 } 1707 1708 static int nfs4_recover_expired_lease(struct nfs_server *server) 1709 { 1710 return nfs4_client_recover_expired_lease(server->nfs_client); 1711 } 1712 1713 /* 1714 * OPEN_EXPIRED: 1715 * reclaim state on the server after a network partition. 1716 * Assumes caller holds the appropriate lock 1717 */ 1718 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 1719 { 1720 struct nfs4_opendata *opendata; 1721 int ret; 1722 1723 opendata = nfs4_open_recoverdata_alloc(ctx, state); 1724 if (IS_ERR(opendata)) 1725 return PTR_ERR(opendata); 1726 ret = nfs4_open_recover(opendata, state); 1727 if (ret == -ESTALE) 1728 d_drop(ctx->dentry); 1729 nfs4_opendata_put(opendata); 1730 return ret; 1731 } 1732 1733 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) 1734 { 1735 struct nfs_server *server = NFS_SERVER(state->inode); 1736 struct nfs4_exception exception = { }; 1737 int err; 1738 1739 do { 1740 err = _nfs4_open_expired(ctx, state); 1741 switch (err) { 1742 default: 1743 goto out; 1744 case -NFS4ERR_GRACE: 1745 case -NFS4ERR_DELAY: 1746 nfs4_handle_exception(server, err, &exception); 1747 err = 0; 1748 } 1749 } while (exception.retry); 1750 out: 1751 return err; 1752 } 1753 1754 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 1755 { 1756 struct nfs_open_context *ctx; 1757 int ret; 1758 1759 ctx = nfs4_state_find_open_context(state); 1760 if (IS_ERR(ctx)) 1761 return PTR_ERR(ctx); 1762 ret = nfs4_do_open_expired(ctx, state); 1763 put_nfs_open_context(ctx); 1764 return ret; 1765 } 1766 1767 #if defined(CONFIG_NFS_V4_1) 1768 static void nfs41_clear_delegation_stateid(struct nfs4_state *state) 1769 { 1770 struct nfs_server *server = NFS_SERVER(state->inode); 1771 nfs4_stateid *stateid = &state->stateid; 1772 int status; 1773 1774 /* If a state reset has been done, test_stateid is unneeded */ 1775 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) 1776 return; 1777 1778 status = nfs41_test_stateid(server, stateid); 1779 if (status != NFS_OK) { 1780 /* Free the stateid unless the server explicitly 1781 * informs us the stateid is unrecognized. */ 1782 if (status != -NFS4ERR_BAD_STATEID) 1783 nfs41_free_stateid(server, stateid); 1784 nfs_remove_bad_delegation(state->inode); 1785 1786 write_seqlock(&state->seqlock); 1787 nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1788 write_sequnlock(&state->seqlock); 1789 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1790 } 1791 } 1792 1793 /** 1794 * nfs41_check_open_stateid - possibly free an open stateid 1795 * 1796 * @state: NFSv4 state for an inode 1797 * 1798 * Returns NFS_OK if recovery for this stateid is now finished. 1799 * Otherwise a negative NFS4ERR value is returned. 1800 */ 1801 static int nfs41_check_open_stateid(struct nfs4_state *state) 1802 { 1803 struct nfs_server *server = NFS_SERVER(state->inode); 1804 nfs4_stateid *stateid = &state->open_stateid; 1805 int status; 1806 1807 /* If a state reset has been done, test_stateid is unneeded */ 1808 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) && 1809 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) && 1810 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0)) 1811 return -NFS4ERR_BAD_STATEID; 1812 1813 status = nfs41_test_stateid(server, stateid); 1814 if (status != NFS_OK) { 1815 /* Free the stateid unless the server explicitly 1816 * informs us the stateid is unrecognized. */ 1817 if (status != -NFS4ERR_BAD_STATEID) 1818 nfs41_free_stateid(server, stateid); 1819 1820 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 1821 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 1822 clear_bit(NFS_O_RDWR_STATE, &state->flags); 1823 } 1824 return status; 1825 } 1826 1827 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) 1828 { 1829 int status; 1830 1831 nfs41_clear_delegation_stateid(state); 1832 status = nfs41_check_open_stateid(state); 1833 if (status != NFS_OK) 1834 status = nfs4_open_expired(sp, state); 1835 return status; 1836 } 1837 #endif 1838 1839 /* 1840 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-* 1841 * fields corresponding to attributes that were used to store the verifier. 1842 * Make sure we clobber those fields in the later setattr call 1843 */ 1844 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr) 1845 { 1846 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) && 1847 !(sattr->ia_valid & ATTR_ATIME_SET)) 1848 sattr->ia_valid |= ATTR_ATIME; 1849 1850 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) && 1851 !(sattr->ia_valid & ATTR_MTIME_SET)) 1852 sattr->ia_valid |= ATTR_MTIME; 1853 } 1854 1855 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, 1856 fmode_t fmode, 1857 int flags, 1858 struct nfs4_state **res) 1859 { 1860 struct nfs4_state_owner *sp = opendata->owner; 1861 struct nfs_server *server = sp->so_server; 1862 struct nfs4_state *state; 1863 unsigned int seq; 1864 int ret; 1865 1866 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 1867 1868 ret = _nfs4_proc_open(opendata); 1869 if (ret != 0) 1870 goto out; 1871 1872 state = nfs4_opendata_to_nfs4_state(opendata); 1873 ret = PTR_ERR(state); 1874 if (IS_ERR(state)) 1875 goto out; 1876 if (server->caps & NFS_CAP_POSIX_LOCK) 1877 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags); 1878 1879 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags); 1880 if (ret != 0) 1881 goto out; 1882 1883 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) { 1884 nfs4_schedule_stateid_recovery(server, state); 1885 nfs4_wait_clnt_recover(server->nfs_client); 1886 } 1887 *res = state; 1888 out: 1889 return ret; 1890 } 1891 1892 /* 1893 * Returns a referenced nfs4_state 1894 */ 1895 static int _nfs4_do_open(struct inode *dir, 1896 struct dentry *dentry, 1897 fmode_t fmode, 1898 int flags, 1899 struct iattr *sattr, 1900 struct rpc_cred *cred, 1901 struct nfs4_state **res, 1902 struct nfs4_threshold **ctx_th) 1903 { 1904 struct nfs4_state_owner *sp; 1905 struct nfs4_state *state = NULL; 1906 struct nfs_server *server = NFS_SERVER(dir); 1907 struct nfs4_opendata *opendata; 1908 int status; 1909 1910 /* Protect against reboot recovery conflicts */ 1911 status = -ENOMEM; 1912 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL); 1913 if (sp == NULL) { 1914 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n"); 1915 goto out_err; 1916 } 1917 status = nfs4_recover_expired_lease(server); 1918 if (status != 0) 1919 goto err_put_state_owner; 1920 if (dentry->d_inode != NULL) 1921 nfs4_return_incompatible_delegation(dentry->d_inode, fmode); 1922 status = -ENOMEM; 1923 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL); 1924 if (opendata == NULL) 1925 goto err_put_state_owner; 1926 1927 if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) { 1928 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 1929 if (!opendata->f_attr.mdsthreshold) 1930 goto err_opendata_put; 1931 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0]; 1932 } 1933 if (dentry->d_inode != NULL) 1934 opendata->state = nfs4_get_open_state(dentry->d_inode, sp); 1935 1936 status = _nfs4_open_and_get_state(opendata, fmode, flags, &state); 1937 if (status != 0) 1938 goto err_opendata_put; 1939 1940 if (opendata->o_arg.open_flags & O_EXCL) { 1941 nfs4_exclusive_attrset(opendata, sattr); 1942 1943 nfs_fattr_init(opendata->o_res.f_attr); 1944 status = nfs4_do_setattr(state->inode, cred, 1945 opendata->o_res.f_attr, sattr, 1946 state); 1947 if (status == 0) 1948 nfs_setattr_update_inode(state->inode, sattr); 1949 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr); 1950 } 1951 1952 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) 1953 *ctx_th = opendata->f_attr.mdsthreshold; 1954 else 1955 kfree(opendata->f_attr.mdsthreshold); 1956 opendata->f_attr.mdsthreshold = NULL; 1957 1958 nfs4_opendata_put(opendata); 1959 nfs4_put_state_owner(sp); 1960 *res = state; 1961 return 0; 1962 err_opendata_put: 1963 kfree(opendata->f_attr.mdsthreshold); 1964 nfs4_opendata_put(opendata); 1965 err_put_state_owner: 1966 nfs4_put_state_owner(sp); 1967 out_err: 1968 *res = NULL; 1969 return status; 1970 } 1971 1972 1973 static struct nfs4_state *nfs4_do_open(struct inode *dir, 1974 struct dentry *dentry, 1975 fmode_t fmode, 1976 int flags, 1977 struct iattr *sattr, 1978 struct rpc_cred *cred, 1979 struct nfs4_threshold **ctx_th) 1980 { 1981 struct nfs4_exception exception = { }; 1982 struct nfs4_state *res; 1983 int status; 1984 1985 fmode &= FMODE_READ|FMODE_WRITE|FMODE_EXEC; 1986 do { 1987 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, 1988 &res, ctx_th); 1989 if (status == 0) 1990 break; 1991 /* NOTE: BAD_SEQID means the server and client disagree about the 1992 * book-keeping w.r.t. state-changing operations 1993 * (OPEN/CLOSE/LOCK/LOCKU...) 1994 * It is actually a sign of a bug on the client or on the server. 1995 * 1996 * If we receive a BAD_SEQID error in the particular case of 1997 * doing an OPEN, we assume that nfs_increment_open_seqid() will 1998 * have unhashed the old state_owner for us, and that we can 1999 * therefore safely retry using a new one. We should still warn 2000 * the user though... 2001 */ 2002 if (status == -NFS4ERR_BAD_SEQID) { 2003 pr_warn_ratelimited("NFS: v4 server %s " 2004 " returned a bad sequence-id error!\n", 2005 NFS_SERVER(dir)->nfs_client->cl_hostname); 2006 exception.retry = 1; 2007 continue; 2008 } 2009 /* 2010 * BAD_STATEID on OPEN means that the server cancelled our 2011 * state before it received the OPEN_CONFIRM. 2012 * Recover by retrying the request as per the discussion 2013 * on Page 181 of RFC3530. 2014 */ 2015 if (status == -NFS4ERR_BAD_STATEID) { 2016 exception.retry = 1; 2017 continue; 2018 } 2019 if (status == -EAGAIN) { 2020 /* We must have found a delegation */ 2021 exception.retry = 1; 2022 continue; 2023 } 2024 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), 2025 status, &exception)); 2026 } while (exception.retry); 2027 return res; 2028 } 2029 2030 static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2031 struct nfs_fattr *fattr, struct iattr *sattr, 2032 struct nfs4_state *state) 2033 { 2034 struct nfs_server *server = NFS_SERVER(inode); 2035 struct nfs_setattrargs arg = { 2036 .fh = NFS_FH(inode), 2037 .iap = sattr, 2038 .server = server, 2039 .bitmask = server->attr_bitmask, 2040 }; 2041 struct nfs_setattrres res = { 2042 .fattr = fattr, 2043 .server = server, 2044 }; 2045 struct rpc_message msg = { 2046 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR], 2047 .rpc_argp = &arg, 2048 .rpc_resp = &res, 2049 .rpc_cred = cred, 2050 }; 2051 unsigned long timestamp = jiffies; 2052 int status; 2053 2054 nfs_fattr_init(fattr); 2055 2056 if (state != NULL) { 2057 struct nfs_lockowner lockowner = { 2058 .l_owner = current->files, 2059 .l_pid = current->tgid, 2060 }; 2061 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, 2062 &lockowner); 2063 } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode, 2064 FMODE_WRITE)) { 2065 /* Use that stateid */ 2066 } else 2067 nfs4_stateid_copy(&arg.stateid, &zero_stateid); 2068 2069 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2070 if (status == 0 && state != NULL) 2071 renew_lease(server, timestamp); 2072 return status; 2073 } 2074 2075 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 2076 struct nfs_fattr *fattr, struct iattr *sattr, 2077 struct nfs4_state *state) 2078 { 2079 struct nfs_server *server = NFS_SERVER(inode); 2080 struct nfs4_exception exception = { 2081 .state = state, 2082 .inode = inode, 2083 }; 2084 int err; 2085 do { 2086 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state); 2087 switch (err) { 2088 case -NFS4ERR_OPENMODE: 2089 if (state && !(state->state & FMODE_WRITE)) { 2090 err = -EBADF; 2091 if (sattr->ia_valid & ATTR_OPEN) 2092 err = -EACCES; 2093 goto out; 2094 } 2095 } 2096 err = nfs4_handle_exception(server, err, &exception); 2097 } while (exception.retry); 2098 out: 2099 return err; 2100 } 2101 2102 struct nfs4_closedata { 2103 struct inode *inode; 2104 struct nfs4_state *state; 2105 struct nfs_closeargs arg; 2106 struct nfs_closeres res; 2107 struct nfs_fattr fattr; 2108 unsigned long timestamp; 2109 bool roc; 2110 u32 roc_barrier; 2111 }; 2112 2113 static void nfs4_free_closedata(void *data) 2114 { 2115 struct nfs4_closedata *calldata = data; 2116 struct nfs4_state_owner *sp = calldata->state->owner; 2117 struct super_block *sb = calldata->state->inode->i_sb; 2118 2119 if (calldata->roc) 2120 pnfs_roc_release(calldata->state->inode); 2121 nfs4_put_open_state(calldata->state); 2122 nfs_free_seqid(calldata->arg.seqid); 2123 nfs4_put_state_owner(sp); 2124 nfs_sb_deactive(sb); 2125 kfree(calldata); 2126 } 2127 2128 static void nfs4_close_clear_stateid_flags(struct nfs4_state *state, 2129 fmode_t fmode) 2130 { 2131 spin_lock(&state->owner->so_lock); 2132 if (!(fmode & FMODE_READ)) 2133 clear_bit(NFS_O_RDONLY_STATE, &state->flags); 2134 if (!(fmode & FMODE_WRITE)) 2135 clear_bit(NFS_O_WRONLY_STATE, &state->flags); 2136 clear_bit(NFS_O_RDWR_STATE, &state->flags); 2137 spin_unlock(&state->owner->so_lock); 2138 } 2139 2140 static void nfs4_close_done(struct rpc_task *task, void *data) 2141 { 2142 struct nfs4_closedata *calldata = data; 2143 struct nfs4_state *state = calldata->state; 2144 struct nfs_server *server = NFS_SERVER(calldata->inode); 2145 2146 dprintk("%s: begin!\n", __func__); 2147 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 2148 return; 2149 /* hmm. we are done with the inode, and in the process of freeing 2150 * the state_owner. we keep this around to process errors 2151 */ 2152 switch (task->tk_status) { 2153 case 0: 2154 if (calldata->roc) 2155 pnfs_roc_set_barrier(state->inode, 2156 calldata->roc_barrier); 2157 nfs_set_open_stateid(state, &calldata->res.stateid, 0); 2158 renew_lease(server, calldata->timestamp); 2159 nfs4_close_clear_stateid_flags(state, 2160 calldata->arg.fmode); 2161 break; 2162 case -NFS4ERR_STALE_STATEID: 2163 case -NFS4ERR_OLD_STATEID: 2164 case -NFS4ERR_BAD_STATEID: 2165 case -NFS4ERR_EXPIRED: 2166 if (calldata->arg.fmode == 0) 2167 break; 2168 default: 2169 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) 2170 rpc_restart_call_prepare(task); 2171 } 2172 nfs_release_seqid(calldata->arg.seqid); 2173 nfs_refresh_inode(calldata->inode, calldata->res.fattr); 2174 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status); 2175 } 2176 2177 static void nfs4_close_prepare(struct rpc_task *task, void *data) 2178 { 2179 struct nfs4_closedata *calldata = data; 2180 struct nfs4_state *state = calldata->state; 2181 struct inode *inode = calldata->inode; 2182 int call_close = 0; 2183 2184 dprintk("%s: begin!\n", __func__); 2185 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 2186 goto out_wait; 2187 2188 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; 2189 calldata->arg.fmode = FMODE_READ|FMODE_WRITE; 2190 spin_lock(&state->owner->so_lock); 2191 /* Calculate the change in open mode */ 2192 if (state->n_rdwr == 0) { 2193 if (state->n_rdonly == 0) { 2194 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); 2195 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2196 calldata->arg.fmode &= ~FMODE_READ; 2197 } 2198 if (state->n_wronly == 0) { 2199 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); 2200 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); 2201 calldata->arg.fmode &= ~FMODE_WRITE; 2202 } 2203 } 2204 spin_unlock(&state->owner->so_lock); 2205 2206 if (!call_close) { 2207 /* Note: exit _without_ calling nfs4_close_done */ 2208 goto out_no_action; 2209 } 2210 2211 if (calldata->arg.fmode == 0) { 2212 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; 2213 if (calldata->roc && 2214 pnfs_roc_drain(inode, &calldata->roc_barrier, task)) 2215 goto out_wait; 2216 } 2217 2218 nfs_fattr_init(calldata->res.fattr); 2219 calldata->timestamp = jiffies; 2220 if (nfs4_setup_sequence(NFS_SERVER(inode), 2221 &calldata->arg.seq_args, 2222 &calldata->res.seq_res, 2223 task) != 0) 2224 nfs_release_seqid(calldata->arg.seqid); 2225 dprintk("%s: done!\n", __func__); 2226 return; 2227 out_no_action: 2228 task->tk_action = NULL; 2229 out_wait: 2230 nfs4_sequence_done(task, &calldata->res.seq_res); 2231 } 2232 2233 static const struct rpc_call_ops nfs4_close_ops = { 2234 .rpc_call_prepare = nfs4_close_prepare, 2235 .rpc_call_done = nfs4_close_done, 2236 .rpc_release = nfs4_free_closedata, 2237 }; 2238 2239 /* 2240 * It is possible for data to be read/written from a mem-mapped file 2241 * after the sys_close call (which hits the vfs layer as a flush). 2242 * This means that we can't safely call nfsv4 close on a file until 2243 * the inode is cleared. This in turn means that we are not good 2244 * NFSv4 citizens - we do not indicate to the server to update the file's 2245 * share state even when we are done with one of the three share 2246 * stateid's in the inode. 2247 * 2248 * NOTE: Caller must be holding the sp->so_owner semaphore! 2249 */ 2250 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) 2251 { 2252 struct nfs_server *server = NFS_SERVER(state->inode); 2253 struct nfs4_closedata *calldata; 2254 struct nfs4_state_owner *sp = state->owner; 2255 struct rpc_task *task; 2256 struct rpc_message msg = { 2257 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], 2258 .rpc_cred = state->owner->so_cred, 2259 }; 2260 struct rpc_task_setup task_setup_data = { 2261 .rpc_client = server->client, 2262 .rpc_message = &msg, 2263 .callback_ops = &nfs4_close_ops, 2264 .workqueue = nfsiod_workqueue, 2265 .flags = RPC_TASK_ASYNC, 2266 }; 2267 int status = -ENOMEM; 2268 2269 calldata = kzalloc(sizeof(*calldata), gfp_mask); 2270 if (calldata == NULL) 2271 goto out; 2272 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1); 2273 calldata->inode = state->inode; 2274 calldata->state = state; 2275 calldata->arg.fh = NFS_FH(state->inode); 2276 calldata->arg.stateid = &state->open_stateid; 2277 /* Serialization for the sequence id */ 2278 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask); 2279 if (calldata->arg.seqid == NULL) 2280 goto out_free_calldata; 2281 calldata->arg.fmode = 0; 2282 calldata->arg.bitmask = server->cache_consistency_bitmask; 2283 calldata->res.fattr = &calldata->fattr; 2284 calldata->res.seqid = calldata->arg.seqid; 2285 calldata->res.server = server; 2286 calldata->roc = pnfs_roc(state->inode); 2287 nfs_sb_active(calldata->inode->i_sb); 2288 2289 msg.rpc_argp = &calldata->arg; 2290 msg.rpc_resp = &calldata->res; 2291 task_setup_data.callback_data = calldata; 2292 task = rpc_run_task(&task_setup_data); 2293 if (IS_ERR(task)) 2294 return PTR_ERR(task); 2295 status = 0; 2296 if (wait) 2297 status = rpc_wait_for_completion_task(task); 2298 rpc_put_task(task); 2299 return status; 2300 out_free_calldata: 2301 kfree(calldata); 2302 out: 2303 nfs4_put_open_state(state); 2304 nfs4_put_state_owner(sp); 2305 return status; 2306 } 2307 2308 static struct inode * 2309 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr) 2310 { 2311 struct nfs4_state *state; 2312 2313 /* Protect against concurrent sillydeletes */ 2314 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, 2315 ctx->cred, &ctx->mdsthreshold); 2316 if (IS_ERR(state)) 2317 return ERR_CAST(state); 2318 ctx->state = state; 2319 return igrab(state->inode); 2320 } 2321 2322 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) 2323 { 2324 if (ctx->state == NULL) 2325 return; 2326 if (is_sync) 2327 nfs4_close_sync(ctx->state, ctx->mode); 2328 else 2329 nfs4_close_state(ctx->state, ctx->mode); 2330 } 2331 2332 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2333 { 2334 struct nfs4_server_caps_arg args = { 2335 .fhandle = fhandle, 2336 }; 2337 struct nfs4_server_caps_res res = {}; 2338 struct rpc_message msg = { 2339 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS], 2340 .rpc_argp = &args, 2341 .rpc_resp = &res, 2342 }; 2343 int status; 2344 2345 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2346 if (status == 0) { 2347 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask)); 2348 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS| 2349 NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 2350 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER| 2351 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME| 2352 NFS_CAP_CTIME|NFS_CAP_MTIME); 2353 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL) 2354 server->caps |= NFS_CAP_ACLS; 2355 if (res.has_links != 0) 2356 server->caps |= NFS_CAP_HARDLINKS; 2357 if (res.has_symlinks != 0) 2358 server->caps |= NFS_CAP_SYMLINKS; 2359 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID) 2360 server->caps |= NFS_CAP_FILEID; 2361 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE) 2362 server->caps |= NFS_CAP_MODE; 2363 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS) 2364 server->caps |= NFS_CAP_NLINK; 2365 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER) 2366 server->caps |= NFS_CAP_OWNER; 2367 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP) 2368 server->caps |= NFS_CAP_OWNER_GROUP; 2369 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS) 2370 server->caps |= NFS_CAP_ATIME; 2371 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA) 2372 server->caps |= NFS_CAP_CTIME; 2373 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY) 2374 server->caps |= NFS_CAP_MTIME; 2375 2376 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask)); 2377 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; 2378 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; 2379 server->acl_bitmask = res.acl_bitmask; 2380 server->fh_expire_type = res.fh_expire_type; 2381 } 2382 2383 return status; 2384 } 2385 2386 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) 2387 { 2388 struct nfs4_exception exception = { }; 2389 int err; 2390 do { 2391 err = nfs4_handle_exception(server, 2392 _nfs4_server_capabilities(server, fhandle), 2393 &exception); 2394 } while (exception.retry); 2395 return err; 2396 } 2397 2398 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2399 struct nfs_fsinfo *info) 2400 { 2401 struct nfs4_lookup_root_arg args = { 2402 .bitmask = nfs4_fattr_bitmap, 2403 }; 2404 struct nfs4_lookup_res res = { 2405 .server = server, 2406 .fattr = info->fattr, 2407 .fh = fhandle, 2408 }; 2409 struct rpc_message msg = { 2410 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT], 2411 .rpc_argp = &args, 2412 .rpc_resp = &res, 2413 }; 2414 2415 nfs_fattr_init(info->fattr); 2416 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2417 } 2418 2419 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, 2420 struct nfs_fsinfo *info) 2421 { 2422 struct nfs4_exception exception = { }; 2423 int err; 2424 do { 2425 err = _nfs4_lookup_root(server, fhandle, info); 2426 switch (err) { 2427 case 0: 2428 case -NFS4ERR_WRONGSEC: 2429 goto out; 2430 default: 2431 err = nfs4_handle_exception(server, err, &exception); 2432 } 2433 } while (exception.retry); 2434 out: 2435 return err; 2436 } 2437 2438 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 2439 struct nfs_fsinfo *info, rpc_authflavor_t flavor) 2440 { 2441 struct rpc_auth *auth; 2442 int ret; 2443 2444 auth = rpcauth_create(flavor, server->client); 2445 if (IS_ERR(auth)) { 2446 ret = -EIO; 2447 goto out; 2448 } 2449 ret = nfs4_lookup_root(server, fhandle, info); 2450 out: 2451 return ret; 2452 } 2453 2454 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 2455 struct nfs_fsinfo *info) 2456 { 2457 int i, len, status = 0; 2458 rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS]; 2459 2460 len = rpcauth_list_flavors(flav_array, ARRAY_SIZE(flav_array)); 2461 if (len < 0) 2462 return len; 2463 2464 for (i = 0; i < len; i++) { 2465 /* AUTH_UNIX is the default flavor if none was specified, 2466 * thus has already been tried. */ 2467 if (flav_array[i] == RPC_AUTH_UNIX) 2468 continue; 2469 2470 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]); 2471 if (status == -NFS4ERR_WRONGSEC || status == -EACCES) 2472 continue; 2473 break; 2474 } 2475 /* 2476 * -EACCESS could mean that the user doesn't have correct permissions 2477 * to access the mount. It could also mean that we tried to mount 2478 * with a gss auth flavor, but rpc.gssd isn't running. Either way, 2479 * existing mount programs don't handle -EACCES very well so it should 2480 * be mapped to -EPERM instead. 2481 */ 2482 if (status == -EACCES) 2483 status = -EPERM; 2484 return status; 2485 } 2486 2487 /* 2488 * get the file handle for the "/" directory on the server 2489 */ 2490 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle, 2491 struct nfs_fsinfo *info) 2492 { 2493 int minor_version = server->nfs_client->cl_minorversion; 2494 int status = nfs4_lookup_root(server, fhandle, info); 2495 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR)) 2496 /* 2497 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM 2498 * by nfs4_map_errors() as this function exits. 2499 */ 2500 status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info); 2501 if (status == 0) 2502 status = nfs4_server_capabilities(server, fhandle); 2503 if (status == 0) 2504 status = nfs4_do_fsinfo(server, fhandle, info); 2505 return nfs4_map_errors(status); 2506 } 2507 2508 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh, 2509 struct nfs_fsinfo *info) 2510 { 2511 int error; 2512 struct nfs_fattr *fattr = info->fattr; 2513 2514 error = nfs4_server_capabilities(server, mntfh); 2515 if (error < 0) { 2516 dprintk("nfs4_get_root: getcaps error = %d\n", -error); 2517 return error; 2518 } 2519 2520 error = nfs4_proc_getattr(server, mntfh, fattr); 2521 if (error < 0) { 2522 dprintk("nfs4_get_root: getattr error = %d\n", -error); 2523 return error; 2524 } 2525 2526 if (fattr->valid & NFS_ATTR_FATTR_FSID && 2527 !nfs_fsid_equal(&server->fsid, &fattr->fsid)) 2528 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid)); 2529 2530 return error; 2531 } 2532 2533 /* 2534 * Get locations and (maybe) other attributes of a referral. 2535 * Note that we'll actually follow the referral later when 2536 * we detect fsid mismatch in inode revalidation 2537 */ 2538 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, 2539 const struct qstr *name, struct nfs_fattr *fattr, 2540 struct nfs_fh *fhandle) 2541 { 2542 int status = -ENOMEM; 2543 struct page *page = NULL; 2544 struct nfs4_fs_locations *locations = NULL; 2545 2546 page = alloc_page(GFP_KERNEL); 2547 if (page == NULL) 2548 goto out; 2549 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL); 2550 if (locations == NULL) 2551 goto out; 2552 2553 status = nfs4_proc_fs_locations(client, dir, name, locations, page); 2554 if (status != 0) 2555 goto out; 2556 /* Make sure server returned a different fsid for the referral */ 2557 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { 2558 dprintk("%s: server did not return a different fsid for" 2559 " a referral at %s\n", __func__, name->name); 2560 status = -EIO; 2561 goto out; 2562 } 2563 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ 2564 nfs_fixup_referral_attributes(&locations->fattr); 2565 2566 /* replace the lookup nfs_fattr with the locations nfs_fattr */ 2567 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); 2568 memset(fhandle, 0, sizeof(struct nfs_fh)); 2569 out: 2570 if (page) 2571 __free_page(page); 2572 kfree(locations); 2573 return status; 2574 } 2575 2576 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2577 { 2578 struct nfs4_getattr_arg args = { 2579 .fh = fhandle, 2580 .bitmask = server->attr_bitmask, 2581 }; 2582 struct nfs4_getattr_res res = { 2583 .fattr = fattr, 2584 .server = server, 2585 }; 2586 struct rpc_message msg = { 2587 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR], 2588 .rpc_argp = &args, 2589 .rpc_resp = &res, 2590 }; 2591 2592 nfs_fattr_init(fattr); 2593 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2594 } 2595 2596 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2597 { 2598 struct nfs4_exception exception = { }; 2599 int err; 2600 do { 2601 err = nfs4_handle_exception(server, 2602 _nfs4_proc_getattr(server, fhandle, fattr), 2603 &exception); 2604 } while (exception.retry); 2605 return err; 2606 } 2607 2608 /* 2609 * The file is not closed if it is opened due to the a request to change 2610 * the size of the file. The open call will not be needed once the 2611 * VFS layer lookup-intents are implemented. 2612 * 2613 * Close is called when the inode is destroyed. 2614 * If we haven't opened the file for O_WRONLY, we 2615 * need to in the size_change case to obtain a stateid. 2616 * 2617 * Got race? 2618 * Because OPEN is always done by name in nfsv4, it is 2619 * possible that we opened a different file by the same 2620 * name. We can recognize this race condition, but we 2621 * can't do anything about it besides returning an error. 2622 * 2623 * This will be fixed with VFS changes (lookup-intent). 2624 */ 2625 static int 2626 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, 2627 struct iattr *sattr) 2628 { 2629 struct inode *inode = dentry->d_inode; 2630 struct rpc_cred *cred = NULL; 2631 struct nfs4_state *state = NULL; 2632 int status; 2633 2634 if (pnfs_ld_layoutret_on_setattr(inode)) 2635 pnfs_return_layout(inode); 2636 2637 nfs_fattr_init(fattr); 2638 2639 /* Deal with open(O_TRUNC) */ 2640 if (sattr->ia_valid & ATTR_OPEN) 2641 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); 2642 2643 /* Optimization: if the end result is no change, don't RPC */ 2644 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0) 2645 return 0; 2646 2647 /* Search for an existing open(O_WRITE) file */ 2648 if (sattr->ia_valid & ATTR_FILE) { 2649 struct nfs_open_context *ctx; 2650 2651 ctx = nfs_file_open_context(sattr->ia_file); 2652 if (ctx) { 2653 cred = ctx->cred; 2654 state = ctx->state; 2655 } 2656 } 2657 2658 status = nfs4_do_setattr(inode, cred, fattr, sattr, state); 2659 if (status == 0) 2660 nfs_setattr_update_inode(inode, sattr); 2661 return status; 2662 } 2663 2664 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, 2665 const struct qstr *name, struct nfs_fh *fhandle, 2666 struct nfs_fattr *fattr) 2667 { 2668 struct nfs_server *server = NFS_SERVER(dir); 2669 int status; 2670 struct nfs4_lookup_arg args = { 2671 .bitmask = server->attr_bitmask, 2672 .dir_fh = NFS_FH(dir), 2673 .name = name, 2674 }; 2675 struct nfs4_lookup_res res = { 2676 .server = server, 2677 .fattr = fattr, 2678 .fh = fhandle, 2679 }; 2680 struct rpc_message msg = { 2681 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP], 2682 .rpc_argp = &args, 2683 .rpc_resp = &res, 2684 }; 2685 2686 nfs_fattr_init(fattr); 2687 2688 dprintk("NFS call lookup %s\n", name->name); 2689 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0); 2690 dprintk("NFS reply lookup: %d\n", status); 2691 return status; 2692 } 2693 2694 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) 2695 { 2696 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 2697 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; 2698 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 2699 fattr->nlink = 2; 2700 } 2701 2702 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, 2703 struct qstr *name, struct nfs_fh *fhandle, 2704 struct nfs_fattr *fattr) 2705 { 2706 struct nfs4_exception exception = { }; 2707 struct rpc_clnt *client = *clnt; 2708 int err; 2709 do { 2710 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr); 2711 switch (err) { 2712 case -NFS4ERR_BADNAME: 2713 err = -ENOENT; 2714 goto out; 2715 case -NFS4ERR_MOVED: 2716 err = nfs4_get_referral(client, dir, name, fattr, fhandle); 2717 goto out; 2718 case -NFS4ERR_WRONGSEC: 2719 err = -EPERM; 2720 if (client != *clnt) 2721 goto out; 2722 2723 client = nfs4_create_sec_client(client, dir, name); 2724 if (IS_ERR(client)) 2725 return PTR_ERR(client); 2726 2727 exception.retry = 1; 2728 break; 2729 default: 2730 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); 2731 } 2732 } while (exception.retry); 2733 2734 out: 2735 if (err == 0) 2736 *clnt = client; 2737 else if (client != *clnt) 2738 rpc_shutdown_client(client); 2739 2740 return err; 2741 } 2742 2743 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, 2744 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2745 { 2746 int status; 2747 struct rpc_clnt *client = NFS_CLIENT(dir); 2748 2749 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr); 2750 if (client != NFS_CLIENT(dir)) { 2751 rpc_shutdown_client(client); 2752 nfs_fixup_secinfo_attributes(fattr); 2753 } 2754 return status; 2755 } 2756 2757 struct rpc_clnt * 2758 nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, 2759 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2760 { 2761 int status; 2762 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir)); 2763 2764 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr); 2765 if (status < 0) { 2766 rpc_shutdown_client(client); 2767 return ERR_PTR(status); 2768 } 2769 return client; 2770 } 2771 2772 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2773 { 2774 struct nfs_server *server = NFS_SERVER(inode); 2775 struct nfs4_accessargs args = { 2776 .fh = NFS_FH(inode), 2777 .bitmask = server->cache_consistency_bitmask, 2778 }; 2779 struct nfs4_accessres res = { 2780 .server = server, 2781 }; 2782 struct rpc_message msg = { 2783 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS], 2784 .rpc_argp = &args, 2785 .rpc_resp = &res, 2786 .rpc_cred = entry->cred, 2787 }; 2788 int mode = entry->mask; 2789 int status; 2790 2791 /* 2792 * Determine which access bits we want to ask for... 2793 */ 2794 if (mode & MAY_READ) 2795 args.access |= NFS4_ACCESS_READ; 2796 if (S_ISDIR(inode->i_mode)) { 2797 if (mode & MAY_WRITE) 2798 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE; 2799 if (mode & MAY_EXEC) 2800 args.access |= NFS4_ACCESS_LOOKUP; 2801 } else { 2802 if (mode & MAY_WRITE) 2803 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND; 2804 if (mode & MAY_EXEC) 2805 args.access |= NFS4_ACCESS_EXECUTE; 2806 } 2807 2808 res.fattr = nfs_alloc_fattr(); 2809 if (res.fattr == NULL) 2810 return -ENOMEM; 2811 2812 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 2813 if (!status) { 2814 nfs_access_set_mask(entry, res.access); 2815 nfs_refresh_inode(inode, res.fattr); 2816 } 2817 nfs_free_fattr(res.fattr); 2818 return status; 2819 } 2820 2821 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) 2822 { 2823 struct nfs4_exception exception = { }; 2824 int err; 2825 do { 2826 err = nfs4_handle_exception(NFS_SERVER(inode), 2827 _nfs4_proc_access(inode, entry), 2828 &exception); 2829 } while (exception.retry); 2830 return err; 2831 } 2832 2833 /* 2834 * TODO: For the time being, we don't try to get any attributes 2835 * along with any of the zero-copy operations READ, READDIR, 2836 * READLINK, WRITE. 2837 * 2838 * In the case of the first three, we want to put the GETATTR 2839 * after the read-type operation -- this is because it is hard 2840 * to predict the length of a GETATTR response in v4, and thus 2841 * align the READ data correctly. This means that the GETATTR 2842 * may end up partially falling into the page cache, and we should 2843 * shift it into the 'tail' of the xdr_buf before processing. 2844 * To do this efficiently, we need to know the total length 2845 * of data received, which doesn't seem to be available outside 2846 * of the RPC layer. 2847 * 2848 * In the case of WRITE, we also want to put the GETATTR after 2849 * the operation -- in this case because we want to make sure 2850 * we get the post-operation mtime and size. 2851 * 2852 * Both of these changes to the XDR layer would in fact be quite 2853 * minor, but I decided to leave them for a subsequent patch. 2854 */ 2855 static int _nfs4_proc_readlink(struct inode *inode, struct page *page, 2856 unsigned int pgbase, unsigned int pglen) 2857 { 2858 struct nfs4_readlink args = { 2859 .fh = NFS_FH(inode), 2860 .pgbase = pgbase, 2861 .pglen = pglen, 2862 .pages = &page, 2863 }; 2864 struct nfs4_readlink_res res; 2865 struct rpc_message msg = { 2866 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK], 2867 .rpc_argp = &args, 2868 .rpc_resp = &res, 2869 }; 2870 2871 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0); 2872 } 2873 2874 static int nfs4_proc_readlink(struct inode *inode, struct page *page, 2875 unsigned int pgbase, unsigned int pglen) 2876 { 2877 struct nfs4_exception exception = { }; 2878 int err; 2879 do { 2880 err = nfs4_handle_exception(NFS_SERVER(inode), 2881 _nfs4_proc_readlink(inode, page, pgbase, pglen), 2882 &exception); 2883 } while (exception.retry); 2884 return err; 2885 } 2886 2887 /* 2888 * This is just for mknod. open(O_CREAT) will always do ->open_context(). 2889 */ 2890 static int 2891 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, 2892 int flags) 2893 { 2894 struct nfs_open_context *ctx; 2895 struct nfs4_state *state; 2896 int status = 0; 2897 2898 ctx = alloc_nfs_open_context(dentry, FMODE_READ); 2899 if (IS_ERR(ctx)) 2900 return PTR_ERR(ctx); 2901 2902 sattr->ia_mode &= ~current_umask(); 2903 state = nfs4_do_open(dir, dentry, ctx->mode, 2904 flags, sattr, ctx->cred, 2905 &ctx->mdsthreshold); 2906 d_drop(dentry); 2907 if (IS_ERR(state)) { 2908 status = PTR_ERR(state); 2909 goto out; 2910 } 2911 d_add(dentry, igrab(state->inode)); 2912 nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); 2913 ctx->state = state; 2914 out: 2915 put_nfs_open_context(ctx); 2916 return status; 2917 } 2918 2919 static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) 2920 { 2921 struct nfs_server *server = NFS_SERVER(dir); 2922 struct nfs_removeargs args = { 2923 .fh = NFS_FH(dir), 2924 .name = *name, 2925 }; 2926 struct nfs_removeres res = { 2927 .server = server, 2928 }; 2929 struct rpc_message msg = { 2930 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], 2931 .rpc_argp = &args, 2932 .rpc_resp = &res, 2933 }; 2934 int status; 2935 2936 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 2937 if (status == 0) 2938 update_changeattr(dir, &res.cinfo); 2939 return status; 2940 } 2941 2942 static int nfs4_proc_remove(struct inode *dir, struct qstr *name) 2943 { 2944 struct nfs4_exception exception = { }; 2945 int err; 2946 do { 2947 err = nfs4_handle_exception(NFS_SERVER(dir), 2948 _nfs4_proc_remove(dir, name), 2949 &exception); 2950 } while (exception.retry); 2951 return err; 2952 } 2953 2954 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) 2955 { 2956 struct nfs_server *server = NFS_SERVER(dir); 2957 struct nfs_removeargs *args = msg->rpc_argp; 2958 struct nfs_removeres *res = msg->rpc_resp; 2959 2960 res->server = server; 2961 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 2962 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1); 2963 } 2964 2965 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) 2966 { 2967 nfs4_setup_sequence(NFS_SERVER(data->dir), 2968 &data->args.seq_args, 2969 &data->res.seq_res, 2970 task); 2971 } 2972 2973 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) 2974 { 2975 struct nfs_removeres *res = task->tk_msg.rpc_resp; 2976 2977 if (!nfs4_sequence_done(task, &res->seq_res)) 2978 return 0; 2979 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 2980 return 0; 2981 update_changeattr(dir, &res->cinfo); 2982 return 1; 2983 } 2984 2985 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir) 2986 { 2987 struct nfs_server *server = NFS_SERVER(dir); 2988 struct nfs_renameargs *arg = msg->rpc_argp; 2989 struct nfs_renameres *res = msg->rpc_resp; 2990 2991 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 2992 res->server = server; 2993 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1); 2994 } 2995 2996 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data) 2997 { 2998 nfs4_setup_sequence(NFS_SERVER(data->old_dir), 2999 &data->args.seq_args, 3000 &data->res.seq_res, 3001 task); 3002 } 3003 3004 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, 3005 struct inode *new_dir) 3006 { 3007 struct nfs_renameres *res = task->tk_msg.rpc_resp; 3008 3009 if (!nfs4_sequence_done(task, &res->seq_res)) 3010 return 0; 3011 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 3012 return 0; 3013 3014 update_changeattr(old_dir, &res->old_cinfo); 3015 update_changeattr(new_dir, &res->new_cinfo); 3016 return 1; 3017 } 3018 3019 static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, 3020 struct inode *new_dir, struct qstr *new_name) 3021 { 3022 struct nfs_server *server = NFS_SERVER(old_dir); 3023 struct nfs_renameargs arg = { 3024 .old_dir = NFS_FH(old_dir), 3025 .new_dir = NFS_FH(new_dir), 3026 .old_name = old_name, 3027 .new_name = new_name, 3028 }; 3029 struct nfs_renameres res = { 3030 .server = server, 3031 }; 3032 struct rpc_message msg = { 3033 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME], 3034 .rpc_argp = &arg, 3035 .rpc_resp = &res, 3036 }; 3037 int status = -ENOMEM; 3038 3039 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3040 if (!status) { 3041 update_changeattr(old_dir, &res.old_cinfo); 3042 update_changeattr(new_dir, &res.new_cinfo); 3043 } 3044 return status; 3045 } 3046 3047 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, 3048 struct inode *new_dir, struct qstr *new_name) 3049 { 3050 struct nfs4_exception exception = { }; 3051 int err; 3052 do { 3053 err = nfs4_handle_exception(NFS_SERVER(old_dir), 3054 _nfs4_proc_rename(old_dir, old_name, 3055 new_dir, new_name), 3056 &exception); 3057 } while (exception.retry); 3058 return err; 3059 } 3060 3061 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3062 { 3063 struct nfs_server *server = NFS_SERVER(inode); 3064 struct nfs4_link_arg arg = { 3065 .fh = NFS_FH(inode), 3066 .dir_fh = NFS_FH(dir), 3067 .name = name, 3068 .bitmask = server->attr_bitmask, 3069 }; 3070 struct nfs4_link_res res = { 3071 .server = server, 3072 }; 3073 struct rpc_message msg = { 3074 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], 3075 .rpc_argp = &arg, 3076 .rpc_resp = &res, 3077 }; 3078 int status = -ENOMEM; 3079 3080 res.fattr = nfs_alloc_fattr(); 3081 if (res.fattr == NULL) 3082 goto out; 3083 3084 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3085 if (!status) { 3086 update_changeattr(dir, &res.cinfo); 3087 nfs_post_op_update_inode(inode, res.fattr); 3088 } 3089 out: 3090 nfs_free_fattr(res.fattr); 3091 return status; 3092 } 3093 3094 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) 3095 { 3096 struct nfs4_exception exception = { }; 3097 int err; 3098 do { 3099 err = nfs4_handle_exception(NFS_SERVER(inode), 3100 _nfs4_proc_link(inode, dir, name), 3101 &exception); 3102 } while (exception.retry); 3103 return err; 3104 } 3105 3106 struct nfs4_createdata { 3107 struct rpc_message msg; 3108 struct nfs4_create_arg arg; 3109 struct nfs4_create_res res; 3110 struct nfs_fh fh; 3111 struct nfs_fattr fattr; 3112 }; 3113 3114 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3115 struct qstr *name, struct iattr *sattr, u32 ftype) 3116 { 3117 struct nfs4_createdata *data; 3118 3119 data = kzalloc(sizeof(*data), GFP_KERNEL); 3120 if (data != NULL) { 3121 struct nfs_server *server = NFS_SERVER(dir); 3122 3123 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE]; 3124 data->msg.rpc_argp = &data->arg; 3125 data->msg.rpc_resp = &data->res; 3126 data->arg.dir_fh = NFS_FH(dir); 3127 data->arg.server = server; 3128 data->arg.name = name; 3129 data->arg.attrs = sattr; 3130 data->arg.ftype = ftype; 3131 data->arg.bitmask = server->attr_bitmask; 3132 data->res.server = server; 3133 data->res.fh = &data->fh; 3134 data->res.fattr = &data->fattr; 3135 nfs_fattr_init(data->res.fattr); 3136 } 3137 return data; 3138 } 3139 3140 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data) 3141 { 3142 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg, 3143 &data->arg.seq_args, &data->res.seq_res, 1); 3144 if (status == 0) { 3145 update_changeattr(dir, &data->res.dir_cinfo); 3146 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 3147 } 3148 return status; 3149 } 3150 3151 static void nfs4_free_createdata(struct nfs4_createdata *data) 3152 { 3153 kfree(data); 3154 } 3155 3156 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3157 struct page *page, unsigned int len, struct iattr *sattr) 3158 { 3159 struct nfs4_createdata *data; 3160 int status = -ENAMETOOLONG; 3161 3162 if (len > NFS4_MAXPATHLEN) 3163 goto out; 3164 3165 status = -ENOMEM; 3166 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK); 3167 if (data == NULL) 3168 goto out; 3169 3170 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK]; 3171 data->arg.u.symlink.pages = &page; 3172 data->arg.u.symlink.len = len; 3173 3174 status = nfs4_do_create(dir, dentry, data); 3175 3176 nfs4_free_createdata(data); 3177 out: 3178 return status; 3179 } 3180 3181 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, 3182 struct page *page, unsigned int len, struct iattr *sattr) 3183 { 3184 struct nfs4_exception exception = { }; 3185 int err; 3186 do { 3187 err = nfs4_handle_exception(NFS_SERVER(dir), 3188 _nfs4_proc_symlink(dir, dentry, page, 3189 len, sattr), 3190 &exception); 3191 } while (exception.retry); 3192 return err; 3193 } 3194 3195 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3196 struct iattr *sattr) 3197 { 3198 struct nfs4_createdata *data; 3199 int status = -ENOMEM; 3200 3201 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR); 3202 if (data == NULL) 3203 goto out; 3204 3205 status = nfs4_do_create(dir, dentry, data); 3206 3207 nfs4_free_createdata(data); 3208 out: 3209 return status; 3210 } 3211 3212 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, 3213 struct iattr *sattr) 3214 { 3215 struct nfs4_exception exception = { }; 3216 int err; 3217 3218 sattr->ia_mode &= ~current_umask(); 3219 do { 3220 err = nfs4_handle_exception(NFS_SERVER(dir), 3221 _nfs4_proc_mkdir(dir, dentry, sattr), 3222 &exception); 3223 } while (exception.retry); 3224 return err; 3225 } 3226 3227 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3228 u64 cookie, struct page **pages, unsigned int count, int plus) 3229 { 3230 struct inode *dir = dentry->d_inode; 3231 struct nfs4_readdir_arg args = { 3232 .fh = NFS_FH(dir), 3233 .pages = pages, 3234 .pgbase = 0, 3235 .count = count, 3236 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask, 3237 .plus = plus, 3238 }; 3239 struct nfs4_readdir_res res; 3240 struct rpc_message msg = { 3241 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR], 3242 .rpc_argp = &args, 3243 .rpc_resp = &res, 3244 .rpc_cred = cred, 3245 }; 3246 int status; 3247 3248 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__, 3249 dentry->d_parent->d_name.name, 3250 dentry->d_name.name, 3251 (unsigned long long)cookie); 3252 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); 3253 res.pgbase = args.pgbase; 3254 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 3255 if (status >= 0) { 3256 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); 3257 status += args.pgbase; 3258 } 3259 3260 nfs_invalidate_atime(dir); 3261 3262 dprintk("%s: returns %d\n", __func__, status); 3263 return status; 3264 } 3265 3266 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, 3267 u64 cookie, struct page **pages, unsigned int count, int plus) 3268 { 3269 struct nfs4_exception exception = { }; 3270 int err; 3271 do { 3272 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), 3273 _nfs4_proc_readdir(dentry, cred, cookie, 3274 pages, count, plus), 3275 &exception); 3276 } while (exception.retry); 3277 return err; 3278 } 3279 3280 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3281 struct iattr *sattr, dev_t rdev) 3282 { 3283 struct nfs4_createdata *data; 3284 int mode = sattr->ia_mode; 3285 int status = -ENOMEM; 3286 3287 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK); 3288 if (data == NULL) 3289 goto out; 3290 3291 if (S_ISFIFO(mode)) 3292 data->arg.ftype = NF4FIFO; 3293 else if (S_ISBLK(mode)) { 3294 data->arg.ftype = NF4BLK; 3295 data->arg.u.device.specdata1 = MAJOR(rdev); 3296 data->arg.u.device.specdata2 = MINOR(rdev); 3297 } 3298 else if (S_ISCHR(mode)) { 3299 data->arg.ftype = NF4CHR; 3300 data->arg.u.device.specdata1 = MAJOR(rdev); 3301 data->arg.u.device.specdata2 = MINOR(rdev); 3302 } else if (!S_ISSOCK(mode)) { 3303 status = -EINVAL; 3304 goto out_free; 3305 } 3306 3307 status = nfs4_do_create(dir, dentry, data); 3308 out_free: 3309 nfs4_free_createdata(data); 3310 out: 3311 return status; 3312 } 3313 3314 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, 3315 struct iattr *sattr, dev_t rdev) 3316 { 3317 struct nfs4_exception exception = { }; 3318 int err; 3319 3320 sattr->ia_mode &= ~current_umask(); 3321 do { 3322 err = nfs4_handle_exception(NFS_SERVER(dir), 3323 _nfs4_proc_mknod(dir, dentry, sattr, rdev), 3324 &exception); 3325 } while (exception.retry); 3326 return err; 3327 } 3328 3329 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, 3330 struct nfs_fsstat *fsstat) 3331 { 3332 struct nfs4_statfs_arg args = { 3333 .fh = fhandle, 3334 .bitmask = server->attr_bitmask, 3335 }; 3336 struct nfs4_statfs_res res = { 3337 .fsstat = fsstat, 3338 }; 3339 struct rpc_message msg = { 3340 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS], 3341 .rpc_argp = &args, 3342 .rpc_resp = &res, 3343 }; 3344 3345 nfs_fattr_init(fsstat->fattr); 3346 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3347 } 3348 3349 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) 3350 { 3351 struct nfs4_exception exception = { }; 3352 int err; 3353 do { 3354 err = nfs4_handle_exception(server, 3355 _nfs4_proc_statfs(server, fhandle, fsstat), 3356 &exception); 3357 } while (exception.retry); 3358 return err; 3359 } 3360 3361 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, 3362 struct nfs_fsinfo *fsinfo) 3363 { 3364 struct nfs4_fsinfo_arg args = { 3365 .fh = fhandle, 3366 .bitmask = server->attr_bitmask, 3367 }; 3368 struct nfs4_fsinfo_res res = { 3369 .fsinfo = fsinfo, 3370 }; 3371 struct rpc_message msg = { 3372 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO], 3373 .rpc_argp = &args, 3374 .rpc_resp = &res, 3375 }; 3376 3377 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3378 } 3379 3380 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 3381 { 3382 struct nfs4_exception exception = { }; 3383 int err; 3384 3385 do { 3386 err = nfs4_handle_exception(server, 3387 _nfs4_do_fsinfo(server, fhandle, fsinfo), 3388 &exception); 3389 } while (exception.retry); 3390 return err; 3391 } 3392 3393 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) 3394 { 3395 int error; 3396 3397 nfs_fattr_init(fsinfo->fattr); 3398 error = nfs4_do_fsinfo(server, fhandle, fsinfo); 3399 if (error == 0) { 3400 /* block layout checks this! */ 3401 server->pnfs_blksize = fsinfo->blksize; 3402 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype); 3403 } 3404 3405 return error; 3406 } 3407 3408 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 3409 struct nfs_pathconf *pathconf) 3410 { 3411 struct nfs4_pathconf_arg args = { 3412 .fh = fhandle, 3413 .bitmask = server->attr_bitmask, 3414 }; 3415 struct nfs4_pathconf_res res = { 3416 .pathconf = pathconf, 3417 }; 3418 struct rpc_message msg = { 3419 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF], 3420 .rpc_argp = &args, 3421 .rpc_resp = &res, 3422 }; 3423 3424 /* None of the pathconf attributes are mandatory to implement */ 3425 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) { 3426 memset(pathconf, 0, sizeof(*pathconf)); 3427 return 0; 3428 } 3429 3430 nfs_fattr_init(pathconf->fattr); 3431 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 3432 } 3433 3434 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, 3435 struct nfs_pathconf *pathconf) 3436 { 3437 struct nfs4_exception exception = { }; 3438 int err; 3439 3440 do { 3441 err = nfs4_handle_exception(server, 3442 _nfs4_proc_pathconf(server, fhandle, pathconf), 3443 &exception); 3444 } while (exception.retry); 3445 return err; 3446 } 3447 3448 void __nfs4_read_done_cb(struct nfs_read_data *data) 3449 { 3450 nfs_invalidate_atime(data->header->inode); 3451 } 3452 3453 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data) 3454 { 3455 struct nfs_server *server = NFS_SERVER(data->header->inode); 3456 3457 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) { 3458 rpc_restart_call_prepare(task); 3459 return -EAGAIN; 3460 } 3461 3462 __nfs4_read_done_cb(data); 3463 if (task->tk_status > 0) 3464 renew_lease(server, data->timestamp); 3465 return 0; 3466 } 3467 3468 static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data) 3469 { 3470 3471 dprintk("--> %s\n", __func__); 3472 3473 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3474 return -EAGAIN; 3475 3476 return data->read_done_cb ? data->read_done_cb(task, data) : 3477 nfs4_read_done_cb(task, data); 3478 } 3479 3480 static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg) 3481 { 3482 data->timestamp = jiffies; 3483 data->read_done_cb = nfs4_read_done_cb; 3484 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ]; 3485 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); 3486 } 3487 3488 static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data) 3489 { 3490 nfs4_setup_sequence(NFS_SERVER(data->header->inode), 3491 &data->args.seq_args, 3492 &data->res.seq_res, 3493 task); 3494 } 3495 3496 static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data) 3497 { 3498 struct inode *inode = data->header->inode; 3499 3500 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) { 3501 rpc_restart_call_prepare(task); 3502 return -EAGAIN; 3503 } 3504 if (task->tk_status >= 0) { 3505 renew_lease(NFS_SERVER(inode), data->timestamp); 3506 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 3507 } 3508 return 0; 3509 } 3510 3511 static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data) 3512 { 3513 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3514 return -EAGAIN; 3515 return data->write_done_cb ? data->write_done_cb(task, data) : 3516 nfs4_write_done_cb(task, data); 3517 } 3518 3519 static 3520 bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data) 3521 { 3522 const struct nfs_pgio_header *hdr = data->header; 3523 3524 /* Don't request attributes for pNFS or O_DIRECT writes */ 3525 if (data->ds_clp != NULL || hdr->dreq != NULL) 3526 return false; 3527 /* Otherwise, request attributes if and only if we don't hold 3528 * a delegation 3529 */ 3530 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0; 3531 } 3532 3533 static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg) 3534 { 3535 struct nfs_server *server = NFS_SERVER(data->header->inode); 3536 3537 if (!nfs4_write_need_cache_consistency_data(data)) { 3538 data->args.bitmask = NULL; 3539 data->res.fattr = NULL; 3540 } else 3541 data->args.bitmask = server->cache_consistency_bitmask; 3542 3543 if (!data->write_done_cb) 3544 data->write_done_cb = nfs4_write_done_cb; 3545 data->res.server = server; 3546 data->timestamp = jiffies; 3547 3548 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; 3549 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3550 } 3551 3552 static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data) 3553 { 3554 nfs4_setup_sequence(NFS_SERVER(data->header->inode), 3555 &data->args.seq_args, 3556 &data->res.seq_res, 3557 task); 3558 } 3559 3560 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data) 3561 { 3562 nfs4_setup_sequence(NFS_SERVER(data->inode), 3563 &data->args.seq_args, 3564 &data->res.seq_res, 3565 task); 3566 } 3567 3568 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) 3569 { 3570 struct inode *inode = data->inode; 3571 3572 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) { 3573 rpc_restart_call_prepare(task); 3574 return -EAGAIN; 3575 } 3576 return 0; 3577 } 3578 3579 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data) 3580 { 3581 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3582 return -EAGAIN; 3583 return data->commit_done_cb(task, data); 3584 } 3585 3586 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg) 3587 { 3588 struct nfs_server *server = NFS_SERVER(data->inode); 3589 3590 if (data->commit_done_cb == NULL) 3591 data->commit_done_cb = nfs4_commit_done_cb; 3592 data->res.server = server; 3593 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 3594 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3595 } 3596 3597 struct nfs4_renewdata { 3598 struct nfs_client *client; 3599 unsigned long timestamp; 3600 }; 3601 3602 /* 3603 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special 3604 * standalone procedure for queueing an asynchronous RENEW. 3605 */ 3606 static void nfs4_renew_release(void *calldata) 3607 { 3608 struct nfs4_renewdata *data = calldata; 3609 struct nfs_client *clp = data->client; 3610 3611 if (atomic_read(&clp->cl_count) > 1) 3612 nfs4_schedule_state_renewal(clp); 3613 nfs_put_client(clp); 3614 kfree(data); 3615 } 3616 3617 static void nfs4_renew_done(struct rpc_task *task, void *calldata) 3618 { 3619 struct nfs4_renewdata *data = calldata; 3620 struct nfs_client *clp = data->client; 3621 unsigned long timestamp = data->timestamp; 3622 3623 if (task->tk_status < 0) { 3624 /* Unless we're shutting down, schedule state recovery! */ 3625 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0) 3626 return; 3627 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) { 3628 nfs4_schedule_lease_recovery(clp); 3629 return; 3630 } 3631 nfs4_schedule_path_down_recovery(clp); 3632 } 3633 do_renew_lease(clp, timestamp); 3634 } 3635 3636 static const struct rpc_call_ops nfs4_renew_ops = { 3637 .rpc_call_done = nfs4_renew_done, 3638 .rpc_release = nfs4_renew_release, 3639 }; 3640 3641 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 3642 { 3643 struct rpc_message msg = { 3644 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3645 .rpc_argp = clp, 3646 .rpc_cred = cred, 3647 }; 3648 struct nfs4_renewdata *data; 3649 3650 if (renew_flags == 0) 3651 return 0; 3652 if (!atomic_inc_not_zero(&clp->cl_count)) 3653 return -EIO; 3654 data = kmalloc(sizeof(*data), GFP_NOFS); 3655 if (data == NULL) 3656 return -ENOMEM; 3657 data->client = clp; 3658 data->timestamp = jiffies; 3659 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT, 3660 &nfs4_renew_ops, data); 3661 } 3662 3663 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred) 3664 { 3665 struct rpc_message msg = { 3666 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW], 3667 .rpc_argp = clp, 3668 .rpc_cred = cred, 3669 }; 3670 unsigned long now = jiffies; 3671 int status; 3672 3673 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); 3674 if (status < 0) 3675 return status; 3676 do_renew_lease(clp, now); 3677 return 0; 3678 } 3679 3680 static inline int nfs4_server_supports_acls(struct nfs_server *server) 3681 { 3682 return (server->caps & NFS_CAP_ACLS) 3683 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL) 3684 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL); 3685 } 3686 3687 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that 3688 * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on 3689 * the stack. 3690 */ 3691 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) 3692 3693 static int buf_to_pages_noslab(const void *buf, size_t buflen, 3694 struct page **pages, unsigned int *pgbase) 3695 { 3696 struct page *newpage, **spages; 3697 int rc = 0; 3698 size_t len; 3699 spages = pages; 3700 3701 do { 3702 len = min_t(size_t, PAGE_SIZE, buflen); 3703 newpage = alloc_page(GFP_KERNEL); 3704 3705 if (newpage == NULL) 3706 goto unwind; 3707 memcpy(page_address(newpage), buf, len); 3708 buf += len; 3709 buflen -= len; 3710 *pages++ = newpage; 3711 rc++; 3712 } while (buflen != 0); 3713 3714 return rc; 3715 3716 unwind: 3717 for(; rc > 0; rc--) 3718 __free_page(spages[rc-1]); 3719 return -ENOMEM; 3720 } 3721 3722 struct nfs4_cached_acl { 3723 int cached; 3724 size_t len; 3725 char data[0]; 3726 }; 3727 3728 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl) 3729 { 3730 struct nfs_inode *nfsi = NFS_I(inode); 3731 3732 spin_lock(&inode->i_lock); 3733 kfree(nfsi->nfs4_acl); 3734 nfsi->nfs4_acl = acl; 3735 spin_unlock(&inode->i_lock); 3736 } 3737 3738 static void nfs4_zap_acl_attr(struct inode *inode) 3739 { 3740 nfs4_set_cached_acl(inode, NULL); 3741 } 3742 3743 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen) 3744 { 3745 struct nfs_inode *nfsi = NFS_I(inode); 3746 struct nfs4_cached_acl *acl; 3747 int ret = -ENOENT; 3748 3749 spin_lock(&inode->i_lock); 3750 acl = nfsi->nfs4_acl; 3751 if (acl == NULL) 3752 goto out; 3753 if (buf == NULL) /* user is just asking for length */ 3754 goto out_len; 3755 if (acl->cached == 0) 3756 goto out; 3757 ret = -ERANGE; /* see getxattr(2) man page */ 3758 if (acl->len > buflen) 3759 goto out; 3760 memcpy(buf, acl->data, acl->len); 3761 out_len: 3762 ret = acl->len; 3763 out: 3764 spin_unlock(&inode->i_lock); 3765 return ret; 3766 } 3767 3768 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) 3769 { 3770 struct nfs4_cached_acl *acl; 3771 size_t buflen = sizeof(*acl) + acl_len; 3772 3773 if (buflen <= PAGE_SIZE) { 3774 acl = kmalloc(buflen, GFP_KERNEL); 3775 if (acl == NULL) 3776 goto out; 3777 acl->cached = 1; 3778 _copy_from_pages(acl->data, pages, pgbase, acl_len); 3779 } else { 3780 acl = kmalloc(sizeof(*acl), GFP_KERNEL); 3781 if (acl == NULL) 3782 goto out; 3783 acl->cached = 0; 3784 } 3785 acl->len = acl_len; 3786 out: 3787 nfs4_set_cached_acl(inode, acl); 3788 } 3789 3790 /* 3791 * The getxattr API returns the required buffer length when called with a 3792 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating 3793 * the required buf. On a NULL buf, we send a page of data to the server 3794 * guessing that the ACL request can be serviced by a page. If so, we cache 3795 * up to the page of ACL data, and the 2nd call to getxattr is serviced by 3796 * the cache. If not so, we throw away the page, and cache the required 3797 * length. The next getxattr call will then produce another round trip to 3798 * the server, this time with the input buf of the required size. 3799 */ 3800 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 3801 { 3802 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, }; 3803 struct nfs_getaclargs args = { 3804 .fh = NFS_FH(inode), 3805 .acl_pages = pages, 3806 .acl_len = buflen, 3807 }; 3808 struct nfs_getaclres res = { 3809 .acl_len = buflen, 3810 }; 3811 struct rpc_message msg = { 3812 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], 3813 .rpc_argp = &args, 3814 .rpc_resp = &res, 3815 }; 3816 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 3817 int ret = -ENOMEM, i; 3818 3819 /* As long as we're doing a round trip to the server anyway, 3820 * let's be prepared for a page of acl data. */ 3821 if (npages == 0) 3822 npages = 1; 3823 if (npages > ARRAY_SIZE(pages)) 3824 return -ERANGE; 3825 3826 for (i = 0; i < npages; i++) { 3827 pages[i] = alloc_page(GFP_KERNEL); 3828 if (!pages[i]) 3829 goto out_free; 3830 } 3831 3832 /* for decoding across pages */ 3833 res.acl_scratch = alloc_page(GFP_KERNEL); 3834 if (!res.acl_scratch) 3835 goto out_free; 3836 3837 args.acl_len = npages * PAGE_SIZE; 3838 args.acl_pgbase = 0; 3839 3840 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", 3841 __func__, buf, buflen, npages, args.acl_len); 3842 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), 3843 &msg, &args.seq_args, &res.seq_res, 0); 3844 if (ret) 3845 goto out_free; 3846 3847 /* Handle the case where the passed-in buffer is too short */ 3848 if (res.acl_flags & NFS4_ACL_TRUNC) { 3849 /* Did the user only issue a request for the acl length? */ 3850 if (buf == NULL) 3851 goto out_ok; 3852 ret = -ERANGE; 3853 goto out_free; 3854 } 3855 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); 3856 if (buf) { 3857 if (res.acl_len > buflen) { 3858 ret = -ERANGE; 3859 goto out_free; 3860 } 3861 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); 3862 } 3863 out_ok: 3864 ret = res.acl_len; 3865 out_free: 3866 for (i = 0; i < npages; i++) 3867 if (pages[i]) 3868 __free_page(pages[i]); 3869 if (res.acl_scratch) 3870 __free_page(res.acl_scratch); 3871 return ret; 3872 } 3873 3874 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) 3875 { 3876 struct nfs4_exception exception = { }; 3877 ssize_t ret; 3878 do { 3879 ret = __nfs4_get_acl_uncached(inode, buf, buflen); 3880 if (ret >= 0) 3881 break; 3882 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception); 3883 } while (exception.retry); 3884 return ret; 3885 } 3886 3887 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen) 3888 { 3889 struct nfs_server *server = NFS_SERVER(inode); 3890 int ret; 3891 3892 if (!nfs4_server_supports_acls(server)) 3893 return -EOPNOTSUPP; 3894 ret = nfs_revalidate_inode(server, inode); 3895 if (ret < 0) 3896 return ret; 3897 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL) 3898 nfs_zap_acl_cache(inode); 3899 ret = nfs4_read_cached_acl(inode, buf, buflen); 3900 if (ret != -ENOENT) 3901 /* -ENOENT is returned if there is no ACL or if there is an ACL 3902 * but no cached acl data, just the acl length */ 3903 return ret; 3904 return nfs4_get_acl_uncached(inode, buf, buflen); 3905 } 3906 3907 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 3908 { 3909 struct nfs_server *server = NFS_SERVER(inode); 3910 struct page *pages[NFS4ACL_MAXPAGES]; 3911 struct nfs_setaclargs arg = { 3912 .fh = NFS_FH(inode), 3913 .acl_pages = pages, 3914 .acl_len = buflen, 3915 }; 3916 struct nfs_setaclres res; 3917 struct rpc_message msg = { 3918 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL], 3919 .rpc_argp = &arg, 3920 .rpc_resp = &res, 3921 }; 3922 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); 3923 int ret, i; 3924 3925 if (!nfs4_server_supports_acls(server)) 3926 return -EOPNOTSUPP; 3927 if (npages > ARRAY_SIZE(pages)) 3928 return -ERANGE; 3929 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 3930 if (i < 0) 3931 return i; 3932 nfs4_inode_return_delegation(inode); 3933 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 3934 3935 /* 3936 * Free each page after tx, so the only ref left is 3937 * held by the network stack 3938 */ 3939 for (; i > 0; i--) 3940 put_page(pages[i-1]); 3941 3942 /* 3943 * Acl update can result in inode attribute update. 3944 * so mark the attribute cache invalid. 3945 */ 3946 spin_lock(&inode->i_lock); 3947 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR; 3948 spin_unlock(&inode->i_lock); 3949 nfs_access_zap_cache(inode); 3950 nfs_zap_acl_cache(inode); 3951 return ret; 3952 } 3953 3954 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) 3955 { 3956 struct nfs4_exception exception = { }; 3957 int err; 3958 do { 3959 err = nfs4_handle_exception(NFS_SERVER(inode), 3960 __nfs4_proc_set_acl(inode, buf, buflen), 3961 &exception); 3962 } while (exception.retry); 3963 return err; 3964 } 3965 3966 static int 3967 nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state) 3968 { 3969 struct nfs_client *clp = server->nfs_client; 3970 3971 if (task->tk_status >= 0) 3972 return 0; 3973 switch(task->tk_status) { 3974 case -NFS4ERR_DELEG_REVOKED: 3975 case -NFS4ERR_ADMIN_REVOKED: 3976 case -NFS4ERR_BAD_STATEID: 3977 if (state == NULL) 3978 break; 3979 nfs_remove_bad_delegation(state->inode); 3980 case -NFS4ERR_OPENMODE: 3981 if (state == NULL) 3982 break; 3983 nfs4_schedule_stateid_recovery(server, state); 3984 goto wait_on_recovery; 3985 case -NFS4ERR_EXPIRED: 3986 if (state != NULL) 3987 nfs4_schedule_stateid_recovery(server, state); 3988 case -NFS4ERR_STALE_STATEID: 3989 case -NFS4ERR_STALE_CLIENTID: 3990 nfs4_schedule_lease_recovery(clp); 3991 goto wait_on_recovery; 3992 #if defined(CONFIG_NFS_V4_1) 3993 case -NFS4ERR_BADSESSION: 3994 case -NFS4ERR_BADSLOT: 3995 case -NFS4ERR_BAD_HIGH_SLOT: 3996 case -NFS4ERR_DEADSESSION: 3997 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 3998 case -NFS4ERR_SEQ_FALSE_RETRY: 3999 case -NFS4ERR_SEQ_MISORDERED: 4000 dprintk("%s ERROR %d, Reset session\n", __func__, 4001 task->tk_status); 4002 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); 4003 task->tk_status = 0; 4004 return -EAGAIN; 4005 #endif /* CONFIG_NFS_V4_1 */ 4006 case -NFS4ERR_DELAY: 4007 nfs_inc_server_stats(server, NFSIOS_DELAY); 4008 case -NFS4ERR_GRACE: 4009 rpc_delay(task, NFS4_POLL_RETRY_MAX); 4010 task->tk_status = 0; 4011 return -EAGAIN; 4012 case -NFS4ERR_RETRY_UNCACHED_REP: 4013 case -NFS4ERR_OLD_STATEID: 4014 task->tk_status = 0; 4015 return -EAGAIN; 4016 } 4017 task->tk_status = nfs4_map_errors(task->tk_status); 4018 return 0; 4019 wait_on_recovery: 4020 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 4021 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 4022 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 4023 task->tk_status = 0; 4024 return -EAGAIN; 4025 } 4026 4027 static void nfs4_init_boot_verifier(const struct nfs_client *clp, 4028 nfs4_verifier *bootverf) 4029 { 4030 __be32 verf[2]; 4031 4032 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) { 4033 /* An impossible timestamp guarantees this value 4034 * will never match a generated boot time. */ 4035 verf[0] = 0; 4036 verf[1] = (__be32)(NSEC_PER_SEC + 1); 4037 } else { 4038 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); 4039 verf[0] = (__be32)nn->boot_time.tv_sec; 4040 verf[1] = (__be32)nn->boot_time.tv_nsec; 4041 } 4042 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 4043 } 4044 4045 static unsigned int 4046 nfs4_init_nonuniform_client_string(const struct nfs_client *clp, 4047 char *buf, size_t len) 4048 { 4049 unsigned int result; 4050 4051 rcu_read_lock(); 4052 result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s", 4053 clp->cl_ipaddr, 4054 rpc_peeraddr2str(clp->cl_rpcclient, 4055 RPC_DISPLAY_ADDR), 4056 rpc_peeraddr2str(clp->cl_rpcclient, 4057 RPC_DISPLAY_PROTO)); 4058 rcu_read_unlock(); 4059 return result; 4060 } 4061 4062 static unsigned int 4063 nfs4_init_uniform_client_string(const struct nfs_client *clp, 4064 char *buf, size_t len) 4065 { 4066 char *nodename = clp->cl_rpcclient->cl_nodename; 4067 4068 if (nfs4_client_id_uniquifier[0] != '\0') 4069 nodename = nfs4_client_id_uniquifier; 4070 return scnprintf(buf, len, "Linux NFSv%u.%u %s", 4071 clp->rpc_ops->version, clp->cl_minorversion, 4072 nodename); 4073 } 4074 4075 /** 4076 * nfs4_proc_setclientid - Negotiate client ID 4077 * @clp: state data structure 4078 * @program: RPC program for NFSv4 callback service 4079 * @port: IP port number for NFS4 callback service 4080 * @cred: RPC credential to use for this call 4081 * @res: where to place the result 4082 * 4083 * Returns zero, a negative errno, or a negative NFS4ERR status code. 4084 */ 4085 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, 4086 unsigned short port, struct rpc_cred *cred, 4087 struct nfs4_setclientid_res *res) 4088 { 4089 nfs4_verifier sc_verifier; 4090 struct nfs4_setclientid setclientid = { 4091 .sc_verifier = &sc_verifier, 4092 .sc_prog = program, 4093 .sc_cb_ident = clp->cl_cb_ident, 4094 }; 4095 struct rpc_message msg = { 4096 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], 4097 .rpc_argp = &setclientid, 4098 .rpc_resp = res, 4099 .rpc_cred = cred, 4100 }; 4101 int status; 4102 4103 /* nfs_client_id4 */ 4104 nfs4_init_boot_verifier(clp, &sc_verifier); 4105 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) 4106 setclientid.sc_name_len = 4107 nfs4_init_uniform_client_string(clp, 4108 setclientid.sc_name, 4109 sizeof(setclientid.sc_name)); 4110 else 4111 setclientid.sc_name_len = 4112 nfs4_init_nonuniform_client_string(clp, 4113 setclientid.sc_name, 4114 sizeof(setclientid.sc_name)); 4115 /* cb_client4 */ 4116 rcu_read_lock(); 4117 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid, 4118 sizeof(setclientid.sc_netid), 4119 rpc_peeraddr2str(clp->cl_rpcclient, 4120 RPC_DISPLAY_NETID)); 4121 rcu_read_unlock(); 4122 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr, 4123 sizeof(setclientid.sc_uaddr), "%s.%u.%u", 4124 clp->cl_ipaddr, port >> 8, port & 255); 4125 4126 dprintk("NFS call setclientid auth=%s, '%.*s'\n", 4127 clp->cl_rpcclient->cl_auth->au_ops->au_name, 4128 setclientid.sc_name_len, setclientid.sc_name); 4129 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4130 dprintk("NFS reply setclientid: %d\n", status); 4131 return status; 4132 } 4133 4134 /** 4135 * nfs4_proc_setclientid_confirm - Confirm client ID 4136 * @clp: state data structure 4137 * @res: result of a previous SETCLIENTID 4138 * @cred: RPC credential to use for this call 4139 * 4140 * Returns zero, a negative errno, or a negative NFS4ERR status code. 4141 */ 4142 int nfs4_proc_setclientid_confirm(struct nfs_client *clp, 4143 struct nfs4_setclientid_res *arg, 4144 struct rpc_cred *cred) 4145 { 4146 struct nfs_fsinfo fsinfo; 4147 struct rpc_message msg = { 4148 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM], 4149 .rpc_argp = arg, 4150 .rpc_resp = &fsinfo, 4151 .rpc_cred = cred, 4152 }; 4153 unsigned long now; 4154 int status; 4155 4156 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n", 4157 clp->cl_rpcclient->cl_auth->au_ops->au_name, 4158 clp->cl_clientid); 4159 now = jiffies; 4160 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 4161 if (status == 0) { 4162 spin_lock(&clp->cl_lock); 4163 clp->cl_lease_time = fsinfo.lease_time * HZ; 4164 clp->cl_last_renewal = now; 4165 spin_unlock(&clp->cl_lock); 4166 } 4167 dprintk("NFS reply setclientid_confirm: %d\n", status); 4168 return status; 4169 } 4170 4171 struct nfs4_delegreturndata { 4172 struct nfs4_delegreturnargs args; 4173 struct nfs4_delegreturnres res; 4174 struct nfs_fh fh; 4175 nfs4_stateid stateid; 4176 unsigned long timestamp; 4177 struct nfs_fattr fattr; 4178 int rpc_status; 4179 }; 4180 4181 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) 4182 { 4183 struct nfs4_delegreturndata *data = calldata; 4184 4185 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4186 return; 4187 4188 switch (task->tk_status) { 4189 case -NFS4ERR_STALE_STATEID: 4190 case -NFS4ERR_EXPIRED: 4191 case 0: 4192 renew_lease(data->res.server, data->timestamp); 4193 break; 4194 default: 4195 if (nfs4_async_handle_error(task, data->res.server, NULL) == 4196 -EAGAIN) { 4197 rpc_restart_call_prepare(task); 4198 return; 4199 } 4200 } 4201 data->rpc_status = task->tk_status; 4202 } 4203 4204 static void nfs4_delegreturn_release(void *calldata) 4205 { 4206 kfree(calldata); 4207 } 4208 4209 #if defined(CONFIG_NFS_V4_1) 4210 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) 4211 { 4212 struct nfs4_delegreturndata *d_data; 4213 4214 d_data = (struct nfs4_delegreturndata *)data; 4215 4216 nfs4_setup_sequence(d_data->res.server, 4217 &d_data->args.seq_args, 4218 &d_data->res.seq_res, 4219 task); 4220 } 4221 #endif /* CONFIG_NFS_V4_1 */ 4222 4223 static const struct rpc_call_ops nfs4_delegreturn_ops = { 4224 #if defined(CONFIG_NFS_V4_1) 4225 .rpc_call_prepare = nfs4_delegreturn_prepare, 4226 #endif /* CONFIG_NFS_V4_1 */ 4227 .rpc_call_done = nfs4_delegreturn_done, 4228 .rpc_release = nfs4_delegreturn_release, 4229 }; 4230 4231 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 4232 { 4233 struct nfs4_delegreturndata *data; 4234 struct nfs_server *server = NFS_SERVER(inode); 4235 struct rpc_task *task; 4236 struct rpc_message msg = { 4237 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN], 4238 .rpc_cred = cred, 4239 }; 4240 struct rpc_task_setup task_setup_data = { 4241 .rpc_client = server->client, 4242 .rpc_message = &msg, 4243 .callback_ops = &nfs4_delegreturn_ops, 4244 .flags = RPC_TASK_ASYNC, 4245 }; 4246 int status = 0; 4247 4248 data = kzalloc(sizeof(*data), GFP_NOFS); 4249 if (data == NULL) 4250 return -ENOMEM; 4251 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4252 data->args.fhandle = &data->fh; 4253 data->args.stateid = &data->stateid; 4254 data->args.bitmask = server->cache_consistency_bitmask; 4255 nfs_copy_fh(&data->fh, NFS_FH(inode)); 4256 nfs4_stateid_copy(&data->stateid, stateid); 4257 data->res.fattr = &data->fattr; 4258 data->res.server = server; 4259 nfs_fattr_init(data->res.fattr); 4260 data->timestamp = jiffies; 4261 data->rpc_status = 0; 4262 4263 task_setup_data.callback_data = data; 4264 msg.rpc_argp = &data->args; 4265 msg.rpc_resp = &data->res; 4266 task = rpc_run_task(&task_setup_data); 4267 if (IS_ERR(task)) 4268 return PTR_ERR(task); 4269 if (!issync) 4270 goto out; 4271 status = nfs4_wait_for_completion_rpc_task(task); 4272 if (status != 0) 4273 goto out; 4274 status = data->rpc_status; 4275 if (status == 0) 4276 nfs_post_op_update_inode_force_wcc(inode, &data->fattr); 4277 else 4278 nfs_refresh_inode(inode, &data->fattr); 4279 out: 4280 rpc_put_task(task); 4281 return status; 4282 } 4283 4284 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) 4285 { 4286 struct nfs_server *server = NFS_SERVER(inode); 4287 struct nfs4_exception exception = { }; 4288 int err; 4289 do { 4290 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); 4291 switch (err) { 4292 case -NFS4ERR_STALE_STATEID: 4293 case -NFS4ERR_EXPIRED: 4294 case 0: 4295 return 0; 4296 } 4297 err = nfs4_handle_exception(server, err, &exception); 4298 } while (exception.retry); 4299 return err; 4300 } 4301 4302 #define NFS4_LOCK_MINTIMEOUT (1 * HZ) 4303 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ) 4304 4305 /* 4306 * sleep, with exponential backoff, and retry the LOCK operation. 4307 */ 4308 static unsigned long 4309 nfs4_set_lock_task_retry(unsigned long timeout) 4310 { 4311 freezable_schedule_timeout_killable(timeout); 4312 timeout <<= 1; 4313 if (timeout > NFS4_LOCK_MAXTIMEOUT) 4314 return NFS4_LOCK_MAXTIMEOUT; 4315 return timeout; 4316 } 4317 4318 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4319 { 4320 struct inode *inode = state->inode; 4321 struct nfs_server *server = NFS_SERVER(inode); 4322 struct nfs_client *clp = server->nfs_client; 4323 struct nfs_lockt_args arg = { 4324 .fh = NFS_FH(inode), 4325 .fl = request, 4326 }; 4327 struct nfs_lockt_res res = { 4328 .denied = request, 4329 }; 4330 struct rpc_message msg = { 4331 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT], 4332 .rpc_argp = &arg, 4333 .rpc_resp = &res, 4334 .rpc_cred = state->owner->so_cred, 4335 }; 4336 struct nfs4_lock_state *lsp; 4337 int status; 4338 4339 arg.lock_owner.clientid = clp->cl_clientid; 4340 status = nfs4_set_lock_state(state, request); 4341 if (status != 0) 4342 goto out; 4343 lsp = request->fl_u.nfs4_fl.owner; 4344 arg.lock_owner.id = lsp->ls_seqid.owner_id; 4345 arg.lock_owner.s_dev = server->s_dev; 4346 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 4347 switch (status) { 4348 case 0: 4349 request->fl_type = F_UNLCK; 4350 break; 4351 case -NFS4ERR_DENIED: 4352 status = 0; 4353 } 4354 request->fl_ops->fl_release_private(request); 4355 out: 4356 return status; 4357 } 4358 4359 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4360 { 4361 struct nfs4_exception exception = { }; 4362 int err; 4363 4364 do { 4365 err = nfs4_handle_exception(NFS_SERVER(state->inode), 4366 _nfs4_proc_getlk(state, cmd, request), 4367 &exception); 4368 } while (exception.retry); 4369 return err; 4370 } 4371 4372 static int do_vfs_lock(struct file *file, struct file_lock *fl) 4373 { 4374 int res = 0; 4375 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { 4376 case FL_POSIX: 4377 res = posix_lock_file_wait(file, fl); 4378 break; 4379 case FL_FLOCK: 4380 res = flock_lock_file_wait(file, fl); 4381 break; 4382 default: 4383 BUG(); 4384 } 4385 return res; 4386 } 4387 4388 struct nfs4_unlockdata { 4389 struct nfs_locku_args arg; 4390 struct nfs_locku_res res; 4391 struct nfs4_lock_state *lsp; 4392 struct nfs_open_context *ctx; 4393 struct file_lock fl; 4394 const struct nfs_server *server; 4395 unsigned long timestamp; 4396 }; 4397 4398 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, 4399 struct nfs_open_context *ctx, 4400 struct nfs4_lock_state *lsp, 4401 struct nfs_seqid *seqid) 4402 { 4403 struct nfs4_unlockdata *p; 4404 struct inode *inode = lsp->ls_state->inode; 4405 4406 p = kzalloc(sizeof(*p), GFP_NOFS); 4407 if (p == NULL) 4408 return NULL; 4409 p->arg.fh = NFS_FH(inode); 4410 p->arg.fl = &p->fl; 4411 p->arg.seqid = seqid; 4412 p->res.seqid = seqid; 4413 p->arg.stateid = &lsp->ls_stateid; 4414 p->lsp = lsp; 4415 atomic_inc(&lsp->ls_count); 4416 /* Ensure we don't close file until we're done freeing locks! */ 4417 p->ctx = get_nfs_open_context(ctx); 4418 memcpy(&p->fl, fl, sizeof(p->fl)); 4419 p->server = NFS_SERVER(inode); 4420 return p; 4421 } 4422 4423 static void nfs4_locku_release_calldata(void *data) 4424 { 4425 struct nfs4_unlockdata *calldata = data; 4426 nfs_free_seqid(calldata->arg.seqid); 4427 nfs4_put_lock_state(calldata->lsp); 4428 put_nfs_open_context(calldata->ctx); 4429 kfree(calldata); 4430 } 4431 4432 static void nfs4_locku_done(struct rpc_task *task, void *data) 4433 { 4434 struct nfs4_unlockdata *calldata = data; 4435 4436 if (!nfs4_sequence_done(task, &calldata->res.seq_res)) 4437 return; 4438 switch (task->tk_status) { 4439 case 0: 4440 nfs4_stateid_copy(&calldata->lsp->ls_stateid, 4441 &calldata->res.stateid); 4442 renew_lease(calldata->server, calldata->timestamp); 4443 break; 4444 case -NFS4ERR_BAD_STATEID: 4445 case -NFS4ERR_OLD_STATEID: 4446 case -NFS4ERR_STALE_STATEID: 4447 case -NFS4ERR_EXPIRED: 4448 break; 4449 default: 4450 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) 4451 rpc_restart_call_prepare(task); 4452 } 4453 nfs_release_seqid(calldata->arg.seqid); 4454 } 4455 4456 static void nfs4_locku_prepare(struct rpc_task *task, void *data) 4457 { 4458 struct nfs4_unlockdata *calldata = data; 4459 4460 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) 4461 goto out_wait; 4462 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { 4463 /* Note: exit _without_ running nfs4_locku_done */ 4464 goto out_no_action; 4465 } 4466 calldata->timestamp = jiffies; 4467 if (nfs4_setup_sequence(calldata->server, 4468 &calldata->arg.seq_args, 4469 &calldata->res.seq_res, 4470 task) != 0) 4471 nfs_release_seqid(calldata->arg.seqid); 4472 return; 4473 out_no_action: 4474 task->tk_action = NULL; 4475 out_wait: 4476 nfs4_sequence_done(task, &calldata->res.seq_res); 4477 } 4478 4479 static const struct rpc_call_ops nfs4_locku_ops = { 4480 .rpc_call_prepare = nfs4_locku_prepare, 4481 .rpc_call_done = nfs4_locku_done, 4482 .rpc_release = nfs4_locku_release_calldata, 4483 }; 4484 4485 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, 4486 struct nfs_open_context *ctx, 4487 struct nfs4_lock_state *lsp, 4488 struct nfs_seqid *seqid) 4489 { 4490 struct nfs4_unlockdata *data; 4491 struct rpc_message msg = { 4492 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], 4493 .rpc_cred = ctx->cred, 4494 }; 4495 struct rpc_task_setup task_setup_data = { 4496 .rpc_client = NFS_CLIENT(lsp->ls_state->inode), 4497 .rpc_message = &msg, 4498 .callback_ops = &nfs4_locku_ops, 4499 .workqueue = nfsiod_workqueue, 4500 .flags = RPC_TASK_ASYNC, 4501 }; 4502 4503 /* Ensure this is an unlock - when canceling a lock, the 4504 * canceled lock is passed in, and it won't be an unlock. 4505 */ 4506 fl->fl_type = F_UNLCK; 4507 4508 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); 4509 if (data == NULL) { 4510 nfs_free_seqid(seqid); 4511 return ERR_PTR(-ENOMEM); 4512 } 4513 4514 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 4515 msg.rpc_argp = &data->arg; 4516 msg.rpc_resp = &data->res; 4517 task_setup_data.callback_data = data; 4518 return rpc_run_task(&task_setup_data); 4519 } 4520 4521 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) 4522 { 4523 struct inode *inode = state->inode; 4524 struct nfs4_state_owner *sp = state->owner; 4525 struct nfs_inode *nfsi = NFS_I(inode); 4526 struct nfs_seqid *seqid; 4527 struct nfs4_lock_state *lsp; 4528 struct rpc_task *task; 4529 int status = 0; 4530 unsigned char fl_flags = request->fl_flags; 4531 4532 status = nfs4_set_lock_state(state, request); 4533 /* Unlock _before_ we do the RPC call */ 4534 request->fl_flags |= FL_EXISTS; 4535 /* Exclude nfs_delegation_claim_locks() */ 4536 mutex_lock(&sp->so_delegreturn_mutex); 4537 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */ 4538 down_read(&nfsi->rwsem); 4539 if (do_vfs_lock(request->fl_file, request) == -ENOENT) { 4540 up_read(&nfsi->rwsem); 4541 mutex_unlock(&sp->so_delegreturn_mutex); 4542 goto out; 4543 } 4544 up_read(&nfsi->rwsem); 4545 mutex_unlock(&sp->so_delegreturn_mutex); 4546 if (status != 0) 4547 goto out; 4548 /* Is this a delegated lock? */ 4549 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) 4550 goto out; 4551 lsp = request->fl_u.nfs4_fl.owner; 4552 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); 4553 status = -ENOMEM; 4554 if (seqid == NULL) 4555 goto out; 4556 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); 4557 status = PTR_ERR(task); 4558 if (IS_ERR(task)) 4559 goto out; 4560 status = nfs4_wait_for_completion_rpc_task(task); 4561 rpc_put_task(task); 4562 out: 4563 request->fl_flags = fl_flags; 4564 return status; 4565 } 4566 4567 struct nfs4_lockdata { 4568 struct nfs_lock_args arg; 4569 struct nfs_lock_res res; 4570 struct nfs4_lock_state *lsp; 4571 struct nfs_open_context *ctx; 4572 struct file_lock fl; 4573 unsigned long timestamp; 4574 int rpc_status; 4575 int cancelled; 4576 struct nfs_server *server; 4577 }; 4578 4579 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, 4580 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp, 4581 gfp_t gfp_mask) 4582 { 4583 struct nfs4_lockdata *p; 4584 struct inode *inode = lsp->ls_state->inode; 4585 struct nfs_server *server = NFS_SERVER(inode); 4586 4587 p = kzalloc(sizeof(*p), gfp_mask); 4588 if (p == NULL) 4589 return NULL; 4590 4591 p->arg.fh = NFS_FH(inode); 4592 p->arg.fl = &p->fl; 4593 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); 4594 if (p->arg.open_seqid == NULL) 4595 goto out_free; 4596 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask); 4597 if (p->arg.lock_seqid == NULL) 4598 goto out_free_seqid; 4599 p->arg.lock_stateid = &lsp->ls_stateid; 4600 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; 4601 p->arg.lock_owner.id = lsp->ls_seqid.owner_id; 4602 p->arg.lock_owner.s_dev = server->s_dev; 4603 p->res.lock_seqid = p->arg.lock_seqid; 4604 p->lsp = lsp; 4605 p->server = server; 4606 atomic_inc(&lsp->ls_count); 4607 p->ctx = get_nfs_open_context(ctx); 4608 memcpy(&p->fl, fl, sizeof(p->fl)); 4609 return p; 4610 out_free_seqid: 4611 nfs_free_seqid(p->arg.open_seqid); 4612 out_free: 4613 kfree(p); 4614 return NULL; 4615 } 4616 4617 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) 4618 { 4619 struct nfs4_lockdata *data = calldata; 4620 struct nfs4_state *state = data->lsp->ls_state; 4621 4622 dprintk("%s: begin!\n", __func__); 4623 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) 4624 goto out_wait; 4625 /* Do we need to do an open_to_lock_owner? */ 4626 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) { 4627 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { 4628 goto out_release_lock_seqid; 4629 } 4630 data->arg.open_stateid = &state->stateid; 4631 data->arg.new_lock_owner = 1; 4632 data->res.open_seqid = data->arg.open_seqid; 4633 } else 4634 data->arg.new_lock_owner = 0; 4635 data->timestamp = jiffies; 4636 if (nfs4_setup_sequence(data->server, 4637 &data->arg.seq_args, 4638 &data->res.seq_res, 4639 task) == 0) 4640 return; 4641 nfs_release_seqid(data->arg.open_seqid); 4642 out_release_lock_seqid: 4643 nfs_release_seqid(data->arg.lock_seqid); 4644 out_wait: 4645 nfs4_sequence_done(task, &data->res.seq_res); 4646 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status); 4647 } 4648 4649 static void nfs4_lock_done(struct rpc_task *task, void *calldata) 4650 { 4651 struct nfs4_lockdata *data = calldata; 4652 4653 dprintk("%s: begin!\n", __func__); 4654 4655 if (!nfs4_sequence_done(task, &data->res.seq_res)) 4656 return; 4657 4658 data->rpc_status = task->tk_status; 4659 if (data->arg.new_lock_owner != 0) { 4660 if (data->rpc_status == 0) 4661 nfs_confirm_seqid(&data->lsp->ls_seqid, 0); 4662 else 4663 goto out; 4664 } 4665 if (data->rpc_status == 0) { 4666 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid); 4667 set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags); 4668 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); 4669 } 4670 out: 4671 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); 4672 } 4673 4674 static void nfs4_lock_release(void *calldata) 4675 { 4676 struct nfs4_lockdata *data = calldata; 4677 4678 dprintk("%s: begin!\n", __func__); 4679 nfs_free_seqid(data->arg.open_seqid); 4680 if (data->cancelled != 0) { 4681 struct rpc_task *task; 4682 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 4683 data->arg.lock_seqid); 4684 if (!IS_ERR(task)) 4685 rpc_put_task_async(task); 4686 dprintk("%s: cancelling lock!\n", __func__); 4687 } else 4688 nfs_free_seqid(data->arg.lock_seqid); 4689 nfs4_put_lock_state(data->lsp); 4690 put_nfs_open_context(data->ctx); 4691 kfree(data); 4692 dprintk("%s: done!\n", __func__); 4693 } 4694 4695 static const struct rpc_call_ops nfs4_lock_ops = { 4696 .rpc_call_prepare = nfs4_lock_prepare, 4697 .rpc_call_done = nfs4_lock_done, 4698 .rpc_release = nfs4_lock_release, 4699 }; 4700 4701 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 4702 { 4703 switch (error) { 4704 case -NFS4ERR_ADMIN_REVOKED: 4705 case -NFS4ERR_BAD_STATEID: 4706 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4707 if (new_lock_owner != 0 || 4708 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) 4709 nfs4_schedule_stateid_recovery(server, lsp->ls_state); 4710 break; 4711 case -NFS4ERR_STALE_STATEID: 4712 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4713 case -NFS4ERR_EXPIRED: 4714 nfs4_schedule_lease_recovery(server->nfs_client); 4715 }; 4716 } 4717 4718 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type) 4719 { 4720 struct nfs4_lockdata *data; 4721 struct rpc_task *task; 4722 struct rpc_message msg = { 4723 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK], 4724 .rpc_cred = state->owner->so_cred, 4725 }; 4726 struct rpc_task_setup task_setup_data = { 4727 .rpc_client = NFS_CLIENT(state->inode), 4728 .rpc_message = &msg, 4729 .callback_ops = &nfs4_lock_ops, 4730 .workqueue = nfsiod_workqueue, 4731 .flags = RPC_TASK_ASYNC, 4732 }; 4733 int ret; 4734 4735 dprintk("%s: begin!\n", __func__); 4736 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file), 4737 fl->fl_u.nfs4_fl.owner, 4738 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS); 4739 if (data == NULL) 4740 return -ENOMEM; 4741 if (IS_SETLKW(cmd)) 4742 data->arg.block = 1; 4743 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1); 4744 msg.rpc_argp = &data->arg; 4745 msg.rpc_resp = &data->res; 4746 task_setup_data.callback_data = data; 4747 if (recovery_type > NFS_LOCK_NEW) { 4748 if (recovery_type == NFS_LOCK_RECLAIM) 4749 data->arg.reclaim = NFS_LOCK_RECLAIM; 4750 nfs4_set_sequence_privileged(&data->arg.seq_args); 4751 } 4752 task = rpc_run_task(&task_setup_data); 4753 if (IS_ERR(task)) 4754 return PTR_ERR(task); 4755 ret = nfs4_wait_for_completion_rpc_task(task); 4756 if (ret == 0) { 4757 ret = data->rpc_status; 4758 if (ret) 4759 nfs4_handle_setlk_error(data->server, data->lsp, 4760 data->arg.new_lock_owner, ret); 4761 } else 4762 data->cancelled = 1; 4763 rpc_put_task(task); 4764 dprintk("%s: done, ret = %d!\n", __func__, ret); 4765 return ret; 4766 } 4767 4768 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) 4769 { 4770 struct nfs_server *server = NFS_SERVER(state->inode); 4771 struct nfs4_exception exception = { 4772 .inode = state->inode, 4773 }; 4774 int err; 4775 4776 do { 4777 /* Cache the lock if possible... */ 4778 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 4779 return 0; 4780 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM); 4781 if (err != -NFS4ERR_DELAY) 4782 break; 4783 nfs4_handle_exception(server, err, &exception); 4784 } while (exception.retry); 4785 return err; 4786 } 4787 4788 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) 4789 { 4790 struct nfs_server *server = NFS_SERVER(state->inode); 4791 struct nfs4_exception exception = { 4792 .inode = state->inode, 4793 }; 4794 int err; 4795 4796 err = nfs4_set_lock_state(state, request); 4797 if (err != 0) 4798 return err; 4799 do { 4800 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0) 4801 return 0; 4802 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED); 4803 switch (err) { 4804 default: 4805 goto out; 4806 case -NFS4ERR_GRACE: 4807 case -NFS4ERR_DELAY: 4808 nfs4_handle_exception(server, err, &exception); 4809 err = 0; 4810 } 4811 } while (exception.retry); 4812 out: 4813 return err; 4814 } 4815 4816 #if defined(CONFIG_NFS_V4_1) 4817 /** 4818 * nfs41_check_expired_locks - possibly free a lock stateid 4819 * 4820 * @state: NFSv4 state for an inode 4821 * 4822 * Returns NFS_OK if recovery for this stateid is now finished. 4823 * Otherwise a negative NFS4ERR value is returned. 4824 */ 4825 static int nfs41_check_expired_locks(struct nfs4_state *state) 4826 { 4827 int status, ret = -NFS4ERR_BAD_STATEID; 4828 struct nfs4_lock_state *lsp; 4829 struct nfs_server *server = NFS_SERVER(state->inode); 4830 4831 list_for_each_entry(lsp, &state->lock_states, ls_locks) { 4832 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { 4833 status = nfs41_test_stateid(server, &lsp->ls_stateid); 4834 if (status != NFS_OK) { 4835 /* Free the stateid unless the server 4836 * informs us the stateid is unrecognized. */ 4837 if (status != -NFS4ERR_BAD_STATEID) 4838 nfs41_free_stateid(server, 4839 &lsp->ls_stateid); 4840 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); 4841 ret = status; 4842 } 4843 } 4844 }; 4845 4846 return ret; 4847 } 4848 4849 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request) 4850 { 4851 int status = NFS_OK; 4852 4853 if (test_bit(LK_STATE_IN_USE, &state->flags)) 4854 status = nfs41_check_expired_locks(state); 4855 if (status != NFS_OK) 4856 status = nfs4_lock_expired(state, request); 4857 return status; 4858 } 4859 #endif 4860 4861 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4862 { 4863 struct nfs4_state_owner *sp = state->owner; 4864 struct nfs_inode *nfsi = NFS_I(state->inode); 4865 unsigned char fl_flags = request->fl_flags; 4866 unsigned int seq; 4867 int status = -ENOLCK; 4868 4869 if ((fl_flags & FL_POSIX) && 4870 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags)) 4871 goto out; 4872 /* Is this a delegated open? */ 4873 status = nfs4_set_lock_state(state, request); 4874 if (status != 0) 4875 goto out; 4876 request->fl_flags |= FL_ACCESS; 4877 status = do_vfs_lock(request->fl_file, request); 4878 if (status < 0) 4879 goto out; 4880 down_read(&nfsi->rwsem); 4881 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { 4882 /* Yes: cache locks! */ 4883 /* ...but avoid races with delegation recall... */ 4884 request->fl_flags = fl_flags & ~FL_SLEEP; 4885 status = do_vfs_lock(request->fl_file, request); 4886 goto out_unlock; 4887 } 4888 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 4889 up_read(&nfsi->rwsem); 4890 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); 4891 if (status != 0) 4892 goto out; 4893 down_read(&nfsi->rwsem); 4894 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) { 4895 status = -NFS4ERR_DELAY; 4896 goto out_unlock; 4897 } 4898 /* Note: we always want to sleep here! */ 4899 request->fl_flags = fl_flags | FL_SLEEP; 4900 if (do_vfs_lock(request->fl_file, request) < 0) 4901 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock " 4902 "manager!\n", __func__); 4903 out_unlock: 4904 up_read(&nfsi->rwsem); 4905 out: 4906 request->fl_flags = fl_flags; 4907 return status; 4908 } 4909 4910 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) 4911 { 4912 struct nfs4_exception exception = { 4913 .state = state, 4914 .inode = state->inode, 4915 }; 4916 int err; 4917 4918 do { 4919 err = _nfs4_proc_setlk(state, cmd, request); 4920 if (err == -NFS4ERR_DENIED) 4921 err = -EAGAIN; 4922 err = nfs4_handle_exception(NFS_SERVER(state->inode), 4923 err, &exception); 4924 } while (exception.retry); 4925 return err; 4926 } 4927 4928 static int 4929 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) 4930 { 4931 struct nfs_open_context *ctx; 4932 struct nfs4_state *state; 4933 unsigned long timeout = NFS4_LOCK_MINTIMEOUT; 4934 int status; 4935 4936 /* verify open state */ 4937 ctx = nfs_file_open_context(filp); 4938 state = ctx->state; 4939 4940 if (request->fl_start < 0 || request->fl_end < 0) 4941 return -EINVAL; 4942 4943 if (IS_GETLK(cmd)) { 4944 if (state != NULL) 4945 return nfs4_proc_getlk(state, F_GETLK, request); 4946 return 0; 4947 } 4948 4949 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd))) 4950 return -EINVAL; 4951 4952 if (request->fl_type == F_UNLCK) { 4953 if (state != NULL) 4954 return nfs4_proc_unlck(state, cmd, request); 4955 return 0; 4956 } 4957 4958 if (state == NULL) 4959 return -ENOLCK; 4960 /* 4961 * Don't rely on the VFS having checked the file open mode, 4962 * since it won't do this for flock() locks. 4963 */ 4964 switch (request->fl_type) { 4965 case F_RDLCK: 4966 if (!(filp->f_mode & FMODE_READ)) 4967 return -EBADF; 4968 break; 4969 case F_WRLCK: 4970 if (!(filp->f_mode & FMODE_WRITE)) 4971 return -EBADF; 4972 } 4973 4974 do { 4975 status = nfs4_proc_setlk(state, cmd, request); 4976 if ((status != -EAGAIN) || IS_SETLK(cmd)) 4977 break; 4978 timeout = nfs4_set_lock_task_retry(timeout); 4979 status = -ERESTARTSYS; 4980 if (signalled()) 4981 break; 4982 } while(status < 0); 4983 return status; 4984 } 4985 4986 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) 4987 { 4988 struct nfs_server *server = NFS_SERVER(state->inode); 4989 struct nfs4_exception exception = { }; 4990 int err; 4991 4992 err = nfs4_set_lock_state(state, fl); 4993 if (err != 0) 4994 goto out; 4995 do { 4996 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); 4997 switch (err) { 4998 default: 4999 printk(KERN_ERR "NFS: %s: unhandled error " 5000 "%d.\n", __func__, err); 5001 case 0: 5002 case -ESTALE: 5003 goto out; 5004 case -NFS4ERR_STALE_CLIENTID: 5005 case -NFS4ERR_STALE_STATEID: 5006 set_bit(NFS_DELEGATED_STATE, &state->flags); 5007 case -NFS4ERR_EXPIRED: 5008 nfs4_schedule_lease_recovery(server->nfs_client); 5009 err = -EAGAIN; 5010 goto out; 5011 case -NFS4ERR_BADSESSION: 5012 case -NFS4ERR_BADSLOT: 5013 case -NFS4ERR_BAD_HIGH_SLOT: 5014 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 5015 case -NFS4ERR_DEADSESSION: 5016 set_bit(NFS_DELEGATED_STATE, &state->flags); 5017 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); 5018 err = -EAGAIN; 5019 goto out; 5020 case -NFS4ERR_DELEG_REVOKED: 5021 case -NFS4ERR_ADMIN_REVOKED: 5022 case -NFS4ERR_BAD_STATEID: 5023 case -NFS4ERR_OPENMODE: 5024 nfs4_schedule_stateid_recovery(server, state); 5025 err = 0; 5026 goto out; 5027 case -ENOMEM: 5028 case -NFS4ERR_DENIED: 5029 /* kill_proc(fl->fl_pid, SIGLOST, 1); */ 5030 err = 0; 5031 goto out; 5032 } 5033 set_bit(NFS_DELEGATED_STATE, &state->flags); 5034 err = nfs4_handle_exception(server, err, &exception); 5035 } while (exception.retry); 5036 out: 5037 return err; 5038 } 5039 5040 struct nfs_release_lockowner_data { 5041 struct nfs4_lock_state *lsp; 5042 struct nfs_server *server; 5043 struct nfs_release_lockowner_args args; 5044 }; 5045 5046 static void nfs4_release_lockowner_release(void *calldata) 5047 { 5048 struct nfs_release_lockowner_data *data = calldata; 5049 nfs4_free_lock_state(data->server, data->lsp); 5050 kfree(calldata); 5051 } 5052 5053 static const struct rpc_call_ops nfs4_release_lockowner_ops = { 5054 .rpc_release = nfs4_release_lockowner_release, 5055 }; 5056 5057 int nfs4_release_lockowner(struct nfs4_lock_state *lsp) 5058 { 5059 struct nfs_server *server = lsp->ls_state->owner->so_server; 5060 struct nfs_release_lockowner_data *data; 5061 struct rpc_message msg = { 5062 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER], 5063 }; 5064 5065 if (server->nfs_client->cl_mvops->minor_version != 0) 5066 return -EINVAL; 5067 data = kmalloc(sizeof(*data), GFP_NOFS); 5068 if (!data) 5069 return -ENOMEM; 5070 data->lsp = lsp; 5071 data->server = server; 5072 data->args.lock_owner.clientid = server->nfs_client->cl_clientid; 5073 data->args.lock_owner.id = lsp->ls_seqid.owner_id; 5074 data->args.lock_owner.s_dev = server->s_dev; 5075 msg.rpc_argp = &data->args; 5076 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); 5077 return 0; 5078 } 5079 5080 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" 5081 5082 static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, 5083 const void *buf, size_t buflen, 5084 int flags, int type) 5085 { 5086 if (strcmp(key, "") != 0) 5087 return -EINVAL; 5088 5089 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen); 5090 } 5091 5092 static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, 5093 void *buf, size_t buflen, int type) 5094 { 5095 if (strcmp(key, "") != 0) 5096 return -EINVAL; 5097 5098 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen); 5099 } 5100 5101 static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, 5102 size_t list_len, const char *name, 5103 size_t name_len, int type) 5104 { 5105 size_t len = sizeof(XATTR_NAME_NFSV4_ACL); 5106 5107 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) 5108 return 0; 5109 5110 if (list && len <= list_len) 5111 memcpy(list, XATTR_NAME_NFSV4_ACL, len); 5112 return len; 5113 } 5114 5115 /* 5116 * nfs_fhget will use either the mounted_on_fileid or the fileid 5117 */ 5118 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) 5119 { 5120 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || 5121 (fattr->valid & NFS_ATTR_FATTR_FILEID)) && 5122 (fattr->valid & NFS_ATTR_FATTR_FSID) && 5123 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS))) 5124 return; 5125 5126 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | 5127 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL; 5128 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; 5129 fattr->nlink = 2; 5130 } 5131 5132 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 5133 const struct qstr *name, 5134 struct nfs4_fs_locations *fs_locations, 5135 struct page *page) 5136 { 5137 struct nfs_server *server = NFS_SERVER(dir); 5138 u32 bitmask[2] = { 5139 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, 5140 }; 5141 struct nfs4_fs_locations_arg args = { 5142 .dir_fh = NFS_FH(dir), 5143 .name = name, 5144 .page = page, 5145 .bitmask = bitmask, 5146 }; 5147 struct nfs4_fs_locations_res res = { 5148 .fs_locations = fs_locations, 5149 }; 5150 struct rpc_message msg = { 5151 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS], 5152 .rpc_argp = &args, 5153 .rpc_resp = &res, 5154 }; 5155 int status; 5156 5157 dprintk("%s: start\n", __func__); 5158 5159 /* Ask for the fileid of the absent filesystem if mounted_on_fileid 5160 * is not supported */ 5161 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) 5162 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; 5163 else 5164 bitmask[0] |= FATTR4_WORD0_FILEID; 5165 5166 nfs_fattr_init(&fs_locations->fattr); 5167 fs_locations->server = server; 5168 fs_locations->nlocations = 0; 5169 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); 5170 dprintk("%s: returned status = %d\n", __func__, status); 5171 return status; 5172 } 5173 5174 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, 5175 const struct qstr *name, 5176 struct nfs4_fs_locations *fs_locations, 5177 struct page *page) 5178 { 5179 struct nfs4_exception exception = { }; 5180 int err; 5181 do { 5182 err = nfs4_handle_exception(NFS_SERVER(dir), 5183 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page), 5184 &exception); 5185 } while (exception.retry); 5186 return err; 5187 } 5188 5189 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) 5190 { 5191 int status; 5192 struct nfs4_secinfo_arg args = { 5193 .dir_fh = NFS_FH(dir), 5194 .name = name, 5195 }; 5196 struct nfs4_secinfo_res res = { 5197 .flavors = flavors, 5198 }; 5199 struct rpc_message msg = { 5200 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO], 5201 .rpc_argp = &args, 5202 .rpc_resp = &res, 5203 }; 5204 5205 dprintk("NFS call secinfo %s\n", name->name); 5206 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); 5207 dprintk("NFS reply secinfo: %d\n", status); 5208 return status; 5209 } 5210 5211 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, 5212 struct nfs4_secinfo_flavors *flavors) 5213 { 5214 struct nfs4_exception exception = { }; 5215 int err; 5216 do { 5217 err = nfs4_handle_exception(NFS_SERVER(dir), 5218 _nfs4_proc_secinfo(dir, name, flavors), 5219 &exception); 5220 } while (exception.retry); 5221 return err; 5222 } 5223 5224 #ifdef CONFIG_NFS_V4_1 5225 /* 5226 * Check the exchange flags returned by the server for invalid flags, having 5227 * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or 5228 * DS flags set. 5229 */ 5230 static int nfs4_check_cl_exchange_flags(u32 flags) 5231 { 5232 if (flags & ~EXCHGID4_FLAG_MASK_R) 5233 goto out_inval; 5234 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && 5235 (flags & EXCHGID4_FLAG_USE_NON_PNFS)) 5236 goto out_inval; 5237 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) 5238 goto out_inval; 5239 return NFS_OK; 5240 out_inval: 5241 return -NFS4ERR_INVAL; 5242 } 5243 5244 static bool 5245 nfs41_same_server_scope(struct nfs41_server_scope *a, 5246 struct nfs41_server_scope *b) 5247 { 5248 if (a->server_scope_sz == b->server_scope_sz && 5249 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 5250 return true; 5251 5252 return false; 5253 } 5254 5255 /* 5256 * nfs4_proc_bind_conn_to_session() 5257 * 5258 * The 4.1 client currently uses the same TCP connection for the 5259 * fore and backchannel. 5260 */ 5261 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) 5262 { 5263 int status; 5264 struct nfs41_bind_conn_to_session_res res; 5265 struct rpc_message msg = { 5266 .rpc_proc = 5267 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], 5268 .rpc_argp = clp, 5269 .rpc_resp = &res, 5270 .rpc_cred = cred, 5271 }; 5272 5273 dprintk("--> %s\n", __func__); 5274 5275 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); 5276 if (unlikely(res.session == NULL)) { 5277 status = -ENOMEM; 5278 goto out; 5279 } 5280 5281 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5282 if (status == 0) { 5283 if (memcmp(res.session->sess_id.data, 5284 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { 5285 dprintk("NFS: %s: Session ID mismatch\n", __func__); 5286 status = -EIO; 5287 goto out_session; 5288 } 5289 if (res.dir != NFS4_CDFS4_BOTH) { 5290 dprintk("NFS: %s: Unexpected direction from server\n", 5291 __func__); 5292 status = -EIO; 5293 goto out_session; 5294 } 5295 if (res.use_conn_in_rdma_mode) { 5296 dprintk("NFS: %s: Server returned RDMA mode = true\n", 5297 __func__); 5298 status = -EIO; 5299 goto out_session; 5300 } 5301 } 5302 out_session: 5303 kfree(res.session); 5304 out: 5305 dprintk("<-- %s status= %d\n", __func__, status); 5306 return status; 5307 } 5308 5309 /* 5310 * nfs4_proc_exchange_id() 5311 * 5312 * Returns zero, a negative errno, or a negative NFS4ERR status code. 5313 * 5314 * Since the clientid has expired, all compounds using sessions 5315 * associated with the stale clientid will be returning 5316 * NFS4ERR_BADSESSION in the sequence operation, and will therefore 5317 * be in some phase of session reset. 5318 */ 5319 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) 5320 { 5321 nfs4_verifier verifier; 5322 struct nfs41_exchange_id_args args = { 5323 .verifier = &verifier, 5324 .client = clp, 5325 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, 5326 }; 5327 struct nfs41_exchange_id_res res = { 5328 0 5329 }; 5330 int status; 5331 struct rpc_message msg = { 5332 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID], 5333 .rpc_argp = &args, 5334 .rpc_resp = &res, 5335 .rpc_cred = cred, 5336 }; 5337 5338 nfs4_init_boot_verifier(clp, &verifier); 5339 args.id_len = nfs4_init_uniform_client_string(clp, args.id, 5340 sizeof(args.id)); 5341 dprintk("NFS call exchange_id auth=%s, '%.*s'\n", 5342 clp->cl_rpcclient->cl_auth->au_ops->au_name, 5343 args.id_len, args.id); 5344 5345 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), 5346 GFP_NOFS); 5347 if (unlikely(res.server_owner == NULL)) { 5348 status = -ENOMEM; 5349 goto out; 5350 } 5351 5352 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope), 5353 GFP_NOFS); 5354 if (unlikely(res.server_scope == NULL)) { 5355 status = -ENOMEM; 5356 goto out_server_owner; 5357 } 5358 5359 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS); 5360 if (unlikely(res.impl_id == NULL)) { 5361 status = -ENOMEM; 5362 goto out_server_scope; 5363 } 5364 5365 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5366 if (status == 0) 5367 status = nfs4_check_cl_exchange_flags(res.flags); 5368 5369 if (status == 0) { 5370 clp->cl_clientid = res.clientid; 5371 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R); 5372 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) 5373 clp->cl_seqid = res.seqid; 5374 5375 kfree(clp->cl_serverowner); 5376 clp->cl_serverowner = res.server_owner; 5377 res.server_owner = NULL; 5378 5379 /* use the most recent implementation id */ 5380 kfree(clp->cl_implid); 5381 clp->cl_implid = res.impl_id; 5382 5383 if (clp->cl_serverscope != NULL && 5384 !nfs41_same_server_scope(clp->cl_serverscope, 5385 res.server_scope)) { 5386 dprintk("%s: server_scope mismatch detected\n", 5387 __func__); 5388 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 5389 kfree(clp->cl_serverscope); 5390 clp->cl_serverscope = NULL; 5391 } 5392 5393 if (clp->cl_serverscope == NULL) { 5394 clp->cl_serverscope = res.server_scope; 5395 goto out; 5396 } 5397 } else 5398 kfree(res.impl_id); 5399 5400 out_server_owner: 5401 kfree(res.server_owner); 5402 out_server_scope: 5403 kfree(res.server_scope); 5404 out: 5405 if (clp->cl_implid != NULL) 5406 dprintk("NFS reply exchange_id: Server Implementation ID: " 5407 "domain: %s, name: %s, date: %llu,%u\n", 5408 clp->cl_implid->domain, clp->cl_implid->name, 5409 clp->cl_implid->date.seconds, 5410 clp->cl_implid->date.nseconds); 5411 dprintk("NFS reply exchange_id: %d\n", status); 5412 return status; 5413 } 5414 5415 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp, 5416 struct rpc_cred *cred) 5417 { 5418 struct rpc_message msg = { 5419 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID], 5420 .rpc_argp = clp, 5421 .rpc_cred = cred, 5422 }; 5423 int status; 5424 5425 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5426 if (status) 5427 dprintk("NFS: Got error %d from the server %s on " 5428 "DESTROY_CLIENTID.", status, clp->cl_hostname); 5429 return status; 5430 } 5431 5432 static int nfs4_proc_destroy_clientid(struct nfs_client *clp, 5433 struct rpc_cred *cred) 5434 { 5435 unsigned int loop; 5436 int ret; 5437 5438 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) { 5439 ret = _nfs4_proc_destroy_clientid(clp, cred); 5440 switch (ret) { 5441 case -NFS4ERR_DELAY: 5442 case -NFS4ERR_CLIENTID_BUSY: 5443 ssleep(1); 5444 break; 5445 default: 5446 return ret; 5447 } 5448 } 5449 return 0; 5450 } 5451 5452 int nfs4_destroy_clientid(struct nfs_client *clp) 5453 { 5454 struct rpc_cred *cred; 5455 int ret = 0; 5456 5457 if (clp->cl_mvops->minor_version < 1) 5458 goto out; 5459 if (clp->cl_exchange_flags == 0) 5460 goto out; 5461 if (clp->cl_preserve_clid) 5462 goto out; 5463 cred = nfs4_get_exchange_id_cred(clp); 5464 ret = nfs4_proc_destroy_clientid(clp, cred); 5465 if (cred) 5466 put_rpccred(cred); 5467 switch (ret) { 5468 case 0: 5469 case -NFS4ERR_STALE_CLIENTID: 5470 clp->cl_exchange_flags = 0; 5471 } 5472 out: 5473 return ret; 5474 } 5475 5476 struct nfs4_get_lease_time_data { 5477 struct nfs4_get_lease_time_args *args; 5478 struct nfs4_get_lease_time_res *res; 5479 struct nfs_client *clp; 5480 }; 5481 5482 static void nfs4_get_lease_time_prepare(struct rpc_task *task, 5483 void *calldata) 5484 { 5485 struct nfs4_get_lease_time_data *data = 5486 (struct nfs4_get_lease_time_data *)calldata; 5487 5488 dprintk("--> %s\n", __func__); 5489 /* just setup sequence, do not trigger session recovery 5490 since we're invoked within one */ 5491 nfs41_setup_sequence(data->clp->cl_session, 5492 &data->args->la_seq_args, 5493 &data->res->lr_seq_res, 5494 task); 5495 dprintk("<-- %s\n", __func__); 5496 } 5497 5498 /* 5499 * Called from nfs4_state_manager thread for session setup, so don't recover 5500 * from sequence operation or clientid errors. 5501 */ 5502 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) 5503 { 5504 struct nfs4_get_lease_time_data *data = 5505 (struct nfs4_get_lease_time_data *)calldata; 5506 5507 dprintk("--> %s\n", __func__); 5508 if (!nfs41_sequence_done(task, &data->res->lr_seq_res)) 5509 return; 5510 switch (task->tk_status) { 5511 case -NFS4ERR_DELAY: 5512 case -NFS4ERR_GRACE: 5513 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); 5514 rpc_delay(task, NFS4_POLL_RETRY_MIN); 5515 task->tk_status = 0; 5516 /* fall through */ 5517 case -NFS4ERR_RETRY_UNCACHED_REP: 5518 rpc_restart_call_prepare(task); 5519 return; 5520 } 5521 dprintk("<-- %s\n", __func__); 5522 } 5523 5524 static const struct rpc_call_ops nfs4_get_lease_time_ops = { 5525 .rpc_call_prepare = nfs4_get_lease_time_prepare, 5526 .rpc_call_done = nfs4_get_lease_time_done, 5527 }; 5528 5529 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo) 5530 { 5531 struct rpc_task *task; 5532 struct nfs4_get_lease_time_args args; 5533 struct nfs4_get_lease_time_res res = { 5534 .lr_fsinfo = fsinfo, 5535 }; 5536 struct nfs4_get_lease_time_data data = { 5537 .args = &args, 5538 .res = &res, 5539 .clp = clp, 5540 }; 5541 struct rpc_message msg = { 5542 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME], 5543 .rpc_argp = &args, 5544 .rpc_resp = &res, 5545 }; 5546 struct rpc_task_setup task_setup = { 5547 .rpc_client = clp->cl_rpcclient, 5548 .rpc_message = &msg, 5549 .callback_ops = &nfs4_get_lease_time_ops, 5550 .callback_data = &data, 5551 .flags = RPC_TASK_TIMEOUT, 5552 }; 5553 int status; 5554 5555 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0); 5556 nfs4_set_sequence_privileged(&args.la_seq_args); 5557 dprintk("--> %s\n", __func__); 5558 task = rpc_run_task(&task_setup); 5559 5560 if (IS_ERR(task)) 5561 status = PTR_ERR(task); 5562 else { 5563 status = task->tk_status; 5564 rpc_put_task(task); 5565 } 5566 dprintk("<-- %s return %d\n", __func__, status); 5567 5568 return status; 5569 } 5570 5571 /* 5572 * Initialize the values to be used by the client in CREATE_SESSION 5573 * If nfs4_init_session set the fore channel request and response sizes, 5574 * use them. 5575 * 5576 * Set the back channel max_resp_sz_cached to zero to force the client to 5577 * always set csa_cachethis to FALSE because the current implementation 5578 * of the back channel DRC only supports caching the CB_SEQUENCE operation. 5579 */ 5580 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) 5581 { 5582 struct nfs4_session *session = args->client->cl_session; 5583 unsigned int mxrqst_sz = session->fc_target_max_rqst_sz, 5584 mxresp_sz = session->fc_target_max_resp_sz; 5585 5586 if (mxrqst_sz == 0) 5587 mxrqst_sz = NFS_MAX_FILE_IO_SIZE; 5588 if (mxresp_sz == 0) 5589 mxresp_sz = NFS_MAX_FILE_IO_SIZE; 5590 /* Fore channel attributes */ 5591 args->fc_attrs.max_rqst_sz = mxrqst_sz; 5592 args->fc_attrs.max_resp_sz = mxresp_sz; 5593 args->fc_attrs.max_ops = NFS4_MAX_OPS; 5594 args->fc_attrs.max_reqs = max_session_slots; 5595 5596 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u " 5597 "max_ops=%u max_reqs=%u\n", 5598 __func__, 5599 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, 5600 args->fc_attrs.max_ops, args->fc_attrs.max_reqs); 5601 5602 /* Back channel attributes */ 5603 args->bc_attrs.max_rqst_sz = PAGE_SIZE; 5604 args->bc_attrs.max_resp_sz = PAGE_SIZE; 5605 args->bc_attrs.max_resp_sz_cached = 0; 5606 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS; 5607 args->bc_attrs.max_reqs = 1; 5608 5609 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u " 5610 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", 5611 __func__, 5612 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, 5613 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, 5614 args->bc_attrs.max_reqs); 5615 } 5616 5617 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) 5618 { 5619 struct nfs4_channel_attrs *sent = &args->fc_attrs; 5620 struct nfs4_channel_attrs *rcvd = &session->fc_attrs; 5621 5622 if (rcvd->max_resp_sz > sent->max_resp_sz) 5623 return -EINVAL; 5624 /* 5625 * Our requested max_ops is the minimum we need; we're not 5626 * prepared to break up compounds into smaller pieces than that. 5627 * So, no point even trying to continue if the server won't 5628 * cooperate: 5629 */ 5630 if (rcvd->max_ops < sent->max_ops) 5631 return -EINVAL; 5632 if (rcvd->max_reqs == 0) 5633 return -EINVAL; 5634 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE) 5635 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE; 5636 return 0; 5637 } 5638 5639 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) 5640 { 5641 struct nfs4_channel_attrs *sent = &args->bc_attrs; 5642 struct nfs4_channel_attrs *rcvd = &session->bc_attrs; 5643 5644 if (rcvd->max_rqst_sz > sent->max_rqst_sz) 5645 return -EINVAL; 5646 if (rcvd->max_resp_sz < sent->max_resp_sz) 5647 return -EINVAL; 5648 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) 5649 return -EINVAL; 5650 /* These would render the backchannel useless: */ 5651 if (rcvd->max_ops != sent->max_ops) 5652 return -EINVAL; 5653 if (rcvd->max_reqs != sent->max_reqs) 5654 return -EINVAL; 5655 return 0; 5656 } 5657 5658 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, 5659 struct nfs4_session *session) 5660 { 5661 int ret; 5662 5663 ret = nfs4_verify_fore_channel_attrs(args, session); 5664 if (ret) 5665 return ret; 5666 return nfs4_verify_back_channel_attrs(args, session); 5667 } 5668 5669 static int _nfs4_proc_create_session(struct nfs_client *clp, 5670 struct rpc_cred *cred) 5671 { 5672 struct nfs4_session *session = clp->cl_session; 5673 struct nfs41_create_session_args args = { 5674 .client = clp, 5675 .cb_program = NFS4_CALLBACK, 5676 }; 5677 struct nfs41_create_session_res res = { 5678 .client = clp, 5679 }; 5680 struct rpc_message msg = { 5681 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 5682 .rpc_argp = &args, 5683 .rpc_resp = &res, 5684 .rpc_cred = cred, 5685 }; 5686 int status; 5687 5688 nfs4_init_channel_attrs(&args); 5689 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN); 5690 5691 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5692 5693 if (!status) { 5694 /* Verify the session's negotiated channel_attrs values */ 5695 status = nfs4_verify_channel_attrs(&args, session); 5696 /* Increment the clientid slot sequence id */ 5697 clp->cl_seqid++; 5698 } 5699 5700 return status; 5701 } 5702 5703 /* 5704 * Issues a CREATE_SESSION operation to the server. 5705 * It is the responsibility of the caller to verify the session is 5706 * expired before calling this routine. 5707 */ 5708 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred) 5709 { 5710 int status; 5711 unsigned *ptr; 5712 struct nfs4_session *session = clp->cl_session; 5713 5714 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 5715 5716 status = _nfs4_proc_create_session(clp, cred); 5717 if (status) 5718 goto out; 5719 5720 /* Init or reset the session slot tables */ 5721 status = nfs4_setup_session_slot_tables(session); 5722 dprintk("slot table setup returned %d\n", status); 5723 if (status) 5724 goto out; 5725 5726 ptr = (unsigned *)&session->sess_id.data[0]; 5727 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__, 5728 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]); 5729 out: 5730 dprintk("<-- %s\n", __func__); 5731 return status; 5732 } 5733 5734 /* 5735 * Issue the over-the-wire RPC DESTROY_SESSION. 5736 * The caller must serialize access to this routine. 5737 */ 5738 int nfs4_proc_destroy_session(struct nfs4_session *session, 5739 struct rpc_cred *cred) 5740 { 5741 struct rpc_message msg = { 5742 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION], 5743 .rpc_argp = session, 5744 .rpc_cred = cred, 5745 }; 5746 int status = 0; 5747 5748 dprintk("--> nfs4_proc_destroy_session\n"); 5749 5750 /* session is still being setup */ 5751 if (session->clp->cl_cons_state != NFS_CS_READY) 5752 return status; 5753 5754 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5755 5756 if (status) 5757 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. " 5758 "Session has been destroyed regardless...\n", status); 5759 5760 dprintk("<-- nfs4_proc_destroy_session\n"); 5761 return status; 5762 } 5763 5764 /* 5765 * Renew the cl_session lease. 5766 */ 5767 struct nfs4_sequence_data { 5768 struct nfs_client *clp; 5769 struct nfs4_sequence_args args; 5770 struct nfs4_sequence_res res; 5771 }; 5772 5773 static void nfs41_sequence_release(void *data) 5774 { 5775 struct nfs4_sequence_data *calldata = data; 5776 struct nfs_client *clp = calldata->clp; 5777 5778 if (atomic_read(&clp->cl_count) > 1) 5779 nfs4_schedule_state_renewal(clp); 5780 nfs_put_client(clp); 5781 kfree(calldata); 5782 } 5783 5784 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp) 5785 { 5786 switch(task->tk_status) { 5787 case -NFS4ERR_DELAY: 5788 rpc_delay(task, NFS4_POLL_RETRY_MAX); 5789 return -EAGAIN; 5790 default: 5791 nfs4_schedule_lease_recovery(clp); 5792 } 5793 return 0; 5794 } 5795 5796 static void nfs41_sequence_call_done(struct rpc_task *task, void *data) 5797 { 5798 struct nfs4_sequence_data *calldata = data; 5799 struct nfs_client *clp = calldata->clp; 5800 5801 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp)) 5802 return; 5803 5804 if (task->tk_status < 0) { 5805 dprintk("%s ERROR %d\n", __func__, task->tk_status); 5806 if (atomic_read(&clp->cl_count) == 1) 5807 goto out; 5808 5809 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) { 5810 rpc_restart_call_prepare(task); 5811 return; 5812 } 5813 } 5814 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred); 5815 out: 5816 dprintk("<-- %s\n", __func__); 5817 } 5818 5819 static void nfs41_sequence_prepare(struct rpc_task *task, void *data) 5820 { 5821 struct nfs4_sequence_data *calldata = data; 5822 struct nfs_client *clp = calldata->clp; 5823 struct nfs4_sequence_args *args; 5824 struct nfs4_sequence_res *res; 5825 5826 args = task->tk_msg.rpc_argp; 5827 res = task->tk_msg.rpc_resp; 5828 5829 nfs41_setup_sequence(clp->cl_session, args, res, task); 5830 } 5831 5832 static const struct rpc_call_ops nfs41_sequence_ops = { 5833 .rpc_call_done = nfs41_sequence_call_done, 5834 .rpc_call_prepare = nfs41_sequence_prepare, 5835 .rpc_release = nfs41_sequence_release, 5836 }; 5837 5838 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, 5839 struct rpc_cred *cred, 5840 bool is_privileged) 5841 { 5842 struct nfs4_sequence_data *calldata; 5843 struct rpc_message msg = { 5844 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE], 5845 .rpc_cred = cred, 5846 }; 5847 struct rpc_task_setup task_setup_data = { 5848 .rpc_client = clp->cl_rpcclient, 5849 .rpc_message = &msg, 5850 .callback_ops = &nfs41_sequence_ops, 5851 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT, 5852 }; 5853 5854 if (!atomic_inc_not_zero(&clp->cl_count)) 5855 return ERR_PTR(-EIO); 5856 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 5857 if (calldata == NULL) { 5858 nfs_put_client(clp); 5859 return ERR_PTR(-ENOMEM); 5860 } 5861 nfs41_init_sequence(&calldata->args, &calldata->res, 0); 5862 if (is_privileged) 5863 nfs4_set_sequence_privileged(&calldata->args); 5864 msg.rpc_argp = &calldata->args; 5865 msg.rpc_resp = &calldata->res; 5866 calldata->clp = clp; 5867 task_setup_data.callback_data = calldata; 5868 5869 return rpc_run_task(&task_setup_data); 5870 } 5871 5872 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags) 5873 { 5874 struct rpc_task *task; 5875 int ret = 0; 5876 5877 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0) 5878 return 0; 5879 task = _nfs41_proc_sequence(clp, cred, false); 5880 if (IS_ERR(task)) 5881 ret = PTR_ERR(task); 5882 else 5883 rpc_put_task_async(task); 5884 dprintk("<-- %s status=%d\n", __func__, ret); 5885 return ret; 5886 } 5887 5888 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) 5889 { 5890 struct rpc_task *task; 5891 int ret; 5892 5893 task = _nfs41_proc_sequence(clp, cred, true); 5894 if (IS_ERR(task)) { 5895 ret = PTR_ERR(task); 5896 goto out; 5897 } 5898 ret = rpc_wait_for_completion_task(task); 5899 if (!ret) { 5900 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; 5901 5902 if (task->tk_status == 0) 5903 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 5904 ret = task->tk_status; 5905 } 5906 rpc_put_task(task); 5907 out: 5908 dprintk("<-- %s status=%d\n", __func__, ret); 5909 return ret; 5910 } 5911 5912 struct nfs4_reclaim_complete_data { 5913 struct nfs_client *clp; 5914 struct nfs41_reclaim_complete_args arg; 5915 struct nfs41_reclaim_complete_res res; 5916 }; 5917 5918 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data) 5919 { 5920 struct nfs4_reclaim_complete_data *calldata = data; 5921 5922 nfs41_setup_sequence(calldata->clp->cl_session, 5923 &calldata->arg.seq_args, 5924 &calldata->res.seq_res, 5925 task); 5926 } 5927 5928 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp) 5929 { 5930 switch(task->tk_status) { 5931 case 0: 5932 case -NFS4ERR_COMPLETE_ALREADY: 5933 case -NFS4ERR_WRONG_CRED: /* What to do here? */ 5934 break; 5935 case -NFS4ERR_DELAY: 5936 rpc_delay(task, NFS4_POLL_RETRY_MAX); 5937 /* fall through */ 5938 case -NFS4ERR_RETRY_UNCACHED_REP: 5939 return -EAGAIN; 5940 default: 5941 nfs4_schedule_lease_recovery(clp); 5942 } 5943 return 0; 5944 } 5945 5946 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data) 5947 { 5948 struct nfs4_reclaim_complete_data *calldata = data; 5949 struct nfs_client *clp = calldata->clp; 5950 struct nfs4_sequence_res *res = &calldata->res.seq_res; 5951 5952 dprintk("--> %s\n", __func__); 5953 if (!nfs41_sequence_done(task, res)) 5954 return; 5955 5956 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) { 5957 rpc_restart_call_prepare(task); 5958 return; 5959 } 5960 dprintk("<-- %s\n", __func__); 5961 } 5962 5963 static void nfs4_free_reclaim_complete_data(void *data) 5964 { 5965 struct nfs4_reclaim_complete_data *calldata = data; 5966 5967 kfree(calldata); 5968 } 5969 5970 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = { 5971 .rpc_call_prepare = nfs4_reclaim_complete_prepare, 5972 .rpc_call_done = nfs4_reclaim_complete_done, 5973 .rpc_release = nfs4_free_reclaim_complete_data, 5974 }; 5975 5976 /* 5977 * Issue a global reclaim complete. 5978 */ 5979 static int nfs41_proc_reclaim_complete(struct nfs_client *clp) 5980 { 5981 struct nfs4_reclaim_complete_data *calldata; 5982 struct rpc_task *task; 5983 struct rpc_message msg = { 5984 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE], 5985 }; 5986 struct rpc_task_setup task_setup_data = { 5987 .rpc_client = clp->cl_rpcclient, 5988 .rpc_message = &msg, 5989 .callback_ops = &nfs4_reclaim_complete_call_ops, 5990 .flags = RPC_TASK_ASYNC, 5991 }; 5992 int status = -ENOMEM; 5993 5994 dprintk("--> %s\n", __func__); 5995 calldata = kzalloc(sizeof(*calldata), GFP_NOFS); 5996 if (calldata == NULL) 5997 goto out; 5998 calldata->clp = clp; 5999 calldata->arg.one_fs = 0; 6000 6001 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0); 6002 nfs4_set_sequence_privileged(&calldata->arg.seq_args); 6003 msg.rpc_argp = &calldata->arg; 6004 msg.rpc_resp = &calldata->res; 6005 task_setup_data.callback_data = calldata; 6006 task = rpc_run_task(&task_setup_data); 6007 if (IS_ERR(task)) { 6008 status = PTR_ERR(task); 6009 goto out; 6010 } 6011 status = nfs4_wait_for_completion_rpc_task(task); 6012 if (status == 0) 6013 status = task->tk_status; 6014 rpc_put_task(task); 6015 return 0; 6016 out: 6017 dprintk("<-- %s status=%d\n", __func__, status); 6018 return status; 6019 } 6020 6021 static void 6022 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) 6023 { 6024 struct nfs4_layoutget *lgp = calldata; 6025 struct nfs_server *server = NFS_SERVER(lgp->args.inode); 6026 struct nfs4_session *session = nfs4_get_session(server); 6027 6028 dprintk("--> %s\n", __func__); 6029 /* Note the is a race here, where a CB_LAYOUTRECALL can come in 6030 * right now covering the LAYOUTGET we are about to send. 6031 * However, that is not so catastrophic, and there seems 6032 * to be no way to prevent it completely. 6033 */ 6034 if (nfs41_setup_sequence(session, &lgp->args.seq_args, 6035 &lgp->res.seq_res, task)) 6036 return; 6037 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid, 6038 NFS_I(lgp->args.inode)->layout, 6039 lgp->args.ctx->state)) { 6040 rpc_exit(task, NFS4_OK); 6041 } 6042 } 6043 6044 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) 6045 { 6046 struct nfs4_layoutget *lgp = calldata; 6047 struct inode *inode = lgp->args.inode; 6048 struct nfs_server *server = NFS_SERVER(inode); 6049 struct pnfs_layout_hdr *lo; 6050 struct nfs4_state *state = NULL; 6051 unsigned long timeo, giveup; 6052 6053 dprintk("--> %s\n", __func__); 6054 6055 if (!nfs41_sequence_done(task, &lgp->res.seq_res)) 6056 goto out; 6057 6058 switch (task->tk_status) { 6059 case 0: 6060 goto out; 6061 case -NFS4ERR_LAYOUTTRYLATER: 6062 case -NFS4ERR_RECALLCONFLICT: 6063 timeo = rpc_get_timeout(task->tk_client); 6064 giveup = lgp->args.timestamp + timeo; 6065 if (time_after(giveup, jiffies)) 6066 task->tk_status = -NFS4ERR_DELAY; 6067 break; 6068 case -NFS4ERR_EXPIRED: 6069 case -NFS4ERR_BAD_STATEID: 6070 spin_lock(&inode->i_lock); 6071 lo = NFS_I(inode)->layout; 6072 if (!lo || list_empty(&lo->plh_segs)) { 6073 spin_unlock(&inode->i_lock); 6074 /* If the open stateid was bad, then recover it. */ 6075 state = lgp->args.ctx->state; 6076 } else { 6077 LIST_HEAD(head); 6078 6079 pnfs_mark_matching_lsegs_invalid(lo, &head, NULL); 6080 spin_unlock(&inode->i_lock); 6081 /* Mark the bad layout state as invalid, then 6082 * retry using the open stateid. */ 6083 pnfs_free_lseg_list(&head); 6084 } 6085 } 6086 if (nfs4_async_handle_error(task, server, state) == -EAGAIN) 6087 rpc_restart_call_prepare(task); 6088 out: 6089 dprintk("<-- %s\n", __func__); 6090 } 6091 6092 static size_t max_response_pages(struct nfs_server *server) 6093 { 6094 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; 6095 return nfs_page_array_len(0, max_resp_sz); 6096 } 6097 6098 static void nfs4_free_pages(struct page **pages, size_t size) 6099 { 6100 int i; 6101 6102 if (!pages) 6103 return; 6104 6105 for (i = 0; i < size; i++) { 6106 if (!pages[i]) 6107 break; 6108 __free_page(pages[i]); 6109 } 6110 kfree(pages); 6111 } 6112 6113 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) 6114 { 6115 struct page **pages; 6116 int i; 6117 6118 pages = kcalloc(size, sizeof(struct page *), gfp_flags); 6119 if (!pages) { 6120 dprintk("%s: can't alloc array of %zu pages\n", __func__, size); 6121 return NULL; 6122 } 6123 6124 for (i = 0; i < size; i++) { 6125 pages[i] = alloc_page(gfp_flags); 6126 if (!pages[i]) { 6127 dprintk("%s: failed to allocate page\n", __func__); 6128 nfs4_free_pages(pages, size); 6129 return NULL; 6130 } 6131 } 6132 6133 return pages; 6134 } 6135 6136 static void nfs4_layoutget_release(void *calldata) 6137 { 6138 struct nfs4_layoutget *lgp = calldata; 6139 struct inode *inode = lgp->args.inode; 6140 struct nfs_server *server = NFS_SERVER(inode); 6141 size_t max_pages = max_response_pages(server); 6142 6143 dprintk("--> %s\n", __func__); 6144 nfs4_free_pages(lgp->args.layout.pages, max_pages); 6145 pnfs_put_layout_hdr(NFS_I(inode)->layout); 6146 put_nfs_open_context(lgp->args.ctx); 6147 kfree(calldata); 6148 dprintk("<-- %s\n", __func__); 6149 } 6150 6151 static const struct rpc_call_ops nfs4_layoutget_call_ops = { 6152 .rpc_call_prepare = nfs4_layoutget_prepare, 6153 .rpc_call_done = nfs4_layoutget_done, 6154 .rpc_release = nfs4_layoutget_release, 6155 }; 6156 6157 struct pnfs_layout_segment * 6158 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) 6159 { 6160 struct inode *inode = lgp->args.inode; 6161 struct nfs_server *server = NFS_SERVER(inode); 6162 size_t max_pages = max_response_pages(server); 6163 struct rpc_task *task; 6164 struct rpc_message msg = { 6165 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], 6166 .rpc_argp = &lgp->args, 6167 .rpc_resp = &lgp->res, 6168 }; 6169 struct rpc_task_setup task_setup_data = { 6170 .rpc_client = server->client, 6171 .rpc_message = &msg, 6172 .callback_ops = &nfs4_layoutget_call_ops, 6173 .callback_data = lgp, 6174 .flags = RPC_TASK_ASYNC, 6175 }; 6176 struct pnfs_layout_segment *lseg = NULL; 6177 int status = 0; 6178 6179 dprintk("--> %s\n", __func__); 6180 6181 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); 6182 if (!lgp->args.layout.pages) { 6183 nfs4_layoutget_release(lgp); 6184 return ERR_PTR(-ENOMEM); 6185 } 6186 lgp->args.layout.pglen = max_pages * PAGE_SIZE; 6187 lgp->args.timestamp = jiffies; 6188 6189 lgp->res.layoutp = &lgp->args.layout; 6190 lgp->res.seq_res.sr_slot = NULL; 6191 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); 6192 6193 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */ 6194 pnfs_get_layout_hdr(NFS_I(inode)->layout); 6195 6196 task = rpc_run_task(&task_setup_data); 6197 if (IS_ERR(task)) 6198 return ERR_CAST(task); 6199 status = nfs4_wait_for_completion_rpc_task(task); 6200 if (status == 0) 6201 status = task->tk_status; 6202 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ 6203 if (status == 0 && lgp->res.layoutp->len) 6204 lseg = pnfs_layout_process(lgp); 6205 rpc_put_task(task); 6206 dprintk("<-- %s status=%d\n", __func__, status); 6207 if (status) 6208 return ERR_PTR(status); 6209 return lseg; 6210 } 6211 6212 static void 6213 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata) 6214 { 6215 struct nfs4_layoutreturn *lrp = calldata; 6216 6217 dprintk("--> %s\n", __func__); 6218 nfs41_setup_sequence(lrp->clp->cl_session, 6219 &lrp->args.seq_args, 6220 &lrp->res.seq_res, 6221 task); 6222 } 6223 6224 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) 6225 { 6226 struct nfs4_layoutreturn *lrp = calldata; 6227 struct nfs_server *server; 6228 6229 dprintk("--> %s\n", __func__); 6230 6231 if (!nfs41_sequence_done(task, &lrp->res.seq_res)) 6232 return; 6233 6234 server = NFS_SERVER(lrp->args.inode); 6235 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { 6236 rpc_restart_call_prepare(task); 6237 return; 6238 } 6239 dprintk("<-- %s\n", __func__); 6240 } 6241 6242 static void nfs4_layoutreturn_release(void *calldata) 6243 { 6244 struct nfs4_layoutreturn *lrp = calldata; 6245 struct pnfs_layout_hdr *lo = lrp->args.layout; 6246 6247 dprintk("--> %s\n", __func__); 6248 spin_lock(&lo->plh_inode->i_lock); 6249 if (lrp->res.lrs_present) 6250 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 6251 lo->plh_block_lgets--; 6252 spin_unlock(&lo->plh_inode->i_lock); 6253 pnfs_put_layout_hdr(lrp->args.layout); 6254 kfree(calldata); 6255 dprintk("<-- %s\n", __func__); 6256 } 6257 6258 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { 6259 .rpc_call_prepare = nfs4_layoutreturn_prepare, 6260 .rpc_call_done = nfs4_layoutreturn_done, 6261 .rpc_release = nfs4_layoutreturn_release, 6262 }; 6263 6264 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp) 6265 { 6266 struct rpc_task *task; 6267 struct rpc_message msg = { 6268 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN], 6269 .rpc_argp = &lrp->args, 6270 .rpc_resp = &lrp->res, 6271 }; 6272 struct rpc_task_setup task_setup_data = { 6273 .rpc_client = lrp->clp->cl_rpcclient, 6274 .rpc_message = &msg, 6275 .callback_ops = &nfs4_layoutreturn_call_ops, 6276 .callback_data = lrp, 6277 }; 6278 int status; 6279 6280 dprintk("--> %s\n", __func__); 6281 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); 6282 task = rpc_run_task(&task_setup_data); 6283 if (IS_ERR(task)) 6284 return PTR_ERR(task); 6285 status = task->tk_status; 6286 dprintk("<-- %s status=%d\n", __func__, status); 6287 rpc_put_task(task); 6288 return status; 6289 } 6290 6291 /* 6292 * Retrieve the list of Data Server devices from the MDS. 6293 */ 6294 static int _nfs4_getdevicelist(struct nfs_server *server, 6295 const struct nfs_fh *fh, 6296 struct pnfs_devicelist *devlist) 6297 { 6298 struct nfs4_getdevicelist_args args = { 6299 .fh = fh, 6300 .layoutclass = server->pnfs_curr_ld->id, 6301 }; 6302 struct nfs4_getdevicelist_res res = { 6303 .devlist = devlist, 6304 }; 6305 struct rpc_message msg = { 6306 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST], 6307 .rpc_argp = &args, 6308 .rpc_resp = &res, 6309 }; 6310 int status; 6311 6312 dprintk("--> %s\n", __func__); 6313 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, 6314 &res.seq_res, 0); 6315 dprintk("<-- %s status=%d\n", __func__, status); 6316 return status; 6317 } 6318 6319 int nfs4_proc_getdevicelist(struct nfs_server *server, 6320 const struct nfs_fh *fh, 6321 struct pnfs_devicelist *devlist) 6322 { 6323 struct nfs4_exception exception = { }; 6324 int err; 6325 6326 do { 6327 err = nfs4_handle_exception(server, 6328 _nfs4_getdevicelist(server, fh, devlist), 6329 &exception); 6330 } while (exception.retry); 6331 6332 dprintk("%s: err=%d, num_devs=%u\n", __func__, 6333 err, devlist->num_devs); 6334 6335 return err; 6336 } 6337 EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist); 6338 6339 static int 6340 _nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev) 6341 { 6342 struct nfs4_getdeviceinfo_args args = { 6343 .pdev = pdev, 6344 }; 6345 struct nfs4_getdeviceinfo_res res = { 6346 .pdev = pdev, 6347 }; 6348 struct rpc_message msg = { 6349 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO], 6350 .rpc_argp = &args, 6351 .rpc_resp = &res, 6352 }; 6353 int status; 6354 6355 dprintk("--> %s\n", __func__); 6356 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 6357 dprintk("<-- %s status=%d\n", __func__, status); 6358 6359 return status; 6360 } 6361 6362 int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev) 6363 { 6364 struct nfs4_exception exception = { }; 6365 int err; 6366 6367 do { 6368 err = nfs4_handle_exception(server, 6369 _nfs4_proc_getdeviceinfo(server, pdev), 6370 &exception); 6371 } while (exception.retry); 6372 return err; 6373 } 6374 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo); 6375 6376 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata) 6377 { 6378 struct nfs4_layoutcommit_data *data = calldata; 6379 struct nfs_server *server = NFS_SERVER(data->args.inode); 6380 struct nfs4_session *session = nfs4_get_session(server); 6381 6382 nfs41_setup_sequence(session, 6383 &data->args.seq_args, 6384 &data->res.seq_res, 6385 task); 6386 } 6387 6388 static void 6389 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) 6390 { 6391 struct nfs4_layoutcommit_data *data = calldata; 6392 struct nfs_server *server = NFS_SERVER(data->args.inode); 6393 6394 if (!nfs41_sequence_done(task, &data->res.seq_res)) 6395 return; 6396 6397 switch (task->tk_status) { /* Just ignore these failures */ 6398 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ 6399 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ 6400 case -NFS4ERR_BADLAYOUT: /* no layout */ 6401 case -NFS4ERR_GRACE: /* loca_recalim always false */ 6402 task->tk_status = 0; 6403 break; 6404 case 0: 6405 nfs_post_op_update_inode_force_wcc(data->args.inode, 6406 data->res.fattr); 6407 break; 6408 default: 6409 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { 6410 rpc_restart_call_prepare(task); 6411 return; 6412 } 6413 } 6414 } 6415 6416 static void nfs4_layoutcommit_release(void *calldata) 6417 { 6418 struct nfs4_layoutcommit_data *data = calldata; 6419 struct pnfs_layout_segment *lseg, *tmp; 6420 unsigned long *bitlock = &NFS_I(data->args.inode)->flags; 6421 6422 pnfs_cleanup_layoutcommit(data); 6423 /* Matched by references in pnfs_set_layoutcommit */ 6424 list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) { 6425 list_del_init(&lseg->pls_lc_list); 6426 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, 6427 &lseg->pls_flags)) 6428 pnfs_put_lseg(lseg); 6429 } 6430 6431 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); 6432 smp_mb__after_clear_bit(); 6433 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); 6434 6435 put_rpccred(data->cred); 6436 kfree(data); 6437 } 6438 6439 static const struct rpc_call_ops nfs4_layoutcommit_ops = { 6440 .rpc_call_prepare = nfs4_layoutcommit_prepare, 6441 .rpc_call_done = nfs4_layoutcommit_done, 6442 .rpc_release = nfs4_layoutcommit_release, 6443 }; 6444 6445 int 6446 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) 6447 { 6448 struct rpc_message msg = { 6449 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT], 6450 .rpc_argp = &data->args, 6451 .rpc_resp = &data->res, 6452 .rpc_cred = data->cred, 6453 }; 6454 struct rpc_task_setup task_setup_data = { 6455 .task = &data->task, 6456 .rpc_client = NFS_CLIENT(data->args.inode), 6457 .rpc_message = &msg, 6458 .callback_ops = &nfs4_layoutcommit_ops, 6459 .callback_data = data, 6460 .flags = RPC_TASK_ASYNC, 6461 }; 6462 struct rpc_task *task; 6463 int status = 0; 6464 6465 dprintk("NFS: %4d initiating layoutcommit call. sync %d " 6466 "lbw: %llu inode %lu\n", 6467 data->task.tk_pid, sync, 6468 data->args.lastbytewritten, 6469 data->args.inode->i_ino); 6470 6471 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 6472 task = rpc_run_task(&task_setup_data); 6473 if (IS_ERR(task)) 6474 return PTR_ERR(task); 6475 if (sync == false) 6476 goto out; 6477 status = nfs4_wait_for_completion_rpc_task(task); 6478 if (status != 0) 6479 goto out; 6480 status = task->tk_status; 6481 out: 6482 dprintk("%s: status %d\n", __func__, status); 6483 rpc_put_task(task); 6484 return status; 6485 } 6486 6487 static int 6488 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 6489 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 6490 { 6491 struct nfs41_secinfo_no_name_args args = { 6492 .style = SECINFO_STYLE_CURRENT_FH, 6493 }; 6494 struct nfs4_secinfo_res res = { 6495 .flavors = flavors, 6496 }; 6497 struct rpc_message msg = { 6498 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME], 6499 .rpc_argp = &args, 6500 .rpc_resp = &res, 6501 }; 6502 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); 6503 } 6504 6505 static int 6506 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, 6507 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors) 6508 { 6509 struct nfs4_exception exception = { }; 6510 int err; 6511 do { 6512 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 6513 switch (err) { 6514 case 0: 6515 case -NFS4ERR_WRONGSEC: 6516 case -NFS4ERR_NOTSUPP: 6517 goto out; 6518 default: 6519 err = nfs4_handle_exception(server, err, &exception); 6520 } 6521 } while (exception.retry); 6522 out: 6523 return err; 6524 } 6525 6526 static int 6527 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, 6528 struct nfs_fsinfo *info) 6529 { 6530 int err; 6531 struct page *page; 6532 rpc_authflavor_t flavor; 6533 struct nfs4_secinfo_flavors *flavors; 6534 6535 page = alloc_page(GFP_KERNEL); 6536 if (!page) { 6537 err = -ENOMEM; 6538 goto out; 6539 } 6540 6541 flavors = page_address(page); 6542 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); 6543 6544 /* 6545 * Fall back on "guess and check" method if 6546 * the server doesn't support SECINFO_NO_NAME 6547 */ 6548 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) { 6549 err = nfs4_find_root_sec(server, fhandle, info); 6550 goto out_freepage; 6551 } 6552 if (err) 6553 goto out_freepage; 6554 6555 flavor = nfs_find_best_sec(flavors); 6556 if (err == 0) 6557 err = nfs4_lookup_root_sec(server, fhandle, info, flavor); 6558 6559 out_freepage: 6560 put_page(page); 6561 if (err == -EACCES) 6562 return -EPERM; 6563 out: 6564 return err; 6565 } 6566 6567 static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6568 { 6569 int status; 6570 struct nfs41_test_stateid_args args = { 6571 .stateid = stateid, 6572 }; 6573 struct nfs41_test_stateid_res res; 6574 struct rpc_message msg = { 6575 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID], 6576 .rpc_argp = &args, 6577 .rpc_resp = &res, 6578 }; 6579 6580 dprintk("NFS call test_stateid %p\n", stateid); 6581 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); 6582 nfs4_set_sequence_privileged(&args.seq_args); 6583 status = nfs4_call_sync_sequence(server->client, server, &msg, 6584 &args.seq_args, &res.seq_res); 6585 if (status != NFS_OK) { 6586 dprintk("NFS reply test_stateid: failed, %d\n", status); 6587 return status; 6588 } 6589 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status); 6590 return -res.status; 6591 } 6592 6593 /** 6594 * nfs41_test_stateid - perform a TEST_STATEID operation 6595 * 6596 * @server: server / transport on which to perform the operation 6597 * @stateid: state ID to test 6598 * 6599 * Returns NFS_OK if the server recognizes that "stateid" is valid. 6600 * Otherwise a negative NFS4ERR value is returned if the operation 6601 * failed or the state ID is not currently valid. 6602 */ 6603 static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6604 { 6605 struct nfs4_exception exception = { }; 6606 int err; 6607 do { 6608 err = _nfs41_test_stateid(server, stateid); 6609 if (err != -NFS4ERR_DELAY) 6610 break; 6611 nfs4_handle_exception(server, err, &exception); 6612 } while (exception.retry); 6613 return err; 6614 } 6615 6616 static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6617 { 6618 struct nfs41_free_stateid_args args = { 6619 .stateid = stateid, 6620 }; 6621 struct nfs41_free_stateid_res res; 6622 struct rpc_message msg = { 6623 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID], 6624 .rpc_argp = &args, 6625 .rpc_resp = &res, 6626 }; 6627 int status; 6628 6629 dprintk("NFS call free_stateid %p\n", stateid); 6630 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0); 6631 nfs4_set_sequence_privileged(&args.seq_args); 6632 status = nfs4_call_sync_sequence(server->client, server, &msg, 6633 &args.seq_args, &res.seq_res); 6634 dprintk("NFS reply free_stateid: %d\n", status); 6635 return status; 6636 } 6637 6638 /** 6639 * nfs41_free_stateid - perform a FREE_STATEID operation 6640 * 6641 * @server: server / transport on which to perform the operation 6642 * @stateid: state ID to release 6643 * 6644 * Returns NFS_OK if the server freed "stateid". Otherwise a 6645 * negative NFS4ERR value is returned. 6646 */ 6647 static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid) 6648 { 6649 struct nfs4_exception exception = { }; 6650 int err; 6651 do { 6652 err = _nfs4_free_stateid(server, stateid); 6653 if (err != -NFS4ERR_DELAY) 6654 break; 6655 nfs4_handle_exception(server, err, &exception); 6656 } while (exception.retry); 6657 return err; 6658 } 6659 6660 static bool nfs41_match_stateid(const nfs4_stateid *s1, 6661 const nfs4_stateid *s2) 6662 { 6663 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0) 6664 return false; 6665 6666 if (s1->seqid == s2->seqid) 6667 return true; 6668 if (s1->seqid == 0 || s2->seqid == 0) 6669 return true; 6670 6671 return false; 6672 } 6673 6674 #endif /* CONFIG_NFS_V4_1 */ 6675 6676 static bool nfs4_match_stateid(const nfs4_stateid *s1, 6677 const nfs4_stateid *s2) 6678 { 6679 return nfs4_stateid_match(s1, s2); 6680 } 6681 6682 6683 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = { 6684 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 6685 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 6686 .recover_open = nfs4_open_reclaim, 6687 .recover_lock = nfs4_lock_reclaim, 6688 .establish_clid = nfs4_init_clientid, 6689 .get_clid_cred = nfs4_get_setclientid_cred, 6690 .detect_trunking = nfs40_discover_server_trunking, 6691 }; 6692 6693 #if defined(CONFIG_NFS_V4_1) 6694 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { 6695 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, 6696 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, 6697 .recover_open = nfs4_open_reclaim, 6698 .recover_lock = nfs4_lock_reclaim, 6699 .establish_clid = nfs41_init_clientid, 6700 .get_clid_cred = nfs4_get_exchange_id_cred, 6701 .reclaim_complete = nfs41_proc_reclaim_complete, 6702 .detect_trunking = nfs41_discover_server_trunking, 6703 }; 6704 #endif /* CONFIG_NFS_V4_1 */ 6705 6706 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 6707 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 6708 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 6709 .recover_open = nfs4_open_expired, 6710 .recover_lock = nfs4_lock_expired, 6711 .establish_clid = nfs4_init_clientid, 6712 .get_clid_cred = nfs4_get_setclientid_cred, 6713 }; 6714 6715 #if defined(CONFIG_NFS_V4_1) 6716 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = { 6717 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 6718 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 6719 .recover_open = nfs41_open_expired, 6720 .recover_lock = nfs41_lock_expired, 6721 .establish_clid = nfs41_init_clientid, 6722 .get_clid_cred = nfs4_get_exchange_id_cred, 6723 }; 6724 #endif /* CONFIG_NFS_V4_1 */ 6725 6726 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = { 6727 .sched_state_renewal = nfs4_proc_async_renew, 6728 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked, 6729 .renew_lease = nfs4_proc_renew, 6730 }; 6731 6732 #if defined(CONFIG_NFS_V4_1) 6733 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = { 6734 .sched_state_renewal = nfs41_proc_async_sequence, 6735 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked, 6736 .renew_lease = nfs4_proc_sequence, 6737 }; 6738 #endif 6739 6740 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { 6741 .minor_version = 0, 6742 .call_sync = _nfs4_call_sync, 6743 .match_stateid = nfs4_match_stateid, 6744 .find_root_sec = nfs4_find_root_sec, 6745 .reboot_recovery_ops = &nfs40_reboot_recovery_ops, 6746 .nograce_recovery_ops = &nfs40_nograce_recovery_ops, 6747 .state_renewal_ops = &nfs40_state_renewal_ops, 6748 }; 6749 6750 #if defined(CONFIG_NFS_V4_1) 6751 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { 6752 .minor_version = 1, 6753 .call_sync = nfs4_call_sync_sequence, 6754 .match_stateid = nfs41_match_stateid, 6755 .find_root_sec = nfs41_find_root_sec, 6756 .reboot_recovery_ops = &nfs41_reboot_recovery_ops, 6757 .nograce_recovery_ops = &nfs41_nograce_recovery_ops, 6758 .state_renewal_ops = &nfs41_state_renewal_ops, 6759 }; 6760 #endif 6761 6762 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { 6763 [0] = &nfs_v4_0_minor_ops, 6764 #if defined(CONFIG_NFS_V4_1) 6765 [1] = &nfs_v4_1_minor_ops, 6766 #endif 6767 }; 6768 6769 const struct inode_operations nfs4_dir_inode_operations = { 6770 .create = nfs_create, 6771 .lookup = nfs_lookup, 6772 .atomic_open = nfs_atomic_open, 6773 .link = nfs_link, 6774 .unlink = nfs_unlink, 6775 .symlink = nfs_symlink, 6776 .mkdir = nfs_mkdir, 6777 .rmdir = nfs_rmdir, 6778 .mknod = nfs_mknod, 6779 .rename = nfs_rename, 6780 .permission = nfs_permission, 6781 .getattr = nfs_getattr, 6782 .setattr = nfs_setattr, 6783 .getxattr = generic_getxattr, 6784 .setxattr = generic_setxattr, 6785 .listxattr = generic_listxattr, 6786 .removexattr = generic_removexattr, 6787 }; 6788 6789 static const struct inode_operations nfs4_file_inode_operations = { 6790 .permission = nfs_permission, 6791 .getattr = nfs_getattr, 6792 .setattr = nfs_setattr, 6793 .getxattr = generic_getxattr, 6794 .setxattr = generic_setxattr, 6795 .listxattr = generic_listxattr, 6796 .removexattr = generic_removexattr, 6797 }; 6798 6799 const struct nfs_rpc_ops nfs_v4_clientops = { 6800 .version = 4, /* protocol version */ 6801 .dentry_ops = &nfs4_dentry_operations, 6802 .dir_inode_ops = &nfs4_dir_inode_operations, 6803 .file_inode_ops = &nfs4_file_inode_operations, 6804 .file_ops = &nfs4_file_operations, 6805 .getroot = nfs4_proc_get_root, 6806 .submount = nfs4_submount, 6807 .try_mount = nfs4_try_mount, 6808 .getattr = nfs4_proc_getattr, 6809 .setattr = nfs4_proc_setattr, 6810 .lookup = nfs4_proc_lookup, 6811 .access = nfs4_proc_access, 6812 .readlink = nfs4_proc_readlink, 6813 .create = nfs4_proc_create, 6814 .remove = nfs4_proc_remove, 6815 .unlink_setup = nfs4_proc_unlink_setup, 6816 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare, 6817 .unlink_done = nfs4_proc_unlink_done, 6818 .rename = nfs4_proc_rename, 6819 .rename_setup = nfs4_proc_rename_setup, 6820 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare, 6821 .rename_done = nfs4_proc_rename_done, 6822 .link = nfs4_proc_link, 6823 .symlink = nfs4_proc_symlink, 6824 .mkdir = nfs4_proc_mkdir, 6825 .rmdir = nfs4_proc_remove, 6826 .readdir = nfs4_proc_readdir, 6827 .mknod = nfs4_proc_mknod, 6828 .statfs = nfs4_proc_statfs, 6829 .fsinfo = nfs4_proc_fsinfo, 6830 .pathconf = nfs4_proc_pathconf, 6831 .set_capabilities = nfs4_server_capabilities, 6832 .decode_dirent = nfs4_decode_dirent, 6833 .read_setup = nfs4_proc_read_setup, 6834 .read_pageio_init = pnfs_pageio_init_read, 6835 .read_rpc_prepare = nfs4_proc_read_rpc_prepare, 6836 .read_done = nfs4_read_done, 6837 .write_setup = nfs4_proc_write_setup, 6838 .write_pageio_init = pnfs_pageio_init_write, 6839 .write_rpc_prepare = nfs4_proc_write_rpc_prepare, 6840 .write_done = nfs4_write_done, 6841 .commit_setup = nfs4_proc_commit_setup, 6842 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare, 6843 .commit_done = nfs4_commit_done, 6844 .lock = nfs4_proc_lock, 6845 .clear_acl_cache = nfs4_zap_acl_attr, 6846 .close_context = nfs4_close_context, 6847 .open_context = nfs4_atomic_open, 6848 .have_delegation = nfs4_have_delegation, 6849 .return_delegation = nfs4_inode_return_delegation, 6850 .alloc_client = nfs4_alloc_client, 6851 .init_client = nfs4_init_client, 6852 .free_client = nfs4_free_client, 6853 .create_server = nfs4_create_server, 6854 .clone_server = nfs_clone_server, 6855 }; 6856 6857 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 6858 .prefix = XATTR_NAME_NFSV4_ACL, 6859 .list = nfs4_xattr_list_nfs4_acl, 6860 .get = nfs4_xattr_get_nfs4_acl, 6861 .set = nfs4_xattr_set_nfs4_acl, 6862 }; 6863 6864 const struct xattr_handler *nfs4_xattr_handlers[] = { 6865 &nfs4_xattr_nfs4_acl_handler, 6866 NULL 6867 }; 6868 6869 /* 6870 * Local variables: 6871 * c-basic-offset: 8 6872 * End: 6873 */ 6874