/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#ifdef NFS_DEBUG
#define NFSDBG_FACILITY NFSDBG_CALLBACK
#endif

__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
			     struct cb_getattrres *res,
			     struct cb_process_state *cps)
{
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL) {
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				      -ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfsi->nrequests != 0)
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}
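
/*
 * CB_RECALL: the server is asking us to return the delegation identified
 * by the filehandle and stateid in the arguments. We look up the
 * delegated inode and hand the actual DELEGRETURN off to the state
 * manager, so the callback reply does not wait for the return to
 * complete.
 */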
__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
			    struct cb_process_state *cps)
{
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL)
		goto out;
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_recall_delegation(inode, -ntohl(res));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout by filehandle.
 *
 * Note: gets a refcount on the layout hdr and on its respective inode.
 * Caller must put the layout hdr and the inode.
 *
 * TODO: keep track of all layouts (and delegations) in a hash table
 * hashed by filehandle.
 */
static struct pnfs_layout_hdr *get_layout_by_fh_locked(struct nfs_client *clp,
		struct nfs_fh *fh, nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *ino;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (!nfs4_stateid_match_other(&lo->plh_stateid, stateid))
				continue;
			if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh))
				continue;
			ino = igrab(lo->plh_inode);
			if (!ino)
				break;
			spin_lock(&ino->i_lock);
			/* Is this layout in the process of being freed? */
			if (NFS_I(ino)->layout != lo) {
				spin_unlock(&ino->i_lock);
				iput(ino);
				break;
			}
			pnfs_get_layout_hdr(lo);
			spin_unlock(&ino->i_lock);
			return lo;
		}
	}

	return NULL;
}

static struct pnfs_layout_hdr *get_layout_by_fh(struct nfs_client *clp,
		struct nfs_fh *fh, nfs4_stateid *stateid)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	lo = get_layout_by_fh_locked(clp, fh, stateid);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return lo;
}

static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	lo = get_layout_by_fh(clp, &args->cbl_fh, &args->cbl_stateid);
	if (!lo)
		goto out;

	ino = lo->plh_inode;

	spin_lock(&ino->i_lock);
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
	spin_unlock(&ino->i_lock);

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
	    pnfs_mark_matching_lsegs_invalid(lo, &free_me_list,
					     &args->cbl_range)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
							    &args->cbl_range);
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	pnfs_put_layout_hdr(lo);
	trace_nfs4_cb_layoutrecall_inode(clp, &args->cbl_fh, ino, -rv);
	iput(ino);
out:
	return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	u32 res;

	dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
	if (args->cbl_recall_type == RETURN_FILE)
		res = initiate_file_draining(clp, args);
	else
		res = initiate_bulk_draining(clp, args);
	dprintk("%s returning %i\n", __func__, res);
	return res;
}

__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	u32 res;

	dprintk("%s: -->\n", __func__);

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	else
		res = NFS4ERR_OP_NOT_IN_SESSION;

	dprintk("%s: exit with status = %d\n", __func__, res);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}
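
/*
 * CB_NOTIFY_DEVICEID: the server is notifying us that one or more pNFS
 * device IDs it previously handed out are no longer valid. For each
 * notified device, find a mounted server that uses the matching layout
 * type and simply drop the device ID from the local deviceid cache; if
 * the device is needed again it should be re-fetched with GETDEVICEINFO.
 */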
__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	dprintk("%s: -->\n", __func__);

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			dprintk("%s: layout type %u not found\n",
				__func__, dev->cbd_layout_type);
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	dprintk("%s: exit with status = %u\n",
		__func__, be32_to_cpu(res));
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. On success the caller advances
 * the slot's sequence number.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs *args)
{
	struct nfs4_slot *slot;

	dprintk("%s enter. slotid %u seqid %u\n",
		__func__, args->csa_slotid, args->csa_sequenceid);

	if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
		return htonl(NFS4ERR_BADSLOT);

	slot = tbl->slots + args->csa_slotid;
	dprintk("%s slot table seqid: %u\n", __func__, slot->seq_nr);

	/* Normal */
	if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		goto out_ok;

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		dprintk("%s seqid %u is a replay\n",
			__func__, args->csa_sequenceid);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* The ca_maxresponsesize_cached is 0 with no DRC */
		else if (args->csa_cachethis == 1)
			return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
	}

	/* Wraparound */
	if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) {
		slot->seq_nr = 1;
		goto out_ok;
	}

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
out_ok:
	tbl->highest_used_slotid = args->csa_slotid;
	return htonl(NFS4_OK);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists)
{
	bool status = false;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];

			dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
				"slotid %u\n", __func__,
				((u32 *)&rclist->rcl_sessionid.data)[0],
				((u32 *)&rclist->rcl_sessionid.data)[1],
				((u32 *)&rclist->rcl_sessionid.data)[2],
				((u32 *)&rclist->rcl_sessionid.data)[3],
				ref->rc_sequenceid, ref->rc_slotid);

			spin_lock(&tbl->slot_tbl_lock);
			status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
				  tbl->slots[ref->rc_slotid].seq_nr ==
					ref->rc_sequenceid);
			spin_unlock(&tbl->slot_tbl_lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}
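
/*
 * CB_SEQUENCE: validate the callback against the backchannel slot table.
 * We locate the client by session ID, check that the sequence ID follows
 * on from the last callback seen on that slot (with slot->seq_nr at 5,
 * only a csa_sequenceid of 6 is accepted as new work; 5 is a replay and
 * anything else is NFS4ERR_SEQ_MISORDERED), and answer NFS4ERR_DELAY if
 * the callback refers to a forechannel request whose reply is still
 * outstanding. The slot's sequence number is only advanced on success.
 */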
__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
			      struct cb_sequenceres *res,
			      struct cb_process_state *cps)
{
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;
	slot = tbl->slots + args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;
	res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
	res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;

	status = validate_seqid(tbl, args);
	if (status)
		goto out_unlock;

	cps->slotid = args->csa_slotid;

	/*
	 * Check for pending referring calls. If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr++;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
		ntohl(status), ntohl(res->csr_status));
	return status;
}

static bool
validate_bitmap_values(unsigned long mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}
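
/*
 * CB_RECALL_ANY: the server wants us to return some of the state we hold.
 * Map the request's type mask onto what we can actually give back here:
 * unused read and/or write delegations, and (for pNFS) all layouts, which
 * are returned by pretending we received a bulk CB_LAYOUTRECALL.
 */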
__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
			       struct cb_process_state *cps)
{
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags = FMODE_READ;
	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags |= FMODE_WRITE;
	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
		     &args->craa_type_mask))
		pnfs_recall_all_layouts(cps->clp);
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
				struct cb_process_state *cps)
{
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}
#endif /* CONFIG_NFS_V4_1 */