#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */

struct ceph_reconnect_state {
	int nr_caps;
	struct ceph_pagelist *pagelist;
	unsigned msg_version;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = -EIO;

	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
		ceph_decode_copy_safe(p, end, &info->dir_layout,
				      sizeof(info->dir_layout), bad);
	else
		memset(&info->dir_layout, 0, sizeof(info->dir_layout));

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
	} else
		info->inline_version = CEPH_INLINE_NONE;

	info->pool_ns_len = 0;
	info->pool_ns_data = NULL;
	if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		if (info->pool_ns_len > 0) {
			ceph_decode_need(p, end, info->pool_ns_len, bad);
			info->pool_ns_data = *p;
			*p += info->pool_ns_len;
		}
	}
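	/*
	 * All of the variable-length fields above are bounds-checked with
	 * the ceph_decode_*_safe()/ceph_decode_need() helpers, which compare
	 * the decode cursor against 'end' and jump to the 'bad' label on a
	 * short buffer instead of reading past the reply.  A rough sketch of
	 * the pattern (the shape only, not the real macro bodies):
	 *
	 *	ceph_decode_need(p, end, n, bad);  // < n bytes left -> goto bad
	 *	v = ceph_decode_32(p);             // read le32, advance *p by 4
	 *
	 * so a truncated or corrupt reply falls through to the error return
	 * below with -EIO rather than overrunning msg->front.
	 */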
112 113 return 0; 114 bad: 115 return err; 116 } 117 118 /* 119 * parse a normal reply, which may contain a (dir+)dentry and/or a 120 * target inode. 121 */ 122 static int parse_reply_info_trace(void **p, void *end, 123 struct ceph_mds_reply_info_parsed *info, 124 u64 features) 125 { 126 int err; 127 128 if (info->head->is_dentry) { 129 err = parse_reply_info_in(p, end, &info->diri, features); 130 if (err < 0) 131 goto out_bad; 132 133 if (unlikely(*p + sizeof(*info->dirfrag) > end)) 134 goto bad; 135 info->dirfrag = *p; 136 *p += sizeof(*info->dirfrag) + 137 sizeof(u32)*le32_to_cpu(info->dirfrag->ndist); 138 if (unlikely(*p > end)) 139 goto bad; 140 141 ceph_decode_32_safe(p, end, info->dname_len, bad); 142 ceph_decode_need(p, end, info->dname_len, bad); 143 info->dname = *p; 144 *p += info->dname_len; 145 info->dlease = *p; 146 *p += sizeof(*info->dlease); 147 } 148 149 if (info->head->is_target) { 150 err = parse_reply_info_in(p, end, &info->targeti, features); 151 if (err < 0) 152 goto out_bad; 153 } 154 155 if (unlikely(*p != end)) 156 goto bad; 157 return 0; 158 159 bad: 160 err = -EIO; 161 out_bad: 162 pr_err("problem parsing mds trace %d\n", err); 163 return err; 164 } 165 166 /* 167 * parse readdir results 168 */ 169 static int parse_reply_info_dir(void **p, void *end, 170 struct ceph_mds_reply_info_parsed *info, 171 u64 features) 172 { 173 u32 num, i = 0; 174 int err; 175 176 info->dir_dir = *p; 177 if (*p + sizeof(*info->dir_dir) > end) 178 goto bad; 179 *p += sizeof(*info->dir_dir) + 180 sizeof(u32)*le32_to_cpu(info->dir_dir->ndist); 181 if (*p > end) 182 goto bad; 183 184 ceph_decode_need(p, end, sizeof(num) + 2, bad); 185 num = ceph_decode_32(p); 186 { 187 u16 flags = ceph_decode_16(p); 188 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END); 189 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE); 190 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER); 191 info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH); 192 } 193 if (num == 0) 194 goto done; 195 196 BUG_ON(!info->dir_entries); 197 if ((unsigned long)(info->dir_entries + num) > 198 (unsigned long)info->dir_entries + info->dir_buf_size) { 199 pr_err("dir contents are larger than expected\n"); 200 WARN_ON(1); 201 goto bad; 202 } 203 204 info->dir_nr = num; 205 while (num) { 206 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i; 207 /* dentry */ 208 ceph_decode_need(p, end, sizeof(u32)*2, bad); 209 rde->name_len = ceph_decode_32(p); 210 ceph_decode_need(p, end, rde->name_len, bad); 211 rde->name = *p; 212 *p += rde->name_len; 213 dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name); 214 rde->lease = *p; 215 *p += sizeof(struct ceph_mds_reply_lease); 216 217 /* inode */ 218 err = parse_reply_info_in(p, end, &rde->inode, features); 219 if (err < 0) 220 goto out_bad; 221 /* ceph_readdir_prepopulate() will update it */ 222 rde->offset = 0; 223 i++; 224 num--; 225 } 226 227 done: 228 if (*p != end) 229 goto bad; 230 return 0; 231 232 bad: 233 err = -EIO; 234 out_bad: 235 pr_err("problem parsing dir contents %d\n", err); 236 return err; 237 } 238 239 /* 240 * parse fcntl F_GETLK results 241 */ 242 static int parse_reply_info_filelock(void **p, void *end, 243 struct ceph_mds_reply_info_parsed *info, 244 u64 features) 245 { 246 if (*p + sizeof(*info->filelock_reply) > end) 247 goto bad; 248 249 info->filelock_reply = *p; 250 *p += sizeof(*info->filelock_reply); 251 252 if (unlikely(*p != end)) 253 goto bad; 254 return 0; 255 256 bad: 257 return -EIO; 258 } 259 260 /* 261 * parse create results 262 
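 *
 * Newer MDSs (those advertising CEPH_FEATURE_REPLY_CREATE_INODE) may
 * append the created inode's number to the reply; older MDSs send
 * nothing in this section, as handled below.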
*/ 263 static int parse_reply_info_create(void **p, void *end, 264 struct ceph_mds_reply_info_parsed *info, 265 u64 features) 266 { 267 if (features & CEPH_FEATURE_REPLY_CREATE_INODE) { 268 if (*p == end) { 269 info->has_create_ino = false; 270 } else { 271 info->has_create_ino = true; 272 info->ino = ceph_decode_64(p); 273 } 274 } 275 276 if (unlikely(*p != end)) 277 goto bad; 278 return 0; 279 280 bad: 281 return -EIO; 282 } 283 284 /* 285 * parse extra results 286 */ 287 static int parse_reply_info_extra(void **p, void *end, 288 struct ceph_mds_reply_info_parsed *info, 289 u64 features) 290 { 291 u32 op = le32_to_cpu(info->head->op); 292 293 if (op == CEPH_MDS_OP_GETFILELOCK) 294 return parse_reply_info_filelock(p, end, info, features); 295 else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP) 296 return parse_reply_info_dir(p, end, info, features); 297 else if (op == CEPH_MDS_OP_CREATE) 298 return parse_reply_info_create(p, end, info, features); 299 else 300 return -EIO; 301 } 302 303 /* 304 * parse entire mds reply 305 */ 306 static int parse_reply_info(struct ceph_msg *msg, 307 struct ceph_mds_reply_info_parsed *info, 308 u64 features) 309 { 310 void *p, *end; 311 u32 len; 312 int err; 313 314 info->head = msg->front.iov_base; 315 p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head); 316 end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); 317 318 /* trace */ 319 ceph_decode_32_safe(&p, end, len, bad); 320 if (len > 0) { 321 ceph_decode_need(&p, end, len, bad); 322 err = parse_reply_info_trace(&p, p+len, info, features); 323 if (err < 0) 324 goto out_bad; 325 } 326 327 /* extra */ 328 ceph_decode_32_safe(&p, end, len, bad); 329 if (len > 0) { 330 ceph_decode_need(&p, end, len, bad); 331 err = parse_reply_info_extra(&p, p+len, info, features); 332 if (err < 0) 333 goto out_bad; 334 } 335 336 /* snap blob */ 337 ceph_decode_32_safe(&p, end, len, bad); 338 info->snapblob_len = len; 339 info->snapblob = p; 340 p += len; 341 342 if (p != end) 343 goto bad; 344 return 0; 345 346 bad: 347 err = -EIO; 348 out_bad: 349 pr_err("mds parse_reply err %d\n", err); 350 return err; 351 } 352 353 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info) 354 { 355 if (!info->dir_entries) 356 return; 357 free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size)); 358 } 359 360 361 /* 362 * sessions 363 */ 364 const char *ceph_session_state_name(int s) 365 { 366 switch (s) { 367 case CEPH_MDS_SESSION_NEW: return "new"; 368 case CEPH_MDS_SESSION_OPENING: return "opening"; 369 case CEPH_MDS_SESSION_OPEN: return "open"; 370 case CEPH_MDS_SESSION_HUNG: return "hung"; 371 case CEPH_MDS_SESSION_CLOSING: return "closing"; 372 case CEPH_MDS_SESSION_RESTARTING: return "restarting"; 373 case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting"; 374 case CEPH_MDS_SESSION_REJECTED: return "rejected"; 375 default: return "???"; 376 } 377 } 378 379 static struct ceph_mds_session *get_session(struct ceph_mds_session *s) 380 { 381 if (refcount_inc_not_zero(&s->s_ref)) { 382 dout("mdsc get_session %p %d -> %d\n", s, 383 refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref)); 384 return s; 385 } else { 386 dout("mdsc get_session %p 0 -- FAIL", s); 387 return NULL; 388 } 389 } 390 391 void ceph_put_mds_session(struct ceph_mds_session *s) 392 { 393 dout("mdsc put_session %p %d -> %d\n", s, 394 refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1); 395 if (refcount_dec_and_test(&s->s_ref)) { 396 if (s->s_auth.authorizer) 397 
ceph_auth_destroy_authorizer(s->s_auth.authorizer); 398 kfree(s); 399 } 400 } 401 402 /* 403 * called under mdsc->mutex 404 */ 405 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc, 406 int mds) 407 { 408 struct ceph_mds_session *session; 409 410 if (mds >= mdsc->max_sessions || !mdsc->sessions[mds]) 411 return NULL; 412 session = mdsc->sessions[mds]; 413 dout("lookup_mds_session %p %d\n", session, 414 refcount_read(&session->s_ref)); 415 get_session(session); 416 return session; 417 } 418 419 static bool __have_session(struct ceph_mds_client *mdsc, int mds) 420 { 421 if (mds >= mdsc->max_sessions) 422 return false; 423 return mdsc->sessions[mds]; 424 } 425 426 static int __verify_registered_session(struct ceph_mds_client *mdsc, 427 struct ceph_mds_session *s) 428 { 429 if (s->s_mds >= mdsc->max_sessions || 430 mdsc->sessions[s->s_mds] != s) 431 return -ENOENT; 432 return 0; 433 } 434 435 /* 436 * create+register a new session for given mds. 437 * called under mdsc->mutex. 438 */ 439 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, 440 int mds) 441 { 442 struct ceph_mds_session *s; 443 444 if (mds >= mdsc->mdsmap->m_num_mds) 445 return ERR_PTR(-EINVAL); 446 447 s = kzalloc(sizeof(*s), GFP_NOFS); 448 if (!s) 449 return ERR_PTR(-ENOMEM); 450 s->s_mdsc = mdsc; 451 s->s_mds = mds; 452 s->s_state = CEPH_MDS_SESSION_NEW; 453 s->s_ttl = 0; 454 s->s_seq = 0; 455 mutex_init(&s->s_mutex); 456 457 ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr); 458 459 spin_lock_init(&s->s_gen_ttl_lock); 460 s->s_cap_gen = 0; 461 s->s_cap_ttl = jiffies - 1; 462 463 spin_lock_init(&s->s_cap_lock); 464 s->s_renew_requested = 0; 465 s->s_renew_seq = 0; 466 INIT_LIST_HEAD(&s->s_caps); 467 s->s_nr_caps = 0; 468 s->s_trim_caps = 0; 469 refcount_set(&s->s_ref, 1); 470 INIT_LIST_HEAD(&s->s_waiting); 471 INIT_LIST_HEAD(&s->s_unsafe); 472 s->s_num_cap_releases = 0; 473 s->s_cap_reconnect = 0; 474 s->s_cap_iterator = NULL; 475 INIT_LIST_HEAD(&s->s_cap_releases); 476 INIT_LIST_HEAD(&s->s_cap_flushing); 477 478 dout("register_session mds%d\n", mds); 479 if (mds >= mdsc->max_sessions) { 480 int newmax = 1 << get_count_order(mds+1); 481 struct ceph_mds_session **sa; 482 483 dout("register_session realloc to %d\n", newmax); 484 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS); 485 if (!sa) 486 goto fail_realloc; 487 if (mdsc->sessions) { 488 memcpy(sa, mdsc->sessions, 489 mdsc->max_sessions * sizeof(void *)); 490 kfree(mdsc->sessions); 491 } 492 mdsc->sessions = sa; 493 mdsc->max_sessions = newmax; 494 } 495 mdsc->sessions[mds] = s; 496 atomic_inc(&mdsc->num_sessions); 497 refcount_inc(&s->s_ref); /* one ref to sessions[], one to caller */ 498 499 ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds, 500 ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); 501 502 return s; 503 504 fail_realloc: 505 kfree(s); 506 return ERR_PTR(-ENOMEM); 507 } 508 509 /* 510 * called under mdsc->mutex 511 */ 512 static void __unregister_session(struct ceph_mds_client *mdsc, 513 struct ceph_mds_session *s) 514 { 515 dout("__unregister_session mds%d %p\n", s->s_mds, s); 516 BUG_ON(mdsc->sessions[s->s_mds] != s); 517 mdsc->sessions[s->s_mds] = NULL; 518 ceph_con_close(&s->s_con); 519 ceph_put_mds_session(s); 520 atomic_dec(&mdsc->num_sessions); 521 } 522 523 /* 524 * drop session refs in request. 
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_parent)
		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	kfree(req);
}

DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)

/*
 * lookup request with the given tid, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
	struct ceph_mds_request *req;

	req = lookup_request(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);

	return req;
}

/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps)
		ceph_reserve_caps(mdsc, &req->r_caps_reservation,
				  req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	insert_request(&mdsc->request_tree, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		ihold(dir);
		req->r_unsafe_dir = dir;
	}
}

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	/* Never leave an unregistered request on an unsafe list!
*/ 632 list_del_init(&req->r_unsafe_item); 633 634 if (req->r_tid == mdsc->oldest_tid) { 635 struct rb_node *p = rb_next(&req->r_node); 636 mdsc->oldest_tid = 0; 637 while (p) { 638 struct ceph_mds_request *next_req = 639 rb_entry(p, struct ceph_mds_request, r_node); 640 if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) { 641 mdsc->oldest_tid = next_req->r_tid; 642 break; 643 } 644 p = rb_next(p); 645 } 646 } 647 648 erase_request(&mdsc->request_tree, req); 649 650 if (req->r_unsafe_dir && 651 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { 652 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); 653 spin_lock(&ci->i_unsafe_lock); 654 list_del_init(&req->r_unsafe_dir_item); 655 spin_unlock(&ci->i_unsafe_lock); 656 } 657 if (req->r_target_inode && 658 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { 659 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); 660 spin_lock(&ci->i_unsafe_lock); 661 list_del_init(&req->r_unsafe_target_item); 662 spin_unlock(&ci->i_unsafe_lock); 663 } 664 665 if (req->r_unsafe_dir) { 666 iput(req->r_unsafe_dir); 667 req->r_unsafe_dir = NULL; 668 } 669 670 complete_all(&req->r_safe_completion); 671 672 ceph_mdsc_put_request(req); 673 } 674 675 /* 676 * Walk back up the dentry tree until we hit a dentry representing a 677 * non-snapshot inode. We do this using the rcu_read_lock (which must be held 678 * when calling this) to ensure that the objects won't disappear while we're 679 * working with them. Once we hit a candidate dentry, we attempt to take a 680 * reference to it, and return that as the result. 681 */ 682 static struct inode *get_nonsnap_parent(struct dentry *dentry) 683 { 684 struct inode *inode = NULL; 685 686 while (dentry && !IS_ROOT(dentry)) { 687 inode = d_inode_rcu(dentry); 688 if (!inode || ceph_snap(inode) == CEPH_NOSNAP) 689 break; 690 dentry = dentry->d_parent; 691 } 692 if (inode) 693 inode = igrab(inode); 694 return inode; 695 } 696 697 /* 698 * Choose mds to send request to next. If there is a hint set in the 699 * request (e.g., due to a prior forward hint from the mds), use that. 700 * Otherwise, consult frag tree and/or caps to identify the 701 * appropriate mds. If all else fails, choose randomly. 702 * 703 * Called under mdsc->mutex. 704 */ 705 static int __choose_mds(struct ceph_mds_client *mdsc, 706 struct ceph_mds_request *req) 707 { 708 struct inode *inode; 709 struct ceph_inode_info *ci; 710 struct ceph_cap *cap; 711 int mode = req->r_direct_mode; 712 int mds = -1; 713 u32 hash = req->r_direct_hash; 714 bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags); 715 716 /* 717 * is there a specific mds we should try? ignore hint if we have 718 * no session and the mds is not up (active or recovering). 
719 */ 720 if (req->r_resend_mds >= 0 && 721 (__have_session(mdsc, req->r_resend_mds) || 722 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { 723 dout("choose_mds using resend_mds mds%d\n", 724 req->r_resend_mds); 725 return req->r_resend_mds; 726 } 727 728 if (mode == USE_RANDOM_MDS) 729 goto random; 730 731 inode = NULL; 732 if (req->r_inode) { 733 if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) { 734 inode = req->r_inode; 735 ihold(inode); 736 } else { 737 /* req->r_dentry is non-null for LSSNAP request */ 738 rcu_read_lock(); 739 inode = get_nonsnap_parent(req->r_dentry); 740 rcu_read_unlock(); 741 dout("__choose_mds using snapdir's parent %p\n", inode); 742 } 743 } else if (req->r_dentry) { 744 /* ignore race with rename; old or new d_parent is okay */ 745 struct dentry *parent; 746 struct inode *dir; 747 748 rcu_read_lock(); 749 parent = req->r_dentry->d_parent; 750 dir = req->r_parent ? : d_inode_rcu(parent); 751 752 if (!dir || dir->i_sb != mdsc->fsc->sb) { 753 /* not this fs or parent went negative */ 754 inode = d_inode(req->r_dentry); 755 if (inode) 756 ihold(inode); 757 } else if (ceph_snap(dir) != CEPH_NOSNAP) { 758 /* direct snapped/virtual snapdir requests 759 * based on parent dir inode */ 760 inode = get_nonsnap_parent(parent); 761 dout("__choose_mds using nonsnap parent %p\n", inode); 762 } else { 763 /* dentry target */ 764 inode = d_inode(req->r_dentry); 765 if (!inode || mode == USE_AUTH_MDS) { 766 /* dir + name */ 767 inode = igrab(dir); 768 hash = ceph_dentry_hash(dir, req->r_dentry); 769 is_hash = true; 770 } else { 771 ihold(inode); 772 } 773 } 774 rcu_read_unlock(); 775 } 776 777 dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash, 778 (int)hash, mode); 779 if (!inode) 780 goto random; 781 ci = ceph_inode(inode); 782 783 if (is_hash && S_ISDIR(inode->i_mode)) { 784 struct ceph_inode_frag frag; 785 int found; 786 787 ceph_choose_frag(ci, hash, &frag, &found); 788 if (found) { 789 if (mode == USE_ANY_MDS && frag.ndist > 0) { 790 u8 r; 791 792 /* choose a random replica */ 793 get_random_bytes(&r, 1); 794 r %= frag.ndist; 795 mds = frag.dist[r]; 796 dout("choose_mds %p %llx.%llx " 797 "frag %u mds%d (%d/%d)\n", 798 inode, ceph_vinop(inode), 799 frag.frag, mds, 800 (int)r, frag.ndist); 801 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= 802 CEPH_MDS_STATE_ACTIVE) 803 goto out; 804 } 805 806 /* since this file/dir wasn't known to be 807 * replicated, then we want to look for the 808 * authoritative mds. */ 809 mode = USE_AUTH_MDS; 810 if (frag.mds >= 0) { 811 /* choose auth mds */ 812 mds = frag.mds; 813 dout("choose_mds %p %llx.%llx " 814 "frag %u mds%d (auth)\n", 815 inode, ceph_vinop(inode), frag.frag, mds); 816 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= 817 CEPH_MDS_STATE_ACTIVE) 818 goto out; 819 } 820 } 821 } 822 823 spin_lock(&ci->i_ceph_lock); 824 cap = NULL; 825 if (mode == USE_AUTH_MDS) 826 cap = ci->i_auth_cap; 827 if (!cap && !RB_EMPTY_ROOT(&ci->i_caps)) 828 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); 829 if (!cap) { 830 spin_unlock(&ci->i_ceph_lock); 831 iput(inode); 832 goto random; 833 } 834 mds = cap->session->s_mds; 835 dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n", 836 inode, ceph_vinop(inode), mds, 837 cap == ci->i_auth_cap ? 
"auth " : "", cap); 838 spin_unlock(&ci->i_ceph_lock); 839 out: 840 iput(inode); 841 return mds; 842 843 random: 844 mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); 845 dout("choose_mds chose random mds%d\n", mds); 846 return mds; 847 } 848 849 850 /* 851 * session messages 852 */ 853 static struct ceph_msg *create_session_msg(u32 op, u64 seq) 854 { 855 struct ceph_msg *msg; 856 struct ceph_mds_session_head *h; 857 858 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS, 859 false); 860 if (!msg) { 861 pr_err("create_session_msg ENOMEM creating msg\n"); 862 return NULL; 863 } 864 h = msg->front.iov_base; 865 h->op = cpu_to_le32(op); 866 h->seq = cpu_to_le64(seq); 867 868 return msg; 869 } 870 871 /* 872 * session message, specialization for CEPH_SESSION_REQUEST_OPEN 873 * to include additional client metadata fields. 874 */ 875 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq) 876 { 877 struct ceph_msg *msg; 878 struct ceph_mds_session_head *h; 879 int i = -1; 880 int metadata_bytes = 0; 881 int metadata_key_count = 0; 882 struct ceph_options *opt = mdsc->fsc->client->options; 883 struct ceph_mount_options *fsopt = mdsc->fsc->mount_options; 884 void *p; 885 886 const char* metadata[][2] = { 887 {"hostname", mdsc->nodename}, 888 {"kernel_version", init_utsname()->release}, 889 {"entity_id", opt->name ? : ""}, 890 {"root", fsopt->server_path ? : "/"}, 891 {NULL, NULL} 892 }; 893 894 /* Calculate serialized length of metadata */ 895 metadata_bytes = 4; /* map length */ 896 for (i = 0; metadata[i][0]; ++i) { 897 metadata_bytes += 8 + strlen(metadata[i][0]) + 898 strlen(metadata[i][1]); 899 metadata_key_count++; 900 } 901 902 /* Allocate the message */ 903 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes, 904 GFP_NOFS, false); 905 if (!msg) { 906 pr_err("create_session_msg ENOMEM creating msg\n"); 907 return NULL; 908 } 909 h = msg->front.iov_base; 910 h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN); 911 h->seq = cpu_to_le64(seq); 912 913 /* 914 * Serialize client metadata into waiting buffer space, using 915 * the format that userspace expects for map<string, string> 916 * 917 * ClientSession messages with metadata are v2 918 */ 919 msg->hdr.version = cpu_to_le16(2); 920 msg->hdr.compat_version = cpu_to_le16(1); 921 922 /* The write pointer, following the session_head structure */ 923 p = msg->front.iov_base + sizeof(*h); 924 925 /* Number of entries in the map */ 926 ceph_encode_32(&p, metadata_key_count); 927 928 /* Two length-prefixed strings for each entry in the map */ 929 for (i = 0; metadata[i][0]; ++i) { 930 size_t const key_len = strlen(metadata[i][0]); 931 size_t const val_len = strlen(metadata[i][1]); 932 933 ceph_encode_32(&p, key_len); 934 memcpy(p, metadata[i][0], key_len); 935 p += key_len; 936 ceph_encode_32(&p, val_len); 937 memcpy(p, metadata[i][1], val_len); 938 p += val_len; 939 } 940 941 return msg; 942 } 943 944 /* 945 * send session open request. 946 * 947 * called under mdsc->mutex 948 */ 949 static int __open_session(struct ceph_mds_client *mdsc, 950 struct ceph_mds_session *session) 951 { 952 struct ceph_msg *msg; 953 int mstate; 954 int mds = session->s_mds; 955 956 /* wait for mds to go active? 
*/ 957 mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); 958 dout("open_session to mds%d (%s)\n", mds, 959 ceph_mds_state_name(mstate)); 960 session->s_state = CEPH_MDS_SESSION_OPENING; 961 session->s_renew_requested = jiffies; 962 963 /* send connect message */ 964 msg = create_session_open_msg(mdsc, session->s_seq); 965 if (!msg) 966 return -ENOMEM; 967 ceph_con_send(&session->s_con, msg); 968 return 0; 969 } 970 971 /* 972 * open sessions for any export targets for the given mds 973 * 974 * called under mdsc->mutex 975 */ 976 static struct ceph_mds_session * 977 __open_export_target_session(struct ceph_mds_client *mdsc, int target) 978 { 979 struct ceph_mds_session *session; 980 981 session = __ceph_lookup_mds_session(mdsc, target); 982 if (!session) { 983 session = register_session(mdsc, target); 984 if (IS_ERR(session)) 985 return session; 986 } 987 if (session->s_state == CEPH_MDS_SESSION_NEW || 988 session->s_state == CEPH_MDS_SESSION_CLOSING) 989 __open_session(mdsc, session); 990 991 return session; 992 } 993 994 struct ceph_mds_session * 995 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target) 996 { 997 struct ceph_mds_session *session; 998 999 dout("open_export_target_session to mds%d\n", target); 1000 1001 mutex_lock(&mdsc->mutex); 1002 session = __open_export_target_session(mdsc, target); 1003 mutex_unlock(&mdsc->mutex); 1004 1005 return session; 1006 } 1007 1008 static void __open_export_target_sessions(struct ceph_mds_client *mdsc, 1009 struct ceph_mds_session *session) 1010 { 1011 struct ceph_mds_info *mi; 1012 struct ceph_mds_session *ts; 1013 int i, mds = session->s_mds; 1014 1015 if (mds >= mdsc->mdsmap->m_num_mds) 1016 return; 1017 1018 mi = &mdsc->mdsmap->m_info[mds]; 1019 dout("open_export_target_sessions for mds%d (%d targets)\n", 1020 session->s_mds, mi->num_export_targets); 1021 1022 for (i = 0; i < mi->num_export_targets; i++) { 1023 ts = __open_export_target_session(mdsc, mi->export_targets[i]); 1024 if (!IS_ERR(ts)) 1025 ceph_put_mds_session(ts); 1026 } 1027 } 1028 1029 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc, 1030 struct ceph_mds_session *session) 1031 { 1032 mutex_lock(&mdsc->mutex); 1033 __open_export_target_sessions(mdsc, session); 1034 mutex_unlock(&mdsc->mutex); 1035 } 1036 1037 /* 1038 * session caps 1039 */ 1040 1041 /* caller holds s_cap_lock, we drop it */ 1042 static void cleanup_cap_releases(struct ceph_mds_client *mdsc, 1043 struct ceph_mds_session *session) 1044 __releases(session->s_cap_lock) 1045 { 1046 LIST_HEAD(tmp_list); 1047 list_splice_init(&session->s_cap_releases, &tmp_list); 1048 session->s_num_cap_releases = 0; 1049 spin_unlock(&session->s_cap_lock); 1050 1051 dout("cleanup_cap_releases mds%d\n", session->s_mds); 1052 while (!list_empty(&tmp_list)) { 1053 struct ceph_cap *cap; 1054 /* zero out the in-progress message */ 1055 cap = list_first_entry(&tmp_list, 1056 struct ceph_cap, session_caps); 1057 list_del(&cap->session_caps); 1058 ceph_put_cap(mdsc, cap); 1059 } 1060 } 1061 1062 static void cleanup_session_requests(struct ceph_mds_client *mdsc, 1063 struct ceph_mds_session *session) 1064 { 1065 struct ceph_mds_request *req; 1066 struct rb_node *p; 1067 1068 dout("cleanup_session_requests mds%d\n", session->s_mds); 1069 mutex_lock(&mdsc->mutex); 1070 while (!list_empty(&session->s_unsafe)) { 1071 req = list_first_entry(&session->s_unsafe, 1072 struct ceph_mds_request, r_unsafe_item); 1073 pr_warn_ratelimited(" dropping unsafe request %llu\n", 1074 req->r_tid); 1075 
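		/*
		 * An "unsafe" request here is one the MDS acked but never
		 * committed; with the session being torn down the safe reply
		 * will not arrive, so drop it rather than leave it on
		 * s_unsafe indefinitely.
		 */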
__unregister_request(mdsc, req); 1076 } 1077 /* zero r_attempts, so kick_requests() will re-send requests */ 1078 p = rb_first(&mdsc->request_tree); 1079 while (p) { 1080 req = rb_entry(p, struct ceph_mds_request, r_node); 1081 p = rb_next(p); 1082 if (req->r_session && 1083 req->r_session->s_mds == session->s_mds) 1084 req->r_attempts = 0; 1085 } 1086 mutex_unlock(&mdsc->mutex); 1087 } 1088 1089 /* 1090 * Helper to safely iterate over all caps associated with a session, with 1091 * special care taken to handle a racing __ceph_remove_cap(). 1092 * 1093 * Caller must hold session s_mutex. 1094 */ 1095 static int iterate_session_caps(struct ceph_mds_session *session, 1096 int (*cb)(struct inode *, struct ceph_cap *, 1097 void *), void *arg) 1098 { 1099 struct list_head *p; 1100 struct ceph_cap *cap; 1101 struct inode *inode, *last_inode = NULL; 1102 struct ceph_cap *old_cap = NULL; 1103 int ret; 1104 1105 dout("iterate_session_caps %p mds%d\n", session, session->s_mds); 1106 spin_lock(&session->s_cap_lock); 1107 p = session->s_caps.next; 1108 while (p != &session->s_caps) { 1109 cap = list_entry(p, struct ceph_cap, session_caps); 1110 inode = igrab(&cap->ci->vfs_inode); 1111 if (!inode) { 1112 p = p->next; 1113 continue; 1114 } 1115 session->s_cap_iterator = cap; 1116 spin_unlock(&session->s_cap_lock); 1117 1118 if (last_inode) { 1119 iput(last_inode); 1120 last_inode = NULL; 1121 } 1122 if (old_cap) { 1123 ceph_put_cap(session->s_mdsc, old_cap); 1124 old_cap = NULL; 1125 } 1126 1127 ret = cb(inode, cap, arg); 1128 last_inode = inode; 1129 1130 spin_lock(&session->s_cap_lock); 1131 p = p->next; 1132 if (!cap->ci) { 1133 dout("iterate_session_caps finishing cap %p removal\n", 1134 cap); 1135 BUG_ON(cap->session != session); 1136 cap->session = NULL; 1137 list_del_init(&cap->session_caps); 1138 session->s_nr_caps--; 1139 if (cap->queue_release) { 1140 list_add_tail(&cap->session_caps, 1141 &session->s_cap_releases); 1142 session->s_num_cap_releases++; 1143 } else { 1144 old_cap = cap; /* put_cap it w/o locks held */ 1145 } 1146 } 1147 if (ret < 0) 1148 goto out; 1149 } 1150 ret = 0; 1151 out: 1152 session->s_cap_iterator = NULL; 1153 spin_unlock(&session->s_cap_lock); 1154 1155 iput(last_inode); 1156 if (old_cap) 1157 ceph_put_cap(session->s_mdsc, old_cap); 1158 1159 return ret; 1160 } 1161 1162 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, 1163 void *arg) 1164 { 1165 struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg; 1166 struct ceph_inode_info *ci = ceph_inode(inode); 1167 LIST_HEAD(to_remove); 1168 bool drop = false; 1169 bool invalidate = false; 1170 1171 dout("removing cap %p, ci is %p, inode is %p\n", 1172 cap, ci, &ci->vfs_inode); 1173 spin_lock(&ci->i_ceph_lock); 1174 __ceph_remove_cap(cap, false); 1175 if (!ci->i_auth_cap) { 1176 struct ceph_cap_flush *cf; 1177 struct ceph_mds_client *mdsc = fsc->mdsc; 1178 1179 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED; 1180 1181 if (ci->i_wrbuffer_ref > 0 && 1182 READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) 1183 invalidate = true; 1184 1185 while (!list_empty(&ci->i_cap_flush_list)) { 1186 cf = list_first_entry(&ci->i_cap_flush_list, 1187 struct ceph_cap_flush, i_list); 1188 list_move(&cf->i_list, &to_remove); 1189 } 1190 1191 spin_lock(&mdsc->cap_dirty_lock); 1192 1193 list_for_each_entry(cf, &to_remove, i_list) 1194 list_del(&cf->g_list); 1195 1196 if (!list_empty(&ci->i_dirty_item)) { 1197 pr_warn_ratelimited( 1198 " dropping dirty %s state for %p %lld\n", 1199 ceph_cap_string(ci->i_dirty_caps), 1200 
inode, ceph_ino(inode)); 1201 ci->i_dirty_caps = 0; 1202 list_del_init(&ci->i_dirty_item); 1203 drop = true; 1204 } 1205 if (!list_empty(&ci->i_flushing_item)) { 1206 pr_warn_ratelimited( 1207 " dropping dirty+flushing %s state for %p %lld\n", 1208 ceph_cap_string(ci->i_flushing_caps), 1209 inode, ceph_ino(inode)); 1210 ci->i_flushing_caps = 0; 1211 list_del_init(&ci->i_flushing_item); 1212 mdsc->num_cap_flushing--; 1213 drop = true; 1214 } 1215 spin_unlock(&mdsc->cap_dirty_lock); 1216 1217 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) { 1218 list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove); 1219 ci->i_prealloc_cap_flush = NULL; 1220 } 1221 } 1222 spin_unlock(&ci->i_ceph_lock); 1223 while (!list_empty(&to_remove)) { 1224 struct ceph_cap_flush *cf; 1225 cf = list_first_entry(&to_remove, 1226 struct ceph_cap_flush, i_list); 1227 list_del(&cf->i_list); 1228 ceph_free_cap_flush(cf); 1229 } 1230 1231 wake_up_all(&ci->i_cap_wq); 1232 if (invalidate) 1233 ceph_queue_invalidate(inode); 1234 if (drop) 1235 iput(inode); 1236 return 0; 1237 } 1238 1239 /* 1240 * caller must hold session s_mutex 1241 */ 1242 static void remove_session_caps(struct ceph_mds_session *session) 1243 { 1244 struct ceph_fs_client *fsc = session->s_mdsc->fsc; 1245 struct super_block *sb = fsc->sb; 1246 dout("remove_session_caps on %p\n", session); 1247 iterate_session_caps(session, remove_session_caps_cb, fsc); 1248 1249 wake_up_all(&fsc->mdsc->cap_flushing_wq); 1250 1251 spin_lock(&session->s_cap_lock); 1252 if (session->s_nr_caps > 0) { 1253 struct inode *inode; 1254 struct ceph_cap *cap, *prev = NULL; 1255 struct ceph_vino vino; 1256 /* 1257 * iterate_session_caps() skips inodes that are being 1258 * deleted, we need to wait until deletions are complete. 1259 * __wait_on_freeing_inode() is designed for the job, 1260 * but it is not exported, so use lookup inode function 1261 * to access it. 1262 */ 1263 while (!list_empty(&session->s_caps)) { 1264 cap = list_entry(session->s_caps.next, 1265 struct ceph_cap, session_caps); 1266 if (cap == prev) 1267 break; 1268 prev = cap; 1269 vino = cap->ci->i_vino; 1270 spin_unlock(&session->s_cap_lock); 1271 1272 inode = ceph_find_inode(sb, vino); 1273 iput(inode); 1274 1275 spin_lock(&session->s_cap_lock); 1276 } 1277 } 1278 1279 // drop cap expires and unlock s_cap_lock 1280 cleanup_cap_releases(session->s_mdsc, session); 1281 1282 BUG_ON(session->s_nr_caps > 0); 1283 BUG_ON(!list_empty(&session->s_cap_flushing)); 1284 } 1285 1286 /* 1287 * wake up any threads waiting on this session's caps. if the cap is 1288 * old (didn't get renewed on the client reconnect), remove it now. 1289 * 1290 * caller must hold s_mutex. 1291 */ 1292 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, 1293 void *arg) 1294 { 1295 struct ceph_inode_info *ci = ceph_inode(inode); 1296 1297 if (arg) { 1298 spin_lock(&ci->i_ceph_lock); 1299 ci->i_wanted_max_size = 0; 1300 ci->i_requested_max_size = 0; 1301 spin_unlock(&ci->i_ceph_lock); 1302 } 1303 wake_up_all(&ci->i_cap_wq); 1304 return 0; 1305 } 1306 1307 static void wake_up_session_caps(struct ceph_mds_session *session, 1308 int reconnect) 1309 { 1310 dout("wake_up_session_caps %p mds%d\n", session, session->s_mds); 1311 iterate_session_caps(session, wake_up_session_cb, 1312 (void *)(unsigned long)reconnect); 1313 } 1314 1315 /* 1316 * Send periodic message to MDS renewing all currently held caps. The 1317 * ack will reset the expiration for all caps from this session. 
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}

static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}


/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 1;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}
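/*
 * A rough sketch of the renewal timing used above (a restatement, not
 * additional behaviour): the client stamps s_renew_requested when it
 * sends RENEWCAPS, and on the ack renewed_caps() extends the lease to
 *
 *	s_cap_ttl = s_renew_requested + m_session_timeout * HZ
 *
 * so caps are considered stale once jiffies passes s_cap_ttl without a
 * successful renewal, which is what the "mds%d caps stale" message in
 * send_renew_caps() reports.
 */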
/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used), ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps || ci->i_flushing_caps ||
		    !list_empty(&ci->i_cap_snaps))
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
	}
	/* The inode has cached pages, but it's no longer used.
	 * we can safely drop it */
	if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	session->s_trim_caps--;
	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
	} else {
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		d_prune_aliases(inode);
		dout("trim_caps_cb %p cap %p pruned, count now %d\n",
		     inode, cap, atomic_read(&inode->i_count));
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}

/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}

	ceph_send_cap_releases(mdsc, session);
	return 0;
}

static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		if (cf->tid <= want_flush_tid) {
			dout("check_caps_flush still flushing tid "
			     "%llu <= %llu\n", cf->tid, want_flush_tid);
			ret = 0;
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}

/*
 * flush all dirty inode data to disk.
1539 * 1540 * returns true if we've flushed through want_flush_tid 1541 */ 1542 static void wait_caps_flush(struct ceph_mds_client *mdsc, 1543 u64 want_flush_tid) 1544 { 1545 dout("check_caps_flush want %llu\n", want_flush_tid); 1546 1547 wait_event(mdsc->cap_flushing_wq, 1548 check_caps_flush(mdsc, want_flush_tid)); 1549 1550 dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid); 1551 } 1552 1553 /* 1554 * called under s_mutex 1555 */ 1556 void ceph_send_cap_releases(struct ceph_mds_client *mdsc, 1557 struct ceph_mds_session *session) 1558 { 1559 struct ceph_msg *msg = NULL; 1560 struct ceph_mds_cap_release *head; 1561 struct ceph_mds_cap_item *item; 1562 struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc; 1563 struct ceph_cap *cap; 1564 LIST_HEAD(tmp_list); 1565 int num_cap_releases; 1566 __le32 barrier, *cap_barrier; 1567 1568 down_read(&osdc->lock); 1569 barrier = cpu_to_le32(osdc->epoch_barrier); 1570 up_read(&osdc->lock); 1571 1572 spin_lock(&session->s_cap_lock); 1573 again: 1574 list_splice_init(&session->s_cap_releases, &tmp_list); 1575 num_cap_releases = session->s_num_cap_releases; 1576 session->s_num_cap_releases = 0; 1577 spin_unlock(&session->s_cap_lock); 1578 1579 while (!list_empty(&tmp_list)) { 1580 if (!msg) { 1581 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, 1582 PAGE_SIZE, GFP_NOFS, false); 1583 if (!msg) 1584 goto out_err; 1585 head = msg->front.iov_base; 1586 head->num = cpu_to_le32(0); 1587 msg->front.iov_len = sizeof(*head); 1588 1589 msg->hdr.version = cpu_to_le16(2); 1590 msg->hdr.compat_version = cpu_to_le16(1); 1591 } 1592 1593 cap = list_first_entry(&tmp_list, struct ceph_cap, 1594 session_caps); 1595 list_del(&cap->session_caps); 1596 num_cap_releases--; 1597 1598 head = msg->front.iov_base; 1599 le32_add_cpu(&head->num, 1); 1600 item = msg->front.iov_base + msg->front.iov_len; 1601 item->ino = cpu_to_le64(cap->cap_ino); 1602 item->cap_id = cpu_to_le64(cap->cap_id); 1603 item->migrate_seq = cpu_to_le32(cap->mseq); 1604 item->seq = cpu_to_le32(cap->issue_seq); 1605 msg->front.iov_len += sizeof(*item); 1606 1607 ceph_put_cap(mdsc, cap); 1608 1609 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { 1610 // Append cap_barrier field 1611 cap_barrier = msg->front.iov_base + msg->front.iov_len; 1612 *cap_barrier = barrier; 1613 msg->front.iov_len += sizeof(*cap_barrier); 1614 1615 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 1616 dout("send_cap_releases mds%d %p\n", session->s_mds, msg); 1617 ceph_con_send(&session->s_con, msg); 1618 msg = NULL; 1619 } 1620 } 1621 1622 BUG_ON(num_cap_releases != 0); 1623 1624 spin_lock(&session->s_cap_lock); 1625 if (!list_empty(&session->s_cap_releases)) 1626 goto again; 1627 spin_unlock(&session->s_cap_lock); 1628 1629 if (msg) { 1630 // Append cap_barrier field 1631 cap_barrier = msg->front.iov_base + msg->front.iov_len; 1632 *cap_barrier = barrier; 1633 msg->front.iov_len += sizeof(*cap_barrier); 1634 1635 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 1636 dout("send_cap_releases mds%d %p\n", session->s_mds, msg); 1637 ceph_con_send(&session->s_con, msg); 1638 } 1639 return; 1640 out_err: 1641 pr_err("send_cap_releases mds%d, failed to allocate message\n", 1642 session->s_mds); 1643 spin_lock(&session->s_cap_lock); 1644 list_splice(&tmp_list, &session->s_cap_releases); 1645 session->s_num_cap_releases += num_cap_releases; 1646 spin_unlock(&session->s_cap_lock); 1647 } 1648 1649 /* 1650 * requests 1651 */ 1652 1653 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req, 1654 struct 
inode *dir) 1655 { 1656 struct ceph_inode_info *ci = ceph_inode(dir); 1657 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1658 struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options; 1659 size_t size = sizeof(struct ceph_mds_reply_dir_entry); 1660 int order, num_entries; 1661 1662 spin_lock(&ci->i_ceph_lock); 1663 num_entries = ci->i_files + ci->i_subdirs; 1664 spin_unlock(&ci->i_ceph_lock); 1665 num_entries = max(num_entries, 1); 1666 num_entries = min(num_entries, opt->max_readdir); 1667 1668 order = get_order(size * num_entries); 1669 while (order >= 0) { 1670 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL | 1671 __GFP_NOWARN, 1672 order); 1673 if (rinfo->dir_entries) 1674 break; 1675 order--; 1676 } 1677 if (!rinfo->dir_entries) 1678 return -ENOMEM; 1679 1680 num_entries = (PAGE_SIZE << order) / size; 1681 num_entries = min(num_entries, opt->max_readdir); 1682 1683 rinfo->dir_buf_size = PAGE_SIZE << order; 1684 req->r_num_caps = num_entries + 1; 1685 req->r_args.readdir.max_entries = cpu_to_le32(num_entries); 1686 req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes); 1687 return 0; 1688 } 1689 1690 /* 1691 * Create an mds request. 1692 */ 1693 struct ceph_mds_request * 1694 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) 1695 { 1696 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); 1697 1698 if (!req) 1699 return ERR_PTR(-ENOMEM); 1700 1701 mutex_init(&req->r_fill_mutex); 1702 req->r_mdsc = mdsc; 1703 req->r_started = jiffies; 1704 req->r_resend_mds = -1; 1705 INIT_LIST_HEAD(&req->r_unsafe_dir_item); 1706 INIT_LIST_HEAD(&req->r_unsafe_target_item); 1707 req->r_fmode = -1; 1708 kref_init(&req->r_kref); 1709 RB_CLEAR_NODE(&req->r_node); 1710 INIT_LIST_HEAD(&req->r_wait); 1711 init_completion(&req->r_completion); 1712 init_completion(&req->r_safe_completion); 1713 INIT_LIST_HEAD(&req->r_unsafe_item); 1714 1715 req->r_stamp = timespec_trunc(current_kernel_time(), mdsc->fsc->sb->s_time_gran); 1716 1717 req->r_op = op; 1718 req->r_direct_mode = mode; 1719 return req; 1720 } 1721 1722 /* 1723 * return oldest (lowest) request, tid in request tree, 0 if none. 1724 * 1725 * called under mdsc->mutex. 1726 */ 1727 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) 1728 { 1729 if (RB_EMPTY_ROOT(&mdsc->request_tree)) 1730 return NULL; 1731 return rb_entry(rb_first(&mdsc->request_tree), 1732 struct ceph_mds_request, r_node); 1733 } 1734 1735 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc) 1736 { 1737 return mdsc->oldest_tid; 1738 } 1739 1740 /* 1741 * Build a dentry's path. Allocate on heap; caller must kfree. Based 1742 * on build_path_from_dentry in fs/cifs/dir.c. 1743 * 1744 * If @stop_on_nosnap, generate path relative to the first non-snapped 1745 * inode. 1746 * 1747 * Encode hidden .snap dirs as a double /, i.e. 
1748 * foo/.snap/bar -> foo//bar 1749 */ 1750 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, 1751 int stop_on_nosnap) 1752 { 1753 struct dentry *temp; 1754 char *path; 1755 int len, pos; 1756 unsigned seq; 1757 1758 if (!dentry) 1759 return ERR_PTR(-EINVAL); 1760 1761 retry: 1762 len = 0; 1763 seq = read_seqbegin(&rename_lock); 1764 rcu_read_lock(); 1765 for (temp = dentry; !IS_ROOT(temp);) { 1766 struct inode *inode = d_inode(temp); 1767 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) 1768 len++; /* slash only */ 1769 else if (stop_on_nosnap && inode && 1770 ceph_snap(inode) == CEPH_NOSNAP) 1771 break; 1772 else 1773 len += 1 + temp->d_name.len; 1774 temp = temp->d_parent; 1775 } 1776 rcu_read_unlock(); 1777 if (len) 1778 len--; /* no leading '/' */ 1779 1780 path = kmalloc(len+1, GFP_NOFS); 1781 if (!path) 1782 return ERR_PTR(-ENOMEM); 1783 pos = len; 1784 path[pos] = 0; /* trailing null */ 1785 rcu_read_lock(); 1786 for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) { 1787 struct inode *inode; 1788 1789 spin_lock(&temp->d_lock); 1790 inode = d_inode(temp); 1791 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { 1792 dout("build_path path+%d: %p SNAPDIR\n", 1793 pos, temp); 1794 } else if (stop_on_nosnap && inode && 1795 ceph_snap(inode) == CEPH_NOSNAP) { 1796 spin_unlock(&temp->d_lock); 1797 break; 1798 } else { 1799 pos -= temp->d_name.len; 1800 if (pos < 0) { 1801 spin_unlock(&temp->d_lock); 1802 break; 1803 } 1804 strncpy(path + pos, temp->d_name.name, 1805 temp->d_name.len); 1806 } 1807 spin_unlock(&temp->d_lock); 1808 if (pos) 1809 path[--pos] = '/'; 1810 temp = temp->d_parent; 1811 } 1812 rcu_read_unlock(); 1813 if (pos != 0 || read_seqretry(&rename_lock, seq)) { 1814 pr_err("build_path did not end path lookup where " 1815 "expected, namelen is %d, pos is %d\n", len, pos); 1816 /* presumably this is only possible if racing with a 1817 rename of one of the parent directories (we can not 1818 lock the dentries above us to prevent this, but 1819 retrying should be harmless) */ 1820 kfree(path); 1821 goto retry; 1822 } 1823 1824 *base = ceph_ino(d_inode(temp)); 1825 *plen = len; 1826 dout("build_path on %p %d built %llx '%.*s'\n", 1827 dentry, d_count(dentry), *base, len, path); 1828 return path; 1829 } 1830 1831 static int build_dentry_path(struct dentry *dentry, struct inode *dir, 1832 const char **ppath, int *ppathlen, u64 *pino, 1833 int *pfreepath) 1834 { 1835 char *path; 1836 1837 rcu_read_lock(); 1838 if (!dir) 1839 dir = d_inode_rcu(dentry->d_parent); 1840 if (dir && ceph_snap(dir) == CEPH_NOSNAP) { 1841 *pino = ceph_ino(dir); 1842 rcu_read_unlock(); 1843 *ppath = dentry->d_name.name; 1844 *ppathlen = dentry->d_name.len; 1845 return 0; 1846 } 1847 rcu_read_unlock(); 1848 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); 1849 if (IS_ERR(path)) 1850 return PTR_ERR(path); 1851 *ppath = path; 1852 *pfreepath = 1; 1853 return 0; 1854 } 1855 1856 static int build_inode_path(struct inode *inode, 1857 const char **ppath, int *ppathlen, u64 *pino, 1858 int *pfreepath) 1859 { 1860 struct dentry *dentry; 1861 char *path; 1862 1863 if (ceph_snap(inode) == CEPH_NOSNAP) { 1864 *pino = ceph_ino(inode); 1865 *ppathlen = 0; 1866 return 0; 1867 } 1868 dentry = d_find_alias(inode); 1869 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); 1870 dput(dentry); 1871 if (IS_ERR(path)) 1872 return PTR_ERR(path); 1873 *ppath = path; 1874 *pfreepath = 1; 1875 return 0; 1876 } 1877 1878 /* 1879 * request arguments may be specified via an inode *, a dentry *, or 1880 * an 
explicit ino+path. 1881 */ 1882 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, 1883 struct inode *rdiri, const char *rpath, 1884 u64 rino, const char **ppath, int *pathlen, 1885 u64 *ino, int *freepath) 1886 { 1887 int r = 0; 1888 1889 if (rinode) { 1890 r = build_inode_path(rinode, ppath, pathlen, ino, freepath); 1891 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), 1892 ceph_snap(rinode)); 1893 } else if (rdentry) { 1894 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino, 1895 freepath); 1896 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, 1897 *ppath); 1898 } else if (rpath || rino) { 1899 *ino = rino; 1900 *ppath = rpath; 1901 *pathlen = rpath ? strlen(rpath) : 0; 1902 dout(" path %.*s\n", *pathlen, rpath); 1903 } 1904 1905 return r; 1906 } 1907 1908 /* 1909 * called under mdsc->mutex 1910 */ 1911 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, 1912 struct ceph_mds_request *req, 1913 int mds, bool drop_cap_releases) 1914 { 1915 struct ceph_msg *msg; 1916 struct ceph_mds_request_head *head; 1917 const char *path1 = NULL; 1918 const char *path2 = NULL; 1919 u64 ino1 = 0, ino2 = 0; 1920 int pathlen1 = 0, pathlen2 = 0; 1921 int freepath1 = 0, freepath2 = 0; 1922 int len; 1923 u16 releases; 1924 void *p, *end; 1925 int ret; 1926 1927 ret = set_request_path_attr(req->r_inode, req->r_dentry, 1928 req->r_parent, req->r_path1, req->r_ino1.ino, 1929 &path1, &pathlen1, &ino1, &freepath1); 1930 if (ret < 0) { 1931 msg = ERR_PTR(ret); 1932 goto out; 1933 } 1934 1935 ret = set_request_path_attr(NULL, req->r_old_dentry, 1936 req->r_old_dentry_dir, 1937 req->r_path2, req->r_ino2.ino, 1938 &path2, &pathlen2, &ino2, &freepath2); 1939 if (ret < 0) { 1940 msg = ERR_PTR(ret); 1941 goto out_free1; 1942 } 1943 1944 len = sizeof(*head) + 1945 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) + 1946 sizeof(struct ceph_timespec); 1947 1948 /* calculate (max) length for cap releases */ 1949 len += sizeof(struct ceph_mds_request_release) * 1950 (!!req->r_inode_drop + !!req->r_dentry_drop + 1951 !!req->r_old_inode_drop + !!req->r_old_dentry_drop); 1952 if (req->r_dentry_drop) 1953 len += req->r_dentry->d_name.len; 1954 if (req->r_old_dentry_drop) 1955 len += req->r_old_dentry->d_name.len; 1956 1957 msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false); 1958 if (!msg) { 1959 msg = ERR_PTR(-ENOMEM); 1960 goto out_free2; 1961 } 1962 1963 msg->hdr.version = cpu_to_le16(2); 1964 msg->hdr.tid = cpu_to_le64(req->r_tid); 1965 1966 head = msg->front.iov_base; 1967 p = msg->front.iov_base + sizeof(*head); 1968 end = msg->front.iov_base + msg->front.iov_len; 1969 1970 head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); 1971 head->op = cpu_to_le32(req->r_op); 1972 head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid)); 1973 head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid)); 1974 head->args = req->r_args; 1975 1976 ceph_encode_filepath(&p, end, ino1, path1); 1977 ceph_encode_filepath(&p, end, ino2, path2); 1978 1979 /* make note of release offset, in case we need to replay */ 1980 req->r_request_release_offset = p - msg->front.iov_base; 1981 1982 /* cap releases */ 1983 releases = 0; 1984 if (req->r_inode_drop) 1985 releases += ceph_encode_inode_release(&p, 1986 req->r_inode ? 
req->r_inode : d_inode(req->r_dentry), 1987 mds, req->r_inode_drop, req->r_inode_unless, 0); 1988 if (req->r_dentry_drop) 1989 releases += ceph_encode_dentry_release(&p, req->r_dentry, 1990 req->r_parent, mds, req->r_dentry_drop, 1991 req->r_dentry_unless); 1992 if (req->r_old_dentry_drop) 1993 releases += ceph_encode_dentry_release(&p, req->r_old_dentry, 1994 req->r_old_dentry_dir, mds, 1995 req->r_old_dentry_drop, 1996 req->r_old_dentry_unless); 1997 if (req->r_old_inode_drop) 1998 releases += ceph_encode_inode_release(&p, 1999 d_inode(req->r_old_dentry), 2000 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); 2001 2002 if (drop_cap_releases) { 2003 releases = 0; 2004 p = msg->front.iov_base + req->r_request_release_offset; 2005 } 2006 2007 head->num_releases = cpu_to_le16(releases); 2008 2009 /* time stamp */ 2010 { 2011 struct ceph_timespec ts; 2012 ceph_encode_timespec(&ts, &req->r_stamp); 2013 ceph_encode_copy(&p, &ts, sizeof(ts)); 2014 } 2015 2016 BUG_ON(p > end); 2017 msg->front.iov_len = p - msg->front.iov_base; 2018 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2019 2020 if (req->r_pagelist) { 2021 struct ceph_pagelist *pagelist = req->r_pagelist; 2022 refcount_inc(&pagelist->refcnt); 2023 ceph_msg_data_add_pagelist(msg, pagelist); 2024 msg->hdr.data_len = cpu_to_le32(pagelist->length); 2025 } else { 2026 msg->hdr.data_len = 0; 2027 } 2028 2029 msg->hdr.data_off = cpu_to_le16(0); 2030 2031 out_free2: 2032 if (freepath2) 2033 kfree((char *)path2); 2034 out_free1: 2035 if (freepath1) 2036 kfree((char *)path1); 2037 out: 2038 return msg; 2039 } 2040 2041 /* 2042 * called under mdsc->mutex if error, under no mutex if 2043 * success. 2044 */ 2045 static void complete_request(struct ceph_mds_client *mdsc, 2046 struct ceph_mds_request *req) 2047 { 2048 if (req->r_callback) 2049 req->r_callback(mdsc, req); 2050 else 2051 complete_all(&req->r_completion); 2052 } 2053 2054 /* 2055 * called under mdsc->mutex 2056 */ 2057 static int __prepare_send_request(struct ceph_mds_client *mdsc, 2058 struct ceph_mds_request *req, 2059 int mds, bool drop_cap_releases) 2060 { 2061 struct ceph_mds_request_head *rhead; 2062 struct ceph_msg *msg; 2063 int flags = 0; 2064 2065 req->r_attempts++; 2066 if (req->r_inode) { 2067 struct ceph_cap *cap = 2068 ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); 2069 2070 if (cap) 2071 req->r_sent_on_mseq = cap->mseq; 2072 else 2073 req->r_sent_on_mseq = -1; 2074 } 2075 dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, 2076 req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); 2077 2078 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { 2079 void *p; 2080 /* 2081 * Replay. Do not regenerate message (and rebuild 2082 * paths, etc.); just use the original message. 2083 * Rebuilding paths will break for renames because 2084 * d_move mangles the src name. 
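 * Instead we reuse req->r_request as-is below: set CEPH_MDS_FLAG_REPLAY
 * in the header, point ino at the target inode if we know it, update
 * num_retry, drop any cap/dentry releases (num_releases = 0), and
 * re-encode only the timestamp at r_request_release_offset, trimming
 * the front length to match.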
2085 */ 2086 msg = req->r_request; 2087 rhead = msg->front.iov_base; 2088 2089 flags = le32_to_cpu(rhead->flags); 2090 flags |= CEPH_MDS_FLAG_REPLAY; 2091 rhead->flags = cpu_to_le32(flags); 2092 2093 if (req->r_target_inode) 2094 rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); 2095 2096 rhead->num_retry = req->r_attempts - 1; 2097 2098 /* remove cap/dentry releases from message */ 2099 rhead->num_releases = 0; 2100 2101 /* time stamp */ 2102 p = msg->front.iov_base + req->r_request_release_offset; 2103 { 2104 struct ceph_timespec ts; 2105 ceph_encode_timespec(&ts, &req->r_stamp); 2106 ceph_encode_copy(&p, &ts, sizeof(ts)); 2107 } 2108 2109 msg->front.iov_len = p - msg->front.iov_base; 2110 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2111 return 0; 2112 } 2113 2114 if (req->r_request) { 2115 ceph_msg_put(req->r_request); 2116 req->r_request = NULL; 2117 } 2118 msg = create_request_message(mdsc, req, mds, drop_cap_releases); 2119 if (IS_ERR(msg)) { 2120 req->r_err = PTR_ERR(msg); 2121 return PTR_ERR(msg); 2122 } 2123 req->r_request = msg; 2124 2125 rhead = msg->front.iov_base; 2126 rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); 2127 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) 2128 flags |= CEPH_MDS_FLAG_REPLAY; 2129 if (req->r_parent) 2130 flags |= CEPH_MDS_FLAG_WANT_DENTRY; 2131 rhead->flags = cpu_to_le32(flags); 2132 rhead->num_fwd = req->r_num_fwd; 2133 rhead->num_retry = req->r_attempts - 1; 2134 rhead->ino = 0; 2135 2136 dout(" r_parent = %p\n", req->r_parent); 2137 return 0; 2138 } 2139 2140 /* 2141 * send request, or put it on the appropriate wait list. 2142 */ 2143 static int __do_request(struct ceph_mds_client *mdsc, 2144 struct ceph_mds_request *req) 2145 { 2146 struct ceph_mds_session *session = NULL; 2147 int mds = -1; 2148 int err = 0; 2149 2150 if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { 2151 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) 2152 __unregister_request(mdsc, req); 2153 goto out; 2154 } 2155 2156 if (req->r_timeout && 2157 time_after_eq(jiffies, req->r_started + req->r_timeout)) { 2158 dout("do_request timed out\n"); 2159 err = -EIO; 2160 goto finish; 2161 } 2162 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { 2163 dout("do_request forced umount\n"); 2164 err = -EIO; 2165 goto finish; 2166 } 2167 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) { 2168 if (mdsc->mdsmap_err) { 2169 err = mdsc->mdsmap_err; 2170 dout("do_request mdsmap err %d\n", err); 2171 goto finish; 2172 } 2173 if (mdsc->mdsmap->m_epoch == 0) { 2174 dout("do_request no mdsmap, waiting for map\n"); 2175 list_add(&req->r_wait, &mdsc->waiting_for_map); 2176 goto finish; 2177 } 2178 if (!(mdsc->fsc->mount_options->flags & 2179 CEPH_MOUNT_OPT_MOUNTWAIT) && 2180 !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) { 2181 err = -ENOENT; 2182 pr_info("probably no mds server is up\n"); 2183 goto finish; 2184 } 2185 } 2186 2187 put_request_session(req); 2188 2189 mds = __choose_mds(mdsc, req); 2190 if (mds < 0 || 2191 ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { 2192 dout("do_request no mds or not active, waiting for map\n"); 2193 list_add(&req->r_wait, &mdsc->waiting_for_map); 2194 goto out; 2195 } 2196 2197 /* get, open session */ 2198 session = __ceph_lookup_mds_session(mdsc, mds); 2199 if (!session) { 2200 session = register_session(mdsc, mds); 2201 if (IS_ERR(session)) { 2202 err = PTR_ERR(session); 2203 goto finish; 2204 } 2205 } 2206 req->r_session = get_session(session); 
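/*
 * The session must be OPEN (or HUNG) before we can send.  A REJECTED
 * session fails the request with -EACCES; otherwise the request is
 * parked on s_waiting (after kicking off __open_session() for a NEW or
 * CLOSING session) and resubmitted later via __wake_requests().
 */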
2207 2208 dout("do_request mds%d session %p state %s\n", mds, session, 2209 ceph_session_state_name(session->s_state)); 2210 if (session->s_state != CEPH_MDS_SESSION_OPEN && 2211 session->s_state != CEPH_MDS_SESSION_HUNG) { 2212 if (session->s_state == CEPH_MDS_SESSION_REJECTED) { 2213 err = -EACCES; 2214 goto out_session; 2215 } 2216 if (session->s_state == CEPH_MDS_SESSION_NEW || 2217 session->s_state == CEPH_MDS_SESSION_CLOSING) 2218 __open_session(mdsc, session); 2219 list_add(&req->r_wait, &session->s_waiting); 2220 goto out_session; 2221 } 2222 2223 /* send request */ 2224 req->r_resend_mds = -1; /* forget any previous mds hint */ 2225 2226 if (req->r_request_started == 0) /* note request start time */ 2227 req->r_request_started = jiffies; 2228 2229 err = __prepare_send_request(mdsc, req, mds, false); 2230 if (!err) { 2231 ceph_msg_get(req->r_request); 2232 ceph_con_send(&session->s_con, req->r_request); 2233 } 2234 2235 out_session: 2236 ceph_put_mds_session(session); 2237 finish: 2238 if (err) { 2239 dout("__do_request early error %d\n", err); 2240 req->r_err = err; 2241 complete_request(mdsc, req); 2242 __unregister_request(mdsc, req); 2243 } 2244 out: 2245 return err; 2246 } 2247 2248 /* 2249 * called under mdsc->mutex 2250 */ 2251 static void __wake_requests(struct ceph_mds_client *mdsc, 2252 struct list_head *head) 2253 { 2254 struct ceph_mds_request *req; 2255 LIST_HEAD(tmp_list); 2256 2257 list_splice_init(head, &tmp_list); 2258 2259 while (!list_empty(&tmp_list)) { 2260 req = list_entry(tmp_list.next, 2261 struct ceph_mds_request, r_wait); 2262 list_del_init(&req->r_wait); 2263 dout(" wake request %p tid %llu\n", req, req->r_tid); 2264 __do_request(mdsc, req); 2265 } 2266 } 2267 2268 /* 2269 * Wake up threads with requests pending for @mds, so that they can 2270 * resubmit their requests to a possibly different mds. 2271 */ 2272 static void kick_requests(struct ceph_mds_client *mdsc, int mds) 2273 { 2274 struct ceph_mds_request *req; 2275 struct rb_node *p = rb_first(&mdsc->request_tree); 2276 2277 dout("kick_requests mds%d\n", mds); 2278 while (p) { 2279 req = rb_entry(p, struct ceph_mds_request, r_node); 2280 p = rb_next(p); 2281 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) 2282 continue; 2283 if (req->r_attempts > 0) 2284 continue; /* only new requests */ 2285 if (req->r_session && 2286 req->r_session->s_mds == mds) { 2287 dout(" kicking tid %llu\n", req->r_tid); 2288 list_del_init(&req->r_wait); 2289 __do_request(mdsc, req); 2290 } 2291 } 2292 } 2293 2294 void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, 2295 struct ceph_mds_request *req) 2296 { 2297 dout("submit_request on %p\n", req); 2298 mutex_lock(&mdsc->mutex); 2299 __register_request(mdsc, req, NULL); 2300 __do_request(mdsc, req); 2301 mutex_unlock(&mdsc->mutex); 2302 } 2303 2304 /* 2305 * Synchronously perform an mds request. Take care of all of the 2306 * session setup, forwarding, and retry details.
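 *
 * A rough sketch of a typical caller (hypothetical, not taken from this
 * file; error handling trimmed):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
 *	req->r_inode = inode;
 *	ihold(inode);
 *	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE_ALL);
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);
 *	ceph_mdsc_put_request(req);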
2307 */ 2308 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, 2309 struct inode *dir, 2310 struct ceph_mds_request *req) 2311 { 2312 int err; 2313 2314 dout("do_request on %p\n", req); 2315 2316 /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */ 2317 if (req->r_inode) 2318 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); 2319 if (req->r_parent) 2320 ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); 2321 if (req->r_old_dentry_dir) 2322 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), 2323 CEPH_CAP_PIN); 2324 2325 /* issue */ 2326 mutex_lock(&mdsc->mutex); 2327 __register_request(mdsc, req, dir); 2328 __do_request(mdsc, req); 2329 2330 if (req->r_err) { 2331 err = req->r_err; 2332 goto out; 2333 } 2334 2335 /* wait */ 2336 mutex_unlock(&mdsc->mutex); 2337 dout("do_request waiting\n"); 2338 if (!req->r_timeout && req->r_wait_for_completion) { 2339 err = req->r_wait_for_completion(mdsc, req); 2340 } else { 2341 long timeleft = wait_for_completion_killable_timeout( 2342 &req->r_completion, 2343 ceph_timeout_jiffies(req->r_timeout)); 2344 if (timeleft > 0) 2345 err = 0; 2346 else if (!timeleft) 2347 err = -EIO; /* timed out */ 2348 else 2349 err = timeleft; /* killed */ 2350 } 2351 dout("do_request waited, got %d\n", err); 2352 mutex_lock(&mdsc->mutex); 2353 2354 /* only abort if we didn't race with a real reply */ 2355 if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { 2356 err = le32_to_cpu(req->r_reply_info.head->result); 2357 } else if (err < 0) { 2358 dout("aborted request %lld with %d\n", req->r_tid, err); 2359 2360 /* 2361 * ensure we aren't running concurrently with 2362 * ceph_fill_trace or ceph_readdir_prepopulate, which 2363 * rely on locks (dir mutex) held by our caller. 2364 */ 2365 mutex_lock(&req->r_fill_mutex); 2366 req->r_err = err; 2367 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags); 2368 mutex_unlock(&req->r_fill_mutex); 2369 2370 if (req->r_parent && 2371 (req->r_op & CEPH_MDS_OP_WRITE)) 2372 ceph_invalidate_dir_request(req); 2373 } else { 2374 err = req->r_err; 2375 } 2376 2377 out: 2378 mutex_unlock(&mdsc->mutex); 2379 dout("do_request %p done, result %d\n", req, err); 2380 return err; 2381 } 2382 2383 /* 2384 * Invalidate dir's completeness, dentry lease state on an aborted MDS 2385 * namespace request. 2386 */ 2387 void ceph_invalidate_dir_request(struct ceph_mds_request *req) 2388 { 2389 struct inode *inode = req->r_parent; 2390 2391 dout("invalidate_dir_request %p (complete, lease(s))\n", inode); 2392 2393 ceph_dir_clear_complete(inode); 2394 if (req->r_dentry) 2395 ceph_invalidate_dentry_lease(req->r_dentry); 2396 if (req->r_old_dentry) 2397 ceph_invalidate_dentry_lease(req->r_old_dentry); 2398 } 2399 2400 /* 2401 * Handle mds reply. 2402 * 2403 * We take the session mutex and parse and process the reply immediately. 2404 * This preserves the logical ordering of replies, capabilities, etc., sent 2405 * by the MDS as they are applied to our local cache. 
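 *
 * A request may get up to two replies: an "unsafe" one as soon as the
 * MDS has applied the update in memory, and a "safe" one once it has
 * been committed durably (journaled).  Only the first reply is parsed
 * and filled into the cache; a later safe reply just unregisters the
 * request (waking any umount waiters once no requests remain).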
2406 */ 2407 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) 2408 { 2409 struct ceph_mds_client *mdsc = session->s_mdsc; 2410 struct ceph_mds_request *req; 2411 struct ceph_mds_reply_head *head = msg->front.iov_base; 2412 struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ 2413 struct ceph_snap_realm *realm; 2414 u64 tid; 2415 int err, result; 2416 int mds = session->s_mds; 2417 2418 if (msg->front.iov_len < sizeof(*head)) { 2419 pr_err("mdsc_handle_reply got corrupt (short) reply\n"); 2420 ceph_msg_dump(msg); 2421 return; 2422 } 2423 2424 /* get request, session */ 2425 tid = le64_to_cpu(msg->hdr.tid); 2426 mutex_lock(&mdsc->mutex); 2427 req = lookup_get_request(mdsc, tid); 2428 if (!req) { 2429 dout("handle_reply on unknown tid %llu\n", tid); 2430 mutex_unlock(&mdsc->mutex); 2431 return; 2432 } 2433 dout("handle_reply %p\n", req); 2434 2435 /* correct session? */ 2436 if (req->r_session != session) { 2437 pr_err("mdsc_handle_reply got %llu on session mds%d" 2438 " not mds%d\n", tid, session->s_mds, 2439 req->r_session ? req->r_session->s_mds : -1); 2440 mutex_unlock(&mdsc->mutex); 2441 goto out; 2442 } 2443 2444 /* dup? */ 2445 if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) || 2446 (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) { 2447 pr_warn("got a dup %s reply on %llu from mds%d\n", 2448 head->safe ? "safe" : "unsafe", tid, mds); 2449 mutex_unlock(&mdsc->mutex); 2450 goto out; 2451 } 2452 if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) { 2453 pr_warn("got unsafe after safe on %llu from mds%d\n", 2454 tid, mds); 2455 mutex_unlock(&mdsc->mutex); 2456 goto out; 2457 } 2458 2459 result = le32_to_cpu(head->result); 2460 2461 /* 2462 * Handle an ESTALE 2463 * if we're not talking to the authority, send to them 2464 * if the authority has changed while we weren't looking, 2465 * send to new authority 2466 * Otherwise we just have to return an ESTALE 2467 */ 2468 if (result == -ESTALE) { 2469 dout("got ESTALE on request %llu", req->r_tid); 2470 req->r_resend_mds = -1; 2471 if (req->r_direct_mode != USE_AUTH_MDS) { 2472 dout("not using auth, setting for that now"); 2473 req->r_direct_mode = USE_AUTH_MDS; 2474 __do_request(mdsc, req); 2475 mutex_unlock(&mdsc->mutex); 2476 goto out; 2477 } else { 2478 int mds = __choose_mds(mdsc, req); 2479 if (mds >= 0 && mds != req->r_session->s_mds) { 2480 dout("but auth changed, so resending"); 2481 __do_request(mdsc, req); 2482 mutex_unlock(&mdsc->mutex); 2483 goto out; 2484 } 2485 } 2486 dout("have to return ESTALE on request %llu", req->r_tid); 2487 } 2488 2489 2490 if (head->safe) { 2491 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags); 2492 __unregister_request(mdsc, req); 2493 2494 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { 2495 /* 2496 * We already handled the unsafe response, now do the 2497 * cleanup. No need to examine the response; the MDS 2498 * doesn't include any result info in the safe 2499 * response. And even if it did, there is nothing 2500 * useful we could do with a revised return value. 2501 */ 2502 dout("got safe reply %llu, mds%d\n", tid, mds); 2503 2504 /* last unsafe request during umount? 
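 * if so, wake anyone blocked on safe_umount_waiters in wait_requests()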
*/ 2505 if (mdsc->stopping && !__get_oldest_req(mdsc)) 2506 complete_all(&mdsc->safe_umount_waiters); 2507 mutex_unlock(&mdsc->mutex); 2508 goto out; 2509 } 2510 } else { 2511 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags); 2512 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); 2513 if (req->r_unsafe_dir) { 2514 struct ceph_inode_info *ci = 2515 ceph_inode(req->r_unsafe_dir); 2516 spin_lock(&ci->i_unsafe_lock); 2517 list_add_tail(&req->r_unsafe_dir_item, 2518 &ci->i_unsafe_dirops); 2519 spin_unlock(&ci->i_unsafe_lock); 2520 } 2521 } 2522 2523 dout("handle_reply tid %lld result %d\n", tid, result); 2524 rinfo = &req->r_reply_info; 2525 err = parse_reply_info(msg, rinfo, session->s_con.peer_features); 2526 mutex_unlock(&mdsc->mutex); 2527 2528 mutex_lock(&session->s_mutex); 2529 if (err < 0) { 2530 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid); 2531 ceph_msg_dump(msg); 2532 goto out_err; 2533 } 2534 2535 /* snap trace */ 2536 realm = NULL; 2537 if (rinfo->snapblob_len) { 2538 down_write(&mdsc->snap_rwsem); 2539 ceph_update_snap_trace(mdsc, rinfo->snapblob, 2540 rinfo->snapblob + rinfo->snapblob_len, 2541 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP, 2542 &realm); 2543 downgrade_write(&mdsc->snap_rwsem); 2544 } else { 2545 down_read(&mdsc->snap_rwsem); 2546 } 2547 2548 /* insert trace into our cache */ 2549 mutex_lock(&req->r_fill_mutex); 2550 current->journal_info = req; 2551 err = ceph_fill_trace(mdsc->fsc->sb, req); 2552 if (err == 0) { 2553 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || 2554 req->r_op == CEPH_MDS_OP_LSSNAP)) 2555 ceph_readdir_prepopulate(req, req->r_session); 2556 ceph_unreserve_caps(mdsc, &req->r_caps_reservation); 2557 } 2558 current->journal_info = NULL; 2559 mutex_unlock(&req->r_fill_mutex); 2560 2561 up_read(&mdsc->snap_rwsem); 2562 if (realm) 2563 ceph_put_snap_realm(mdsc, realm); 2564 2565 if (err == 0 && req->r_target_inode && 2566 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { 2567 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); 2568 spin_lock(&ci->i_unsafe_lock); 2569 list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops); 2570 spin_unlock(&ci->i_unsafe_lock); 2571 } 2572 out_err: 2573 mutex_lock(&mdsc->mutex); 2574 if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { 2575 if (err) { 2576 req->r_err = err; 2577 } else { 2578 req->r_reply = ceph_msg_get(msg); 2579 set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags); 2580 } 2581 } else { 2582 dout("reply arrived after request %lld was aborted\n", tid); 2583 } 2584 mutex_unlock(&mdsc->mutex); 2585 2586 mutex_unlock(&session->s_mutex); 2587 2588 /* kick calling process */ 2589 complete_request(mdsc, req); 2590 out: 2591 ceph_mdsc_put_request(req); 2592 return; 2593 } 2594 2595 2596 2597 /* 2598 * handle mds notification that our request has been forwarded. 2599 */ 2600 static void handle_forward(struct ceph_mds_client *mdsc, 2601 struct ceph_mds_session *session, 2602 struct ceph_msg *msg) 2603 { 2604 struct ceph_mds_request *req; 2605 u64 tid = le64_to_cpu(msg->hdr.tid); 2606 u32 next_mds; 2607 u32 fwd_seq; 2608 int err = -EINVAL; 2609 void *p = msg->front.iov_base; 2610 void *end = p + msg->front.iov_len; 2611 2612 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 2613 next_mds = ceph_decode_32(&p); 2614 fwd_seq = ceph_decode_32(&p); 2615 2616 mutex_lock(&mdsc->mutex); 2617 req = lookup_get_request(mdsc, tid); 2618 if (!req) { 2619 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds); 2620 goto out; /* dup reply? 
*/ 2621 } 2622 2623 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { 2624 dout("forward tid %llu aborted, unregistering\n", tid); 2625 __unregister_request(mdsc, req); 2626 } else if (fwd_seq <= req->r_num_fwd) { 2627 dout("forward tid %llu to mds%d - old seq %d <= %d\n", 2628 tid, next_mds, req->r_num_fwd, fwd_seq); 2629 } else { 2630 /* resend. forward race not possible; mds would drop */ 2631 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); 2632 BUG_ON(req->r_err); 2633 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)); 2634 req->r_attempts = 0; 2635 req->r_num_fwd = fwd_seq; 2636 req->r_resend_mds = next_mds; 2637 put_request_session(req); 2638 __do_request(mdsc, req); 2639 } 2640 ceph_mdsc_put_request(req); 2641 out: 2642 mutex_unlock(&mdsc->mutex); 2643 return; 2644 2645 bad: 2646 pr_err("mdsc_handle_forward decode error err=%d\n", err); 2647 } 2648 2649 /* 2650 * handle a mds session control message 2651 */ 2652 static void handle_session(struct ceph_mds_session *session, 2653 struct ceph_msg *msg) 2654 { 2655 struct ceph_mds_client *mdsc = session->s_mdsc; 2656 u32 op; 2657 u64 seq; 2658 int mds = session->s_mds; 2659 struct ceph_mds_session_head *h = msg->front.iov_base; 2660 int wake = 0; 2661 2662 /* decode */ 2663 if (msg->front.iov_len != sizeof(*h)) 2664 goto bad; 2665 op = le32_to_cpu(h->op); 2666 seq = le64_to_cpu(h->seq); 2667 2668 mutex_lock(&mdsc->mutex); 2669 if (op == CEPH_SESSION_CLOSE) { 2670 get_session(session); 2671 __unregister_session(mdsc, session); 2672 } 2673 /* FIXME: this ttl calculation is generous */ 2674 session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; 2675 mutex_unlock(&mdsc->mutex); 2676 2677 mutex_lock(&session->s_mutex); 2678 2679 dout("handle_session mds%d %s %p state %s seq %llu\n", 2680 mds, ceph_session_op_name(op), session, 2681 ceph_session_state_name(session->s_state), seq); 2682 2683 if (session->s_state == CEPH_MDS_SESSION_HUNG) { 2684 session->s_state = CEPH_MDS_SESSION_OPEN; 2685 pr_info("mds%d came back\n", session->s_mds); 2686 } 2687 2688 switch (op) { 2689 case CEPH_SESSION_OPEN: 2690 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) 2691 pr_info("mds%d reconnect success\n", session->s_mds); 2692 session->s_state = CEPH_MDS_SESSION_OPEN; 2693 renewed_caps(mdsc, session, 0); 2694 wake = 1; 2695 if (mdsc->stopping) 2696 __close_session(mdsc, session); 2697 break; 2698 2699 case CEPH_SESSION_RENEWCAPS: 2700 if (session->s_renew_seq == seq) 2701 renewed_caps(mdsc, session, 1); 2702 break; 2703 2704 case CEPH_SESSION_CLOSE: 2705 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) 2706 pr_info("mds%d reconnect denied\n", session->s_mds); 2707 cleanup_session_requests(mdsc, session); 2708 remove_session_caps(session); 2709 wake = 2; /* for good measure */ 2710 wake_up_all(&mdsc->session_close_wq); 2711 break; 2712 2713 case CEPH_SESSION_STALE: 2714 pr_info("mds%d caps went stale, renewing\n", 2715 session->s_mds); 2716 spin_lock(&session->s_gen_ttl_lock); 2717 session->s_cap_gen++; 2718 session->s_cap_ttl = jiffies - 1; 2719 spin_unlock(&session->s_gen_ttl_lock); 2720 send_renew_caps(mdsc, session); 2721 break; 2722 2723 case CEPH_SESSION_RECALL_STATE: 2724 trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); 2725 break; 2726 2727 case CEPH_SESSION_FLUSHMSG: 2728 send_flushmsg_ack(mdsc, session, seq); 2729 break; 2730 2731 case CEPH_SESSION_FORCE_RO: 2732 dout("force_session_readonly %p\n", session); 2733 spin_lock(&session->s_cap_lock); 2734 session->s_readonly = true; 2735 
spin_unlock(&session->s_cap_lock); 2736 wake_up_session_caps(session, 0); 2737 break; 2738 2739 case CEPH_SESSION_REJECT: 2740 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING); 2741 pr_info("mds%d rejected session\n", session->s_mds); 2742 session->s_state = CEPH_MDS_SESSION_REJECTED; 2743 cleanup_session_requests(mdsc, session); 2744 remove_session_caps(session); 2745 wake = 2; /* for good measure */ 2746 break; 2747 2748 default: 2749 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); 2750 WARN_ON(1); 2751 } 2752 2753 mutex_unlock(&session->s_mutex); 2754 if (wake) { 2755 mutex_lock(&mdsc->mutex); 2756 __wake_requests(mdsc, &session->s_waiting); 2757 if (wake == 2) 2758 kick_requests(mdsc, mds); 2759 mutex_unlock(&mdsc->mutex); 2760 } 2761 if (op == CEPH_SESSION_CLOSE) 2762 ceph_put_mds_session(session); 2763 return; 2764 2765 bad: 2766 pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds, 2767 (int)msg->front.iov_len); 2768 ceph_msg_dump(msg); 2769 return; 2770 } 2771 2772 2773 /* 2774 * called under session->mutex. 2775 */ 2776 static void replay_unsafe_requests(struct ceph_mds_client *mdsc, 2777 struct ceph_mds_session *session) 2778 { 2779 struct ceph_mds_request *req, *nreq; 2780 struct rb_node *p; 2781 int err; 2782 2783 dout("replay_unsafe_requests mds%d\n", session->s_mds); 2784 2785 mutex_lock(&mdsc->mutex); 2786 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { 2787 err = __prepare_send_request(mdsc, req, session->s_mds, true); 2788 if (!err) { 2789 ceph_msg_get(req->r_request); 2790 ceph_con_send(&session->s_con, req->r_request); 2791 } 2792 } 2793 2794 /* 2795 * Also re-send old requests when the MDS enters the reconnect stage, so 2796 * that the MDS can process completed requests in its clientreplay stage. 2797 */ 2798 p = rb_first(&mdsc->request_tree); 2799 while (p) { 2800 req = rb_entry(p, struct ceph_mds_request, r_node); 2801 p = rb_next(p); 2802 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) 2803 continue; 2804 if (req->r_attempts == 0) 2805 continue; /* only old requests */ 2806 if (req->r_session && 2807 req->r_session->s_mds == session->s_mds) { 2808 err = __prepare_send_request(mdsc, req, 2809 session->s_mds, true); 2810 if (!err) { 2811 ceph_msg_get(req->r_request); 2812 ceph_con_send(&session->s_con, req->r_request); 2813 } 2814 } 2815 } 2816 mutex_unlock(&mdsc->mutex); 2817 } 2818 2819 /* 2820 * Encode information about a cap for a reconnect with the MDS.
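 *
 * For each cap we emit the inode number, the path of one of its dentry
 * aliases (possibly empty), and a ceph_mds_cap_reconnect record; with
 * msg_version >= 2 the record is followed by the inode's fcntl/flock
 * locks, and with msg_version >= 3 the whole entry is wrapped in a
 * versioned struct that also carries snap_follows.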
2821 */ 2822 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, 2823 void *arg) 2824 { 2825 union { 2826 struct ceph_mds_cap_reconnect v2; 2827 struct ceph_mds_cap_reconnect_v1 v1; 2828 } rec; 2829 struct ceph_inode_info *ci; 2830 struct ceph_reconnect_state *recon_state = arg; 2831 struct ceph_pagelist *pagelist = recon_state->pagelist; 2832 char *path; 2833 int pathlen, err; 2834 u64 pathbase; 2835 u64 snap_follows; 2836 struct dentry *dentry; 2837 2838 ci = cap->ci; 2839 2840 dout(" adding %p ino %llx.%llx cap %p %lld %s\n", 2841 inode, ceph_vinop(inode), cap, cap->cap_id, 2842 ceph_cap_string(cap->issued)); 2843 err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode)); 2844 if (err) 2845 return err; 2846 2847 dentry = d_find_alias(inode); 2848 if (dentry) { 2849 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0); 2850 if (IS_ERR(path)) { 2851 err = PTR_ERR(path); 2852 goto out_dput; 2853 } 2854 } else { 2855 path = NULL; 2856 pathlen = 0; 2857 pathbase = 0; 2858 } 2859 2860 spin_lock(&ci->i_ceph_lock); 2861 cap->seq = 0; /* reset cap seq */ 2862 cap->issue_seq = 0; /* and issue_seq */ 2863 cap->mseq = 0; /* and migrate_seq */ 2864 cap->cap_gen = cap->session->s_cap_gen; 2865 2866 if (recon_state->msg_version >= 2) { 2867 rec.v2.cap_id = cpu_to_le64(cap->cap_id); 2868 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); 2869 rec.v2.issued = cpu_to_le32(cap->issued); 2870 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); 2871 rec.v2.pathbase = cpu_to_le64(pathbase); 2872 rec.v2.flock_len = 0; 2873 } else { 2874 rec.v1.cap_id = cpu_to_le64(cap->cap_id); 2875 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); 2876 rec.v1.issued = cpu_to_le32(cap->issued); 2877 rec.v1.size = cpu_to_le64(inode->i_size); 2878 ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime); 2879 ceph_encode_timespec(&rec.v1.atime, &inode->i_atime); 2880 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); 2881 rec.v1.pathbase = cpu_to_le64(pathbase); 2882 } 2883 2884 if (list_empty(&ci->i_cap_snaps)) { 2885 snap_follows = ci->i_head_snapc ? 
ci->i_head_snapc->seq : 0; 2886 } else { 2887 struct ceph_cap_snap *capsnap = 2888 list_first_entry(&ci->i_cap_snaps, 2889 struct ceph_cap_snap, ci_item); 2890 snap_follows = capsnap->follows; 2891 } 2892 spin_unlock(&ci->i_ceph_lock); 2893 2894 if (recon_state->msg_version >= 2) { 2895 int num_fcntl_locks, num_flock_locks; 2896 struct ceph_filelock *flocks; 2897 size_t struct_len, total_len = 0; 2898 u8 struct_v = 0; 2899 2900 encode_again: 2901 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); 2902 flocks = kmalloc((num_fcntl_locks+num_flock_locks) * 2903 sizeof(struct ceph_filelock), GFP_NOFS); 2904 if (!flocks) { 2905 err = -ENOMEM; 2906 goto out_free; 2907 } 2908 err = ceph_encode_locks_to_buffer(inode, flocks, 2909 num_fcntl_locks, 2910 num_flock_locks); 2911 if (err) { 2912 kfree(flocks); 2913 if (err == -ENOSPC) 2914 goto encode_again; 2915 goto out_free; 2916 } 2917 2918 if (recon_state->msg_version >= 3) { 2919 /* version, compat_version and struct_len */ 2920 total_len = 2 * sizeof(u8) + sizeof(u32); 2921 struct_v = 2; 2922 } 2923 /* 2924 * number of encoded locks is stable, so copy to pagelist 2925 */ 2926 struct_len = 2 * sizeof(u32) + 2927 (num_fcntl_locks + num_flock_locks) * 2928 sizeof(struct ceph_filelock); 2929 rec.v2.flock_len = cpu_to_le32(struct_len); 2930 2931 struct_len += sizeof(rec.v2); 2932 struct_len += sizeof(u32) + pathlen; 2933 2934 if (struct_v >= 2) 2935 struct_len += sizeof(u64); /* snap_follows */ 2936 2937 total_len += struct_len; 2938 err = ceph_pagelist_reserve(pagelist, total_len); 2939 2940 if (!err) { 2941 if (recon_state->msg_version >= 3) { 2942 ceph_pagelist_encode_8(pagelist, struct_v); 2943 ceph_pagelist_encode_8(pagelist, 1); 2944 ceph_pagelist_encode_32(pagelist, struct_len); 2945 } 2946 ceph_pagelist_encode_string(pagelist, path, pathlen); 2947 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2)); 2948 ceph_locks_to_pagelist(flocks, pagelist, 2949 num_fcntl_locks, 2950 num_flock_locks); 2951 if (struct_v >= 2) 2952 ceph_pagelist_encode_64(pagelist, snap_follows); 2953 } 2954 kfree(flocks); 2955 } else { 2956 size_t size = sizeof(u32) + pathlen + sizeof(rec.v1); 2957 err = ceph_pagelist_reserve(pagelist, size); 2958 if (!err) { 2959 ceph_pagelist_encode_string(pagelist, path, pathlen); 2960 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1)); 2961 } 2962 } 2963 2964 recon_state->nr_caps++; 2965 out_free: 2966 kfree(path); 2967 out_dput: 2968 dput(dentry); 2969 return err; 2970 } 2971 2972 2973 /* 2974 * If an MDS fails and recovers, clients need to reconnect in order to 2975 * reestablish shared state. This includes all caps issued through 2976 * this session _and_ the snap_realm hierarchy. Because it's not 2977 * clear which snap realms the mds cares about, we send everything we 2978 * know about.. that ensures we'll then get any new info the 2979 * recovering MDS might have. 2980 * 2981 * This is a relatively heavyweight operation, but it's rare. 2982 * 2983 * called with mdsc->mutex held. 
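 *
 * The reconnect message body is a pagelist: a 32-bit cap count, one
 * entry per cap still held on this session (see encode_caps_cb()),
 * followed by one ceph_mds_snaprealm_reconnect record per snap realm
 * we know about.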
2984 */ 2985 static void send_mds_reconnect(struct ceph_mds_client *mdsc, 2986 struct ceph_mds_session *session) 2987 { 2988 struct ceph_msg *reply; 2989 struct rb_node *p; 2990 int mds = session->s_mds; 2991 int err = -ENOMEM; 2992 int s_nr_caps; 2993 struct ceph_pagelist *pagelist; 2994 struct ceph_reconnect_state recon_state; 2995 2996 pr_info("mds%d reconnect start\n", mds); 2997 2998 pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); 2999 if (!pagelist) 3000 goto fail_nopagelist; 3001 ceph_pagelist_init(pagelist); 3002 3003 reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false); 3004 if (!reply) 3005 goto fail_nomsg; 3006 3007 mutex_lock(&session->s_mutex); 3008 session->s_state = CEPH_MDS_SESSION_RECONNECTING; 3009 session->s_seq = 0; 3010 3011 dout("session %p state %s\n", session, 3012 ceph_session_state_name(session->s_state)); 3013 3014 spin_lock(&session->s_gen_ttl_lock); 3015 session->s_cap_gen++; 3016 spin_unlock(&session->s_gen_ttl_lock); 3017 3018 spin_lock(&session->s_cap_lock); 3019 /* don't know if session is readonly */ 3020 session->s_readonly = 0; 3021 /* 3022 * notify __ceph_remove_cap() that we are composing cap reconnect. 3023 * If a cap get released before being added to the cap reconnect, 3024 * __ceph_remove_cap() should skip queuing cap release. 3025 */ 3026 session->s_cap_reconnect = 1; 3027 /* drop old cap expires; we're about to reestablish that state */ 3028 cleanup_cap_releases(mdsc, session); 3029 3030 /* trim unused caps to reduce MDS's cache rejoin time */ 3031 if (mdsc->fsc->sb->s_root) 3032 shrink_dcache_parent(mdsc->fsc->sb->s_root); 3033 3034 ceph_con_close(&session->s_con); 3035 ceph_con_open(&session->s_con, 3036 CEPH_ENTITY_TYPE_MDS, mds, 3037 ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); 3038 3039 /* replay unsafe requests */ 3040 replay_unsafe_requests(mdsc, session); 3041 3042 down_read(&mdsc->snap_rwsem); 3043 3044 /* traverse this session's caps */ 3045 s_nr_caps = session->s_nr_caps; 3046 err = ceph_pagelist_encode_32(pagelist, s_nr_caps); 3047 if (err) 3048 goto fail; 3049 3050 recon_state.nr_caps = 0; 3051 recon_state.pagelist = pagelist; 3052 if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) 3053 recon_state.msg_version = 3; 3054 else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK) 3055 recon_state.msg_version = 2; 3056 else 3057 recon_state.msg_version = 1; 3058 err = iterate_session_caps(session, encode_caps_cb, &recon_state); 3059 if (err < 0) 3060 goto fail; 3061 3062 spin_lock(&session->s_cap_lock); 3063 session->s_cap_reconnect = 0; 3064 spin_unlock(&session->s_cap_lock); 3065 3066 /* 3067 * snaprealms. we provide mds with the ino, seq (version), and 3068 * parent for all of our realms. If the mds has any newer info, 3069 * it will tell us. 3070 */ 3071 for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { 3072 struct ceph_snap_realm *realm = 3073 rb_entry(p, struct ceph_snap_realm, node); 3074 struct ceph_mds_snaprealm_reconnect sr_rec; 3075 3076 dout(" adding snap realm %llx seq %lld parent %llx\n", 3077 realm->ino, realm->seq, realm->parent_ino); 3078 sr_rec.ino = cpu_to_le64(realm->ino); 3079 sr_rec.seq = cpu_to_le64(realm->seq); 3080 sr_rec.parent = cpu_to_le64(realm->parent_ino); 3081 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec)); 3082 if (err) 3083 goto fail; 3084 } 3085 3086 reply->hdr.version = cpu_to_le16(recon_state.msg_version); 3087 3088 /* raced with cap release? 
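 * if so, the cap count we encoded up front is stale; patch the first
 * word of the pagelist in place with the number actually encoded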
*/ 3089 if (s_nr_caps != recon_state.nr_caps) { 3090 struct page *page = list_first_entry(&pagelist->head, 3091 struct page, lru); 3092 __le32 *addr = kmap_atomic(page); 3093 *addr = cpu_to_le32(recon_state.nr_caps); 3094 kunmap_atomic(addr); 3095 } 3096 3097 reply->hdr.data_len = cpu_to_le32(pagelist->length); 3098 ceph_msg_data_add_pagelist(reply, pagelist); 3099 3100 ceph_early_kick_flushing_caps(mdsc, session); 3101 3102 ceph_con_send(&session->s_con, reply); 3103 3104 mutex_unlock(&session->s_mutex); 3105 3106 mutex_lock(&mdsc->mutex); 3107 __wake_requests(mdsc, &session->s_waiting); 3108 mutex_unlock(&mdsc->mutex); 3109 3110 up_read(&mdsc->snap_rwsem); 3111 return; 3112 3113 fail: 3114 ceph_msg_put(reply); 3115 up_read(&mdsc->snap_rwsem); 3116 mutex_unlock(&session->s_mutex); 3117 fail_nomsg: 3118 ceph_pagelist_release(pagelist); 3119 fail_nopagelist: 3120 pr_err("error %d preparing reconnect for mds%d\n", err, mds); 3121 return; 3122 } 3123 3124 3125 /* 3126 * compare old and new mdsmaps, kicking requests 3127 * and closing out old connections as necessary 3128 * 3129 * called under mdsc->mutex. 3130 */ 3131 static void check_new_map(struct ceph_mds_client *mdsc, 3132 struct ceph_mdsmap *newmap, 3133 struct ceph_mdsmap *oldmap) 3134 { 3135 int i; 3136 int oldstate, newstate; 3137 struct ceph_mds_session *s; 3138 3139 dout("check_new_map new %u old %u\n", 3140 newmap->m_epoch, oldmap->m_epoch); 3141 3142 for (i = 0; i < oldmap->m_num_mds && i < mdsc->max_sessions; i++) { 3143 if (!mdsc->sessions[i]) 3144 continue; 3145 s = mdsc->sessions[i]; 3146 oldstate = ceph_mdsmap_get_state(oldmap, i); 3147 newstate = ceph_mdsmap_get_state(newmap, i); 3148 3149 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n", 3150 i, ceph_mds_state_name(oldstate), 3151 ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "", 3152 ceph_mds_state_name(newstate), 3153 ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "", 3154 ceph_session_state_name(s->s_state)); 3155 3156 if (i >= newmap->m_num_mds || 3157 memcmp(ceph_mdsmap_get_addr(oldmap, i), 3158 ceph_mdsmap_get_addr(newmap, i), 3159 sizeof(struct ceph_entity_addr))) { 3160 if (s->s_state == CEPH_MDS_SESSION_OPENING) { 3161 /* the session never opened, just close it 3162 * out now */ 3163 get_session(s); 3164 __unregister_session(mdsc, s); 3165 __wake_requests(mdsc, &s->s_waiting); 3166 ceph_put_mds_session(s); 3167 } else if (i >= newmap->m_num_mds) { 3168 /* force close session for stopped mds */ 3169 get_session(s); 3170 __unregister_session(mdsc, s); 3171 __wake_requests(mdsc, &s->s_waiting); 3172 kick_requests(mdsc, i); 3173 mutex_unlock(&mdsc->mutex); 3174 3175 mutex_lock(&s->s_mutex); 3176 cleanup_session_requests(mdsc, s); 3177 remove_session_caps(s); 3178 mutex_unlock(&s->s_mutex); 3179 3180 ceph_put_mds_session(s); 3181 3182 mutex_lock(&mdsc->mutex); 3183 } else { 3184 /* just close it */ 3185 mutex_unlock(&mdsc->mutex); 3186 mutex_lock(&s->s_mutex); 3187 mutex_lock(&mdsc->mutex); 3188 ceph_con_close(&s->s_con); 3189 mutex_unlock(&s->s_mutex); 3190 s->s_state = CEPH_MDS_SESSION_RESTARTING; 3191 } 3192 } else if (oldstate == newstate) { 3193 continue; /* nothing new with this mds */ 3194 } 3195 3196 /* 3197 * send reconnect? 3198 */ 3199 if (s->s_state == CEPH_MDS_SESSION_RESTARTING && 3200 newstate >= CEPH_MDS_STATE_RECONNECT) { 3201 mutex_unlock(&mdsc->mutex); 3202 send_mds_reconnect(mdsc, s); 3203 mutex_lock(&mdsc->mutex); 3204 } 3205 3206 /* 3207 * kick request on any mds that has gone active. 
3208 */ 3209 if (oldstate < CEPH_MDS_STATE_ACTIVE && 3210 newstate >= CEPH_MDS_STATE_ACTIVE) { 3211 if (oldstate != CEPH_MDS_STATE_CREATING && 3212 oldstate != CEPH_MDS_STATE_STARTING) 3213 pr_info("mds%d recovery completed\n", s->s_mds); 3214 kick_requests(mdsc, i); 3215 ceph_kick_flushing_caps(mdsc, s); 3216 wake_up_session_caps(s, 1); 3217 } 3218 } 3219 3220 for (i = 0; i < newmap->m_num_mds && i < mdsc->max_sessions; i++) { 3221 s = mdsc->sessions[i]; 3222 if (!s) 3223 continue; 3224 if (!ceph_mdsmap_is_laggy(newmap, i)) 3225 continue; 3226 if (s->s_state == CEPH_MDS_SESSION_OPEN || 3227 s->s_state == CEPH_MDS_SESSION_HUNG || 3228 s->s_state == CEPH_MDS_SESSION_CLOSING) { 3229 dout(" connecting to export targets of laggy mds%d\n", 3230 i); 3231 __open_export_target_sessions(mdsc, s); 3232 } 3233 } 3234 } 3235 3236 3237 3238 /* 3239 * leases 3240 */ 3241 3242 /* 3243 * caller must hold session s_mutex, dentry->d_lock 3244 */ 3245 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry) 3246 { 3247 struct ceph_dentry_info *di = ceph_dentry(dentry); 3248 3249 ceph_put_mds_session(di->lease_session); 3250 di->lease_session = NULL; 3251 } 3252 3253 static void handle_lease(struct ceph_mds_client *mdsc, 3254 struct ceph_mds_session *session, 3255 struct ceph_msg *msg) 3256 { 3257 struct super_block *sb = mdsc->fsc->sb; 3258 struct inode *inode; 3259 struct dentry *parent, *dentry; 3260 struct ceph_dentry_info *di; 3261 int mds = session->s_mds; 3262 struct ceph_mds_lease *h = msg->front.iov_base; 3263 u32 seq; 3264 struct ceph_vino vino; 3265 struct qstr dname; 3266 int release = 0; 3267 3268 dout("handle_lease from mds%d\n", mds); 3269 3270 /* decode */ 3271 if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) 3272 goto bad; 3273 vino.ino = le64_to_cpu(h->ino); 3274 vino.snap = CEPH_NOSNAP; 3275 seq = le32_to_cpu(h->seq); 3276 dname.name = (void *)h + sizeof(*h) + sizeof(u32); 3277 dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); 3278 if (dname.len != get_unaligned_le32(h+1)) 3279 goto bad; 3280 3281 /* lookup inode */ 3282 inode = ceph_find_inode(sb, vino); 3283 dout("handle_lease %s, ino %llx %p %.*s\n", 3284 ceph_lease_op_name(h->action), vino.ino, inode, 3285 dname.len, dname.name); 3286 3287 mutex_lock(&session->s_mutex); 3288 session->s_seq++; 3289 3290 if (!inode) { 3291 dout("handle_lease no inode %llx\n", vino.ino); 3292 goto release; 3293 } 3294 3295 /* dentry */ 3296 parent = d_find_alias(inode); 3297 if (!parent) { 3298 dout("no parent dentry on inode %p\n", inode); 3299 WARN_ON(1); 3300 goto release; /* hrm... 
*/ 3301 } 3302 dname.hash = full_name_hash(parent, dname.name, dname.len); 3303 dentry = d_lookup(parent, &dname); 3304 dput(parent); 3305 if (!dentry) 3306 goto release; 3307 3308 spin_lock(&dentry->d_lock); 3309 di = ceph_dentry(dentry); 3310 switch (h->action) { 3311 case CEPH_MDS_LEASE_REVOKE: 3312 if (di->lease_session == session) { 3313 if (ceph_seq_cmp(di->lease_seq, seq) > 0) 3314 h->seq = cpu_to_le32(di->lease_seq); 3315 __ceph_mdsc_drop_dentry_lease(dentry); 3316 } 3317 release = 1; 3318 break; 3319 3320 case CEPH_MDS_LEASE_RENEW: 3321 if (di->lease_session == session && 3322 di->lease_gen == session->s_cap_gen && 3323 di->lease_renew_from && 3324 di->lease_renew_after == 0) { 3325 unsigned long duration = 3326 msecs_to_jiffies(le32_to_cpu(h->duration_ms)); 3327 3328 di->lease_seq = seq; 3329 di->time = di->lease_renew_from + duration; 3330 di->lease_renew_after = di->lease_renew_from + 3331 (duration >> 1); 3332 di->lease_renew_from = 0; 3333 } 3334 break; 3335 } 3336 spin_unlock(&dentry->d_lock); 3337 dput(dentry); 3338 3339 if (!release) 3340 goto out; 3341 3342 release: 3343 /* let's just reuse the same message */ 3344 h->action = CEPH_MDS_LEASE_REVOKE_ACK; 3345 ceph_msg_get(msg); 3346 ceph_con_send(&session->s_con, msg); 3347 3348 out: 3349 iput(inode); 3350 mutex_unlock(&session->s_mutex); 3351 return; 3352 3353 bad: 3354 pr_err("corrupt lease message\n"); 3355 ceph_msg_dump(msg); 3356 } 3357 3358 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, 3359 struct inode *inode, 3360 struct dentry *dentry, char action, 3361 u32 seq) 3362 { 3363 struct ceph_msg *msg; 3364 struct ceph_mds_lease *lease; 3365 int len = sizeof(*lease) + sizeof(u32); 3366 int dnamelen = 0; 3367 3368 dout("lease_send_msg inode %p dentry %p %s to mds%d\n", 3369 inode, dentry, ceph_lease_op_name(action), session->s_mds); 3370 dnamelen = dentry->d_name.len; 3371 len += dnamelen; 3372 3373 msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false); 3374 if (!msg) 3375 return; 3376 lease = msg->front.iov_base; 3377 lease->action = action; 3378 lease->ino = cpu_to_le64(ceph_vino(inode).ino); 3379 lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap); 3380 lease->seq = cpu_to_le32(seq); 3381 put_unaligned_le32(dnamelen, lease + 1); 3382 memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen); 3383 3384 /* 3385 * if this is a preemptive lease RELEASE, no need to 3386 * flush request stream, since the actual request will 3387 * soon follow. 
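 * (that is what the more_to_follow flag below tells the messenger)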
3388 */ 3389 msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE); 3390 3391 ceph_con_send(&session->s_con, msg); 3392 } 3393 3394 /* 3395 * drop all leases (and dentry refs) in preparation for umount 3396 */ 3397 static void drop_leases(struct ceph_mds_client *mdsc) 3398 { 3399 int i; 3400 3401 dout("drop_leases\n"); 3402 mutex_lock(&mdsc->mutex); 3403 for (i = 0; i < mdsc->max_sessions; i++) { 3404 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); 3405 if (!s) 3406 continue; 3407 mutex_unlock(&mdsc->mutex); 3408 mutex_lock(&s->s_mutex); 3409 mutex_unlock(&s->s_mutex); 3410 ceph_put_mds_session(s); 3411 mutex_lock(&mdsc->mutex); 3412 } 3413 mutex_unlock(&mdsc->mutex); 3414 } 3415 3416 3417 3418 /* 3419 * delayed work -- periodically trim expired leases, renew caps with mds 3420 */ 3421 static void schedule_delayed(struct ceph_mds_client *mdsc) 3422 { 3423 int delay = 5; 3424 unsigned hz = round_jiffies_relative(HZ * delay); 3425 schedule_delayed_work(&mdsc->delayed_work, hz); 3426 } 3427 3428 static void delayed_work(struct work_struct *work) 3429 { 3430 int i; 3431 struct ceph_mds_client *mdsc = 3432 container_of(work, struct ceph_mds_client, delayed_work.work); 3433 int renew_interval; 3434 int renew_caps; 3435 3436 dout("mdsc delayed_work\n"); 3437 ceph_check_delayed_caps(mdsc); 3438 3439 mutex_lock(&mdsc->mutex); 3440 renew_interval = mdsc->mdsmap->m_session_timeout >> 2; 3441 renew_caps = time_after_eq(jiffies, HZ*renew_interval + 3442 mdsc->last_renew_caps); 3443 if (renew_caps) 3444 mdsc->last_renew_caps = jiffies; 3445 3446 for (i = 0; i < mdsc->max_sessions; i++) { 3447 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); 3448 if (!s) 3449 continue; 3450 if (s->s_state == CEPH_MDS_SESSION_CLOSING) { 3451 dout("resending session close request for mds%d\n", 3452 s->s_mds); 3453 request_close_session(mdsc, s); 3454 ceph_put_mds_session(s); 3455 continue; 3456 } 3457 if (s->s_ttl && time_after(jiffies, s->s_ttl)) { 3458 if (s->s_state == CEPH_MDS_SESSION_OPEN) { 3459 s->s_state = CEPH_MDS_SESSION_HUNG; 3460 pr_info("mds%d hung\n", s->s_mds); 3461 } 3462 } 3463 if (s->s_state < CEPH_MDS_SESSION_OPEN) { 3464 /* this mds is failed or recovering, just wait */ 3465 ceph_put_mds_session(s); 3466 continue; 3467 } 3468 mutex_unlock(&mdsc->mutex); 3469 3470 mutex_lock(&s->s_mutex); 3471 if (renew_caps) 3472 send_renew_caps(mdsc, s); 3473 else 3474 ceph_con_keepalive(&s->s_con); 3475 if (s->s_state == CEPH_MDS_SESSION_OPEN || 3476 s->s_state == CEPH_MDS_SESSION_HUNG) 3477 ceph_send_cap_releases(mdsc, s); 3478 mutex_unlock(&s->s_mutex); 3479 ceph_put_mds_session(s); 3480 3481 mutex_lock(&mdsc->mutex); 3482 } 3483 mutex_unlock(&mdsc->mutex); 3484 3485 schedule_delayed(mdsc); 3486 } 3487 3488 int ceph_mdsc_init(struct ceph_fs_client *fsc) 3489 3490 { 3491 struct ceph_mds_client *mdsc; 3492 3493 mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS); 3494 if (!mdsc) 3495 return -ENOMEM; 3496 mdsc->fsc = fsc; 3497 fsc->mdsc = mdsc; 3498 mutex_init(&mdsc->mutex); 3499 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); 3500 if (!mdsc->mdsmap) { 3501 kfree(mdsc); 3502 return -ENOMEM; 3503 } 3504 3505 init_completion(&mdsc->safe_umount_waiters); 3506 init_waitqueue_head(&mdsc->session_close_wq); 3507 INIT_LIST_HEAD(&mdsc->waiting_for_map); 3508 mdsc->sessions = NULL; 3509 atomic_set(&mdsc->num_sessions, 0); 3510 mdsc->max_sessions = 0; 3511 mdsc->stopping = 0; 3512 mdsc->last_snap_seq = 0; 3513 init_rwsem(&mdsc->snap_rwsem); 3514 mdsc->snap_realms = RB_ROOT; 3515 
INIT_LIST_HEAD(&mdsc->snap_empty); 3516 spin_lock_init(&mdsc->snap_empty_lock); 3517 mdsc->last_tid = 0; 3518 mdsc->oldest_tid = 0; 3519 mdsc->request_tree = RB_ROOT; 3520 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); 3521 mdsc->last_renew_caps = jiffies; 3522 INIT_LIST_HEAD(&mdsc->cap_delay_list); 3523 spin_lock_init(&mdsc->cap_delay_lock); 3524 INIT_LIST_HEAD(&mdsc->snap_flush_list); 3525 spin_lock_init(&mdsc->snap_flush_lock); 3526 mdsc->last_cap_flush_tid = 1; 3527 INIT_LIST_HEAD(&mdsc->cap_flush_list); 3528 INIT_LIST_HEAD(&mdsc->cap_dirty); 3529 INIT_LIST_HEAD(&mdsc->cap_dirty_migrating); 3530 mdsc->num_cap_flushing = 0; 3531 spin_lock_init(&mdsc->cap_dirty_lock); 3532 init_waitqueue_head(&mdsc->cap_flushing_wq); 3533 spin_lock_init(&mdsc->dentry_lru_lock); 3534 INIT_LIST_HEAD(&mdsc->dentry_lru); 3535 3536 ceph_caps_init(mdsc); 3537 ceph_adjust_min_caps(mdsc, fsc->min_caps); 3538 3539 init_rwsem(&mdsc->pool_perm_rwsem); 3540 mdsc->pool_perm_tree = RB_ROOT; 3541 3542 strncpy(mdsc->nodename, utsname()->nodename, 3543 sizeof(mdsc->nodename) - 1); 3544 return 0; 3545 } 3546 3547 /* 3548 * Wait for safe replies on open mds requests. If we time out, drop 3549 * all requests from the tree to avoid dangling dentry refs. 3550 */ 3551 static void wait_requests(struct ceph_mds_client *mdsc) 3552 { 3553 struct ceph_options *opts = mdsc->fsc->client->options; 3554 struct ceph_mds_request *req; 3555 3556 mutex_lock(&mdsc->mutex); 3557 if (__get_oldest_req(mdsc)) { 3558 mutex_unlock(&mdsc->mutex); 3559 3560 dout("wait_requests waiting for requests\n"); 3561 wait_for_completion_timeout(&mdsc->safe_umount_waiters, 3562 ceph_timeout_jiffies(opts->mount_timeout)); 3563 3564 /* tear down remaining requests */ 3565 mutex_lock(&mdsc->mutex); 3566 while ((req = __get_oldest_req(mdsc))) { 3567 dout("wait_requests timed out on tid %llu\n", 3568 req->r_tid); 3569 __unregister_request(mdsc, req); 3570 } 3571 } 3572 mutex_unlock(&mdsc->mutex); 3573 dout("wait_requests done\n"); 3574 } 3575 3576 /* 3577 * called before mount is ro, and before dentries are torn down. 3578 * (hmm, does this still race with new lookups?) 3579 */ 3580 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) 3581 { 3582 dout("pre_umount\n"); 3583 mdsc->stopping = 1; 3584 3585 drop_leases(mdsc); 3586 ceph_flush_dirty_caps(mdsc); 3587 wait_requests(mdsc); 3588 3589 /* 3590 * wait for reply handlers to drop their request refs and 3591 * their inode/dcache refs 3592 */ 3593 ceph_msgr_flush(); 3594 } 3595 3596 /* 3597 * wait for all write mds requests to flush. 
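 *
 * We walk the request tree in tid order up to @want_tid and block on
 * r_safe_completion for each write op (SETFILELOCK is skipped even
 * though its op code has the write bit set).  Holding a reference on
 * both the current and the next request keeps the walk valid while
 * mdsc->mutex is dropped for the wait.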
3598 */ 3599 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) 3600 { 3601 struct ceph_mds_request *req = NULL, *nextreq; 3602 struct rb_node *n; 3603 3604 mutex_lock(&mdsc->mutex); 3605 dout("wait_unsafe_requests want %lld\n", want_tid); 3606 restart: 3607 req = __get_oldest_req(mdsc); 3608 while (req && req->r_tid <= want_tid) { 3609 /* find next request */ 3610 n = rb_next(&req->r_node); 3611 if (n) 3612 nextreq = rb_entry(n, struct ceph_mds_request, r_node); 3613 else 3614 nextreq = NULL; 3615 if (req->r_op != CEPH_MDS_OP_SETFILELOCK && 3616 (req->r_op & CEPH_MDS_OP_WRITE)) { 3617 /* write op */ 3618 ceph_mdsc_get_request(req); 3619 if (nextreq) 3620 ceph_mdsc_get_request(nextreq); 3621 mutex_unlock(&mdsc->mutex); 3622 dout("wait_unsafe_requests wait on %llu (want %llu)\n", 3623 req->r_tid, want_tid); 3624 wait_for_completion(&req->r_safe_completion); 3625 mutex_lock(&mdsc->mutex); 3626 ceph_mdsc_put_request(req); 3627 if (!nextreq) 3628 break; /* next dne before, so we're done! */ 3629 if (RB_EMPTY_NODE(&nextreq->r_node)) { 3630 /* next request was removed from tree */ 3631 ceph_mdsc_put_request(nextreq); 3632 goto restart; 3633 } 3634 ceph_mdsc_put_request(nextreq); /* won't go away */ 3635 } 3636 req = nextreq; 3637 } 3638 mutex_unlock(&mdsc->mutex); 3639 dout("wait_unsafe_requests done\n"); 3640 } 3641 3642 void ceph_mdsc_sync(struct ceph_mds_client *mdsc) 3643 { 3644 u64 want_tid, want_flush; 3645 3646 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) 3647 return; 3648 3649 dout("sync\n"); 3650 mutex_lock(&mdsc->mutex); 3651 want_tid = mdsc->last_tid; 3652 mutex_unlock(&mdsc->mutex); 3653 3654 ceph_flush_dirty_caps(mdsc); 3655 spin_lock(&mdsc->cap_dirty_lock); 3656 want_flush = mdsc->last_cap_flush_tid; 3657 if (!list_empty(&mdsc->cap_flush_list)) { 3658 struct ceph_cap_flush *cf = 3659 list_last_entry(&mdsc->cap_flush_list, 3660 struct ceph_cap_flush, g_list); 3661 cf->wake = true; 3662 } 3663 spin_unlock(&mdsc->cap_dirty_lock); 3664 3665 dout("sync want tid %lld flush_seq %lld\n", 3666 want_tid, want_flush); 3667 3668 wait_unsafe_requests(mdsc, want_tid); 3669 wait_caps_flush(mdsc, want_flush); 3670 } 3671 3672 /* 3673 * true if all sessions are closed, or we force unmount 3674 */ 3675 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped) 3676 { 3677 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) 3678 return true; 3679 return atomic_read(&mdsc->num_sessions) <= skipped; 3680 } 3681 3682 /* 3683 * called after sb is ro. 
3684 */ 3685 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) 3686 { 3687 struct ceph_options *opts = mdsc->fsc->client->options; 3688 struct ceph_mds_session *session; 3689 int i; 3690 int skipped = 0; 3691 3692 dout("close_sessions\n"); 3693 3694 /* close sessions */ 3695 mutex_lock(&mdsc->mutex); 3696 for (i = 0; i < mdsc->max_sessions; i++) { 3697 session = __ceph_lookup_mds_session(mdsc, i); 3698 if (!session) 3699 continue; 3700 mutex_unlock(&mdsc->mutex); 3701 mutex_lock(&session->s_mutex); 3702 if (__close_session(mdsc, session) <= 0) 3703 skipped++; 3704 mutex_unlock(&session->s_mutex); 3705 ceph_put_mds_session(session); 3706 mutex_lock(&mdsc->mutex); 3707 } 3708 mutex_unlock(&mdsc->mutex); 3709 3710 dout("waiting for sessions to close\n"); 3711 wait_event_timeout(mdsc->session_close_wq, 3712 done_closing_sessions(mdsc, skipped), 3713 ceph_timeout_jiffies(opts->mount_timeout)); 3714 3715 /* tear down remaining sessions */ 3716 mutex_lock(&mdsc->mutex); 3717 for (i = 0; i < mdsc->max_sessions; i++) { 3718 if (mdsc->sessions[i]) { 3719 session = get_session(mdsc->sessions[i]); 3720 __unregister_session(mdsc, session); 3721 mutex_unlock(&mdsc->mutex); 3722 mutex_lock(&session->s_mutex); 3723 remove_session_caps(session); 3724 mutex_unlock(&session->s_mutex); 3725 ceph_put_mds_session(session); 3726 mutex_lock(&mdsc->mutex); 3727 } 3728 } 3729 WARN_ON(!list_empty(&mdsc->cap_delay_list)); 3730 mutex_unlock(&mdsc->mutex); 3731 3732 ceph_cleanup_empty_realms(mdsc); 3733 3734 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ 3735 3736 dout("stopped\n"); 3737 } 3738 3739 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) 3740 { 3741 struct ceph_mds_session *session; 3742 int mds; 3743 3744 dout("force umount\n"); 3745 3746 mutex_lock(&mdsc->mutex); 3747 for (mds = 0; mds < mdsc->max_sessions; mds++) { 3748 session = __ceph_lookup_mds_session(mdsc, mds); 3749 if (!session) 3750 continue; 3751 mutex_unlock(&mdsc->mutex); 3752 mutex_lock(&session->s_mutex); 3753 __close_session(mdsc, session); 3754 if (session->s_state == CEPH_MDS_SESSION_CLOSING) { 3755 cleanup_session_requests(mdsc, session); 3756 remove_session_caps(session); 3757 } 3758 mutex_unlock(&session->s_mutex); 3759 ceph_put_mds_session(session); 3760 mutex_lock(&mdsc->mutex); 3761 kick_requests(mdsc, mds); 3762 } 3763 __wake_requests(mdsc, &mdsc->waiting_for_map); 3764 mutex_unlock(&mdsc->mutex); 3765 } 3766 3767 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) 3768 { 3769 dout("stop\n"); 3770 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ 3771 if (mdsc->mdsmap) 3772 ceph_mdsmap_destroy(mdsc->mdsmap); 3773 kfree(mdsc->sessions); 3774 ceph_caps_finalize(mdsc); 3775 ceph_pool_perm_destroy(mdsc); 3776 } 3777 3778 void ceph_mdsc_destroy(struct ceph_fs_client *fsc) 3779 { 3780 struct ceph_mds_client *mdsc = fsc->mdsc; 3781 dout("mdsc_destroy %p\n", mdsc); 3782 3783 /* flush out any connection work with references to us */ 3784 ceph_msgr_flush(); 3785 3786 ceph_mdsc_stop(mdsc); 3787 3788 fsc->mdsc = NULL; 3789 kfree(mdsc); 3790 dout("mdsc_destroy %p done\n", mdsc); 3791 } 3792 3793 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) 3794 { 3795 struct ceph_fs_client *fsc = mdsc->fsc; 3796 const char *mds_namespace = fsc->mount_options->mds_namespace; 3797 void *p = msg->front.iov_base; 3798 void *end = p + msg->front.iov_len; 3799 u32 epoch; 3800 u32 map_len; 3801 u32 num_fs; 3802 u32 mount_fscid = (u32)-1; 3803 u8 struct_v, struct_cv; 3804 int 
err = -EINVAL; 3805 3806 ceph_decode_need(&p, end, sizeof(u32), bad); 3807 epoch = ceph_decode_32(&p); 3808 3809 dout("handle_fsmap epoch %u\n", epoch); 3810 3811 ceph_decode_need(&p, end, 2 + sizeof(u32), bad); 3812 struct_v = ceph_decode_8(&p); 3813 struct_cv = ceph_decode_8(&p); 3814 map_len = ceph_decode_32(&p); 3815 3816 ceph_decode_need(&p, end, sizeof(u32) * 3, bad); 3817 p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */ 3818 3819 num_fs = ceph_decode_32(&p); 3820 while (num_fs-- > 0) { 3821 void *info_p, *info_end; 3822 u32 info_len; 3823 u8 info_v, info_cv; 3824 u32 fscid, namelen; 3825 3826 ceph_decode_need(&p, end, 2 + sizeof(u32), bad); 3827 info_v = ceph_decode_8(&p); 3828 info_cv = ceph_decode_8(&p); 3829 info_len = ceph_decode_32(&p); 3830 ceph_decode_need(&p, end, info_len, bad); 3831 info_p = p; 3832 info_end = p + info_len; 3833 p = info_end; 3834 3835 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad); 3836 fscid = ceph_decode_32(&info_p); 3837 namelen = ceph_decode_32(&info_p); 3838 ceph_decode_need(&info_p, info_end, namelen, bad); 3839 3840 if (mds_namespace && 3841 strlen(mds_namespace) == namelen && 3842 !strncmp(mds_namespace, (char *)info_p, namelen)) { 3843 mount_fscid = fscid; 3844 break; 3845 } 3846 } 3847 3848 ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch); 3849 if (mount_fscid != (u32)-1) { 3850 fsc->client->monc.fs_cluster_id = mount_fscid; 3851 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, 3852 0, true); 3853 ceph_monc_renew_subs(&fsc->client->monc); 3854 } else { 3855 err = -ENOENT; 3856 goto err_out; 3857 } 3858 return; 3859 bad: 3860 pr_err("error decoding fsmap\n"); 3861 err_out: 3862 mutex_lock(&mdsc->mutex); 3863 mdsc->mdsmap_err = -ENOENT; 3864 __wake_requests(mdsc, &mdsc->waiting_for_map); 3865 mutex_unlock(&mdsc->mutex); 3866 return; 3867 } 3868 3869 /* 3870 * handle mds map update. 3871 */ 3872 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) 3873 { 3874 u32 epoch; 3875 u32 maplen; 3876 void *p = msg->front.iov_base; 3877 void *end = p + msg->front.iov_len; 3878 struct ceph_mdsmap *newmap, *oldmap; 3879 struct ceph_fsid fsid; 3880 int err = -EINVAL; 3881 3882 ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); 3883 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 3884 if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0) 3885 return; 3886 epoch = ceph_decode_32(&p); 3887 maplen = ceph_decode_32(&p); 3888 dout("handle_map epoch %u len %d\n", epoch, (int)maplen); 3889 3890 /* do we need it? 
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_mdsmap(mdsc, msg);
		break;
	case CEPH_MSG_FS_MAP_USER:
		ceph_mdsc_handle_fsmap(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	struct ceph_auth_handshake *auth = &s->s_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}


static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}
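
/*
 * Allocate a message to receive an incoming frame, sized from its
 * header.  If the messenger already has an in-flight message for
 * this connection, reuse it instead of allocating a new one.
 */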
static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr, int *skip)
{
	struct ceph_msg *msg;
	int type = (int) le16_to_cpu(hdr->type);
	int front_len = (int) le32_to_cpu(hdr->front_len);

	if (con->in_msg)
		return con->in_msg;

	*skip = 0;
	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg) {
		pr_err("unable to allocate msg type %d len %d\n",
		       type, front_len);
		return NULL;
	}

	return msg;
}

static int mds_sign_message(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int mds_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
	.alloc_msg = mds_alloc_msg,
	.sign_message = mds_sign_message,
	.check_message_signature = mds_check_message_signature,
};

/* eof */