1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/fs.h> 4 #include <linux/wait.h> 5 #include <linux/slab.h> 6 #include <linux/gfp.h> 7 #include <linux/sched.h> 8 #include <linux/debugfs.h> 9 #include <linux/seq_file.h> 10 #include <linux/utsname.h> 11 #include <linux/ratelimit.h> 12 13 #include "super.h" 14 #include "mds_client.h" 15 16 #include <linux/ceph/ceph_features.h> 17 #include <linux/ceph/messenger.h> 18 #include <linux/ceph/decode.h> 19 #include <linux/ceph/pagelist.h> 20 #include <linux/ceph/auth.h> 21 #include <linux/ceph/debugfs.h> 22 23 /* 24 * A cluster of MDS (metadata server) daemons is responsible for 25 * managing the file system namespace (the directory hierarchy and 26 * inodes) and for coordinating shared access to storage. Metadata is 27 * partitioning hierarchically across a number of servers, and that 28 * partition varies over time as the cluster adjusts the distribution 29 * in order to balance load. 30 * 31 * The MDS client is primarily responsible to managing synchronous 32 * metadata requests for operations like open, unlink, and so forth. 33 * If there is a MDS failure, we find out about it when we (possibly 34 * request and) receive a new MDS map, and can resubmit affected 35 * requests. 36 * 37 * For the most part, though, we take advantage of a lossless 38 * communications channel to the MDS, and do not need to worry about 39 * timing out or resubmitting requests. 40 * 41 * We maintain a stateful "session" with each MDS we interact with. 42 * Within each session, we sent periodic heartbeat messages to ensure 43 * any capabilities or leases we have been issues remain valid. If 44 * the session times out and goes stale, our leases and capabilities 45 * are no longer valid. 46 */ 47 48 struct ceph_reconnect_state { 49 int nr_caps; 50 struct ceph_pagelist *pagelist; 51 unsigned msg_version; 52 }; 53 54 static void __wake_requests(struct ceph_mds_client *mdsc, 55 struct list_head *head); 56 57 static const struct ceph_connection_operations mds_con_ops; 58 59 60 /* 61 * mds reply parsing 62 */ 63 64 /* 65 * parse individual inode info 66 */ 67 static int parse_reply_info_in(void **p, void *end, 68 struct ceph_mds_reply_info_in *info, 69 u64 features) 70 { 71 int err = -EIO; 72 73 info->in = *p; 74 *p += sizeof(struct ceph_mds_reply_inode) + 75 sizeof(*info->in->fragtree.splits) * 76 le32_to_cpu(info->in->fragtree.nsplits); 77 78 ceph_decode_32_safe(p, end, info->symlink_len, bad); 79 ceph_decode_need(p, end, info->symlink_len, bad); 80 info->symlink = *p; 81 *p += info->symlink_len; 82 83 if (features & CEPH_FEATURE_DIRLAYOUTHASH) 84 ceph_decode_copy_safe(p, end, &info->dir_layout, 85 sizeof(info->dir_layout), bad); 86 else 87 memset(&info->dir_layout, 0, sizeof(info->dir_layout)); 88 89 ceph_decode_32_safe(p, end, info->xattr_len, bad); 90 ceph_decode_need(p, end, info->xattr_len, bad); 91 info->xattr_data = *p; 92 *p += info->xattr_len; 93 94 if (features & CEPH_FEATURE_MDS_INLINE_DATA) { 95 ceph_decode_64_safe(p, end, info->inline_version, bad); 96 ceph_decode_32_safe(p, end, info->inline_len, bad); 97 ceph_decode_need(p, end, info->inline_len, bad); 98 info->inline_data = *p; 99 *p += info->inline_len; 100 } else 101 info->inline_version = CEPH_INLINE_NONE; 102 103 info->pool_ns_len = 0; 104 info->pool_ns_data = NULL; 105 if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) { 106 ceph_decode_32_safe(p, end, info->pool_ns_len, bad); 107 if (info->pool_ns_len > 0) { 108 ceph_decode_need(p, end, info->pool_ns_len, bad); 109 info->pool_ns_data = *p; 110 *p += 
info->pool_ns_len; 111 } 112 } 113 114 return 0; 115 bad: 116 return err; 117 } 118 119 /* 120 * parse a normal reply, which may contain a (dir+)dentry and/or a 121 * target inode. 122 */ 123 static int parse_reply_info_trace(void **p, void *end, 124 struct ceph_mds_reply_info_parsed *info, 125 u64 features) 126 { 127 int err; 128 129 if (info->head->is_dentry) { 130 err = parse_reply_info_in(p, end, &info->diri, features); 131 if (err < 0) 132 goto out_bad; 133 134 if (unlikely(*p + sizeof(*info->dirfrag) > end)) 135 goto bad; 136 info->dirfrag = *p; 137 *p += sizeof(*info->dirfrag) + 138 sizeof(u32)*le32_to_cpu(info->dirfrag->ndist); 139 if (unlikely(*p > end)) 140 goto bad; 141 142 ceph_decode_32_safe(p, end, info->dname_len, bad); 143 ceph_decode_need(p, end, info->dname_len, bad); 144 info->dname = *p; 145 *p += info->dname_len; 146 info->dlease = *p; 147 *p += sizeof(*info->dlease); 148 } 149 150 if (info->head->is_target) { 151 err = parse_reply_info_in(p, end, &info->targeti, features); 152 if (err < 0) 153 goto out_bad; 154 } 155 156 if (unlikely(*p != end)) 157 goto bad; 158 return 0; 159 160 bad: 161 err = -EIO; 162 out_bad: 163 pr_err("problem parsing mds trace %d\n", err); 164 return err; 165 } 166 167 /* 168 * parse readdir results 169 */ 170 static int parse_reply_info_dir(void **p, void *end, 171 struct ceph_mds_reply_info_parsed *info, 172 u64 features) 173 { 174 u32 num, i = 0; 175 int err; 176 177 info->dir_dir = *p; 178 if (*p + sizeof(*info->dir_dir) > end) 179 goto bad; 180 *p += sizeof(*info->dir_dir) + 181 sizeof(u32)*le32_to_cpu(info->dir_dir->ndist); 182 if (*p > end) 183 goto bad; 184 185 ceph_decode_need(p, end, sizeof(num) + 2, bad); 186 num = ceph_decode_32(p); 187 { 188 u16 flags = ceph_decode_16(p); 189 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END); 190 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE); 191 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER); 192 } 193 if (num == 0) 194 goto done; 195 196 BUG_ON(!info->dir_entries); 197 if ((unsigned long)(info->dir_entries + num) > 198 (unsigned long)info->dir_entries + info->dir_buf_size) { 199 pr_err("dir contents are larger than expected\n"); 200 WARN_ON(1); 201 goto bad; 202 } 203 204 info->dir_nr = num; 205 while (num) { 206 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i; 207 /* dentry */ 208 ceph_decode_need(p, end, sizeof(u32)*2, bad); 209 rde->name_len = ceph_decode_32(p); 210 ceph_decode_need(p, end, rde->name_len, bad); 211 rde->name = *p; 212 *p += rde->name_len; 213 dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name); 214 rde->lease = *p; 215 *p += sizeof(struct ceph_mds_reply_lease); 216 217 /* inode */ 218 err = parse_reply_info_in(p, end, &rde->inode, features); 219 if (err < 0) 220 goto out_bad; 221 /* ceph_readdir_prepopulate() will update it */ 222 rde->offset = 0; 223 i++; 224 num--; 225 } 226 227 done: 228 if (*p != end) 229 goto bad; 230 return 0; 231 232 bad: 233 err = -EIO; 234 out_bad: 235 pr_err("problem parsing dir contents %d\n", err); 236 return err; 237 } 238 239 /* 240 * parse fcntl F_GETLK results 241 */ 242 static int parse_reply_info_filelock(void **p, void *end, 243 struct ceph_mds_reply_info_parsed *info, 244 u64 features) 245 { 246 if (*p + sizeof(*info->filelock_reply) > end) 247 goto bad; 248 249 info->filelock_reply = *p; 250 *p += sizeof(*info->filelock_reply); 251 252 if (unlikely(*p != end)) 253 goto bad; 254 return 0; 255 256 bad: 257 return -EIO; 258 } 259 260 /* 261 * parse create results 262 */ 263 static int 
parse_reply_info_create(void **p, void *end, 264 struct ceph_mds_reply_info_parsed *info, 265 u64 features) 266 { 267 if (features & CEPH_FEATURE_REPLY_CREATE_INODE) { 268 if (*p == end) { 269 info->has_create_ino = false; 270 } else { 271 info->has_create_ino = true; 272 info->ino = ceph_decode_64(p); 273 } 274 } 275 276 if (unlikely(*p != end)) 277 goto bad; 278 return 0; 279 280 bad: 281 return -EIO; 282 } 283 284 /* 285 * parse extra results 286 */ 287 static int parse_reply_info_extra(void **p, void *end, 288 struct ceph_mds_reply_info_parsed *info, 289 u64 features) 290 { 291 u32 op = le32_to_cpu(info->head->op); 292 293 if (op == CEPH_MDS_OP_GETFILELOCK) 294 return parse_reply_info_filelock(p, end, info, features); 295 else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP) 296 return parse_reply_info_dir(p, end, info, features); 297 else if (op == CEPH_MDS_OP_CREATE) 298 return parse_reply_info_create(p, end, info, features); 299 else 300 return -EIO; 301 } 302 303 /* 304 * parse entire mds reply 305 */ 306 static int parse_reply_info(struct ceph_msg *msg, 307 struct ceph_mds_reply_info_parsed *info, 308 u64 features) 309 { 310 void *p, *end; 311 u32 len; 312 int err; 313 314 info->head = msg->front.iov_base; 315 p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head); 316 end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); 317 318 /* trace */ 319 ceph_decode_32_safe(&p, end, len, bad); 320 if (len > 0) { 321 ceph_decode_need(&p, end, len, bad); 322 err = parse_reply_info_trace(&p, p+len, info, features); 323 if (err < 0) 324 goto out_bad; 325 } 326 327 /* extra */ 328 ceph_decode_32_safe(&p, end, len, bad); 329 if (len > 0) { 330 ceph_decode_need(&p, end, len, bad); 331 err = parse_reply_info_extra(&p, p+len, info, features); 332 if (err < 0) 333 goto out_bad; 334 } 335 336 /* snap blob */ 337 ceph_decode_32_safe(&p, end, len, bad); 338 info->snapblob_len = len; 339 info->snapblob = p; 340 p += len; 341 342 if (p != end) 343 goto bad; 344 return 0; 345 346 bad: 347 err = -EIO; 348 out_bad: 349 pr_err("mds parse_reply err %d\n", err); 350 return err; 351 } 352 353 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info) 354 { 355 if (!info->dir_entries) 356 return; 357 free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size)); 358 } 359 360 361 /* 362 * sessions 363 */ 364 const char *ceph_session_state_name(int s) 365 { 366 switch (s) { 367 case CEPH_MDS_SESSION_NEW: return "new"; 368 case CEPH_MDS_SESSION_OPENING: return "opening"; 369 case CEPH_MDS_SESSION_OPEN: return "open"; 370 case CEPH_MDS_SESSION_HUNG: return "hung"; 371 case CEPH_MDS_SESSION_CLOSING: return "closing"; 372 case CEPH_MDS_SESSION_RESTARTING: return "restarting"; 373 case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting"; 374 case CEPH_MDS_SESSION_REJECTED: return "rejected"; 375 default: return "???"; 376 } 377 } 378 379 static struct ceph_mds_session *get_session(struct ceph_mds_session *s) 380 { 381 if (atomic_inc_not_zero(&s->s_ref)) { 382 dout("mdsc get_session %p %d -> %d\n", s, 383 atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref)); 384 return s; 385 } else { 386 dout("mdsc get_session %p 0 -- FAIL", s); 387 return NULL; 388 } 389 } 390 391 void ceph_put_mds_session(struct ceph_mds_session *s) 392 { 393 dout("mdsc put_session %p %d -> %d\n", s, 394 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); 395 if (atomic_dec_and_test(&s->s_ref)) { 396 if (s->s_auth.authorizer) 397 ceph_auth_destroy_authorizer(s->s_auth.authorizer); 398 kfree(s); 
399 } 400 } 401 402 /* 403 * called under mdsc->mutex 404 */ 405 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc, 406 int mds) 407 { 408 struct ceph_mds_session *session; 409 410 if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL) 411 return NULL; 412 session = mdsc->sessions[mds]; 413 dout("lookup_mds_session %p %d\n", session, 414 atomic_read(&session->s_ref)); 415 get_session(session); 416 return session; 417 } 418 419 static bool __have_session(struct ceph_mds_client *mdsc, int mds) 420 { 421 if (mds >= mdsc->max_sessions) 422 return false; 423 return mdsc->sessions[mds]; 424 } 425 426 static int __verify_registered_session(struct ceph_mds_client *mdsc, 427 struct ceph_mds_session *s) 428 { 429 if (s->s_mds >= mdsc->max_sessions || 430 mdsc->sessions[s->s_mds] != s) 431 return -ENOENT; 432 return 0; 433 } 434 435 /* 436 * create+register a new session for given mds. 437 * called under mdsc->mutex. 438 */ 439 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, 440 int mds) 441 { 442 struct ceph_mds_session *s; 443 444 if (mds >= mdsc->mdsmap->m_max_mds) 445 return ERR_PTR(-EINVAL); 446 447 s = kzalloc(sizeof(*s), GFP_NOFS); 448 if (!s) 449 return ERR_PTR(-ENOMEM); 450 s->s_mdsc = mdsc; 451 s->s_mds = mds; 452 s->s_state = CEPH_MDS_SESSION_NEW; 453 s->s_ttl = 0; 454 s->s_seq = 0; 455 mutex_init(&s->s_mutex); 456 457 ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr); 458 459 spin_lock_init(&s->s_gen_ttl_lock); 460 s->s_cap_gen = 0; 461 s->s_cap_ttl = jiffies - 1; 462 463 spin_lock_init(&s->s_cap_lock); 464 s->s_renew_requested = 0; 465 s->s_renew_seq = 0; 466 INIT_LIST_HEAD(&s->s_caps); 467 s->s_nr_caps = 0; 468 s->s_trim_caps = 0; 469 atomic_set(&s->s_ref, 1); 470 INIT_LIST_HEAD(&s->s_waiting); 471 INIT_LIST_HEAD(&s->s_unsafe); 472 s->s_num_cap_releases = 0; 473 s->s_cap_reconnect = 0; 474 s->s_cap_iterator = NULL; 475 INIT_LIST_HEAD(&s->s_cap_releases); 476 INIT_LIST_HEAD(&s->s_cap_flushing); 477 478 dout("register_session mds%d\n", mds); 479 if (mds >= mdsc->max_sessions) { 480 int newmax = 1 << get_count_order(mds+1); 481 struct ceph_mds_session **sa; 482 483 dout("register_session realloc to %d\n", newmax); 484 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS); 485 if (sa == NULL) 486 goto fail_realloc; 487 if (mdsc->sessions) { 488 memcpy(sa, mdsc->sessions, 489 mdsc->max_sessions * sizeof(void *)); 490 kfree(mdsc->sessions); 491 } 492 mdsc->sessions = sa; 493 mdsc->max_sessions = newmax; 494 } 495 mdsc->sessions[mds] = s; 496 atomic_inc(&mdsc->num_sessions); 497 atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */ 498 499 ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds, 500 ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); 501 502 return s; 503 504 fail_realloc: 505 kfree(s); 506 return ERR_PTR(-ENOMEM); 507 } 508 509 /* 510 * called under mdsc->mutex 511 */ 512 static void __unregister_session(struct ceph_mds_client *mdsc, 513 struct ceph_mds_session *s) 514 { 515 dout("__unregister_session mds%d %p\n", s->s_mds, s); 516 BUG_ON(mdsc->sessions[s->s_mds] != s); 517 mdsc->sessions[s->s_mds] = NULL; 518 ceph_con_close(&s->s_con); 519 ceph_put_mds_session(s); 520 atomic_dec(&mdsc->num_sessions); 521 } 522 523 /* 524 * drop session refs in request. 
525 * 526 * should be last request ref, or hold mdsc->mutex 527 */ 528 static void put_request_session(struct ceph_mds_request *req) 529 { 530 if (req->r_session) { 531 ceph_put_mds_session(req->r_session); 532 req->r_session = NULL; 533 } 534 } 535 536 void ceph_mdsc_release_request(struct kref *kref) 537 { 538 struct ceph_mds_request *req = container_of(kref, 539 struct ceph_mds_request, 540 r_kref); 541 destroy_reply_info(&req->r_reply_info); 542 if (req->r_request) 543 ceph_msg_put(req->r_request); 544 if (req->r_reply) 545 ceph_msg_put(req->r_reply); 546 if (req->r_inode) { 547 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); 548 iput(req->r_inode); 549 } 550 if (req->r_parent) 551 ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); 552 iput(req->r_target_inode); 553 if (req->r_dentry) 554 dput(req->r_dentry); 555 if (req->r_old_dentry) 556 dput(req->r_old_dentry); 557 if (req->r_old_dentry_dir) { 558 /* 559 * track (and drop pins for) r_old_dentry_dir 560 * separately, since r_old_dentry's d_parent may have 561 * changed between the dir mutex being dropped and 562 * this request being freed. 563 */ 564 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir), 565 CEPH_CAP_PIN); 566 iput(req->r_old_dentry_dir); 567 } 568 kfree(req->r_path1); 569 kfree(req->r_path2); 570 if (req->r_pagelist) 571 ceph_pagelist_release(req->r_pagelist); 572 put_request_session(req); 573 ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation); 574 kfree(req); 575 } 576 577 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node) 578 579 /* 580 * lookup session, bump ref if found. 581 * 582 * called under mdsc->mutex. 583 */ 584 static struct ceph_mds_request * 585 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid) 586 { 587 struct ceph_mds_request *req; 588 589 req = lookup_request(&mdsc->request_tree, tid); 590 if (req) 591 ceph_mdsc_get_request(req); 592 593 return req; 594 } 595 596 /* 597 * Register an in-flight request, and assign a tid. Link to directory 598 * are modifying (if any). 599 * 600 * Called under mdsc->mutex. 601 */ 602 static void __register_request(struct ceph_mds_client *mdsc, 603 struct ceph_mds_request *req, 604 struct inode *dir) 605 { 606 req->r_tid = ++mdsc->last_tid; 607 if (req->r_num_caps) 608 ceph_reserve_caps(mdsc, &req->r_caps_reservation, 609 req->r_num_caps); 610 dout("__register_request %p tid %lld\n", req, req->r_tid); 611 ceph_mdsc_get_request(req); 612 insert_request(&mdsc->request_tree, req); 613 614 req->r_uid = current_fsuid(); 615 req->r_gid = current_fsgid(); 616 617 if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK) 618 mdsc->oldest_tid = req->r_tid; 619 620 if (dir) { 621 ihold(dir); 622 req->r_unsafe_dir = dir; 623 } 624 } 625 626 static void __unregister_request(struct ceph_mds_client *mdsc, 627 struct ceph_mds_request *req) 628 { 629 dout("__unregister_request %p tid %lld\n", req, req->r_tid); 630 631 /* Never leave an unregistered request on an unsafe list! 
*/ 632 list_del_init(&req->r_unsafe_item); 633 634 if (req->r_tid == mdsc->oldest_tid) { 635 struct rb_node *p = rb_next(&req->r_node); 636 mdsc->oldest_tid = 0; 637 while (p) { 638 struct ceph_mds_request *next_req = 639 rb_entry(p, struct ceph_mds_request, r_node); 640 if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) { 641 mdsc->oldest_tid = next_req->r_tid; 642 break; 643 } 644 p = rb_next(p); 645 } 646 } 647 648 erase_request(&mdsc->request_tree, req); 649 650 if (req->r_unsafe_dir && 651 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { 652 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); 653 spin_lock(&ci->i_unsafe_lock); 654 list_del_init(&req->r_unsafe_dir_item); 655 spin_unlock(&ci->i_unsafe_lock); 656 } 657 if (req->r_target_inode && 658 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { 659 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); 660 spin_lock(&ci->i_unsafe_lock); 661 list_del_init(&req->r_unsafe_target_item); 662 spin_unlock(&ci->i_unsafe_lock); 663 } 664 665 if (req->r_unsafe_dir) { 666 iput(req->r_unsafe_dir); 667 req->r_unsafe_dir = NULL; 668 } 669 670 complete_all(&req->r_safe_completion); 671 672 ceph_mdsc_put_request(req); 673 } 674 675 /* 676 * Walk back up the dentry tree until we hit a dentry representing a 677 * non-snapshot inode. We do this using the rcu_read_lock (which must be held 678 * when calling this) to ensure that the objects won't disappear while we're 679 * working with them. Once we hit a candidate dentry, we attempt to take a 680 * reference to it, and return that as the result. 681 */ 682 static struct inode *get_nonsnap_parent(struct dentry *dentry) 683 { 684 struct inode *inode = NULL; 685 686 while (dentry && !IS_ROOT(dentry)) { 687 inode = d_inode_rcu(dentry); 688 if (!inode || ceph_snap(inode) == CEPH_NOSNAP) 689 break; 690 dentry = dentry->d_parent; 691 } 692 if (inode) 693 inode = igrab(inode); 694 return inode; 695 } 696 697 /* 698 * Choose mds to send request to next. If there is a hint set in the 699 * request (e.g., due to a prior forward hint from the mds), use that. 700 * Otherwise, consult frag tree and/or caps to identify the 701 * appropriate mds. If all else fails, choose randomly. 702 * 703 * Called under mdsc->mutex. 704 */ 705 static int __choose_mds(struct ceph_mds_client *mdsc, 706 struct ceph_mds_request *req) 707 { 708 struct inode *inode; 709 struct ceph_inode_info *ci; 710 struct ceph_cap *cap; 711 int mode = req->r_direct_mode; 712 int mds = -1; 713 u32 hash = req->r_direct_hash; 714 bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags); 715 716 /* 717 * is there a specific mds we should try? ignore hint if we have 718 * no session and the mds is not up (active or recovering). 719 */ 720 if (req->r_resend_mds >= 0 && 721 (__have_session(mdsc, req->r_resend_mds) || 722 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { 723 dout("choose_mds using resend_mds mds%d\n", 724 req->r_resend_mds); 725 return req->r_resend_mds; 726 } 727 728 if (mode == USE_RANDOM_MDS) 729 goto random; 730 731 inode = NULL; 732 if (req->r_inode) { 733 inode = req->r_inode; 734 ihold(inode); 735 } else if (req->r_dentry) { 736 /* ignore race with rename; old or new d_parent is okay */ 737 struct dentry *parent; 738 struct inode *dir; 739 740 rcu_read_lock(); 741 parent = req->r_dentry->d_parent; 742 dir = req->r_parent ? 
: d_inode_rcu(parent); 743 744 if (!dir || dir->i_sb != mdsc->fsc->sb) { 745 /* not this fs or parent went negative */ 746 inode = d_inode(req->r_dentry); 747 if (inode) 748 ihold(inode); 749 } else if (ceph_snap(dir) != CEPH_NOSNAP) { 750 /* direct snapped/virtual snapdir requests 751 * based on parent dir inode */ 752 inode = get_nonsnap_parent(parent); 753 dout("__choose_mds using nonsnap parent %p\n", inode); 754 } else { 755 /* dentry target */ 756 inode = d_inode(req->r_dentry); 757 if (!inode || mode == USE_AUTH_MDS) { 758 /* dir + name */ 759 inode = igrab(dir); 760 hash = ceph_dentry_hash(dir, req->r_dentry); 761 is_hash = true; 762 } else { 763 ihold(inode); 764 } 765 } 766 rcu_read_unlock(); 767 } 768 769 dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash, 770 (int)hash, mode); 771 if (!inode) 772 goto random; 773 ci = ceph_inode(inode); 774 775 if (is_hash && S_ISDIR(inode->i_mode)) { 776 struct ceph_inode_frag frag; 777 int found; 778 779 ceph_choose_frag(ci, hash, &frag, &found); 780 if (found) { 781 if (mode == USE_ANY_MDS && frag.ndist > 0) { 782 u8 r; 783 784 /* choose a random replica */ 785 get_random_bytes(&r, 1); 786 r %= frag.ndist; 787 mds = frag.dist[r]; 788 dout("choose_mds %p %llx.%llx " 789 "frag %u mds%d (%d/%d)\n", 790 inode, ceph_vinop(inode), 791 frag.frag, mds, 792 (int)r, frag.ndist); 793 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= 794 CEPH_MDS_STATE_ACTIVE) 795 goto out; 796 } 797 798 /* since this file/dir wasn't known to be 799 * replicated, then we want to look for the 800 * authoritative mds. */ 801 mode = USE_AUTH_MDS; 802 if (frag.mds >= 0) { 803 /* choose auth mds */ 804 mds = frag.mds; 805 dout("choose_mds %p %llx.%llx " 806 "frag %u mds%d (auth)\n", 807 inode, ceph_vinop(inode), frag.frag, mds); 808 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= 809 CEPH_MDS_STATE_ACTIVE) 810 goto out; 811 } 812 } 813 } 814 815 spin_lock(&ci->i_ceph_lock); 816 cap = NULL; 817 if (mode == USE_AUTH_MDS) 818 cap = ci->i_auth_cap; 819 if (!cap && !RB_EMPTY_ROOT(&ci->i_caps)) 820 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); 821 if (!cap) { 822 spin_unlock(&ci->i_ceph_lock); 823 iput(inode); 824 goto random; 825 } 826 mds = cap->session->s_mds; 827 dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n", 828 inode, ceph_vinop(inode), mds, 829 cap == ci->i_auth_cap ? "auth " : "", cap); 830 spin_unlock(&ci->i_ceph_lock); 831 out: 832 iput(inode); 833 return mds; 834 835 random: 836 mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); 837 dout("choose_mds chose random mds%d\n", mds); 838 return mds; 839 } 840 841 842 /* 843 * session messages 844 */ 845 static struct ceph_msg *create_session_msg(u32 op, u64 seq) 846 { 847 struct ceph_msg *msg; 848 struct ceph_mds_session_head *h; 849 850 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS, 851 false); 852 if (!msg) { 853 pr_err("create_session_msg ENOMEM creating msg\n"); 854 return NULL; 855 } 856 h = msg->front.iov_base; 857 h->op = cpu_to_le32(op); 858 h->seq = cpu_to_le64(seq); 859 860 return msg; 861 } 862 863 /* 864 * session message, specialization for CEPH_SESSION_REQUEST_OPEN 865 * to include additional client metadata fields. 
866 */ 867 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq) 868 { 869 struct ceph_msg *msg; 870 struct ceph_mds_session_head *h; 871 int i = -1; 872 int metadata_bytes = 0; 873 int metadata_key_count = 0; 874 struct ceph_options *opt = mdsc->fsc->client->options; 875 struct ceph_mount_options *fsopt = mdsc->fsc->mount_options; 876 void *p; 877 878 const char* metadata[][2] = { 879 {"hostname", utsname()->nodename}, 880 {"kernel_version", utsname()->release}, 881 {"entity_id", opt->name ? : ""}, 882 {"root", fsopt->server_path ? : "/"}, 883 {NULL, NULL} 884 }; 885 886 /* Calculate serialized length of metadata */ 887 metadata_bytes = 4; /* map length */ 888 for (i = 0; metadata[i][0] != NULL; ++i) { 889 metadata_bytes += 8 + strlen(metadata[i][0]) + 890 strlen(metadata[i][1]); 891 metadata_key_count++; 892 } 893 894 /* Allocate the message */ 895 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes, 896 GFP_NOFS, false); 897 if (!msg) { 898 pr_err("create_session_msg ENOMEM creating msg\n"); 899 return NULL; 900 } 901 h = msg->front.iov_base; 902 h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN); 903 h->seq = cpu_to_le64(seq); 904 905 /* 906 * Serialize client metadata into waiting buffer space, using 907 * the format that userspace expects for map<string, string> 908 * 909 * ClientSession messages with metadata are v2 910 */ 911 msg->hdr.version = cpu_to_le16(2); 912 msg->hdr.compat_version = cpu_to_le16(1); 913 914 /* The write pointer, following the session_head structure */ 915 p = msg->front.iov_base + sizeof(*h); 916 917 /* Number of entries in the map */ 918 ceph_encode_32(&p, metadata_key_count); 919 920 /* Two length-prefixed strings for each entry in the map */ 921 for (i = 0; metadata[i][0] != NULL; ++i) { 922 size_t const key_len = strlen(metadata[i][0]); 923 size_t const val_len = strlen(metadata[i][1]); 924 925 ceph_encode_32(&p, key_len); 926 memcpy(p, metadata[i][0], key_len); 927 p += key_len; 928 ceph_encode_32(&p, val_len); 929 memcpy(p, metadata[i][1], val_len); 930 p += val_len; 931 } 932 933 return msg; 934 } 935 936 /* 937 * send session open request. 938 * 939 * called under mdsc->mutex 940 */ 941 static int __open_session(struct ceph_mds_client *mdsc, 942 struct ceph_mds_session *session) 943 { 944 struct ceph_msg *msg; 945 int mstate; 946 int mds = session->s_mds; 947 948 /* wait for mds to go active? 
*/ 949 mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); 950 dout("open_session to mds%d (%s)\n", mds, 951 ceph_mds_state_name(mstate)); 952 session->s_state = CEPH_MDS_SESSION_OPENING; 953 session->s_renew_requested = jiffies; 954 955 /* send connect message */ 956 msg = create_session_open_msg(mdsc, session->s_seq); 957 if (!msg) 958 return -ENOMEM; 959 ceph_con_send(&session->s_con, msg); 960 return 0; 961 } 962 963 /* 964 * open sessions for any export targets for the given mds 965 * 966 * called under mdsc->mutex 967 */ 968 static struct ceph_mds_session * 969 __open_export_target_session(struct ceph_mds_client *mdsc, int target) 970 { 971 struct ceph_mds_session *session; 972 973 session = __ceph_lookup_mds_session(mdsc, target); 974 if (!session) { 975 session = register_session(mdsc, target); 976 if (IS_ERR(session)) 977 return session; 978 } 979 if (session->s_state == CEPH_MDS_SESSION_NEW || 980 session->s_state == CEPH_MDS_SESSION_CLOSING) 981 __open_session(mdsc, session); 982 983 return session; 984 } 985 986 struct ceph_mds_session * 987 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target) 988 { 989 struct ceph_mds_session *session; 990 991 dout("open_export_target_session to mds%d\n", target); 992 993 mutex_lock(&mdsc->mutex); 994 session = __open_export_target_session(mdsc, target); 995 mutex_unlock(&mdsc->mutex); 996 997 return session; 998 } 999 1000 static void __open_export_target_sessions(struct ceph_mds_client *mdsc, 1001 struct ceph_mds_session *session) 1002 { 1003 struct ceph_mds_info *mi; 1004 struct ceph_mds_session *ts; 1005 int i, mds = session->s_mds; 1006 1007 if (mds >= mdsc->mdsmap->m_max_mds) 1008 return; 1009 1010 mi = &mdsc->mdsmap->m_info[mds]; 1011 dout("open_export_target_sessions for mds%d (%d targets)\n", 1012 session->s_mds, mi->num_export_targets); 1013 1014 for (i = 0; i < mi->num_export_targets; i++) { 1015 ts = __open_export_target_session(mdsc, mi->export_targets[i]); 1016 if (!IS_ERR(ts)) 1017 ceph_put_mds_session(ts); 1018 } 1019 } 1020 1021 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc, 1022 struct ceph_mds_session *session) 1023 { 1024 mutex_lock(&mdsc->mutex); 1025 __open_export_target_sessions(mdsc, session); 1026 mutex_unlock(&mdsc->mutex); 1027 } 1028 1029 /* 1030 * session caps 1031 */ 1032 1033 /* caller holds s_cap_lock, we drop it */ 1034 static void cleanup_cap_releases(struct ceph_mds_client *mdsc, 1035 struct ceph_mds_session *session) 1036 __releases(session->s_cap_lock) 1037 { 1038 LIST_HEAD(tmp_list); 1039 list_splice_init(&session->s_cap_releases, &tmp_list); 1040 session->s_num_cap_releases = 0; 1041 spin_unlock(&session->s_cap_lock); 1042 1043 dout("cleanup_cap_releases mds%d\n", session->s_mds); 1044 while (!list_empty(&tmp_list)) { 1045 struct ceph_cap *cap; 1046 /* zero out the in-progress message */ 1047 cap = list_first_entry(&tmp_list, 1048 struct ceph_cap, session_caps); 1049 list_del(&cap->session_caps); 1050 ceph_put_cap(mdsc, cap); 1051 } 1052 } 1053 1054 static void cleanup_session_requests(struct ceph_mds_client *mdsc, 1055 struct ceph_mds_session *session) 1056 { 1057 struct ceph_mds_request *req; 1058 struct rb_node *p; 1059 1060 dout("cleanup_session_requests mds%d\n", session->s_mds); 1061 mutex_lock(&mdsc->mutex); 1062 while (!list_empty(&session->s_unsafe)) { 1063 req = list_first_entry(&session->s_unsafe, 1064 struct ceph_mds_request, r_unsafe_item); 1065 pr_warn_ratelimited(" dropping unsafe request %llu\n", 1066 req->r_tid); 1067 
__unregister_request(mdsc, req); 1068 } 1069 /* zero r_attempts, so kick_requests() will re-send requests */ 1070 p = rb_first(&mdsc->request_tree); 1071 while (p) { 1072 req = rb_entry(p, struct ceph_mds_request, r_node); 1073 p = rb_next(p); 1074 if (req->r_session && 1075 req->r_session->s_mds == session->s_mds) 1076 req->r_attempts = 0; 1077 } 1078 mutex_unlock(&mdsc->mutex); 1079 } 1080 1081 /* 1082 * Helper to safely iterate over all caps associated with a session, with 1083 * special care taken to handle a racing __ceph_remove_cap(). 1084 * 1085 * Caller must hold session s_mutex. 1086 */ 1087 static int iterate_session_caps(struct ceph_mds_session *session, 1088 int (*cb)(struct inode *, struct ceph_cap *, 1089 void *), void *arg) 1090 { 1091 struct list_head *p; 1092 struct ceph_cap *cap; 1093 struct inode *inode, *last_inode = NULL; 1094 struct ceph_cap *old_cap = NULL; 1095 int ret; 1096 1097 dout("iterate_session_caps %p mds%d\n", session, session->s_mds); 1098 spin_lock(&session->s_cap_lock); 1099 p = session->s_caps.next; 1100 while (p != &session->s_caps) { 1101 cap = list_entry(p, struct ceph_cap, session_caps); 1102 inode = igrab(&cap->ci->vfs_inode); 1103 if (!inode) { 1104 p = p->next; 1105 continue; 1106 } 1107 session->s_cap_iterator = cap; 1108 spin_unlock(&session->s_cap_lock); 1109 1110 if (last_inode) { 1111 iput(last_inode); 1112 last_inode = NULL; 1113 } 1114 if (old_cap) { 1115 ceph_put_cap(session->s_mdsc, old_cap); 1116 old_cap = NULL; 1117 } 1118 1119 ret = cb(inode, cap, arg); 1120 last_inode = inode; 1121 1122 spin_lock(&session->s_cap_lock); 1123 p = p->next; 1124 if (cap->ci == NULL) { 1125 dout("iterate_session_caps finishing cap %p removal\n", 1126 cap); 1127 BUG_ON(cap->session != session); 1128 cap->session = NULL; 1129 list_del_init(&cap->session_caps); 1130 session->s_nr_caps--; 1131 if (cap->queue_release) { 1132 list_add_tail(&cap->session_caps, 1133 &session->s_cap_releases); 1134 session->s_num_cap_releases++; 1135 } else { 1136 old_cap = cap; /* put_cap it w/o locks held */ 1137 } 1138 } 1139 if (ret < 0) 1140 goto out; 1141 } 1142 ret = 0; 1143 out: 1144 session->s_cap_iterator = NULL; 1145 spin_unlock(&session->s_cap_lock); 1146 1147 iput(last_inode); 1148 if (old_cap) 1149 ceph_put_cap(session->s_mdsc, old_cap); 1150 1151 return ret; 1152 } 1153 1154 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, 1155 void *arg) 1156 { 1157 struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg; 1158 struct ceph_inode_info *ci = ceph_inode(inode); 1159 LIST_HEAD(to_remove); 1160 bool drop = false; 1161 bool invalidate = false; 1162 1163 dout("removing cap %p, ci is %p, inode is %p\n", 1164 cap, ci, &ci->vfs_inode); 1165 spin_lock(&ci->i_ceph_lock); 1166 __ceph_remove_cap(cap, false); 1167 if (!ci->i_auth_cap) { 1168 struct ceph_cap_flush *cf; 1169 struct ceph_mds_client *mdsc = fsc->mdsc; 1170 1171 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED; 1172 1173 if (ci->i_wrbuffer_ref > 0 && 1174 READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) 1175 invalidate = true; 1176 1177 while (!list_empty(&ci->i_cap_flush_list)) { 1178 cf = list_first_entry(&ci->i_cap_flush_list, 1179 struct ceph_cap_flush, i_list); 1180 list_move(&cf->i_list, &to_remove); 1181 } 1182 1183 spin_lock(&mdsc->cap_dirty_lock); 1184 1185 list_for_each_entry(cf, &to_remove, i_list) 1186 list_del(&cf->g_list); 1187 1188 if (!list_empty(&ci->i_dirty_item)) { 1189 pr_warn_ratelimited( 1190 " dropping dirty %s state for %p %lld\n", 1191 ceph_cap_string(ci->i_dirty_caps), 
1192 inode, ceph_ino(inode)); 1193 ci->i_dirty_caps = 0; 1194 list_del_init(&ci->i_dirty_item); 1195 drop = true; 1196 } 1197 if (!list_empty(&ci->i_flushing_item)) { 1198 pr_warn_ratelimited( 1199 " dropping dirty+flushing %s state for %p %lld\n", 1200 ceph_cap_string(ci->i_flushing_caps), 1201 inode, ceph_ino(inode)); 1202 ci->i_flushing_caps = 0; 1203 list_del_init(&ci->i_flushing_item); 1204 mdsc->num_cap_flushing--; 1205 drop = true; 1206 } 1207 spin_unlock(&mdsc->cap_dirty_lock); 1208 1209 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) { 1210 list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove); 1211 ci->i_prealloc_cap_flush = NULL; 1212 } 1213 } 1214 spin_unlock(&ci->i_ceph_lock); 1215 while (!list_empty(&to_remove)) { 1216 struct ceph_cap_flush *cf; 1217 cf = list_first_entry(&to_remove, 1218 struct ceph_cap_flush, i_list); 1219 list_del(&cf->i_list); 1220 ceph_free_cap_flush(cf); 1221 } 1222 1223 wake_up_all(&ci->i_cap_wq); 1224 if (invalidate) 1225 ceph_queue_invalidate(inode); 1226 if (drop) 1227 iput(inode); 1228 return 0; 1229 } 1230 1231 /* 1232 * caller must hold session s_mutex 1233 */ 1234 static void remove_session_caps(struct ceph_mds_session *session) 1235 { 1236 struct ceph_fs_client *fsc = session->s_mdsc->fsc; 1237 struct super_block *sb = fsc->sb; 1238 dout("remove_session_caps on %p\n", session); 1239 iterate_session_caps(session, remove_session_caps_cb, fsc); 1240 1241 wake_up_all(&fsc->mdsc->cap_flushing_wq); 1242 1243 spin_lock(&session->s_cap_lock); 1244 if (session->s_nr_caps > 0) { 1245 struct inode *inode; 1246 struct ceph_cap *cap, *prev = NULL; 1247 struct ceph_vino vino; 1248 /* 1249 * iterate_session_caps() skips inodes that are being 1250 * deleted, we need to wait until deletions are complete. 1251 * __wait_on_freeing_inode() is designed for the job, 1252 * but it is not exported, so use lookup inode function 1253 * to access it. 1254 */ 1255 while (!list_empty(&session->s_caps)) { 1256 cap = list_entry(session->s_caps.next, 1257 struct ceph_cap, session_caps); 1258 if (cap == prev) 1259 break; 1260 prev = cap; 1261 vino = cap->ci->i_vino; 1262 spin_unlock(&session->s_cap_lock); 1263 1264 inode = ceph_find_inode(sb, vino); 1265 iput(inode); 1266 1267 spin_lock(&session->s_cap_lock); 1268 } 1269 } 1270 1271 // drop cap expires and unlock s_cap_lock 1272 cleanup_cap_releases(session->s_mdsc, session); 1273 1274 BUG_ON(session->s_nr_caps > 0); 1275 BUG_ON(!list_empty(&session->s_cap_flushing)); 1276 } 1277 1278 /* 1279 * wake up any threads waiting on this session's caps. if the cap is 1280 * old (didn't get renewed on the client reconnect), remove it now. 1281 * 1282 * caller must hold s_mutex. 1283 */ 1284 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, 1285 void *arg) 1286 { 1287 struct ceph_inode_info *ci = ceph_inode(inode); 1288 1289 if (arg) { 1290 spin_lock(&ci->i_ceph_lock); 1291 ci->i_wanted_max_size = 0; 1292 ci->i_requested_max_size = 0; 1293 spin_unlock(&ci->i_ceph_lock); 1294 } 1295 wake_up_all(&ci->i_cap_wq); 1296 return 0; 1297 } 1298 1299 static void wake_up_session_caps(struct ceph_mds_session *session, 1300 int reconnect) 1301 { 1302 dout("wake_up_session_caps %p mds%d\n", session, session->s_mds); 1303 iterate_session_caps(session, wake_up_session_cb, 1304 (void *)(unsigned long)reconnect); 1305 } 1306 1307 /* 1308 * Send periodic message to MDS renewing all currently held caps. The 1309 * ack will reset the expiration for all caps from this session. 
1310 * 1311 * caller holds s_mutex 1312 */ 1313 static int send_renew_caps(struct ceph_mds_client *mdsc, 1314 struct ceph_mds_session *session) 1315 { 1316 struct ceph_msg *msg; 1317 int state; 1318 1319 if (time_after_eq(jiffies, session->s_cap_ttl) && 1320 time_after_eq(session->s_cap_ttl, session->s_renew_requested)) 1321 pr_info("mds%d caps stale\n", session->s_mds); 1322 session->s_renew_requested = jiffies; 1323 1324 /* do not try to renew caps until a recovering mds has reconnected 1325 * with its clients. */ 1326 state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds); 1327 if (state < CEPH_MDS_STATE_RECONNECT) { 1328 dout("send_renew_caps ignoring mds%d (%s)\n", 1329 session->s_mds, ceph_mds_state_name(state)); 1330 return 0; 1331 } 1332 1333 dout("send_renew_caps to mds%d (%s)\n", session->s_mds, 1334 ceph_mds_state_name(state)); 1335 msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, 1336 ++session->s_renew_seq); 1337 if (!msg) 1338 return -ENOMEM; 1339 ceph_con_send(&session->s_con, msg); 1340 return 0; 1341 } 1342 1343 static int send_flushmsg_ack(struct ceph_mds_client *mdsc, 1344 struct ceph_mds_session *session, u64 seq) 1345 { 1346 struct ceph_msg *msg; 1347 1348 dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n", 1349 session->s_mds, ceph_session_state_name(session->s_state), seq); 1350 msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq); 1351 if (!msg) 1352 return -ENOMEM; 1353 ceph_con_send(&session->s_con, msg); 1354 return 0; 1355 } 1356 1357 1358 /* 1359 * Note new cap ttl, and any transition from stale -> not stale (fresh?). 1360 * 1361 * Called under session->s_mutex 1362 */ 1363 static void renewed_caps(struct ceph_mds_client *mdsc, 1364 struct ceph_mds_session *session, int is_renew) 1365 { 1366 int was_stale; 1367 int wake = 0; 1368 1369 spin_lock(&session->s_cap_lock); 1370 was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl); 1371 1372 session->s_cap_ttl = session->s_renew_requested + 1373 mdsc->mdsmap->m_session_timeout*HZ; 1374 1375 if (was_stale) { 1376 if (time_before(jiffies, session->s_cap_ttl)) { 1377 pr_info("mds%d caps renewed\n", session->s_mds); 1378 wake = 1; 1379 } else { 1380 pr_info("mds%d caps still stale\n", session->s_mds); 1381 } 1382 } 1383 dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n", 1384 session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh", 1385 time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh"); 1386 spin_unlock(&session->s_cap_lock); 1387 1388 if (wake) 1389 wake_up_session_caps(session, 0); 1390 } 1391 1392 /* 1393 * send a session close request 1394 */ 1395 static int request_close_session(struct ceph_mds_client *mdsc, 1396 struct ceph_mds_session *session) 1397 { 1398 struct ceph_msg *msg; 1399 1400 dout("request_close_session mds%d state %s seq %lld\n", 1401 session->s_mds, ceph_session_state_name(session->s_state), 1402 session->s_seq); 1403 msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq); 1404 if (!msg) 1405 return -ENOMEM; 1406 ceph_con_send(&session->s_con, msg); 1407 return 1; 1408 } 1409 1410 /* 1411 * Called with s_mutex held. 1412 */ 1413 static int __close_session(struct ceph_mds_client *mdsc, 1414 struct ceph_mds_session *session) 1415 { 1416 if (session->s_state >= CEPH_MDS_SESSION_CLOSING) 1417 return 0; 1418 session->s_state = CEPH_MDS_SESSION_CLOSING; 1419 return request_close_session(mdsc, session); 1420 } 1421 1422 /* 1423 * Trim old(er) caps. 
1424 * 1425 * Because we can't cache an inode without one or more caps, we do 1426 * this indirectly: if a cap is unused, we prune its aliases, at which 1427 * point the inode will hopefully get dropped to. 1428 * 1429 * Yes, this is a bit sloppy. Our only real goal here is to respond to 1430 * memory pressure from the MDS, though, so it needn't be perfect. 1431 */ 1432 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) 1433 { 1434 struct ceph_mds_session *session = arg; 1435 struct ceph_inode_info *ci = ceph_inode(inode); 1436 int used, wanted, oissued, mine; 1437 1438 if (session->s_trim_caps <= 0) 1439 return -1; 1440 1441 spin_lock(&ci->i_ceph_lock); 1442 mine = cap->issued | cap->implemented; 1443 used = __ceph_caps_used(ci); 1444 wanted = __ceph_caps_file_wanted(ci); 1445 oissued = __ceph_caps_issued_other(ci, cap); 1446 1447 dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n", 1448 inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued), 1449 ceph_cap_string(used), ceph_cap_string(wanted)); 1450 if (cap == ci->i_auth_cap) { 1451 if (ci->i_dirty_caps || ci->i_flushing_caps || 1452 !list_empty(&ci->i_cap_snaps)) 1453 goto out; 1454 if ((used | wanted) & CEPH_CAP_ANY_WR) 1455 goto out; 1456 } 1457 /* The inode has cached pages, but it's no longer used. 1458 * we can safely drop it */ 1459 if (wanted == 0 && used == CEPH_CAP_FILE_CACHE && 1460 !(oissued & CEPH_CAP_FILE_CACHE)) { 1461 used = 0; 1462 oissued = 0; 1463 } 1464 if ((used | wanted) & ~oissued & mine) 1465 goto out; /* we need these caps */ 1466 1467 session->s_trim_caps--; 1468 if (oissued) { 1469 /* we aren't the only cap.. just remove us */ 1470 __ceph_remove_cap(cap, true); 1471 } else { 1472 /* try dropping referring dentries */ 1473 spin_unlock(&ci->i_ceph_lock); 1474 d_prune_aliases(inode); 1475 dout("trim_caps_cb %p cap %p pruned, count now %d\n", 1476 inode, cap, atomic_read(&inode->i_count)); 1477 return 0; 1478 } 1479 1480 out: 1481 spin_unlock(&ci->i_ceph_lock); 1482 return 0; 1483 } 1484 1485 /* 1486 * Trim session cap count down to some max number. 1487 */ 1488 static int trim_caps(struct ceph_mds_client *mdsc, 1489 struct ceph_mds_session *session, 1490 int max_caps) 1491 { 1492 int trim_caps = session->s_nr_caps - max_caps; 1493 1494 dout("trim_caps mds%d start: %d / %d, trim %d\n", 1495 session->s_mds, session->s_nr_caps, max_caps, trim_caps); 1496 if (trim_caps > 0) { 1497 session->s_trim_caps = trim_caps; 1498 iterate_session_caps(session, trim_caps_cb, session); 1499 dout("trim_caps mds%d done: %d / %d, trimmed %d\n", 1500 session->s_mds, session->s_nr_caps, max_caps, 1501 trim_caps - session->s_trim_caps); 1502 session->s_trim_caps = 0; 1503 } 1504 1505 ceph_send_cap_releases(mdsc, session); 1506 return 0; 1507 } 1508 1509 static int check_caps_flush(struct ceph_mds_client *mdsc, 1510 u64 want_flush_tid) 1511 { 1512 int ret = 1; 1513 1514 spin_lock(&mdsc->cap_dirty_lock); 1515 if (!list_empty(&mdsc->cap_flush_list)) { 1516 struct ceph_cap_flush *cf = 1517 list_first_entry(&mdsc->cap_flush_list, 1518 struct ceph_cap_flush, g_list); 1519 if (cf->tid <= want_flush_tid) { 1520 dout("check_caps_flush still flushing tid " 1521 "%llu <= %llu\n", cf->tid, want_flush_tid); 1522 ret = 0; 1523 } 1524 } 1525 spin_unlock(&mdsc->cap_dirty_lock); 1526 return ret; 1527 } 1528 1529 /* 1530 * flush all dirty inode data to disk. 
1531 * 1532 * returns true if we've flushed through want_flush_tid 1533 */ 1534 static void wait_caps_flush(struct ceph_mds_client *mdsc, 1535 u64 want_flush_tid) 1536 { 1537 dout("check_caps_flush want %llu\n", want_flush_tid); 1538 1539 wait_event(mdsc->cap_flushing_wq, 1540 check_caps_flush(mdsc, want_flush_tid)); 1541 1542 dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid); 1543 } 1544 1545 /* 1546 * called under s_mutex 1547 */ 1548 void ceph_send_cap_releases(struct ceph_mds_client *mdsc, 1549 struct ceph_mds_session *session) 1550 { 1551 struct ceph_msg *msg = NULL; 1552 struct ceph_mds_cap_release *head; 1553 struct ceph_mds_cap_item *item; 1554 struct ceph_cap *cap; 1555 LIST_HEAD(tmp_list); 1556 int num_cap_releases; 1557 1558 spin_lock(&session->s_cap_lock); 1559 again: 1560 list_splice_init(&session->s_cap_releases, &tmp_list); 1561 num_cap_releases = session->s_num_cap_releases; 1562 session->s_num_cap_releases = 0; 1563 spin_unlock(&session->s_cap_lock); 1564 1565 while (!list_empty(&tmp_list)) { 1566 if (!msg) { 1567 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, 1568 PAGE_SIZE, GFP_NOFS, false); 1569 if (!msg) 1570 goto out_err; 1571 head = msg->front.iov_base; 1572 head->num = cpu_to_le32(0); 1573 msg->front.iov_len = sizeof(*head); 1574 } 1575 cap = list_first_entry(&tmp_list, struct ceph_cap, 1576 session_caps); 1577 list_del(&cap->session_caps); 1578 num_cap_releases--; 1579 1580 head = msg->front.iov_base; 1581 le32_add_cpu(&head->num, 1); 1582 item = msg->front.iov_base + msg->front.iov_len; 1583 item->ino = cpu_to_le64(cap->cap_ino); 1584 item->cap_id = cpu_to_le64(cap->cap_id); 1585 item->migrate_seq = cpu_to_le32(cap->mseq); 1586 item->seq = cpu_to_le32(cap->issue_seq); 1587 msg->front.iov_len += sizeof(*item); 1588 1589 ceph_put_cap(mdsc, cap); 1590 1591 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { 1592 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 1593 dout("send_cap_releases mds%d %p\n", session->s_mds, msg); 1594 ceph_con_send(&session->s_con, msg); 1595 msg = NULL; 1596 } 1597 } 1598 1599 BUG_ON(num_cap_releases != 0); 1600 1601 spin_lock(&session->s_cap_lock); 1602 if (!list_empty(&session->s_cap_releases)) 1603 goto again; 1604 spin_unlock(&session->s_cap_lock); 1605 1606 if (msg) { 1607 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 1608 dout("send_cap_releases mds%d %p\n", session->s_mds, msg); 1609 ceph_con_send(&session->s_con, msg); 1610 } 1611 return; 1612 out_err: 1613 pr_err("send_cap_releases mds%d, failed to allocate message\n", 1614 session->s_mds); 1615 spin_lock(&session->s_cap_lock); 1616 list_splice(&tmp_list, &session->s_cap_releases); 1617 session->s_num_cap_releases += num_cap_releases; 1618 spin_unlock(&session->s_cap_lock); 1619 } 1620 1621 /* 1622 * requests 1623 */ 1624 1625 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req, 1626 struct inode *dir) 1627 { 1628 struct ceph_inode_info *ci = ceph_inode(dir); 1629 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1630 struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options; 1631 size_t size = sizeof(struct ceph_mds_reply_dir_entry); 1632 int order, num_entries; 1633 1634 spin_lock(&ci->i_ceph_lock); 1635 num_entries = ci->i_files + ci->i_subdirs; 1636 spin_unlock(&ci->i_ceph_lock); 1637 num_entries = max(num_entries, 1); 1638 num_entries = min(num_entries, opt->max_readdir); 1639 1640 order = get_order(size * num_entries); 1641 while (order >= 0) { 1642 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL | 1643 
__GFP_NOWARN, 1644 order); 1645 if (rinfo->dir_entries) 1646 break; 1647 order--; 1648 } 1649 if (!rinfo->dir_entries) 1650 return -ENOMEM; 1651 1652 num_entries = (PAGE_SIZE << order) / size; 1653 num_entries = min(num_entries, opt->max_readdir); 1654 1655 rinfo->dir_buf_size = PAGE_SIZE << order; 1656 req->r_num_caps = num_entries + 1; 1657 req->r_args.readdir.max_entries = cpu_to_le32(num_entries); 1658 req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes); 1659 return 0; 1660 } 1661 1662 /* 1663 * Create an mds request. 1664 */ 1665 struct ceph_mds_request * 1666 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) 1667 { 1668 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); 1669 1670 if (!req) 1671 return ERR_PTR(-ENOMEM); 1672 1673 mutex_init(&req->r_fill_mutex); 1674 req->r_mdsc = mdsc; 1675 req->r_started = jiffies; 1676 req->r_resend_mds = -1; 1677 INIT_LIST_HEAD(&req->r_unsafe_dir_item); 1678 INIT_LIST_HEAD(&req->r_unsafe_target_item); 1679 req->r_fmode = -1; 1680 kref_init(&req->r_kref); 1681 RB_CLEAR_NODE(&req->r_node); 1682 INIT_LIST_HEAD(&req->r_wait); 1683 init_completion(&req->r_completion); 1684 init_completion(&req->r_safe_completion); 1685 INIT_LIST_HEAD(&req->r_unsafe_item); 1686 1687 req->r_stamp = current_fs_time(mdsc->fsc->sb); 1688 1689 req->r_op = op; 1690 req->r_direct_mode = mode; 1691 return req; 1692 } 1693 1694 /* 1695 * return oldest (lowest) request, tid in request tree, 0 if none. 1696 * 1697 * called under mdsc->mutex. 1698 */ 1699 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) 1700 { 1701 if (RB_EMPTY_ROOT(&mdsc->request_tree)) 1702 return NULL; 1703 return rb_entry(rb_first(&mdsc->request_tree), 1704 struct ceph_mds_request, r_node); 1705 } 1706 1707 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc) 1708 { 1709 return mdsc->oldest_tid; 1710 } 1711 1712 /* 1713 * Build a dentry's path. Allocate on heap; caller must kfree. Based 1714 * on build_path_from_dentry in fs/cifs/dir.c. 1715 * 1716 * If @stop_on_nosnap, generate path relative to the first non-snapped 1717 * inode. 1718 * 1719 * Encode hidden .snap dirs as a double /, i.e. 
1720 * foo/.snap/bar -> foo//bar 1721 */ 1722 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, 1723 int stop_on_nosnap) 1724 { 1725 struct dentry *temp; 1726 char *path; 1727 int len, pos; 1728 unsigned seq; 1729 1730 if (dentry == NULL) 1731 return ERR_PTR(-EINVAL); 1732 1733 retry: 1734 len = 0; 1735 seq = read_seqbegin(&rename_lock); 1736 rcu_read_lock(); 1737 for (temp = dentry; !IS_ROOT(temp);) { 1738 struct inode *inode = d_inode(temp); 1739 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) 1740 len++; /* slash only */ 1741 else if (stop_on_nosnap && inode && 1742 ceph_snap(inode) == CEPH_NOSNAP) 1743 break; 1744 else 1745 len += 1 + temp->d_name.len; 1746 temp = temp->d_parent; 1747 } 1748 rcu_read_unlock(); 1749 if (len) 1750 len--; /* no leading '/' */ 1751 1752 path = kmalloc(len+1, GFP_NOFS); 1753 if (path == NULL) 1754 return ERR_PTR(-ENOMEM); 1755 pos = len; 1756 path[pos] = 0; /* trailing null */ 1757 rcu_read_lock(); 1758 for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) { 1759 struct inode *inode; 1760 1761 spin_lock(&temp->d_lock); 1762 inode = d_inode(temp); 1763 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { 1764 dout("build_path path+%d: %p SNAPDIR\n", 1765 pos, temp); 1766 } else if (stop_on_nosnap && inode && 1767 ceph_snap(inode) == CEPH_NOSNAP) { 1768 spin_unlock(&temp->d_lock); 1769 break; 1770 } else { 1771 pos -= temp->d_name.len; 1772 if (pos < 0) { 1773 spin_unlock(&temp->d_lock); 1774 break; 1775 } 1776 strncpy(path + pos, temp->d_name.name, 1777 temp->d_name.len); 1778 } 1779 spin_unlock(&temp->d_lock); 1780 if (pos) 1781 path[--pos] = '/'; 1782 temp = temp->d_parent; 1783 } 1784 rcu_read_unlock(); 1785 if (pos != 0 || read_seqretry(&rename_lock, seq)) { 1786 pr_err("build_path did not end path lookup where " 1787 "expected, namelen is %d, pos is %d\n", len, pos); 1788 /* presumably this is only possible if racing with a 1789 rename of one of the parent directories (we can not 1790 lock the dentries above us to prevent this, but 1791 retrying should be harmless) */ 1792 kfree(path); 1793 goto retry; 1794 } 1795 1796 *base = ceph_ino(d_inode(temp)); 1797 *plen = len; 1798 dout("build_path on %p %d built %llx '%.*s'\n", 1799 dentry, d_count(dentry), *base, len, path); 1800 return path; 1801 } 1802 1803 static int build_dentry_path(struct dentry *dentry, struct inode *dir, 1804 const char **ppath, int *ppathlen, u64 *pino, 1805 int *pfreepath) 1806 { 1807 char *path; 1808 1809 rcu_read_lock(); 1810 if (!dir) 1811 dir = d_inode_rcu(dentry->d_parent); 1812 if (dir && ceph_snap(dir) == CEPH_NOSNAP) { 1813 *pino = ceph_ino(dir); 1814 rcu_read_unlock(); 1815 *ppath = dentry->d_name.name; 1816 *ppathlen = dentry->d_name.len; 1817 return 0; 1818 } 1819 rcu_read_unlock(); 1820 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); 1821 if (IS_ERR(path)) 1822 return PTR_ERR(path); 1823 *ppath = path; 1824 *pfreepath = 1; 1825 return 0; 1826 } 1827 1828 static int build_inode_path(struct inode *inode, 1829 const char **ppath, int *ppathlen, u64 *pino, 1830 int *pfreepath) 1831 { 1832 struct dentry *dentry; 1833 char *path; 1834 1835 if (ceph_snap(inode) == CEPH_NOSNAP) { 1836 *pino = ceph_ino(inode); 1837 *ppathlen = 0; 1838 return 0; 1839 } 1840 dentry = d_find_alias(inode); 1841 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); 1842 dput(dentry); 1843 if (IS_ERR(path)) 1844 return PTR_ERR(path); 1845 *ppath = path; 1846 *pfreepath = 1; 1847 return 0; 1848 } 1849 1850 /* 1851 * request arguments may be specified via an inode *, a dentry *, or 
1852 * an explicit ino+path. 1853 */ 1854 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, 1855 struct inode *rdiri, const char *rpath, 1856 u64 rino, const char **ppath, int *pathlen, 1857 u64 *ino, int *freepath) 1858 { 1859 int r = 0; 1860 1861 if (rinode) { 1862 r = build_inode_path(rinode, ppath, pathlen, ino, freepath); 1863 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), 1864 ceph_snap(rinode)); 1865 } else if (rdentry) { 1866 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino, 1867 freepath); 1868 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, 1869 *ppath); 1870 } else if (rpath || rino) { 1871 *ino = rino; 1872 *ppath = rpath; 1873 *pathlen = rpath ? strlen(rpath) : 0; 1874 dout(" path %.*s\n", *pathlen, rpath); 1875 } 1876 1877 return r; 1878 } 1879 1880 /* 1881 * called under mdsc->mutex 1882 */ 1883 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, 1884 struct ceph_mds_request *req, 1885 int mds, bool drop_cap_releases) 1886 { 1887 struct ceph_msg *msg; 1888 struct ceph_mds_request_head *head; 1889 const char *path1 = NULL; 1890 const char *path2 = NULL; 1891 u64 ino1 = 0, ino2 = 0; 1892 int pathlen1 = 0, pathlen2 = 0; 1893 int freepath1 = 0, freepath2 = 0; 1894 int len; 1895 u16 releases; 1896 void *p, *end; 1897 int ret; 1898 1899 ret = set_request_path_attr(req->r_inode, req->r_dentry, 1900 req->r_parent, req->r_path1, req->r_ino1.ino, 1901 &path1, &pathlen1, &ino1, &freepath1); 1902 if (ret < 0) { 1903 msg = ERR_PTR(ret); 1904 goto out; 1905 } 1906 1907 ret = set_request_path_attr(NULL, req->r_old_dentry, 1908 req->r_old_dentry_dir, 1909 req->r_path2, req->r_ino2.ino, 1910 &path2, &pathlen2, &ino2, &freepath2); 1911 if (ret < 0) { 1912 msg = ERR_PTR(ret); 1913 goto out_free1; 1914 } 1915 1916 len = sizeof(*head) + 1917 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) + 1918 sizeof(struct ceph_timespec); 1919 1920 /* calculate (max) length for cap releases */ 1921 len += sizeof(struct ceph_mds_request_release) * 1922 (!!req->r_inode_drop + !!req->r_dentry_drop + 1923 !!req->r_old_inode_drop + !!req->r_old_dentry_drop); 1924 if (req->r_dentry_drop) 1925 len += req->r_dentry->d_name.len; 1926 if (req->r_old_dentry_drop) 1927 len += req->r_old_dentry->d_name.len; 1928 1929 msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false); 1930 if (!msg) { 1931 msg = ERR_PTR(-ENOMEM); 1932 goto out_free2; 1933 } 1934 1935 msg->hdr.version = cpu_to_le16(2); 1936 msg->hdr.tid = cpu_to_le64(req->r_tid); 1937 1938 head = msg->front.iov_base; 1939 p = msg->front.iov_base + sizeof(*head); 1940 end = msg->front.iov_base + msg->front.iov_len; 1941 1942 head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); 1943 head->op = cpu_to_le32(req->r_op); 1944 head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid)); 1945 head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid)); 1946 head->args = req->r_args; 1947 1948 ceph_encode_filepath(&p, end, ino1, path1); 1949 ceph_encode_filepath(&p, end, ino2, path2); 1950 1951 /* make note of release offset, in case we need to replay */ 1952 req->r_request_release_offset = p - msg->front.iov_base; 1953 1954 /* cap releases */ 1955 releases = 0; 1956 if (req->r_inode_drop) 1957 releases += ceph_encode_inode_release(&p, 1958 req->r_inode ? 
req->r_inode : d_inode(req->r_dentry), 1959 mds, req->r_inode_drop, req->r_inode_unless, 0); 1960 if (req->r_dentry_drop) 1961 releases += ceph_encode_dentry_release(&p, req->r_dentry, 1962 req->r_parent, mds, req->r_dentry_drop, 1963 req->r_dentry_unless); 1964 if (req->r_old_dentry_drop) 1965 releases += ceph_encode_dentry_release(&p, req->r_old_dentry, 1966 req->r_old_dentry_dir, mds, 1967 req->r_old_dentry_drop, 1968 req->r_old_dentry_unless); 1969 if (req->r_old_inode_drop) 1970 releases += ceph_encode_inode_release(&p, 1971 d_inode(req->r_old_dentry), 1972 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); 1973 1974 if (drop_cap_releases) { 1975 releases = 0; 1976 p = msg->front.iov_base + req->r_request_release_offset; 1977 } 1978 1979 head->num_releases = cpu_to_le16(releases); 1980 1981 /* time stamp */ 1982 { 1983 struct ceph_timespec ts; 1984 ceph_encode_timespec(&ts, &req->r_stamp); 1985 ceph_encode_copy(&p, &ts, sizeof(ts)); 1986 } 1987 1988 BUG_ON(p > end); 1989 msg->front.iov_len = p - msg->front.iov_base; 1990 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 1991 1992 if (req->r_pagelist) { 1993 struct ceph_pagelist *pagelist = req->r_pagelist; 1994 atomic_inc(&pagelist->refcnt); 1995 ceph_msg_data_add_pagelist(msg, pagelist); 1996 msg->hdr.data_len = cpu_to_le32(pagelist->length); 1997 } else { 1998 msg->hdr.data_len = 0; 1999 } 2000 2001 msg->hdr.data_off = cpu_to_le16(0); 2002 2003 out_free2: 2004 if (freepath2) 2005 kfree((char *)path2); 2006 out_free1: 2007 if (freepath1) 2008 kfree((char *)path1); 2009 out: 2010 return msg; 2011 } 2012 2013 /* 2014 * called under mdsc->mutex if error, under no mutex if 2015 * success. 2016 */ 2017 static void complete_request(struct ceph_mds_client *mdsc, 2018 struct ceph_mds_request *req) 2019 { 2020 if (req->r_callback) 2021 req->r_callback(mdsc, req); 2022 else 2023 complete_all(&req->r_completion); 2024 } 2025 2026 /* 2027 * called under mdsc->mutex 2028 */ 2029 static int __prepare_send_request(struct ceph_mds_client *mdsc, 2030 struct ceph_mds_request *req, 2031 int mds, bool drop_cap_releases) 2032 { 2033 struct ceph_mds_request_head *rhead; 2034 struct ceph_msg *msg; 2035 int flags = 0; 2036 2037 req->r_attempts++; 2038 if (req->r_inode) { 2039 struct ceph_cap *cap = 2040 ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); 2041 2042 if (cap) 2043 req->r_sent_on_mseq = cap->mseq; 2044 else 2045 req->r_sent_on_mseq = -1; 2046 } 2047 dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, 2048 req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); 2049 2050 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { 2051 void *p; 2052 /* 2053 * Replay. Do not regenerate message (and rebuild 2054 * paths, etc.); just use the original message. 2055 * Rebuilding paths will break for renames because 2056 * d_move mangles the src name. 
2057 */ 2058 msg = req->r_request; 2059 rhead = msg->front.iov_base; 2060 2061 flags = le32_to_cpu(rhead->flags); 2062 flags |= CEPH_MDS_FLAG_REPLAY; 2063 rhead->flags = cpu_to_le32(flags); 2064 2065 if (req->r_target_inode) 2066 rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); 2067 2068 rhead->num_retry = req->r_attempts - 1; 2069 2070 /* remove cap/dentry releases from message */ 2071 rhead->num_releases = 0; 2072 2073 /* time stamp */ 2074 p = msg->front.iov_base + req->r_request_release_offset; 2075 { 2076 struct ceph_timespec ts; 2077 ceph_encode_timespec(&ts, &req->r_stamp); 2078 ceph_encode_copy(&p, &ts, sizeof(ts)); 2079 } 2080 2081 msg->front.iov_len = p - msg->front.iov_base; 2082 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 2083 return 0; 2084 } 2085 2086 if (req->r_request) { 2087 ceph_msg_put(req->r_request); 2088 req->r_request = NULL; 2089 } 2090 msg = create_request_message(mdsc, req, mds, drop_cap_releases); 2091 if (IS_ERR(msg)) { 2092 req->r_err = PTR_ERR(msg); 2093 return PTR_ERR(msg); 2094 } 2095 req->r_request = msg; 2096 2097 rhead = msg->front.iov_base; 2098 rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); 2099 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) 2100 flags |= CEPH_MDS_FLAG_REPLAY; 2101 if (req->r_parent) 2102 flags |= CEPH_MDS_FLAG_WANT_DENTRY; 2103 rhead->flags = cpu_to_le32(flags); 2104 rhead->num_fwd = req->r_num_fwd; 2105 rhead->num_retry = req->r_attempts - 1; 2106 rhead->ino = 0; 2107 2108 dout(" r_parent = %p\n", req->r_parent); 2109 return 0; 2110 } 2111 2112 /* 2113 * send request, or put it on the appropriate wait list. 2114 */ 2115 static int __do_request(struct ceph_mds_client *mdsc, 2116 struct ceph_mds_request *req) 2117 { 2118 struct ceph_mds_session *session = NULL; 2119 int mds = -1; 2120 int err = 0; 2121 2122 if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { 2123 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) 2124 __unregister_request(mdsc, req); 2125 goto out; 2126 } 2127 2128 if (req->r_timeout && 2129 time_after_eq(jiffies, req->r_started + req->r_timeout)) { 2130 dout("do_request timed out\n"); 2131 err = -EIO; 2132 goto finish; 2133 } 2134 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { 2135 dout("do_request forced umount\n"); 2136 err = -EIO; 2137 goto finish; 2138 } 2139 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) { 2140 if (mdsc->mdsmap_err) { 2141 err = mdsc->mdsmap_err; 2142 dout("do_request mdsmap err %d\n", err); 2143 goto finish; 2144 } 2145 if (mdsc->mdsmap->m_epoch == 0) { 2146 dout("do_request no mdsmap, waiting for map\n"); 2147 list_add(&req->r_wait, &mdsc->waiting_for_map); 2148 goto finish; 2149 } 2150 if (!(mdsc->fsc->mount_options->flags & 2151 CEPH_MOUNT_OPT_MOUNTWAIT) && 2152 !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) { 2153 err = -ENOENT; 2154 pr_info("probably no mds server is up\n"); 2155 goto finish; 2156 } 2157 } 2158 2159 put_request_session(req); 2160 2161 mds = __choose_mds(mdsc, req); 2162 if (mds < 0 || 2163 ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { 2164 dout("do_request no mds or not active, waiting for map\n"); 2165 list_add(&req->r_wait, &mdsc->waiting_for_map); 2166 goto out; 2167 } 2168 2169 /* get, open session */ 2170 session = __ceph_lookup_mds_session(mdsc, mds); 2171 if (!session) { 2172 session = register_session(mdsc, mds); 2173 if (IS_ERR(session)) { 2174 err = PTR_ERR(session); 2175 goto finish; 2176 } 2177 } 2178 req->r_session = get_session(session); 
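	/*
	 * We now hold a reference on the session.  If it is not yet
	 * OPEN (or HUNG), the request is parked on s_waiting and the
	 * session is (re)opened below rather than sent right away.
	 */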
2179 2180 dout("do_request mds%d session %p state %s\n", mds, session, 2181 ceph_session_state_name(session->s_state)); 2182 if (session->s_state != CEPH_MDS_SESSION_OPEN && 2183 session->s_state != CEPH_MDS_SESSION_HUNG) { 2184 if (session->s_state == CEPH_MDS_SESSION_REJECTED) { 2185 err = -EACCES; 2186 goto out_session; 2187 } 2188 if (session->s_state == CEPH_MDS_SESSION_NEW || 2189 session->s_state == CEPH_MDS_SESSION_CLOSING) 2190 __open_session(mdsc, session); 2191 list_add(&req->r_wait, &session->s_waiting); 2192 goto out_session; 2193 } 2194 2195 /* send request */ 2196 req->r_resend_mds = -1; /* forget any previous mds hint */ 2197 2198 if (req->r_request_started == 0) /* note request start time */ 2199 req->r_request_started = jiffies; 2200 2201 err = __prepare_send_request(mdsc, req, mds, false); 2202 if (!err) { 2203 ceph_msg_get(req->r_request); 2204 ceph_con_send(&session->s_con, req->r_request); 2205 } 2206 2207 out_session: 2208 ceph_put_mds_session(session); 2209 finish: 2210 if (err) { 2211 dout("__do_request early error %d\n", err); 2212 req->r_err = err; 2213 complete_request(mdsc, req); 2214 __unregister_request(mdsc, req); 2215 } 2216 out: 2217 return err; 2218 } 2219 2220 /* 2221 * called under mdsc->mutex 2222 */ 2223 static void __wake_requests(struct ceph_mds_client *mdsc, 2224 struct list_head *head) 2225 { 2226 struct ceph_mds_request *req; 2227 LIST_HEAD(tmp_list); 2228 2229 list_splice_init(head, &tmp_list); 2230 2231 while (!list_empty(&tmp_list)) { 2232 req = list_entry(tmp_list.next, 2233 struct ceph_mds_request, r_wait); 2234 list_del_init(&req->r_wait); 2235 dout(" wake request %p tid %llu\n", req, req->r_tid); 2236 __do_request(mdsc, req); 2237 } 2238 } 2239 2240 /* 2241 * Wake up threads with requests pending for @mds, so that they can 2242 * resubmit their requests to a possibly different mds. 2243 */ 2244 static void kick_requests(struct ceph_mds_client *mdsc, int mds) 2245 { 2246 struct ceph_mds_request *req; 2247 struct rb_node *p = rb_first(&mdsc->request_tree); 2248 2249 dout("kick_requests mds%d\n", mds); 2250 while (p) { 2251 req = rb_entry(p, struct ceph_mds_request, r_node); 2252 p = rb_next(p); 2253 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) 2254 continue; 2255 if (req->r_attempts > 0) 2256 continue; /* only new requests */ 2257 if (req->r_session && 2258 req->r_session->s_mds == mds) { 2259 dout(" kicking tid %llu\n", req->r_tid); 2260 list_del_init(&req->r_wait); 2261 __do_request(mdsc, req); 2262 } 2263 } 2264 } 2265 2266 void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, 2267 struct ceph_mds_request *req) 2268 { 2269 dout("submit_request on %p\n", req); 2270 mutex_lock(&mdsc->mutex); 2271 __register_request(mdsc, req, NULL); 2272 __do_request(mdsc, req); 2273 mutex_unlock(&mdsc->mutex); 2274 } 2275 2276 /* 2277 * Synchronously perform an mds request. Take care of all of the 2278 * session setup, forwarding, and retry details.
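 * Roughly: take CAP_PIN references, register and issue the request,
 * then block (killable, with an optional timeout) until the reply
 * arrives.  If the wait is interrupted, the request is marked aborted
 * under r_fill_mutex so it cannot race with ceph_fill_trace().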
2279 */ 2280 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, 2281 struct inode *dir, 2282 struct ceph_mds_request *req) 2283 { 2284 int err; 2285 2286 dout("do_request on %p\n", req); 2287 2288 /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */ 2289 if (req->r_inode) 2290 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); 2291 if (req->r_parent) 2292 ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN); 2293 if (req->r_old_dentry_dir) 2294 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), 2295 CEPH_CAP_PIN); 2296 2297 /* issue */ 2298 mutex_lock(&mdsc->mutex); 2299 __register_request(mdsc, req, dir); 2300 __do_request(mdsc, req); 2301 2302 if (req->r_err) { 2303 err = req->r_err; 2304 goto out; 2305 } 2306 2307 /* wait */ 2308 mutex_unlock(&mdsc->mutex); 2309 dout("do_request waiting\n"); 2310 if (!req->r_timeout && req->r_wait_for_completion) { 2311 err = req->r_wait_for_completion(mdsc, req); 2312 } else { 2313 long timeleft = wait_for_completion_killable_timeout( 2314 &req->r_completion, 2315 ceph_timeout_jiffies(req->r_timeout)); 2316 if (timeleft > 0) 2317 err = 0; 2318 else if (!timeleft) 2319 err = -EIO; /* timed out */ 2320 else 2321 err = timeleft; /* killed */ 2322 } 2323 dout("do_request waited, got %d\n", err); 2324 mutex_lock(&mdsc->mutex); 2325 2326 /* only abort if we didn't race with a real reply */ 2327 if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) { 2328 err = le32_to_cpu(req->r_reply_info.head->result); 2329 } else if (err < 0) { 2330 dout("aborted request %lld with %d\n", req->r_tid, err); 2331 2332 /* 2333 * ensure we aren't running concurrently with 2334 * ceph_fill_trace or ceph_readdir_prepopulate, which 2335 * rely on locks (dir mutex) held by our caller. 2336 */ 2337 mutex_lock(&req->r_fill_mutex); 2338 req->r_err = err; 2339 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags); 2340 mutex_unlock(&req->r_fill_mutex); 2341 2342 if (req->r_parent && 2343 (req->r_op & CEPH_MDS_OP_WRITE)) 2344 ceph_invalidate_dir_request(req); 2345 } else { 2346 err = req->r_err; 2347 } 2348 2349 out: 2350 mutex_unlock(&mdsc->mutex); 2351 dout("do_request %p done, result %d\n", req, err); 2352 return err; 2353 } 2354 2355 /* 2356 * Invalidate dir's completeness, dentry lease state on an aborted MDS 2357 * namespace request. 2358 */ 2359 void ceph_invalidate_dir_request(struct ceph_mds_request *req) 2360 { 2361 struct inode *inode = req->r_parent; 2362 2363 dout("invalidate_dir_request %p (complete, lease(s))\n", inode); 2364 2365 ceph_dir_clear_complete(inode); 2366 if (req->r_dentry) 2367 ceph_invalidate_dentry_lease(req->r_dentry); 2368 if (req->r_old_dentry) 2369 ceph_invalidate_dentry_lease(req->r_old_dentry); 2370 } 2371 2372 /* 2373 * Handle mds reply. 2374 * 2375 * We take the session mutex and parse and process the reply immediately. 2376 * This preserves the logical ordering of replies, capabilities, etc., sent 2377 * by the MDS as they are applied to our local cache. 
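 * mdsc->mutex is held while the request is looked up and its
 * safe/unsafe bookkeeping is updated; ceph_fill_trace() and
 * ceph_readdir_prepopulate() then run under session->s_mutex,
 * snap_rwsem and req->r_fill_mutex.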
2378 */ 2379 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) 2380 { 2381 struct ceph_mds_client *mdsc = session->s_mdsc; 2382 struct ceph_mds_request *req; 2383 struct ceph_mds_reply_head *head = msg->front.iov_base; 2384 struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ 2385 struct ceph_snap_realm *realm; 2386 u64 tid; 2387 int err, result; 2388 int mds = session->s_mds; 2389 2390 if (msg->front.iov_len < sizeof(*head)) { 2391 pr_err("mdsc_handle_reply got corrupt (short) reply\n"); 2392 ceph_msg_dump(msg); 2393 return; 2394 } 2395 2396 /* get request, session */ 2397 tid = le64_to_cpu(msg->hdr.tid); 2398 mutex_lock(&mdsc->mutex); 2399 req = lookup_get_request(mdsc, tid); 2400 if (!req) { 2401 dout("handle_reply on unknown tid %llu\n", tid); 2402 mutex_unlock(&mdsc->mutex); 2403 return; 2404 } 2405 dout("handle_reply %p\n", req); 2406 2407 /* correct session? */ 2408 if (req->r_session != session) { 2409 pr_err("mdsc_handle_reply got %llu on session mds%d" 2410 " not mds%d\n", tid, session->s_mds, 2411 req->r_session ? req->r_session->s_mds : -1); 2412 mutex_unlock(&mdsc->mutex); 2413 goto out; 2414 } 2415 2416 /* dup? */ 2417 if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) || 2418 (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) { 2419 pr_warn("got a dup %s reply on %llu from mds%d\n", 2420 head->safe ? "safe" : "unsafe", tid, mds); 2421 mutex_unlock(&mdsc->mutex); 2422 goto out; 2423 } 2424 if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) { 2425 pr_warn("got unsafe after safe on %llu from mds%d\n", 2426 tid, mds); 2427 mutex_unlock(&mdsc->mutex); 2428 goto out; 2429 } 2430 2431 result = le32_to_cpu(head->result); 2432 2433 /* 2434 * Handle an ESTALE 2435 * if we're not talking to the authority, send to them 2436 * if the authority has changed while we weren't looking, 2437 * send to new authority 2438 * Otherwise we just have to return an ESTALE 2439 */ 2440 if (result == -ESTALE) { 2441 dout("got ESTALE on request %llu", req->r_tid); 2442 req->r_resend_mds = -1; 2443 if (req->r_direct_mode != USE_AUTH_MDS) { 2444 dout("not using auth, setting for that now"); 2445 req->r_direct_mode = USE_AUTH_MDS; 2446 __do_request(mdsc, req); 2447 mutex_unlock(&mdsc->mutex); 2448 goto out; 2449 } else { 2450 int mds = __choose_mds(mdsc, req); 2451 if (mds >= 0 && mds != req->r_session->s_mds) { 2452 dout("but auth changed, so resending"); 2453 __do_request(mdsc, req); 2454 mutex_unlock(&mdsc->mutex); 2455 goto out; 2456 } 2457 } 2458 dout("have to return ESTALE on request %llu", req->r_tid); 2459 } 2460 2461 2462 if (head->safe) { 2463 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags); 2464 __unregister_request(mdsc, req); 2465 2466 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { 2467 /* 2468 * We already handled the unsafe response, now do the 2469 * cleanup. No need to examine the response; the MDS 2470 * doesn't include any result info in the safe 2471 * response. And even if it did, there is nothing 2472 * useful we could do with a revised return value. 2473 */ 2474 dout("got safe reply %llu, mds%d\n", tid, mds); 2475 2476 /* last unsafe request during umount? 
*/ 2477 if (mdsc->stopping && !__get_oldest_req(mdsc)) 2478 complete_all(&mdsc->safe_umount_waiters); 2479 mutex_unlock(&mdsc->mutex); 2480 goto out; 2481 } 2482 } else { 2483 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags); 2484 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); 2485 if (req->r_unsafe_dir) { 2486 struct ceph_inode_info *ci = 2487 ceph_inode(req->r_unsafe_dir); 2488 spin_lock(&ci->i_unsafe_lock); 2489 list_add_tail(&req->r_unsafe_dir_item, 2490 &ci->i_unsafe_dirops); 2491 spin_unlock(&ci->i_unsafe_lock); 2492 } 2493 } 2494 2495 dout("handle_reply tid %lld result %d\n", tid, result); 2496 rinfo = &req->r_reply_info; 2497 err = parse_reply_info(msg, rinfo, session->s_con.peer_features); 2498 mutex_unlock(&mdsc->mutex); 2499 2500 mutex_lock(&session->s_mutex); 2501 if (err < 0) { 2502 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid); 2503 ceph_msg_dump(msg); 2504 goto out_err; 2505 } 2506 2507 /* snap trace */ 2508 realm = NULL; 2509 if (rinfo->snapblob_len) { 2510 down_write(&mdsc->snap_rwsem); 2511 ceph_update_snap_trace(mdsc, rinfo->snapblob, 2512 rinfo->snapblob + rinfo->snapblob_len, 2513 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP, 2514 &realm); 2515 downgrade_write(&mdsc->snap_rwsem); 2516 } else { 2517 down_read(&mdsc->snap_rwsem); 2518 } 2519 2520 /* insert trace into our cache */ 2521 mutex_lock(&req->r_fill_mutex); 2522 current->journal_info = req; 2523 err = ceph_fill_trace(mdsc->fsc->sb, req); 2524 if (err == 0) { 2525 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || 2526 req->r_op == CEPH_MDS_OP_LSSNAP)) 2527 ceph_readdir_prepopulate(req, req->r_session); 2528 ceph_unreserve_caps(mdsc, &req->r_caps_reservation); 2529 } 2530 current->journal_info = NULL; 2531 mutex_unlock(&req->r_fill_mutex); 2532 2533 up_read(&mdsc->snap_rwsem); 2534 if (realm) 2535 ceph_put_snap_realm(mdsc, realm); 2536 2537 if (err == 0 && req->r_target_inode && 2538 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) { 2539 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode); 2540 spin_lock(&ci->i_unsafe_lock); 2541 list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops); 2542 spin_unlock(&ci->i_unsafe_lock); 2543 } 2544 out_err: 2545 mutex_lock(&mdsc->mutex); 2546 if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { 2547 if (err) { 2548 req->r_err = err; 2549 } else { 2550 req->r_reply = ceph_msg_get(msg); 2551 set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags); 2552 } 2553 } else { 2554 dout("reply arrived after request %lld was aborted\n", tid); 2555 } 2556 mutex_unlock(&mdsc->mutex); 2557 2558 mutex_unlock(&session->s_mutex); 2559 2560 /* kick calling process */ 2561 complete_request(mdsc, req); 2562 out: 2563 ceph_mdsc_put_request(req); 2564 return; 2565 } 2566 2567 2568 2569 /* 2570 * handle mds notification that our request has been forwarded. 2571 */ 2572 static void handle_forward(struct ceph_mds_client *mdsc, 2573 struct ceph_mds_session *session, 2574 struct ceph_msg *msg) 2575 { 2576 struct ceph_mds_request *req; 2577 u64 tid = le64_to_cpu(msg->hdr.tid); 2578 u32 next_mds; 2579 u32 fwd_seq; 2580 int err = -EINVAL; 2581 void *p = msg->front.iov_base; 2582 void *end = p + msg->front.iov_len; 2583 2584 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 2585 next_mds = ceph_decode_32(&p); 2586 fwd_seq = ceph_decode_32(&p); 2587 2588 mutex_lock(&mdsc->mutex); 2589 req = lookup_get_request(mdsc, tid); 2590 if (!req) { 2591 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds); 2592 goto out; /* dup reply? 
*/ 2593 } 2594 2595 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) { 2596 dout("forward tid %llu aborted, unregistering\n", tid); 2597 __unregister_request(mdsc, req); 2598 } else if (fwd_seq <= req->r_num_fwd) { 2599 dout("forward tid %llu to mds%d - old seq %d <= %d\n", 2600 tid, next_mds, req->r_num_fwd, fwd_seq); 2601 } else { 2602 /* resend. forward race not possible; mds would drop */ 2603 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); 2604 BUG_ON(req->r_err); 2605 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)); 2606 req->r_attempts = 0; 2607 req->r_num_fwd = fwd_seq; 2608 req->r_resend_mds = next_mds; 2609 put_request_session(req); 2610 __do_request(mdsc, req); 2611 } 2612 ceph_mdsc_put_request(req); 2613 out: 2614 mutex_unlock(&mdsc->mutex); 2615 return; 2616 2617 bad: 2618 pr_err("mdsc_handle_forward decode error err=%d\n", err); 2619 } 2620 2621 /* 2622 * handle a mds session control message 2623 */ 2624 static void handle_session(struct ceph_mds_session *session, 2625 struct ceph_msg *msg) 2626 { 2627 struct ceph_mds_client *mdsc = session->s_mdsc; 2628 u32 op; 2629 u64 seq; 2630 int mds = session->s_mds; 2631 struct ceph_mds_session_head *h = msg->front.iov_base; 2632 int wake = 0; 2633 2634 /* decode */ 2635 if (msg->front.iov_len != sizeof(*h)) 2636 goto bad; 2637 op = le32_to_cpu(h->op); 2638 seq = le64_to_cpu(h->seq); 2639 2640 mutex_lock(&mdsc->mutex); 2641 if (op == CEPH_SESSION_CLOSE) 2642 __unregister_session(mdsc, session); 2643 /* FIXME: this ttl calculation is generous */ 2644 session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; 2645 mutex_unlock(&mdsc->mutex); 2646 2647 mutex_lock(&session->s_mutex); 2648 2649 dout("handle_session mds%d %s %p state %s seq %llu\n", 2650 mds, ceph_session_op_name(op), session, 2651 ceph_session_state_name(session->s_state), seq); 2652 2653 if (session->s_state == CEPH_MDS_SESSION_HUNG) { 2654 session->s_state = CEPH_MDS_SESSION_OPEN; 2655 pr_info("mds%d came back\n", session->s_mds); 2656 } 2657 2658 switch (op) { 2659 case CEPH_SESSION_OPEN: 2660 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) 2661 pr_info("mds%d reconnect success\n", session->s_mds); 2662 session->s_state = CEPH_MDS_SESSION_OPEN; 2663 renewed_caps(mdsc, session, 0); 2664 wake = 1; 2665 if (mdsc->stopping) 2666 __close_session(mdsc, session); 2667 break; 2668 2669 case CEPH_SESSION_RENEWCAPS: 2670 if (session->s_renew_seq == seq) 2671 renewed_caps(mdsc, session, 1); 2672 break; 2673 2674 case CEPH_SESSION_CLOSE: 2675 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) 2676 pr_info("mds%d reconnect denied\n", session->s_mds); 2677 cleanup_session_requests(mdsc, session); 2678 remove_session_caps(session); 2679 wake = 2; /* for good measure */ 2680 wake_up_all(&mdsc->session_close_wq); 2681 break; 2682 2683 case CEPH_SESSION_STALE: 2684 pr_info("mds%d caps went stale, renewing\n", 2685 session->s_mds); 2686 spin_lock(&session->s_gen_ttl_lock); 2687 session->s_cap_gen++; 2688 session->s_cap_ttl = jiffies - 1; 2689 spin_unlock(&session->s_gen_ttl_lock); 2690 send_renew_caps(mdsc, session); 2691 break; 2692 2693 case CEPH_SESSION_RECALL_STATE: 2694 trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); 2695 break; 2696 2697 case CEPH_SESSION_FLUSHMSG: 2698 send_flushmsg_ack(mdsc, session, seq); 2699 break; 2700 2701 case CEPH_SESSION_FORCE_RO: 2702 dout("force_session_readonly %p\n", session); 2703 spin_lock(&session->s_cap_lock); 2704 session->s_readonly = true; 2705 spin_unlock(&session->s_cap_lock); 2706 
wake_up_session_caps(session, 0); 2707 break; 2708 2709 case CEPH_SESSION_REJECT: 2710 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING); 2711 pr_info("mds%d rejected session\n", session->s_mds); 2712 session->s_state = CEPH_MDS_SESSION_REJECTED; 2713 cleanup_session_requests(mdsc, session); 2714 remove_session_caps(session); 2715 wake = 2; /* for good measure */ 2716 break; 2717 2718 default: 2719 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); 2720 WARN_ON(1); 2721 } 2722 2723 mutex_unlock(&session->s_mutex); 2724 if (wake) { 2725 mutex_lock(&mdsc->mutex); 2726 __wake_requests(mdsc, &session->s_waiting); 2727 if (wake == 2) 2728 kick_requests(mdsc, mds); 2729 mutex_unlock(&mdsc->mutex); 2730 } 2731 return; 2732 2733 bad: 2734 pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds, 2735 (int)msg->front.iov_len); 2736 ceph_msg_dump(msg); 2737 return; 2738 } 2739 2740 2741 /* 2742 * called under session->mutex. 2743 */ 2744 static void replay_unsafe_requests(struct ceph_mds_client *mdsc, 2745 struct ceph_mds_session *session) 2746 { 2747 struct ceph_mds_request *req, *nreq; 2748 struct rb_node *p; 2749 int err; 2750 2751 dout("replay_unsafe_requests mds%d\n", session->s_mds); 2752 2753 mutex_lock(&mdsc->mutex); 2754 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { 2755 err = __prepare_send_request(mdsc, req, session->s_mds, true); 2756 if (!err) { 2757 ceph_msg_get(req->r_request); 2758 ceph_con_send(&session->s_con, req->r_request); 2759 } 2760 } 2761 2762 /* 2763 * Also re-send old requests when the MDS enters the reconnect stage, 2764 * so that the MDS can process completed requests during its clientreplay stage. 2765 */ 2766 p = rb_first(&mdsc->request_tree); 2767 while (p) { 2768 req = rb_entry(p, struct ceph_mds_request, r_node); 2769 p = rb_next(p); 2770 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) 2771 continue; 2772 if (req->r_attempts == 0) 2773 continue; /* only old requests */ 2774 if (req->r_session && 2775 req->r_session->s_mds == session->s_mds) { 2776 err = __prepare_send_request(mdsc, req, 2777 session->s_mds, true); 2778 if (!err) { 2779 ceph_msg_get(req->r_request); 2780 ceph_con_send(&session->s_con, req->r_request); 2781 } 2782 } 2783 } 2784 mutex_unlock(&mdsc->mutex); 2785 } 2786 2787 /* 2788 * Encode information about a cap for a reconnect with the MDS.
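 * Called from send_mds_reconnect() via iterate_session_caps(); each
 * call appends the inode number, the dentry path and a cap record
 * (v1 or v2 layout, chosen by recon_state->msg_version) to
 * recon_state->pagelist.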
2789 */ 2790 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, 2791 void *arg) 2792 { 2793 union { 2794 struct ceph_mds_cap_reconnect v2; 2795 struct ceph_mds_cap_reconnect_v1 v1; 2796 } rec; 2797 struct ceph_inode_info *ci; 2798 struct ceph_reconnect_state *recon_state = arg; 2799 struct ceph_pagelist *pagelist = recon_state->pagelist; 2800 char *path; 2801 int pathlen, err; 2802 u64 pathbase; 2803 u64 snap_follows; 2804 struct dentry *dentry; 2805 2806 ci = cap->ci; 2807 2808 dout(" adding %p ino %llx.%llx cap %p %lld %s\n", 2809 inode, ceph_vinop(inode), cap, cap->cap_id, 2810 ceph_cap_string(cap->issued)); 2811 err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode)); 2812 if (err) 2813 return err; 2814 2815 dentry = d_find_alias(inode); 2816 if (dentry) { 2817 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0); 2818 if (IS_ERR(path)) { 2819 err = PTR_ERR(path); 2820 goto out_dput; 2821 } 2822 } else { 2823 path = NULL; 2824 pathlen = 0; 2825 pathbase = 0; 2826 } 2827 2828 spin_lock(&ci->i_ceph_lock); 2829 cap->seq = 0; /* reset cap seq */ 2830 cap->issue_seq = 0; /* and issue_seq */ 2831 cap->mseq = 0; /* and migrate_seq */ 2832 cap->cap_gen = cap->session->s_cap_gen; 2833 2834 if (recon_state->msg_version >= 2) { 2835 rec.v2.cap_id = cpu_to_le64(cap->cap_id); 2836 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); 2837 rec.v2.issued = cpu_to_le32(cap->issued); 2838 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); 2839 rec.v2.pathbase = cpu_to_le64(pathbase); 2840 rec.v2.flock_len = 0; 2841 } else { 2842 rec.v1.cap_id = cpu_to_le64(cap->cap_id); 2843 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); 2844 rec.v1.issued = cpu_to_le32(cap->issued); 2845 rec.v1.size = cpu_to_le64(inode->i_size); 2846 ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime); 2847 ceph_encode_timespec(&rec.v1.atime, &inode->i_atime); 2848 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); 2849 rec.v1.pathbase = cpu_to_le64(pathbase); 2850 } 2851 2852 if (list_empty(&ci->i_cap_snaps)) { 2853 snap_follows = 0; 2854 } else { 2855 struct ceph_cap_snap *capsnap = 2856 list_first_entry(&ci->i_cap_snaps, 2857 struct ceph_cap_snap, ci_item); 2858 snap_follows = capsnap->follows; 2859 } 2860 spin_unlock(&ci->i_ceph_lock); 2861 2862 if (recon_state->msg_version >= 2) { 2863 int num_fcntl_locks, num_flock_locks; 2864 struct ceph_filelock *flocks; 2865 size_t struct_len, total_len = 0; 2866 u8 struct_v = 0; 2867 2868 encode_again: 2869 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); 2870 flocks = kmalloc((num_fcntl_locks+num_flock_locks) * 2871 sizeof(struct ceph_filelock), GFP_NOFS); 2872 if (!flocks) { 2873 err = -ENOMEM; 2874 goto out_free; 2875 } 2876 err = ceph_encode_locks_to_buffer(inode, flocks, 2877 num_fcntl_locks, 2878 num_flock_locks); 2879 if (err) { 2880 kfree(flocks); 2881 if (err == -ENOSPC) 2882 goto encode_again; 2883 goto out_free; 2884 } 2885 2886 if (recon_state->msg_version >= 3) { 2887 /* version, compat_version and struct_len */ 2888 total_len = 2 * sizeof(u8) + sizeof(u32); 2889 struct_v = 2; 2890 } 2891 /* 2892 * number of encoded locks is stable, so copy to pagelist 2893 */ 2894 struct_len = 2 * sizeof(u32) + 2895 (num_fcntl_locks + num_flock_locks) * 2896 sizeof(struct ceph_filelock); 2897 rec.v2.flock_len = cpu_to_le32(struct_len); 2898 2899 struct_len += sizeof(rec.v2); 2900 struct_len += sizeof(u32) + pathlen; 2901 2902 if (struct_v >= 2) 2903 struct_len += sizeof(u64); /* snap_follows */ 2904 2905 total_len += struct_len; 2906 err = 
ceph_pagelist_reserve(pagelist, total_len); 2907 2908 if (!err) { 2909 if (recon_state->msg_version >= 3) { 2910 ceph_pagelist_encode_8(pagelist, struct_v); 2911 ceph_pagelist_encode_8(pagelist, 1); 2912 ceph_pagelist_encode_32(pagelist, struct_len); 2913 } 2914 ceph_pagelist_encode_string(pagelist, path, pathlen); 2915 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2)); 2916 ceph_locks_to_pagelist(flocks, pagelist, 2917 num_fcntl_locks, 2918 num_flock_locks); 2919 if (struct_v >= 2) 2920 ceph_pagelist_encode_64(pagelist, snap_follows); 2921 } 2922 kfree(flocks); 2923 } else { 2924 size_t size = sizeof(u32) + pathlen + sizeof(rec.v1); 2925 err = ceph_pagelist_reserve(pagelist, size); 2926 if (!err) { 2927 ceph_pagelist_encode_string(pagelist, path, pathlen); 2928 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1)); 2929 } 2930 } 2931 2932 recon_state->nr_caps++; 2933 out_free: 2934 kfree(path); 2935 out_dput: 2936 dput(dentry); 2937 return err; 2938 } 2939 2940 2941 /* 2942 * If an MDS fails and recovers, clients need to reconnect in order to 2943 * reestablish shared state. This includes all caps issued through 2944 * this session _and_ the snap_realm hierarchy. Because it's not 2945 * clear which snap realms the mds cares about, we send everything we 2946 * know about.. that ensures we'll then get any new info the 2947 * recovering MDS might have. 2948 * 2949 * This is a relatively heavyweight operation, but it's rare. 2950 * 2951 * called with mdsc->mutex held. 2952 */ 2953 static void send_mds_reconnect(struct ceph_mds_client *mdsc, 2954 struct ceph_mds_session *session) 2955 { 2956 struct ceph_msg *reply; 2957 struct rb_node *p; 2958 int mds = session->s_mds; 2959 int err = -ENOMEM; 2960 int s_nr_caps; 2961 struct ceph_pagelist *pagelist; 2962 struct ceph_reconnect_state recon_state; 2963 2964 pr_info("mds%d reconnect start\n", mds); 2965 2966 pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); 2967 if (!pagelist) 2968 goto fail_nopagelist; 2969 ceph_pagelist_init(pagelist); 2970 2971 reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false); 2972 if (!reply) 2973 goto fail_nomsg; 2974 2975 mutex_lock(&session->s_mutex); 2976 session->s_state = CEPH_MDS_SESSION_RECONNECTING; 2977 session->s_seq = 0; 2978 2979 dout("session %p state %s\n", session, 2980 ceph_session_state_name(session->s_state)); 2981 2982 spin_lock(&session->s_gen_ttl_lock); 2983 session->s_cap_gen++; 2984 spin_unlock(&session->s_gen_ttl_lock); 2985 2986 spin_lock(&session->s_cap_lock); 2987 /* don't know if session is readonly */ 2988 session->s_readonly = 0; 2989 /* 2990 * notify __ceph_remove_cap() that we are composing cap reconnect. 2991 * If a cap get released before being added to the cap reconnect, 2992 * __ceph_remove_cap() should skip queuing cap release. 
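 * s_cap_reconnect is cleared again once iterate_session_caps() has
 * finished walking this session's caps below.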
2993 */ 2994 session->s_cap_reconnect = 1; 2995 /* drop old cap expires; we're about to reestablish that state */ 2996 cleanup_cap_releases(mdsc, session); 2997 2998 /* trim unused caps to reduce MDS's cache rejoin time */ 2999 if (mdsc->fsc->sb->s_root) 3000 shrink_dcache_parent(mdsc->fsc->sb->s_root); 3001 3002 ceph_con_close(&session->s_con); 3003 ceph_con_open(&session->s_con, 3004 CEPH_ENTITY_TYPE_MDS, mds, 3005 ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); 3006 3007 /* replay unsafe requests */ 3008 replay_unsafe_requests(mdsc, session); 3009 3010 down_read(&mdsc->snap_rwsem); 3011 3012 /* traverse this session's caps */ 3013 s_nr_caps = session->s_nr_caps; 3014 err = ceph_pagelist_encode_32(pagelist, s_nr_caps); 3015 if (err) 3016 goto fail; 3017 3018 recon_state.nr_caps = 0; 3019 recon_state.pagelist = pagelist; 3020 if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) 3021 recon_state.msg_version = 3; 3022 else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK) 3023 recon_state.msg_version = 2; 3024 else 3025 recon_state.msg_version = 1; 3026 err = iterate_session_caps(session, encode_caps_cb, &recon_state); 3027 if (err < 0) 3028 goto fail; 3029 3030 spin_lock(&session->s_cap_lock); 3031 session->s_cap_reconnect = 0; 3032 spin_unlock(&session->s_cap_lock); 3033 3034 /* 3035 * snaprealms. we provide mds with the ino, seq (version), and 3036 * parent for all of our realms. If the mds has any newer info, 3037 * it will tell us. 3038 */ 3039 for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { 3040 struct ceph_snap_realm *realm = 3041 rb_entry(p, struct ceph_snap_realm, node); 3042 struct ceph_mds_snaprealm_reconnect sr_rec; 3043 3044 dout(" adding snap realm %llx seq %lld parent %llx\n", 3045 realm->ino, realm->seq, realm->parent_ino); 3046 sr_rec.ino = cpu_to_le64(realm->ino); 3047 sr_rec.seq = cpu_to_le64(realm->seq); 3048 sr_rec.parent = cpu_to_le64(realm->parent_ino); 3049 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec)); 3050 if (err) 3051 goto fail; 3052 } 3053 3054 reply->hdr.version = cpu_to_le16(recon_state.msg_version); 3055 3056 /* raced with cap release? */ 3057 if (s_nr_caps != recon_state.nr_caps) { 3058 struct page *page = list_first_entry(&pagelist->head, 3059 struct page, lru); 3060 __le32 *addr = kmap_atomic(page); 3061 *addr = cpu_to_le32(recon_state.nr_caps); 3062 kunmap_atomic(addr); 3063 } 3064 3065 reply->hdr.data_len = cpu_to_le32(pagelist->length); 3066 ceph_msg_data_add_pagelist(reply, pagelist); 3067 3068 ceph_early_kick_flushing_caps(mdsc, session); 3069 3070 ceph_con_send(&session->s_con, reply); 3071 3072 mutex_unlock(&session->s_mutex); 3073 3074 mutex_lock(&mdsc->mutex); 3075 __wake_requests(mdsc, &session->s_waiting); 3076 mutex_unlock(&mdsc->mutex); 3077 3078 up_read(&mdsc->snap_rwsem); 3079 return; 3080 3081 fail: 3082 ceph_msg_put(reply); 3083 up_read(&mdsc->snap_rwsem); 3084 mutex_unlock(&session->s_mutex); 3085 fail_nomsg: 3086 ceph_pagelist_release(pagelist); 3087 fail_nopagelist: 3088 pr_err("error %d preparing reconnect for mds%d\n", err, mds); 3089 return; 3090 } 3091 3092 3093 /* 3094 * compare old and new mdsmaps, kicking requests 3095 * and closing out old connections as necessary 3096 * 3097 * called under mdsc->mutex. 
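 * For each mds we compare old and new state/address: sessions whose
 * mds address changed are closed (or marked RESTARTING), a reconnect
 * is sent once a restarting mds reaches the RECONNECT state, and
 * requests are kicked when an mds goes active.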
3098 */ 3099 static void check_new_map(struct ceph_mds_client *mdsc, 3100 struct ceph_mdsmap *newmap, 3101 struct ceph_mdsmap *oldmap) 3102 { 3103 int i; 3104 int oldstate, newstate; 3105 struct ceph_mds_session *s; 3106 3107 dout("check_new_map new %u old %u\n", 3108 newmap->m_epoch, oldmap->m_epoch); 3109 3110 for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) { 3111 if (mdsc->sessions[i] == NULL) 3112 continue; 3113 s = mdsc->sessions[i]; 3114 oldstate = ceph_mdsmap_get_state(oldmap, i); 3115 newstate = ceph_mdsmap_get_state(newmap, i); 3116 3117 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n", 3118 i, ceph_mds_state_name(oldstate), 3119 ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "", 3120 ceph_mds_state_name(newstate), 3121 ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "", 3122 ceph_session_state_name(s->s_state)); 3123 3124 if (i >= newmap->m_max_mds || 3125 memcmp(ceph_mdsmap_get_addr(oldmap, i), 3126 ceph_mdsmap_get_addr(newmap, i), 3127 sizeof(struct ceph_entity_addr))) { 3128 if (s->s_state == CEPH_MDS_SESSION_OPENING) { 3129 /* the session never opened, just close it 3130 * out now */ 3131 __wake_requests(mdsc, &s->s_waiting); 3132 __unregister_session(mdsc, s); 3133 } else { 3134 /* just close it */ 3135 mutex_unlock(&mdsc->mutex); 3136 mutex_lock(&s->s_mutex); 3137 mutex_lock(&mdsc->mutex); 3138 ceph_con_close(&s->s_con); 3139 mutex_unlock(&s->s_mutex); 3140 s->s_state = CEPH_MDS_SESSION_RESTARTING; 3141 } 3142 } else if (oldstate == newstate) { 3143 continue; /* nothing new with this mds */ 3144 } 3145 3146 /* 3147 * send reconnect? 3148 */ 3149 if (s->s_state == CEPH_MDS_SESSION_RESTARTING && 3150 newstate >= CEPH_MDS_STATE_RECONNECT) { 3151 mutex_unlock(&mdsc->mutex); 3152 send_mds_reconnect(mdsc, s); 3153 mutex_lock(&mdsc->mutex); 3154 } 3155 3156 /* 3157 * kick request on any mds that has gone active. 
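 * (this also restarts flushing caps and wakes cap waiters on that
 * session; see ceph_kick_flushing_caps() and wake_up_session_caps()
 * below)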
3158 */ 3159 if (oldstate < CEPH_MDS_STATE_ACTIVE && 3160 newstate >= CEPH_MDS_STATE_ACTIVE) { 3161 if (oldstate != CEPH_MDS_STATE_CREATING && 3162 oldstate != CEPH_MDS_STATE_STARTING) 3163 pr_info("mds%d recovery completed\n", s->s_mds); 3164 kick_requests(mdsc, i); 3165 ceph_kick_flushing_caps(mdsc, s); 3166 wake_up_session_caps(s, 1); 3167 } 3168 } 3169 3170 for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) { 3171 s = mdsc->sessions[i]; 3172 if (!s) 3173 continue; 3174 if (!ceph_mdsmap_is_laggy(newmap, i)) 3175 continue; 3176 if (s->s_state == CEPH_MDS_SESSION_OPEN || 3177 s->s_state == CEPH_MDS_SESSION_HUNG || 3178 s->s_state == CEPH_MDS_SESSION_CLOSING) { 3179 dout(" connecting to export targets of laggy mds%d\n", 3180 i); 3181 __open_export_target_sessions(mdsc, s); 3182 } 3183 } 3184 } 3185 3186 3187 3188 /* 3189 * leases 3190 */ 3191 3192 /* 3193 * caller must hold session s_mutex, dentry->d_lock 3194 */ 3195 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry) 3196 { 3197 struct ceph_dentry_info *di = ceph_dentry(dentry); 3198 3199 ceph_put_mds_session(di->lease_session); 3200 di->lease_session = NULL; 3201 } 3202 3203 static void handle_lease(struct ceph_mds_client *mdsc, 3204 struct ceph_mds_session *session, 3205 struct ceph_msg *msg) 3206 { 3207 struct super_block *sb = mdsc->fsc->sb; 3208 struct inode *inode; 3209 struct dentry *parent, *dentry; 3210 struct ceph_dentry_info *di; 3211 int mds = session->s_mds; 3212 struct ceph_mds_lease *h = msg->front.iov_base; 3213 u32 seq; 3214 struct ceph_vino vino; 3215 struct qstr dname; 3216 int release = 0; 3217 3218 dout("handle_lease from mds%d\n", mds); 3219 3220 /* decode */ 3221 if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) 3222 goto bad; 3223 vino.ino = le64_to_cpu(h->ino); 3224 vino.snap = CEPH_NOSNAP; 3225 seq = le32_to_cpu(h->seq); 3226 dname.name = (void *)h + sizeof(*h) + sizeof(u32); 3227 dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); 3228 if (dname.len != get_unaligned_le32(h+1)) 3229 goto bad; 3230 3231 /* lookup inode */ 3232 inode = ceph_find_inode(sb, vino); 3233 dout("handle_lease %s, ino %llx %p %.*s\n", 3234 ceph_lease_op_name(h->action), vino.ino, inode, 3235 dname.len, dname.name); 3236 3237 mutex_lock(&session->s_mutex); 3238 session->s_seq++; 3239 3240 if (inode == NULL) { 3241 dout("handle_lease no inode %llx\n", vino.ino); 3242 goto release; 3243 } 3244 3245 /* dentry */ 3246 parent = d_find_alias(inode); 3247 if (!parent) { 3248 dout("no parent dentry on inode %p\n", inode); 3249 WARN_ON(1); 3250 goto release; /* hrm... 
*/ 3251 } 3252 dname.hash = full_name_hash(parent, dname.name, dname.len); 3253 dentry = d_lookup(parent, &dname); 3254 dput(parent); 3255 if (!dentry) 3256 goto release; 3257 3258 spin_lock(&dentry->d_lock); 3259 di = ceph_dentry(dentry); 3260 switch (h->action) { 3261 case CEPH_MDS_LEASE_REVOKE: 3262 if (di->lease_session == session) { 3263 if (ceph_seq_cmp(di->lease_seq, seq) > 0) 3264 h->seq = cpu_to_le32(di->lease_seq); 3265 __ceph_mdsc_drop_dentry_lease(dentry); 3266 } 3267 release = 1; 3268 break; 3269 3270 case CEPH_MDS_LEASE_RENEW: 3271 if (di->lease_session == session && 3272 di->lease_gen == session->s_cap_gen && 3273 di->lease_renew_from && 3274 di->lease_renew_after == 0) { 3275 unsigned long duration = 3276 msecs_to_jiffies(le32_to_cpu(h->duration_ms)); 3277 3278 di->lease_seq = seq; 3279 di->time = di->lease_renew_from + duration; 3280 di->lease_renew_after = di->lease_renew_from + 3281 (duration >> 1); 3282 di->lease_renew_from = 0; 3283 } 3284 break; 3285 } 3286 spin_unlock(&dentry->d_lock); 3287 dput(dentry); 3288 3289 if (!release) 3290 goto out; 3291 3292 release: 3293 /* let's just reuse the same message */ 3294 h->action = CEPH_MDS_LEASE_REVOKE_ACK; 3295 ceph_msg_get(msg); 3296 ceph_con_send(&session->s_con, msg); 3297 3298 out: 3299 iput(inode); 3300 mutex_unlock(&session->s_mutex); 3301 return; 3302 3303 bad: 3304 pr_err("corrupt lease message\n"); 3305 ceph_msg_dump(msg); 3306 } 3307 3308 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, 3309 struct inode *inode, 3310 struct dentry *dentry, char action, 3311 u32 seq) 3312 { 3313 struct ceph_msg *msg; 3314 struct ceph_mds_lease *lease; 3315 int len = sizeof(*lease) + sizeof(u32); 3316 int dnamelen = 0; 3317 3318 dout("lease_send_msg inode %p dentry %p %s to mds%d\n", 3319 inode, dentry, ceph_lease_op_name(action), session->s_mds); 3320 dnamelen = dentry->d_name.len; 3321 len += dnamelen; 3322 3323 msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false); 3324 if (!msg) 3325 return; 3326 lease = msg->front.iov_base; 3327 lease->action = action; 3328 lease->ino = cpu_to_le64(ceph_vino(inode).ino); 3329 lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap); 3330 lease->seq = cpu_to_le32(seq); 3331 put_unaligned_le32(dnamelen, lease + 1); 3332 memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen); 3333 3334 /* 3335 * if this is a preemptive lease RELEASE, no need to 3336 * flush request stream, since the actual request will 3337 * soon follow. 
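 * (more_to_follow is a hint to the messenger that another message
 * will be queued on this connection shortly, so the socket need not
 * be flushed right away.)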
3338 */ 3339 msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE); 3340 3341 ceph_con_send(&session->s_con, msg); 3342 } 3343 3344 /* 3345 * drop all leases (and dentry refs) in preparation for umount 3346 */ 3347 static void drop_leases(struct ceph_mds_client *mdsc) 3348 { 3349 int i; 3350 3351 dout("drop_leases\n"); 3352 mutex_lock(&mdsc->mutex); 3353 for (i = 0; i < mdsc->max_sessions; i++) { 3354 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); 3355 if (!s) 3356 continue; 3357 mutex_unlock(&mdsc->mutex); 3358 mutex_lock(&s->s_mutex); 3359 mutex_unlock(&s->s_mutex); 3360 ceph_put_mds_session(s); 3361 mutex_lock(&mdsc->mutex); 3362 } 3363 mutex_unlock(&mdsc->mutex); 3364 } 3365 3366 3367 3368 /* 3369 * delayed work -- periodically trim expired leases, renew caps with mds 3370 */ 3371 static void schedule_delayed(struct ceph_mds_client *mdsc) 3372 { 3373 int delay = 5; 3374 unsigned hz = round_jiffies_relative(HZ * delay); 3375 schedule_delayed_work(&mdsc->delayed_work, hz); 3376 } 3377 3378 static void delayed_work(struct work_struct *work) 3379 { 3380 int i; 3381 struct ceph_mds_client *mdsc = 3382 container_of(work, struct ceph_mds_client, delayed_work.work); 3383 int renew_interval; 3384 int renew_caps; 3385 3386 dout("mdsc delayed_work\n"); 3387 ceph_check_delayed_caps(mdsc); 3388 3389 mutex_lock(&mdsc->mutex); 3390 renew_interval = mdsc->mdsmap->m_session_timeout >> 2; 3391 renew_caps = time_after_eq(jiffies, HZ*renew_interval + 3392 mdsc->last_renew_caps); 3393 if (renew_caps) 3394 mdsc->last_renew_caps = jiffies; 3395 3396 for (i = 0; i < mdsc->max_sessions; i++) { 3397 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); 3398 if (s == NULL) 3399 continue; 3400 if (s->s_state == CEPH_MDS_SESSION_CLOSING) { 3401 dout("resending session close request for mds%d\n", 3402 s->s_mds); 3403 request_close_session(mdsc, s); 3404 ceph_put_mds_session(s); 3405 continue; 3406 } 3407 if (s->s_ttl && time_after(jiffies, s->s_ttl)) { 3408 if (s->s_state == CEPH_MDS_SESSION_OPEN) { 3409 s->s_state = CEPH_MDS_SESSION_HUNG; 3410 pr_info("mds%d hung\n", s->s_mds); 3411 } 3412 } 3413 if (s->s_state < CEPH_MDS_SESSION_OPEN) { 3414 /* this mds is failed or recovering, just wait */ 3415 ceph_put_mds_session(s); 3416 continue; 3417 } 3418 mutex_unlock(&mdsc->mutex); 3419 3420 mutex_lock(&s->s_mutex); 3421 if (renew_caps) 3422 send_renew_caps(mdsc, s); 3423 else 3424 ceph_con_keepalive(&s->s_con); 3425 if (s->s_state == CEPH_MDS_SESSION_OPEN || 3426 s->s_state == CEPH_MDS_SESSION_HUNG) 3427 ceph_send_cap_releases(mdsc, s); 3428 mutex_unlock(&s->s_mutex); 3429 ceph_put_mds_session(s); 3430 3431 mutex_lock(&mdsc->mutex); 3432 } 3433 mutex_unlock(&mdsc->mutex); 3434 3435 schedule_delayed(mdsc); 3436 } 3437 3438 int ceph_mdsc_init(struct ceph_fs_client *fsc) 3439 3440 { 3441 struct ceph_mds_client *mdsc; 3442 3443 mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS); 3444 if (!mdsc) 3445 return -ENOMEM; 3446 mdsc->fsc = fsc; 3447 fsc->mdsc = mdsc; 3448 mutex_init(&mdsc->mutex); 3449 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); 3450 if (mdsc->mdsmap == NULL) { 3451 kfree(mdsc); 3452 return -ENOMEM; 3453 } 3454 3455 init_completion(&mdsc->safe_umount_waiters); 3456 init_waitqueue_head(&mdsc->session_close_wq); 3457 INIT_LIST_HEAD(&mdsc->waiting_for_map); 3458 mdsc->sessions = NULL; 3459 atomic_set(&mdsc->num_sessions, 0); 3460 mdsc->max_sessions = 0; 3461 mdsc->stopping = 0; 3462 mdsc->last_snap_seq = 0; 3463 init_rwsem(&mdsc->snap_rwsem); 3464 mdsc->snap_realms = 
RB_ROOT; 3465 INIT_LIST_HEAD(&mdsc->snap_empty); 3466 spin_lock_init(&mdsc->snap_empty_lock); 3467 mdsc->last_tid = 0; 3468 mdsc->oldest_tid = 0; 3469 mdsc->request_tree = RB_ROOT; 3470 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); 3471 mdsc->last_renew_caps = jiffies; 3472 INIT_LIST_HEAD(&mdsc->cap_delay_list); 3473 spin_lock_init(&mdsc->cap_delay_lock); 3474 INIT_LIST_HEAD(&mdsc->snap_flush_list); 3475 spin_lock_init(&mdsc->snap_flush_lock); 3476 mdsc->last_cap_flush_tid = 1; 3477 INIT_LIST_HEAD(&mdsc->cap_flush_list); 3478 INIT_LIST_HEAD(&mdsc->cap_dirty); 3479 INIT_LIST_HEAD(&mdsc->cap_dirty_migrating); 3480 mdsc->num_cap_flushing = 0; 3481 spin_lock_init(&mdsc->cap_dirty_lock); 3482 init_waitqueue_head(&mdsc->cap_flushing_wq); 3483 spin_lock_init(&mdsc->dentry_lru_lock); 3484 INIT_LIST_HEAD(&mdsc->dentry_lru); 3485 3486 ceph_caps_init(mdsc); 3487 ceph_adjust_min_caps(mdsc, fsc->min_caps); 3488 3489 init_rwsem(&mdsc->pool_perm_rwsem); 3490 mdsc->pool_perm_tree = RB_ROOT; 3491 3492 return 0; 3493 } 3494 3495 /* 3496 * Wait for safe replies on open mds requests. If we time out, drop 3497 * all requests from the tree to avoid dangling dentry refs. 3498 */ 3499 static void wait_requests(struct ceph_mds_client *mdsc) 3500 { 3501 struct ceph_options *opts = mdsc->fsc->client->options; 3502 struct ceph_mds_request *req; 3503 3504 mutex_lock(&mdsc->mutex); 3505 if (__get_oldest_req(mdsc)) { 3506 mutex_unlock(&mdsc->mutex); 3507 3508 dout("wait_requests waiting for requests\n"); 3509 wait_for_completion_timeout(&mdsc->safe_umount_waiters, 3510 ceph_timeout_jiffies(opts->mount_timeout)); 3511 3512 /* tear down remaining requests */ 3513 mutex_lock(&mdsc->mutex); 3514 while ((req = __get_oldest_req(mdsc))) { 3515 dout("wait_requests timed out on tid %llu\n", 3516 req->r_tid); 3517 __unregister_request(mdsc, req); 3518 } 3519 } 3520 mutex_unlock(&mdsc->mutex); 3521 dout("wait_requests done\n"); 3522 } 3523 3524 /* 3525 * called before mount is ro, and before dentries are torn down. 3526 * (hmm, does this still race with new lookups?) 3527 */ 3528 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) 3529 { 3530 dout("pre_umount\n"); 3531 mdsc->stopping = 1; 3532 3533 drop_leases(mdsc); 3534 ceph_flush_dirty_caps(mdsc); 3535 wait_requests(mdsc); 3536 3537 /* 3538 * wait for reply handlers to drop their request refs and 3539 * their inode/dcache refs 3540 */ 3541 ceph_msgr_flush(); 3542 } 3543 3544 /* 3545 * wait for all write mds requests to flush. 3546 */ 3547 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) 3548 { 3549 struct ceph_mds_request *req = NULL, *nextreq; 3550 struct rb_node *n; 3551 3552 mutex_lock(&mdsc->mutex); 3553 dout("wait_unsafe_requests want %lld\n", want_tid); 3554 restart: 3555 req = __get_oldest_req(mdsc); 3556 while (req && req->r_tid <= want_tid) { 3557 /* find next request */ 3558 n = rb_next(&req->r_node); 3559 if (n) 3560 nextreq = rb_entry(n, struct ceph_mds_request, r_node); 3561 else 3562 nextreq = NULL; 3563 if (req->r_op != CEPH_MDS_OP_SETFILELOCK && 3564 (req->r_op & CEPH_MDS_OP_WRITE)) { 3565 /* write op */ 3566 ceph_mdsc_get_request(req); 3567 if (nextreq) 3568 ceph_mdsc_get_request(nextreq); 3569 mutex_unlock(&mdsc->mutex); 3570 dout("wait_unsafe_requests wait on %llu (want %llu)\n", 3571 req->r_tid, want_tid); 3572 wait_for_completion(&req->r_safe_completion); 3573 mutex_lock(&mdsc->mutex); 3574 ceph_mdsc_put_request(req); 3575 if (!nextreq) 3576 break; /* next dne before, so we're done! 
*/ 3577 if (RB_EMPTY_NODE(&nextreq->r_node)) { 3578 /* next request was removed from tree */ 3579 ceph_mdsc_put_request(nextreq); 3580 goto restart; 3581 } 3582 ceph_mdsc_put_request(nextreq); /* won't go away */ 3583 } 3584 req = nextreq; 3585 } 3586 mutex_unlock(&mdsc->mutex); 3587 dout("wait_unsafe_requests done\n"); 3588 } 3589 3590 void ceph_mdsc_sync(struct ceph_mds_client *mdsc) 3591 { 3592 u64 want_tid, want_flush; 3593 3594 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) 3595 return; 3596 3597 dout("sync\n"); 3598 mutex_lock(&mdsc->mutex); 3599 want_tid = mdsc->last_tid; 3600 mutex_unlock(&mdsc->mutex); 3601 3602 ceph_flush_dirty_caps(mdsc); 3603 spin_lock(&mdsc->cap_dirty_lock); 3604 want_flush = mdsc->last_cap_flush_tid; 3605 if (!list_empty(&mdsc->cap_flush_list)) { 3606 struct ceph_cap_flush *cf = 3607 list_last_entry(&mdsc->cap_flush_list, 3608 struct ceph_cap_flush, g_list); 3609 cf->wake = true; 3610 } 3611 spin_unlock(&mdsc->cap_dirty_lock); 3612 3613 dout("sync want tid %lld flush_seq %lld\n", 3614 want_tid, want_flush); 3615 3616 wait_unsafe_requests(mdsc, want_tid); 3617 wait_caps_flush(mdsc, want_flush); 3618 } 3619 3620 /* 3621 * true if all sessions are closed, or we force unmount 3622 */ 3623 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped) 3624 { 3625 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) 3626 return true; 3627 return atomic_read(&mdsc->num_sessions) <= skipped; 3628 } 3629 3630 /* 3631 * called after sb is ro. 3632 */ 3633 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) 3634 { 3635 struct ceph_options *opts = mdsc->fsc->client->options; 3636 struct ceph_mds_session *session; 3637 int i; 3638 int skipped = 0; 3639 3640 dout("close_sessions\n"); 3641 3642 /* close sessions */ 3643 mutex_lock(&mdsc->mutex); 3644 for (i = 0; i < mdsc->max_sessions; i++) { 3645 session = __ceph_lookup_mds_session(mdsc, i); 3646 if (!session) 3647 continue; 3648 mutex_unlock(&mdsc->mutex); 3649 mutex_lock(&session->s_mutex); 3650 if (__close_session(mdsc, session) <= 0) 3651 skipped++; 3652 mutex_unlock(&session->s_mutex); 3653 ceph_put_mds_session(session); 3654 mutex_lock(&mdsc->mutex); 3655 } 3656 mutex_unlock(&mdsc->mutex); 3657 3658 dout("waiting for sessions to close\n"); 3659 wait_event_timeout(mdsc->session_close_wq, 3660 done_closing_sessions(mdsc, skipped), 3661 ceph_timeout_jiffies(opts->mount_timeout)); 3662 3663 /* tear down remaining sessions */ 3664 mutex_lock(&mdsc->mutex); 3665 for (i = 0; i < mdsc->max_sessions; i++) { 3666 if (mdsc->sessions[i]) { 3667 session = get_session(mdsc->sessions[i]); 3668 __unregister_session(mdsc, session); 3669 mutex_unlock(&mdsc->mutex); 3670 mutex_lock(&session->s_mutex); 3671 remove_session_caps(session); 3672 mutex_unlock(&session->s_mutex); 3673 ceph_put_mds_session(session); 3674 mutex_lock(&mdsc->mutex); 3675 } 3676 } 3677 WARN_ON(!list_empty(&mdsc->cap_delay_list)); 3678 mutex_unlock(&mdsc->mutex); 3679 3680 ceph_cleanup_empty_realms(mdsc); 3681 3682 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ 3683 3684 dout("stopped\n"); 3685 } 3686 3687 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) 3688 { 3689 struct ceph_mds_session *session; 3690 int mds; 3691 3692 dout("force umount\n"); 3693 3694 mutex_lock(&mdsc->mutex); 3695 for (mds = 0; mds < mdsc->max_sessions; mds++) { 3696 session = __ceph_lookup_mds_session(mdsc, mds); 3697 if (!session) 3698 continue; 3699 mutex_unlock(&mdsc->mutex); 3700 mutex_lock(&session->s_mutex); 3701 
__close_session(mdsc, session); 3702 if (session->s_state == CEPH_MDS_SESSION_CLOSING) { 3703 cleanup_session_requests(mdsc, session); 3704 remove_session_caps(session); 3705 } 3706 mutex_unlock(&session->s_mutex); 3707 ceph_put_mds_session(session); 3708 mutex_lock(&mdsc->mutex); 3709 kick_requests(mdsc, mds); 3710 } 3711 __wake_requests(mdsc, &mdsc->waiting_for_map); 3712 mutex_unlock(&mdsc->mutex); 3713 } 3714 3715 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) 3716 { 3717 dout("stop\n"); 3718 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ 3719 if (mdsc->mdsmap) 3720 ceph_mdsmap_destroy(mdsc->mdsmap); 3721 kfree(mdsc->sessions); 3722 ceph_caps_finalize(mdsc); 3723 ceph_pool_perm_destroy(mdsc); 3724 } 3725 3726 void ceph_mdsc_destroy(struct ceph_fs_client *fsc) 3727 { 3728 struct ceph_mds_client *mdsc = fsc->mdsc; 3729 3730 dout("mdsc_destroy %p\n", mdsc); 3731 ceph_mdsc_stop(mdsc); 3732 3733 /* flush out any connection work with references to us */ 3734 ceph_msgr_flush(); 3735 3736 fsc->mdsc = NULL; 3737 kfree(mdsc); 3738 dout("mdsc_destroy %p done\n", mdsc); 3739 } 3740 3741 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) 3742 { 3743 struct ceph_fs_client *fsc = mdsc->fsc; 3744 const char *mds_namespace = fsc->mount_options->mds_namespace; 3745 void *p = msg->front.iov_base; 3746 void *end = p + msg->front.iov_len; 3747 u32 epoch; 3748 u32 map_len; 3749 u32 num_fs; 3750 u32 mount_fscid = (u32)-1; 3751 u8 struct_v, struct_cv; 3752 int err = -EINVAL; 3753 3754 ceph_decode_need(&p, end, sizeof(u32), bad); 3755 epoch = ceph_decode_32(&p); 3756 3757 dout("handle_fsmap epoch %u\n", epoch); 3758 3759 ceph_decode_need(&p, end, 2 + sizeof(u32), bad); 3760 struct_v = ceph_decode_8(&p); 3761 struct_cv = ceph_decode_8(&p); 3762 map_len = ceph_decode_32(&p); 3763 3764 ceph_decode_need(&p, end, sizeof(u32) * 3, bad); 3765 p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */ 3766 3767 num_fs = ceph_decode_32(&p); 3768 while (num_fs-- > 0) { 3769 void *info_p, *info_end; 3770 u32 info_len; 3771 u8 info_v, info_cv; 3772 u32 fscid, namelen; 3773 3774 ceph_decode_need(&p, end, 2 + sizeof(u32), bad); 3775 info_v = ceph_decode_8(&p); 3776 info_cv = ceph_decode_8(&p); 3777 info_len = ceph_decode_32(&p); 3778 ceph_decode_need(&p, end, info_len, bad); 3779 info_p = p; 3780 info_end = p + info_len; 3781 p = info_end; 3782 3783 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad); 3784 fscid = ceph_decode_32(&info_p); 3785 namelen = ceph_decode_32(&info_p); 3786 ceph_decode_need(&info_p, info_end, namelen, bad); 3787 3788 if (mds_namespace && 3789 strlen(mds_namespace) == namelen && 3790 !strncmp(mds_namespace, (char *)info_p, namelen)) { 3791 mount_fscid = fscid; 3792 break; 3793 } 3794 } 3795 3796 ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch); 3797 if (mount_fscid != (u32)-1) { 3798 fsc->client->monc.fs_cluster_id = mount_fscid; 3799 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, 3800 0, true); 3801 ceph_monc_renew_subs(&fsc->client->monc); 3802 } else { 3803 err = -ENOENT; 3804 goto err_out; 3805 } 3806 return; 3807 bad: 3808 pr_err("error decoding fsmap\n"); 3809 err_out: 3810 mutex_lock(&mdsc->mutex); 3811 mdsc->mdsmap_err = -ENOENT; 3812 __wake_requests(mdsc, &mdsc->waiting_for_map); 3813 mutex_unlock(&mdsc->mutex); 3814 return; 3815 } 3816 3817 /* 3818 * handle mds map update. 
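 * Decodes the new mdsmap, ignores it if its epoch is not newer than
 * the one we already have, otherwise swaps it in and runs
 * check_new_map() against the old map.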
3819 */ 3820 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) 3821 { 3822 u32 epoch; 3823 u32 maplen; 3824 void *p = msg->front.iov_base; 3825 void *end = p + msg->front.iov_len; 3826 struct ceph_mdsmap *newmap, *oldmap; 3827 struct ceph_fsid fsid; 3828 int err = -EINVAL; 3829 3830 ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); 3831 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 3832 if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0) 3833 return; 3834 epoch = ceph_decode_32(&p); 3835 maplen = ceph_decode_32(&p); 3836 dout("handle_map epoch %u len %d\n", epoch, (int)maplen); 3837 3838 /* do we need it? */ 3839 mutex_lock(&mdsc->mutex); 3840 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { 3841 dout("handle_map epoch %u <= our %u\n", 3842 epoch, mdsc->mdsmap->m_epoch); 3843 mutex_unlock(&mdsc->mutex); 3844 return; 3845 } 3846 3847 newmap = ceph_mdsmap_decode(&p, end); 3848 if (IS_ERR(newmap)) { 3849 err = PTR_ERR(newmap); 3850 goto bad_unlock; 3851 } 3852 3853 /* swap into place */ 3854 if (mdsc->mdsmap) { 3855 oldmap = mdsc->mdsmap; 3856 mdsc->mdsmap = newmap; 3857 check_new_map(mdsc, newmap, oldmap); 3858 ceph_mdsmap_destroy(oldmap); 3859 } else { 3860 mdsc->mdsmap = newmap; /* first mds map */ 3861 } 3862 mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; 3863 3864 __wake_requests(mdsc, &mdsc->waiting_for_map); 3865 ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP, 3866 mdsc->mdsmap->m_epoch); 3867 3868 mutex_unlock(&mdsc->mutex); 3869 schedule_delayed(mdsc); 3870 return; 3871 3872 bad_unlock: 3873 mutex_unlock(&mdsc->mutex); 3874 bad: 3875 pr_err("error decoding mdsmap %d\n", err); 3876 return; 3877 } 3878 3879 static struct ceph_connection *con_get(struct ceph_connection *con) 3880 { 3881 struct ceph_mds_session *s = con->private; 3882 3883 if (get_session(s)) { 3884 dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref)); 3885 return con; 3886 } 3887 dout("mdsc con_get %p FAIL\n", s); 3888 return NULL; 3889 } 3890 3891 static void con_put(struct ceph_connection *con) 3892 { 3893 struct ceph_mds_session *s = con->private; 3894 3895 dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1); 3896 ceph_put_mds_session(s); 3897 } 3898 3899 /* 3900 * if the client is unresponsive for long enough, the mds will kill 3901 * the session entirely. 
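 * When that happens the messenger calls peer_reset() below and we
 * attempt to re-establish the session via send_mds_reconnect().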
3902 */ 3903 static void peer_reset(struct ceph_connection *con) 3904 { 3905 struct ceph_mds_session *s = con->private; 3906 struct ceph_mds_client *mdsc = s->s_mdsc; 3907 3908 pr_warn("mds%d closed our session\n", s->s_mds); 3909 send_mds_reconnect(mdsc, s); 3910 } 3911 3912 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) 3913 { 3914 struct ceph_mds_session *s = con->private; 3915 struct ceph_mds_client *mdsc = s->s_mdsc; 3916 int type = le16_to_cpu(msg->hdr.type); 3917 3918 mutex_lock(&mdsc->mutex); 3919 if (__verify_registered_session(mdsc, s) < 0) { 3920 mutex_unlock(&mdsc->mutex); 3921 goto out; 3922 } 3923 mutex_unlock(&mdsc->mutex); 3924 3925 switch (type) { 3926 case CEPH_MSG_MDS_MAP: 3927 ceph_mdsc_handle_mdsmap(mdsc, msg); 3928 break; 3929 case CEPH_MSG_FS_MAP_USER: 3930 ceph_mdsc_handle_fsmap(mdsc, msg); 3931 break; 3932 case CEPH_MSG_CLIENT_SESSION: 3933 handle_session(s, msg); 3934 break; 3935 case CEPH_MSG_CLIENT_REPLY: 3936 handle_reply(s, msg); 3937 break; 3938 case CEPH_MSG_CLIENT_REQUEST_FORWARD: 3939 handle_forward(mdsc, s, msg); 3940 break; 3941 case CEPH_MSG_CLIENT_CAPS: 3942 ceph_handle_caps(s, msg); 3943 break; 3944 case CEPH_MSG_CLIENT_SNAP: 3945 ceph_handle_snap(mdsc, s, msg); 3946 break; 3947 case CEPH_MSG_CLIENT_LEASE: 3948 handle_lease(mdsc, s, msg); 3949 break; 3950 3951 default: 3952 pr_err("received unknown message type %d %s\n", type, 3953 ceph_msg_type_name(type)); 3954 } 3955 out: 3956 ceph_msg_put(msg); 3957 } 3958 3959 /* 3960 * authentication 3961 */ 3962 3963 /* 3964 * Note: returned pointer is the address of a structure that's 3965 * managed separately. Caller must *not* attempt to free it. 3966 */ 3967 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, 3968 int *proto, int force_new) 3969 { 3970 struct ceph_mds_session *s = con->private; 3971 struct ceph_mds_client *mdsc = s->s_mdsc; 3972 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; 3973 struct ceph_auth_handshake *auth = &s->s_auth; 3974 3975 if (force_new && auth->authorizer) { 3976 ceph_auth_destroy_authorizer(auth->authorizer); 3977 auth->authorizer = NULL; 3978 } 3979 if (!auth->authorizer) { 3980 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, 3981 auth); 3982 if (ret) 3983 return ERR_PTR(ret); 3984 } else { 3985 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS, 3986 auth); 3987 if (ret) 3988 return ERR_PTR(ret); 3989 } 3990 *proto = ac->protocol; 3991 3992 return auth; 3993 } 3994 3995 3996 static int verify_authorizer_reply(struct ceph_connection *con) 3997 { 3998 struct ceph_mds_session *s = con->private; 3999 struct ceph_mds_client *mdsc = s->s_mdsc; 4000 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; 4001 4002 return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer); 4003 } 4004 4005 static int invalidate_authorizer(struct ceph_connection *con) 4006 { 4007 struct ceph_mds_session *s = con->private; 4008 struct ceph_mds_client *mdsc = s->s_mdsc; 4009 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; 4010 4011 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); 4012 4013 return ceph_monc_validate_auth(&mdsc->fsc->client->monc); 4014 } 4015 4016 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con, 4017 struct ceph_msg_header *hdr, int *skip) 4018 { 4019 struct ceph_msg *msg; 4020 int type = (int) le16_to_cpu(hdr->type); 4021 int front_len = (int) le32_to_cpu(hdr->front_len); 4022 4023 if (con->in_msg) 4024 return con->in_msg; 4025 4026 *skip = 
0; 4027 msg = ceph_msg_new(type, front_len, GFP_NOFS, false); 4028 if (!msg) { 4029 pr_err("unable to allocate msg type %d len %d\n", 4030 type, front_len); 4031 return NULL; 4032 } 4033 4034 return msg; 4035 } 4036 4037 static int mds_sign_message(struct ceph_msg *msg) 4038 { 4039 struct ceph_mds_session *s = msg->con->private; 4040 struct ceph_auth_handshake *auth = &s->s_auth; 4041 4042 return ceph_auth_sign_message(auth, msg); 4043 } 4044 4045 static int mds_check_message_signature(struct ceph_msg *msg) 4046 { 4047 struct ceph_mds_session *s = msg->con->private; 4048 struct ceph_auth_handshake *auth = &s->s_auth; 4049 4050 return ceph_auth_check_message_signature(auth, msg); 4051 } 4052 4053 static const struct ceph_connection_operations mds_con_ops = { 4054 .get = con_get, 4055 .put = con_put, 4056 .dispatch = dispatch, 4057 .get_authorizer = get_authorizer, 4058 .verify_authorizer_reply = verify_authorizer_reply, 4059 .invalidate_authorizer = invalidate_authorizer, 4060 .peer_reset = peer_reset, 4061 .alloc_msg = mds_alloc_msg, 4062 .sign_message = mds_sign_message, 4063 .check_message_signature = mds_check_message_signature, 4064 }; 4065 4066 /* eof */ 4067