#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif	/* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

#define list_entry_next(pos, member)					\
	list_entry(pos->member.next, typeof(*pos), member)

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------                \    \
 *       |  | CLOSING |  socket event;  \    \
 *       |  -----------  await close     \    \
 *       |       ^                        \    |
 *       |       |                         \   |
 *       |       + con_sock_state_closing() \  |
 *       |      / \                          |  |
 *       |     /   ---------------           |  |
 *       |    /                   \          v  v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       | |    /                 --------------  connect initiated
 *       | |   | con_sock_state_connected()
 *       | |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

/*
 * connection states
 */
#define CON_STATE_CLOSED	1	/* -> PREOPEN */
#define CON_STATE_PREOPEN	2	/* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3	/* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4	/* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5	/* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6	/* -> PREOPEN, CLOSED */

/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX	   0	/* we can close channel or drop
					 * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING 1	/* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING	   2	/* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED	   3	/* socket state changed to closed */
#define CON_FLAG_BACKOFF	   4	/* need to retry queuing delayed work */

static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CON_FLAG_LOSSYTX:
	case CON_FLAG_KEEPALIVE_PENDING:
	case CON_FLAG_WRITE_PENDING:
	case CON_FLAG_SOCK_CLOSED:
	case CON_FLAG_BACKOFF:
		return true;
	default:
		return false;
	}
}

static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
				    unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
				  unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_set_bit(con_flag, &con->flags);
}

/* Slab caches for frequently-allocated structures */

static struct kmem_cache *ceph_msg_cache;
static struct kmem_cache *ceph_msg_data_cache;

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024

static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void con_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;		/* used in certain error cases */

const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss->ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);

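/*
 * Illustrative sketch (compiled out; not part of the original file):
 * typical use of ceph_pr_addr().  The returned pointer refers to one
 * slot of the rotating addr_str[] array, so it stays valid only
 * briefly -- long enough for the enclosing printk -- and up to
 * ADDR_STR_COUNT strings may be live at once without locking.
 */
#if 0
static void example_print_peer(struct ceph_connection *con)
{
	pr_info("peer is %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
}
#endif
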
static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

static int ceph_msgr_slab_init(void)
{
	BUG_ON(ceph_msg_cache);
	ceph_msg_cache = kmem_cache_create("ceph_msg",
					sizeof (struct ceph_msg),
					__alignof__(struct ceph_msg), 0, NULL);

	if (!ceph_msg_cache)
		return -ENOMEM;

	BUG_ON(ceph_msg_data_cache);
	ceph_msg_data_cache = kmem_cache_create("ceph_msg_data",
					sizeof (struct ceph_msg_data),
					__alignof__(struct ceph_msg_data),
					0, NULL);
	if (ceph_msg_data_cache)
		return 0;

	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;

	return -ENOMEM;
}

static void ceph_msgr_slab_exit(void)
{
	BUG_ON(!ceph_msg_data_cache);
	kmem_cache_destroy(ceph_msg_data_cache);
	ceph_msg_data_cache = NULL;

	BUG_ON(!ceph_msg_cache);
	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;
}

static void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	ceph_msgr_slab_exit();

	BUG_ON(zero_page == NULL);
	kunmap(zero_page);
	page_cache_release(zero_page);
	zero_page = NULL;
}

int ceph_msgr_init(void)
{
	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	page_cache_get(zero_page);

	if (ceph_msgr_slab_init())
		return -ENOMEM;

	ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	if (atomic_read(&con->msgr->stopping))
		return;

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s on %p state = %lu, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer.  See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
	if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
		if (sk_stream_is_writeable(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
		/* fall through */
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		con_sock_state_closing(con);
		con_flag_set(con, CON_FLAG_SOCK_CLOSED);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}


/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

	con_sock_state_connecting(con);
	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->error_msg = "connect error";

		return ret;
	}
	con->sock = sock;
	return 0;
}

static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
			     int page_offset, size_t length)
{
	void *kaddr;
	int ret;

	BUG_ON(page_offset + length > PAGE_SIZE);

	kaddr = kmap(page);
	BUG_ON(!kaddr);
	ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length);
	kunmap(page);

	return ret;
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
			       int offset, size_t size, bool more)
{
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
	int ret;

	ret = kernel_sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, bool more)
{
	int ret;
	struct kvec iov;

	/* sendpage cannot properly handle pages with page_count == 0,
	 * we need to fall back to sendmsg if that's the case */
	if (page_count(page) >= 1)
		return __ceph_tcp_sendpage(sock, page, offset, size, more);

	iov.iov_base = kmap(page) + offset;
	iov.iov_len = size;
	ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
	kunmap(page);

	return ret;
}

/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc = 0;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (con->sock) {
		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
		sock_release(con->sock);
		con->sock = NULL;
	}

	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
	con_flag_clear(con, CON_FLAG_SOCK_CLOSED);

	con_sock_state_closed(con);
	return rc;
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	BUG_ON(msg->con == NULL);
	msg->con->ops->put(msg->con);
	msg->con = NULL;

	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	dout("reset_connection %p\n", con);
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	con->state = CON_STATE_CLOSED;

	con_flag_clear(con, CON_FLAG_LOSSYTX);	/* so we retry next connect */
	con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
	con_flag_clear(con, CON_FLAG_WRITE_PENDING);
	con_flag_clear(con, CON_FLAG_BACKOFF);

	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_delayed_work(&con->work);
	con_close_socket(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));

	WARN_ON(con->state != CON_STATE_CLOSED);
	con->state = CON_STATE_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
		   const struct ceph_connection_operations *ops,
		   struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);

	con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);

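/*
 * Illustrative sketch (compiled out; not part of the original file):
 * a client's view of the connection lifecycle built from the calls
 * above.  The ops table and entity number here are hypothetical, and
 * ceph_con_send() is defined elsewhere in this file.
 */
#if 0
static void example_con_lifecycle(struct ceph_connection *con,
				  struct ceph_messenger *msgr,
				  struct ceph_entity_addr *addr)
{
	ceph_con_init(con, NULL, &example_con_ops, msgr);  /* CLOSED */
	ceph_con_open(con, CEPH_ENTITY_TYPE_OSD, 0, addr); /* PREOPEN; work queued */
	/* ... hand messages to the messenger with ceph_con_send() ... */
	ceph_con_close(con);				   /* back to CLOSED */
}
#endif
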
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}

static void con_out_kvec_reset(struct ceph_connection *con)
{
	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}

static void con_out_kvec_add(struct ceph_connection *con,
			     size_t size, void *data)
{
	int index;

	index = con->out_kvec_left;
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}

#ifdef CONFIG_BLOCK

/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
					  size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio *bio;

	BUG_ON(data->type != CEPH_MSG_DATA_BIO);

	bio = data->bio;
	BUG_ON(!bio);

	cursor->resid = min(length, data->bio_length);
	cursor->bio = bio;
	cursor->bvec_iter = bio->bi_iter;
	cursor->last_piece =
		cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
}

static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
					   size_t *page_offset,
					   size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio *bio;
	struct bio_vec bio_vec;

	BUG_ON(data->type != CEPH_MSG_DATA_BIO);

	bio = cursor->bio;
	BUG_ON(!bio);

	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);

	*page_offset = (size_t) bio_vec.bv_offset;
	BUG_ON(*page_offset >= PAGE_SIZE);
	if (cursor->last_piece) /* pagelist offset is always 0 */
		*length = cursor->resid;
	else
		*length = (size_t) bio_vec.bv_len;
	BUG_ON(*length > cursor->resid);
	BUG_ON(*page_offset + *length > PAGE_SIZE);

	return bio_vec.bv_page;
}

static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
				      size_t bytes)
{
	struct bio *bio;
	struct bio_vec bio_vec;

	BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);

	bio = cursor->bio;
	BUG_ON(!bio);

	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);

	/* Advance the cursor offset */

	BUG_ON(cursor->resid < bytes);
	cursor->resid -= bytes;

	bio_advance_iter(bio, &cursor->bvec_iter, bytes);

	if (bytes < bio_vec.bv_len)
		return false;	/* more bytes to process in this segment */

	/* Move on to the next segment, and possibly the next bio */

	if (!cursor->bvec_iter.bi_size) {
		bio = bio->bi_next;
		cursor->bio = bio;
		if (bio)
			cursor->bvec_iter = bio->bi_iter;
		else
			memset(&cursor->bvec_iter, 0,
			       sizeof(cursor->bvec_iter));
	}

	if (!cursor->last_piece) {
		BUG_ON(!cursor->resid);
		BUG_ON(!bio);
		/* A short read is OK, so use <= rather than == */
		if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
			cursor->last_piece = true;
	}

	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * For a page array, a piece comes from the first page in the array
 * that has not already been fully consumed.
 */
static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
					    size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	int page_count;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(!data->pages);
	BUG_ON(!data->length);

	cursor->resid = min(length, data->length);
	page_count = calc_pages_for(data->alignment, (u64)data->length);
	cursor->page_offset = data->alignment & ~PAGE_MASK;
	cursor->page_index = 0;
	BUG_ON(page_count > (int)USHRT_MAX);
	cursor->page_count = (unsigned short)page_count;
	BUG_ON(length > SIZE_MAX - cursor->page_offset);
	cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
			 size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_index >= cursor->page_count);
	BUG_ON(cursor->page_offset >= PAGE_SIZE);

	*page_offset = cursor->page_offset;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return data->pages[cursor->page_index];
}

static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);

	/* Advance the cursor page offset */

	cursor->resid -= bytes;
	cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
	if (!bytes || cursor->page_offset)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;	/* no more data */

	/* Move on to the next page; offset is already at 0 */

	BUG_ON(cursor->page_index >= cursor->page_count);
	cursor->page_index++;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * For a pagelist, a piece is whatever remains to be consumed in the
 * first page in the list, or the front of the next page.
 */
static void
ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
				   size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;
	struct page *page;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	if (!length)
		return;		/* pagelist can be assigned but empty */

	BUG_ON(list_empty(&pagelist->head));
	page = list_first_entry(&pagelist->head, struct page, lru);

	cursor->resid = min(length, pagelist->length);
	cursor->page = page;
	cursor->offset = 0;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
			    size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(!cursor->page);
	BUG_ON(cursor->offset + cursor->resid != pagelist->length);

	/* offset of first page in pagelist is always 0 */
	*page_offset = cursor->offset & ~PAGE_MASK;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return cursor->page;
}

static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
					   size_t bytes)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
	BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);

	/* Advance the cursor offset */

	cursor->resid -= bytes;
	cursor->offset += bytes;
	/* offset of first page in pagelist is always 0 */
	if (!bytes || cursor->offset & ~PAGE_MASK)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;	/* no more data */

	/* Move on to the next page */

	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
	cursor->page = list_entry_next(cursor->page, lru);
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * Message data is handled (sent or received) in pieces, where each
 * piece resides on a single page.  The network layer might not
 * consume an entire piece at once.  A data item's cursor keeps
 * track of which piece is next to process and how much remains to
 * be processed in that piece.  It also tracks whether the current
 * piece is the last one in the data item.
 */
static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
{
	size_t length = cursor->total_resid;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		ceph_msg_data_pagelist_cursor_init(cursor, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		ceph_msg_data_pages_cursor_init(cursor, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		ceph_msg_data_bio_cursor_init(cursor, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		/* BUG(); */
		break;
	}
	cursor->need_crc = true;
}

static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
{
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	struct ceph_msg_data *data;

	BUG_ON(!length);
	BUG_ON(length > msg->data_length);
	BUG_ON(list_empty(&msg->data));

	cursor->data_head = &msg->data;
	cursor->total_resid = length;
	data = list_first_entry(&msg->data, struct ceph_msg_data, links);
	cursor->data = data;

	__ceph_msg_data_cursor_init(cursor);
}

/*
 * Return the page containing the next piece to process for a given
 * data item, and supply the page offset and length of that piece.
 * Indicate whether this is the last piece in this data item.
 */
static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
				       size_t *page_offset, size_t *length,
				       bool *last_piece)
{
	struct page *page;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		page = ceph_msg_data_pages_next(cursor, page_offset, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		page = ceph_msg_data_bio_next(cursor, page_offset, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		page = NULL;
		break;
	}
	BUG_ON(!page);
	BUG_ON(*page_offset + *length > PAGE_SIZE);
	BUG_ON(!*length);
	if (last_piece)
		*last_piece = cursor->last_piece;

	return page;
}

/*
 * Returns true if the result moves the cursor on to the next piece
 * of the data item.
 */
static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
				  size_t bytes)
{
	bool new_piece;

	BUG_ON(bytes > cursor->resid);
	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
		break;
	case CEPH_MSG_DATA_PAGES:
		new_piece = ceph_msg_data_pages_advance(cursor, bytes);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		new_piece = ceph_msg_data_bio_advance(cursor, bytes);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		BUG();
		break;
	}
	cursor->total_resid -= bytes;

	if (!cursor->resid && cursor->total_resid) {
		WARN_ON(!cursor->last_piece);
		BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
		cursor->data = list_entry_next(cursor->data, links);
		__ceph_msg_data_cursor_init(cursor);
		new_piece = true;
	}
	cursor->need_crc = new_piece;

	return new_piece;
}

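/*
 * Illustrative sketch (compiled out; not part of the original file):
 * the canonical pattern for walking a message's data items with the
 * cursor API above, as write_partial_message_data() and
 * read_partial_msg_data() do below.  It assumes the message has a
 * non-empty data payload; process_piece() is a hypothetical stand-in
 * for a send or receive that may consume fewer bytes than offered.
 */
#if 0
static void example_walk_msg_data(struct ceph_msg *msg)
{
	struct ceph_msg_data_cursor *cursor = &msg->cursor;

	ceph_msg_data_cursor_init(msg, msg->data_length);
	while (cursor->resid) {
		size_t page_offset;
		size_t length;
		bool last_piece;
		struct page *page;
		int ret;

		page = ceph_msg_data_next(cursor, &page_offset, &length,
					  &last_piece);
		ret = process_piece(page, page_offset, length);	/* hypothetical */
		if (ret <= 0)
			break;	/* socket full or error; cursor keeps its place */
		ceph_msg_data_advance(cursor, (size_t)ret);
	}
}
#endif
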
static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
	BUG_ON(!msg);
	BUG_ON(!data_len);

	/* Initialize data cursor */

	ceph_msg_data_cursor_init(msg, (size_t)data_len);
}

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;
	int v = con->out_kvec_left;

	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}

/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	con_out_kvec_reset(con);
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
				 &con->out_temp_ack);
	}

	BUG_ON(list_empty(&con->out_queue));
	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;
	BUG_ON(m->con != con);

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;
	}
	WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     m->data_length);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		con_out_kvec_add(con, m->middle->vec.iov_len,
				 m->middle->vec.iov_base);

	/* fill in crc (except data pages), footer */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	con->out_msg->footer.flags = 0;

	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
			     m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	dout("%s front_crc %u middle_crc %u\n", __func__,
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	con->out_msg->footer.data_crc = 0;
	if (m->data_length) {
		prepare_message_data(con->out_msg, m->data_length);
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);
	}

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare to share the seq during handshake
 */
static void prepare_write_seq(struct ceph_connection *con)
{
	dout("prepare_write_seq %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con_out_kvec_reset(con);
	con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Connection negotiation.
 */

static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
						int *auth_proto)
{
	struct ceph_auth_handshake *auth;

	if (!con->ops->get_authorizer) {
		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
		con->out_connect.authorizer_len = 0;
		return NULL;
	}

	/* Can't hold the mutex while getting authorizer */
	mutex_unlock(&con->mutex);
	auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
	mutex_lock(&con->mutex);

	if (IS_ERR(auth))
		return auth;
	if (con->state != CON_STATE_NEGOTIATING)
		return ERR_PTR(-EAGAIN);

	con->auth_reply_buf = auth->authorizer_reply_buf;
	con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
	return auth;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
	con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
	con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
			 &con->msgr->my_enc_addr);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

static int prepare_write_connect(struct ceph_connection *con)
{
	unsigned int global_seq = get_global_seq(con->msgr, 0);
	int proto;
	int auth_proto;
	struct ceph_auth_handshake *auth;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	auth_proto = CEPH_AUTH_UNKNOWN;
	auth = get_connect_authorizer(con, &auth_proto);
	if (IS_ERR(auth))
		return PTR_ERR(auth);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
	con->out_connect.authorizer_len = auth ?
		cpu_to_le32(auth->authorizer_buf_len) : 0;

	con_out_kvec_add(con, sizeof (con->out_connect),
			 &con->out_connect);
	if (auth && auth->authorizer_buf_len)
		con_out_kvec_add(con, auth->authorizer_buf_len,
				 auth->authorizer_buf);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);

	return 0;
}

/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;	/* done */

		/* account for full iov entries consumed */
		while (ret >= con->out_kvec_cur->iov_len) {
			BUG_ON(!con->out_kvec_left);
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		}
		/* and for a partially-consumed entry */
		if (ret) {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}

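/*
 * Illustrative sketch (compiled out; not part of the original file):
 * how a caller interprets the 1/0/<0 convention above.  The
 * messenger's own worker follows this pattern when draining staged
 * kvecs.
 */
#if 0
	ret = write_partial_kvec(con);
	if (ret == 0)
		return 0;	/* socket full; ceph_sock_write_space()
				 * will queue the work again */
	if (ret < 0)
		return ret;	/* fault; connection will be reset */
	/* ret == 1: everything staged so far is on the wire */
#endif
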
static u32 ceph_crc32c_page(u32 crc, struct page *page,
			    unsigned int page_offset,
			    unsigned int length)
{
	char *kaddr;

	kaddr = kmap(page);
	BUG_ON(kaddr == NULL);
	crc = crc32c(crc, kaddr + page_offset, length);
	kunmap(page);

	return crc;
}

/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_message_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	bool do_datacrc = !con->msgr->nocrc;
	u32 crc;

	dout("%s %p msg %p\n", __func__, con, msg);

	if (list_empty(&msg->data))
		return -EINVAL;

	/*
	 * Iterate through each page that contains data to be
	 * written, and send as much as possible for each.
	 *
	 * If we are calculating the data crc (the default), we will
	 * need to map the page.  If we have no pages, they have
	 * been revoked, so use the zero page.
	 */
	crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
	while (cursor->resid) {
		struct page *page;
		size_t page_offset;
		size_t length;
		bool last_piece;
		bool need_crc;
		int ret;

		page = ceph_msg_data_next(&msg->cursor, &page_offset, &length,
					  &last_piece);
		ret = ceph_tcp_sendpage(con->sock, page, page_offset,
					length, last_piece);
		if (ret <= 0) {
			if (do_datacrc)
				msg->footer.data_crc = cpu_to_le32(crc);

			return ret;
		}
		if (do_datacrc && cursor->need_crc)
			crc = ceph_crc32c_page(crc, page, page_offset, length);
		need_crc = ceph_msg_data_advance(&msg->cursor, (size_t)ret);
	}

	dout("%s %p msg %p done\n", __func__, con, msg);

	/* prepare and queue up footer, too */
	if (do_datacrc)
		msg->footer.data_crc = cpu_to_le32(crc);
	else
		msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con_out_kvec_reset(con);
	prepare_write_message_footer(con);

	return 1;	/* must return > 0 to indicate success */
}

/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);

		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_seq(struct ceph_connection *con)
{
	dout("prepare_read_seq %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_SEQ;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}


static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
	while (con->in_base_pos < end) {
		int left = end - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}

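/*
 * Worked example (illustrative; not part of the original file): the
 * banner-reading code below calls read_partial() with end/size pairs
 * that accumulate.  If the banner is, say, 9 bytes and the next
 * object is 16 bytes, the second call passes end = 9 + 16 = 25 and
 * size = 16; "have" = size - left then gives the offset within *this*
 * object where a previously interrupted read left off, so a short
 * recv can be resumed at the right place on the next pass.
 */
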
/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	size = strlen(CEPH_BANNER);
	end = size;
	ret = read_partial(con, end, size, con->in_banner);
	if (ret <= 0)
		goto out;

	size = sizeof (con->actual_peer_addr);
	end += size;
	ret = read_partial(con, end, size, &con->actual_peer_addr);
	if (ret <= 0)
		goto out;

	size = sizeof (con->peer_addr_for_me);
	end += size;
	ret = read_partial(con, end, size, &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;

out:
	return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	size = sizeof (con->in_reply);
	end = size;
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		goto out;

	size = le32_to_cpu(con->in_reply.authorizer_len);
	end += size;
	ret = read_partial(con, end, size, con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}

/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       ceph_pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}

/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
		     char delim, const char **ipend)
{
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	memset(ss, 0, sizeof(*ss));

	if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
		ss->ss_family = AF_INET;
		return 0;
	}

	if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
		ss->ss_family = AF_INET6;
		return 0;
	}

	return -EINVAL;
}

/*
 * Extract hostname string and resolve using kernel DNS facility.
 */
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	const char *end, *delim_p;
	char *colon_p, *ip_addr = NULL;
	int ip_len, ret;

	/*
	 * The end of the hostname occurs immediately preceding the delimiter or
	 * the port marker (':') where the delimiter takes precedence.
	 */
	delim_p = memchr(name, delim, namelen);
	colon_p = memchr(name, ':', namelen);

	if (delim_p && colon_p)
		end = delim_p < colon_p ? delim_p : colon_p;
	else if (!delim_p && colon_p)
		end = colon_p;
	else {
		end = delim_p;
		if (!end) /* case: hostname:/ */
			end = name + namelen;
	}

	if (end <= name)
		return -EINVAL;

	/* do dns_resolve upcall */
	ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
	if (ip_len > 0)
		ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
	else
		ret = -ESRCH;

	kfree(ip_addr);

	*ipend = end;

	pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
		ret, ret ? "failed" : ceph_pr_addr(ss));

	return ret;
}
#else
static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	return -EINVAL;
}
#endif

"failed" : ceph_pr_addr(ss)); 1815 1816 return ret; 1817 } 1818 #else 1819 static inline int ceph_dns_resolve_name(const char *name, size_t namelen, 1820 struct sockaddr_storage *ss, char delim, const char **ipend) 1821 { 1822 return -EINVAL; 1823 } 1824 #endif 1825 1826 /* 1827 * Parse a server name (IP or hostname). If a valid IP address is not found 1828 * then try to extract a hostname to resolve using userspace DNS upcall. 1829 */ 1830 static int ceph_parse_server_name(const char *name, size_t namelen, 1831 struct sockaddr_storage *ss, char delim, const char **ipend) 1832 { 1833 int ret; 1834 1835 ret = ceph_pton(name, namelen, ss, delim, ipend); 1836 if (ret) 1837 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend); 1838 1839 return ret; 1840 } 1841 1842 /* 1843 * Parse an ip[:port] list into an addr array. Use the default 1844 * monitor port if a port isn't specified. 1845 */ 1846 int ceph_parse_ips(const char *c, const char *end, 1847 struct ceph_entity_addr *addr, 1848 int max_count, int *count) 1849 { 1850 int i, ret = -EINVAL; 1851 const char *p = c; 1852 1853 dout("parse_ips on '%.*s'\n", (int)(end-c), c); 1854 for (i = 0; i < max_count; i++) { 1855 const char *ipend; 1856 struct sockaddr_storage *ss = &addr[i].in_addr; 1857 int port; 1858 char delim = ','; 1859 1860 if (*p == '[') { 1861 delim = ']'; 1862 p++; 1863 } 1864 1865 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend); 1866 if (ret) 1867 goto bad; 1868 ret = -EINVAL; 1869 1870 p = ipend; 1871 1872 if (delim == ']') { 1873 if (*p != ']') { 1874 dout("missing matching ']'\n"); 1875 goto bad; 1876 } 1877 p++; 1878 } 1879 1880 /* port? */ 1881 if (p < end && *p == ':') { 1882 port = 0; 1883 p++; 1884 while (p < end && *p >= '0' && *p <= '9') { 1885 port = (port * 10) + (*p - '0'); 1886 p++; 1887 } 1888 if (port == 0) 1889 port = CEPH_MON_PORT; 1890 else if (port > 65535) 1891 goto bad; 1892 } else { 1893 port = CEPH_MON_PORT; 1894 } 1895 1896 addr_set_port(ss, port); 1897 1898 dout("parse_ips got %s\n", ceph_pr_addr(ss)); 1899 1900 if (p == end) 1901 break; 1902 if (*p != ',') 1903 goto bad; 1904 p++; 1905 } 1906 1907 if (p != end) 1908 goto bad; 1909 1910 if (count) 1911 *count = i + 1; 1912 return 0; 1913 1914 bad: 1915 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); 1916 return ret; 1917 } 1918 EXPORT_SYMBOL(ceph_parse_ips); 1919 1920 static int process_banner(struct ceph_connection *con) 1921 { 1922 dout("process_banner on %p\n", con); 1923 1924 if (verify_hello(con) < 0) 1925 return -1; 1926 1927 ceph_decode_addr(&con->actual_peer_addr); 1928 ceph_decode_addr(&con->peer_addr_for_me); 1929 1930 /* 1931 * Make sure the other end is who we wanted. note that the other 1932 * end may not yet know their ip address, so if it's 0.0.0.0, give 1933 * them the benefit of the doubt. 1934 */ 1935 if (memcmp(&con->peer_addr, &con->actual_peer_addr, 1936 sizeof(con->peer_addr)) != 0 && 1937 !(addr_is_blank(&con->actual_peer_addr.in_addr) && 1938 con->actual_peer_addr.nonce == con->peer_addr.nonce)) { 1939 pr_warning("wrong peer, want %s/%d, got %s/%d\n", 1940 ceph_pr_addr(&con->peer_addr.in_addr), 1941 (int)le32_to_cpu(con->peer_addr.nonce), 1942 ceph_pr_addr(&con->actual_peer_addr.in_addr), 1943 (int)le32_to_cpu(con->actual_peer_addr.nonce)); 1944 con->error_msg = "wrong peer at address"; 1945 return -1; 1946 } 1947 1948 /* 1949 * did we learn our address? 
static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warning("wrong peer, want %s/%d, got %s/%d\n",
			   ceph_pr_addr(&con->peer_addr.in_addr),
			   (int)le32_to_cpu(con->peer_addr.nonce),
			   ceph_pr_addr(&con->actual_peer_addr.in_addr),
			   (int)le32_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
	}

	return 0;
}

static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = con->msgr->supported_features;
	u64 req_feat = con->msgr->required_features;
	u64 server_feat = ceph_sanitize_features(
				le64_to_cpu(con->in_reply.features));
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		reset_connection(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		reset_connection(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to
		 * indicate that they must have reset their session, and
		 * may have dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_reply.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_NEGOTIATING)
			return -EAGAIN;
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */

static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = con->msgr->supported_features;
	u64 req_feat = con->msgr->required_features;
	u64 server_feat = ceph_sanitize_features(
				le64_to_cpu(con->in_reply.features));
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		reset_connection(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		reset_connection(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to
		 * indicate that they must have reset their session, and
		 * may have dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_reply.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_NEGOTIATING)
			return -EAGAIN;
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_reply.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_reply.global_seq));
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_SEQ:
	case CEPH_MSGR_TAG_READY:
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       ceph_pr_addr(&con->peer_addr.in_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			reset_connection(con);
			return -1;
		}

		WARN_ON(con->state != CON_STATE_NEGOTIATING);
		con->state = CON_STATE_OPEN;
		con->auth_retry = 0;    /* we authenticated; clear flag */
		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		con->peer_features = server_feat;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			con_flag_set(con, CON_FLAG_LOSSYTX);

		con->delay = 0;      /* reset backoff memory */

		if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) {
			prepare_write_seq(con);
			prepare_read_seq(con);
		} else {
			prepare_read_tag(con);
		}
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		pr_err("process_connect got WAIT as client\n");
		con->error_msg = "protocol error, got WAIT as client";
		return -1;

	default:
		pr_err("connect protocol error, will retry\n");
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}
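
/*
 * Worked example of the RETRY_SESSION branch above: if we sent
 * connect_seq 3 but the peer still holds a session with connect_seq 5,
 * it answers RETRY_SESSION carrying 5.  We adopt 5 as our connect_seq,
 * resend the connect message, and, barring further races, get READY
 * (or SEQ) back on the next pass.
 */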

/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int size = sizeof (con->in_temp_ack);
	int end = size;

	return read_partial(con, end, size, &con->in_temp_ack);
}

/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;

	while (!list_empty(&con->out_sent)) {
		m = list_first_entry(&con->out_sent, struct ceph_msg,
				     list_head);
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		m->ack_stamp = jiffies;
		ceph_msg_remove(m);
	}
	prepare_read_tag(con);
}
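
/*
 * Example of the bookkeeping above: with out_sent holding messages
 * with seq 4, 5 and 6, an incoming ack of 5 frees 4 and 5 and leaves
 * 6 on the list, where it can be requeued if the connection faults
 * (see con_fault()).
 */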

static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section,
					unsigned int sec_len, u32 *crc)
{
	int ret, left;

	BUG_ON(!section);

	while (section->iov_len < sec_len) {
		BUG_ON(section->iov_base == NULL);
		left = sec_len - section->iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
				       section->iov_len, left);
		if (ret <= 0)
			return ret;
		section->iov_len += ret;
	}
	if (section->iov_len == sec_len)
		*crc = crc32c(0, section->iov_base, section->iov_len);

	return 1;
}

static int read_partial_msg_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->in_msg;
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	const bool do_datacrc = !con->msgr->nocrc;
	struct page *page;
	size_t page_offset;
	size_t length;
	u32 crc = 0;
	int ret;

	BUG_ON(!msg);
	if (list_empty(&msg->data))
		return -EIO;

	if (do_datacrc)
		crc = con->in_data_crc;
	while (cursor->resid) {
		page = ceph_msg_data_next(&msg->cursor, &page_offset, &length,
					  NULL);
		ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
		if (ret <= 0) {
			if (do_datacrc)
				con->in_data_crc = crc;

			return ret;
		}

		if (do_datacrc)
			crc = ceph_crc32c_page(crc, page, page_offset, ret);
		(void) ceph_msg_data_advance(&msg->cursor, (size_t)ret);
	}
	if (do_datacrc)
		con->in_data_crc = crc;

	return 1;	/* must return > 0 to indicate success */
}
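
/*
 * For orientation, read_partial_message() below consumes one message
 * in this on-wire order (section lengths come from the header, and
 * the CRCs are checked against the footer):
 *
 *	+--------+-------+----------+--------+--------+
 *	| header | front | [middle] | [data] | footer |
 *	+--------+-------+----------+--------+--------+
 */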

/*
 * read (part of) a message.
 */
static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);

static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	int size;
	int end;
	int ret;
	unsigned int front_len, middle_len, data_len;
	bool do_datacrc = !con->msgr->nocrc;
	u64 seq;
	u32 crc;

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	size = sizeof (con->in_hdr);
	end = size;
	ret = read_partial(con, end, size, &con->in_hdr);
	if (ret <= 0)
		return ret;

	crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
	if (cpu_to_le32(crc) != con->in_hdr.crc) {
		pr_err("read_partial_message bad hdr crc %u != expected %u\n",
		       crc, le32_to_cpu(con->in_hdr.crc));
		return -EBADMSG;
	}

	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;

	/* verify seq# */
	seq = le64_to_cpu(con->in_hdr.seq);
	if ((s64)seq - (s64)con->in_seq < 1) {
		pr_info("skipping %s%lld %s seq %lld expected %lld\n",
			ENTITY_NAME(con->peer_name),
			ceph_pr_addr(&con->peer_addr.in_addr),
			seq, con->in_seq + 1);
		con->in_base_pos = -front_len - middle_len - data_len -
			sizeof(m->footer);
		con->in_tag = CEPH_MSGR_TAG_READY;
		return 0;
	} else if ((s64)seq - (s64)con->in_seq > 1) {
		pr_err("read_partial_message bad seq %lld expected %lld\n",
		       seq, con->in_seq + 1);
		con->error_msg = "bad message sequence # for incoming message";
		return -EBADMSG;
	}

	/* allocate message? */
	if (!con->in_msg) {
		int skip = 0;

		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     front_len, data_len);
		ret = ceph_con_in_msg_alloc(con, &skip);
		if (ret < 0)
			return ret;

		BUG_ON(!con->in_msg ^ skip);
		if (con->in_msg && data_len > con->in_msg->data_length) {
			pr_warning("%s skipping long message (%u > %zd)\n",
				   __func__, data_len,
				   con->in_msg->data_length);
			ceph_msg_put(con->in_msg);
			con->in_msg = NULL;
			skip = 1;
		}
		if (skip) {
			/* skip this message */
			dout("alloc_msg said skip message\n");
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof(m->footer);
			con->in_tag = CEPH_MSGR_TAG_READY;
			con->in_seq++;
			return 0;
		}

		BUG_ON(!con->in_msg);
		BUG_ON(con->in_msg->con != con);
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		if (m->middle)
			m->middle->vec.iov_len = 0;

		/* prepare for data payload, if any */

		if (data_len)
			prepare_message_data(con->in_msg, data_len);
	}

	/* front */
	ret = read_partial_message_section(con, &m->front, front_len,
					   &con->in_front_crc);
	if (ret <= 0)
		return ret;

	/* middle */
	if (m->middle) {
		ret = read_partial_message_section(con, &m->middle->vec,
						   middle_len,
						   &con->in_middle_crc);
		if (ret <= 0)
			return ret;
	}

	/* (page) data */
	if (data_len) {
		ret = read_partial_msg_data(con);
		if (ret <= 0)
			return ret;
	}

	/* footer */
	size = sizeof (m->footer);
	end += size;
	ret = read_partial(con, end, size, &m->footer);
	if (ret <= 0)
		return ret;

	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc, le32_to_cpu(m->footer.front_crc));
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc,
		       le32_to_cpu(m->footer.middle_crc));
		return -EBADMSG;
	}
	if (do_datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	return 1; /* done! */
}
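
/*
 * Note on skipping: both skip paths above set in_base_pos to minus the
 * number of message bytes still on the wire.  try_read() (below)
 * swallows exactly that many bytes through the static skip buffer
 * before it looks for the next tag.
 */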

/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	BUG_ON(con->in_msg->con != con);
	con->in_msg->con = NULL;
	msg = con->in_msg;
	con->in_msg = NULL;
	con->ops->put(con);

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
}


/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	int ret = 1;

	dout("try_write start %p state %lu\n", con, con->state);

more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

	/* open the socket first? */
	if (con->state == CON_STATE_PREOPEN) {
		BUG_ON(con->sock);
		con->state = CON_STATE_CONNECTING;

		con_out_kvec_reset(con);
		prepare_write_banner(con);
		prepare_read_banner(con);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		ret = ceph_tcp_connect(con);
		if (ret < 0) {
			con->error_msg = "connect error";
			goto out;
		}
	}

more_kvec:
	/* kvec data queued? */
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret <= 0)
			goto out;
	}
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto out;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_message_data(con);
		if (ret == 1)
			goto more_kvec;  /* we need to send the footer, too! */
		if (ret == 0)
			goto out;
		if (ret < 0) {
			dout("try_write write_partial_message_data err %d\n",
			     ret);
			goto out;
		}
	}

do_next:
	if (con->state == CON_STATE_OPEN) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	con_flag_clear(con, CON_FLAG_WRITE_PENDING);
	dout("try_write nothing else to write.\n");
	ret = 0;
out:
	dout("try_write done on %p ret %d\n", con, ret);
	return ret;
}
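
/*
 * Drain order in try_write() above, for reference: queued kvecs
 * (banner, connect, message headers) go out first, then the current
 * out_msg's data pages; once the connection is OPEN and the current
 * message completes, it queues the next message from out_queue, then
 * any pending ack, then a keepalive, looping until nothing is left
 * and WRITE_PENDING is cleared.
 */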

/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	int ret = -1;

more:
	dout("try_read start on %p state %lu\n", con, con->state);
	if (con->state != CON_STATE_CONNECTING &&
	    con->state != CON_STATE_NEGOTIATING &&
	    con->state != CON_STATE_OPEN)
		return 0;

	BUG_ON(!con->sock);

	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);

	if (con->state == CON_STATE_CONNECTING) {
		dout("try_read connecting\n");
		ret = read_partial_banner(con);
		if (ret <= 0)
			goto out;
		ret = process_banner(con);
		if (ret < 0)
			goto out;

		con->state = CON_STATE_NEGOTIATING;

		/*
		 * Received banner is good, exchange connection info.
		 * Do not reset out_kvec, as sending our banner raced
		 * with receiving peer banner after connect completed.
		 */
		ret = prepare_write_connect(con);
		if (ret < 0)
			goto out;
		prepare_read_connect(con);

		/* Send connection info before awaiting response */
		goto out;
	}

	if (con->state == CON_STATE_NEGOTIATING) {
		dout("try_read negotiating\n");
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto out;
		ret = process_connect(con);
		if (ret < 0)
			goto out;
		goto more;
	}

	WARN_ON(con->state != CON_STATE_OPEN);

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[SKIP_BUF_SIZE];
		int skip = min((int) sizeof (buf), -con->in_base_pos);

		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto out;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto out;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			con_close_socket(con);
			con->state = CON_STATE_CLOSED;
			goto out;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				break;
			case -EIO:
				con->error_msg = "io error";
				break;
			}
			goto out;
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		if (con->state == CON_STATE_OPEN)
			prepare_read_tag(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK ||
	    con->in_tag == CEPH_MSGR_TAG_SEQ) {
		/*
		 * the final handshake seq exchange is semantically
		 * equivalent to an ACK
		 */
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto out;
		process_ack(con);
		goto more;
	}

out:
	dout("try_read done on %p ret %d\n", con, ret);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}
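
/*
 * Concurrency note: try_read() and try_write() only ever run from
 * con_work() on the ceph_msgr_wq workqueue, with con->mutex held, so
 * the in_ and out_ parsing state they touch needs no finer-grained
 * locking.
 */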

/*
 * Atomically queue work on a connection after the specified delay.
 * Bump @con reference to avoid races with connection teardown.
 * Returns 0 if work was queued, or an error code otherwise.
 */
static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
{
	if (!con->ops->get(con)) {
		dout("%s %p ref count 0\n", __func__, con);

		return -ENOENT;
	}

	if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
		dout("%s %p - already queued\n", __func__, con);
		con->ops->put(con);

		return -EBUSY;
	}

	dout("%s %p %lu\n", __func__, con, delay);

	return 0;
}

static void queue_con(struct ceph_connection *con)
{
	(void) queue_con_delay(con, 0);
}

static bool con_sock_closed(struct ceph_connection *con)
{
	if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
		return false;

#define CASE(x)								\
	case CON_STATE_ ## x:						\
		con->error_msg = "socket closed (con state " #x ")";	\
		break;

	switch (con->state) {
	CASE(CLOSED);
	CASE(PREOPEN);
	CASE(CONNECTING);
	CASE(NEGOTIATING);
	CASE(OPEN);
	CASE(STANDBY);
	default:
		pr_warning("%s con %p unrecognized state %lu\n",
			   __func__, con, con->state);
		con->error_msg = "unrecognized con state";
		BUG();
		break;
	}
#undef CASE

	return true;
}

static bool con_backoff(struct ceph_connection *con)
{
	int ret;

	if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
		return false;

	ret = queue_con_delay(con, round_jiffies_relative(con->delay));
	if (ret) {
		dout("%s: con %p FAILED to back off %lu\n", __func__,
		     con, con->delay);
		BUG_ON(ret == -ENOENT);
		con_flag_set(con, CON_FLAG_BACKOFF);
	}

	return true;
}

/* Finish fault handling; con->mutex must *not* be held here */

static void con_fault_finish(struct ceph_connection *con)
{
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
	if (con->auth_retry && con->ops->invalidate_authorizer) {
		dout("calling invalidate_authorizer()\n");
		con->ops->invalidate_authorizer(con);
	}

	if (con->ops->fault)
		con->ops->fault(con);
}
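
/*
 * Note that fault handling is deliberately split: con_fault() (below)
 * runs with con->mutex held and only fixes up connection state, while
 * con_fault_finish() runs after the mutex is dropped, so the
 * ->invalidate_authorizer and ->fault callbacks are free to take
 * their own locks.
 */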

/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	bool fault;

	mutex_lock(&con->mutex);
	while (true) {
		int ret;

		if ((fault = con_sock_closed(con))) {
			dout("%s: con %p SOCK_CLOSED\n", __func__, con);
			break;
		}
		if (con_backoff(con)) {
			dout("%s: con %p BACKOFF\n", __func__, con);
			break;
		}
		if (con->state == CON_STATE_STANDBY) {
			dout("%s: con %p STANDBY\n", __func__, con);
			break;
		}
		if (con->state == CON_STATE_CLOSED) {
			dout("%s: con %p CLOSED\n", __func__, con);
			BUG_ON(con->sock);
			break;
		}
		if (con->state == CON_STATE_PREOPEN) {
			dout("%s: con %p PREOPEN\n", __func__, con);
			BUG_ON(con->sock);
		}

		ret = try_read(con);
		if (ret < 0) {
			if (ret == -EAGAIN)
				continue;
			con->error_msg = "socket error on read";
			fault = true;
			break;
		}

		ret = try_write(con);
		if (ret < 0) {
			if (ret == -EAGAIN)
				continue;
			con->error_msg = "socket error on write";
			fault = true;
		}

		break;	/* If we make it to here, we're done */
	}
	if (fault)
		con_fault(con);
	mutex_unlock(&con->mutex);

	if (fault)
		con_fault_finish(con);

	con->ops->put(con);
}

/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff.
 */
static void con_fault(struct ceph_connection *con)
{
	pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
		   ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));

	WARN_ON(con->state != CON_STATE_CONNECTING &&
		con->state != CON_STATE_NEGOTIATING &&
		con->state != CON_STATE_OPEN);

	con_close_socket(con);

	if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
		dout("fault on LOSSYTX channel, marking CLOSED\n");
		con->state = CON_STATE_CLOSED;
		return;
	}

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages queued or keepalive pending, place
	 * the connection in a STANDBY state */
	if (list_empty(&con->out_queue) &&
	    !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
		con_flag_clear(con, CON_FLAG_WRITE_PENDING);
		con->state = CON_STATE_STANDBY;
	} else {
		/* retry after a delay. */
		con->state = CON_STATE_PREOPEN;
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		con_flag_set(con, CON_FLAG_BACKOFF);
		queue_con(con);
	}
}
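
/*
 * Backoff example: a connection that keeps faulting waits
 * BASE_DELAY_INTERVAL, then 2x, 4x, ... with the doubling stopping
 * once the delay reaches MAX_DELAY_INTERVAL (both defined elsewhere
 * in this file); a successful handshake resets con->delay to 0 in
 * process_connect().
 */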

/*
 * initialize a new messenger instance
 */
void ceph_messenger_init(struct ceph_messenger *msgr,
			 struct ceph_entity_addr *myaddr,
			 u64 supported_features,
			 u64 required_features,
			 bool nocrc)
{
	msgr->supported_features = supported_features;
	msgr->required_features = required_features;

	spin_lock_init(&msgr->global_seq_lock);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);
	msgr->nocrc = nocrc;

	atomic_set(&msgr->stopping, 0);

	dout("%s %p\n", __func__, msgr);
}
EXPORT_SYMBOL(ceph_messenger_init);

static void clear_standby(struct ceph_connection *con)
{
	/* come back from STANDBY? */
	if (con->state == CON_STATE_STANDBY) {
		dout("clear_standby %p and ++connect_seq\n", con);
		con->state = CON_STATE_PREOPEN;
		con->connect_seq++;
		WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
		WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
	}
}

/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	/* set src+dst */
	msg->hdr.src = con->msgr->inst.name;
	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
	msg->needs_out_seq = true;

	mutex_lock(&con->mutex);

	if (con->state == CON_STATE_CLOSED) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		mutex_unlock(&con->mutex);
		return;
	}

	BUG_ON(msg->con != NULL);
	msg->con = con->ops->get(con);
	BUG_ON(msg->con == NULL);

	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));

	clear_standby(con);
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);
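
/*
 * Minimal send sketch (illustrative; the message type and error
 * handling are placeholders):
 *
 *	struct ceph_msg *msg;
 *
 *	msg = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, false);
 *	if (msg)
 *		ceph_con_send(con, msg);
 *
 * ceph_con_send() takes over the caller's reference: the message is
 * either queued for transmission or, if the connection is CLOSED,
 * dropped and put immediately.
 */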

/*
 * Revoke a message that was previously queued for send
 */
void ceph_msg_revoke(struct ceph_msg *msg)
{
	struct ceph_connection *con = msg->con;

	if (!con)
		return;		/* Message not in our possession */

	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("%s %p msg %p - was on queue\n", __func__, con, msg);
		list_del_init(&msg->list_head);
		BUG_ON(msg->con == NULL);
		msg->con->ops->put(msg->con);
		msg->con = NULL;
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	if (con->out_msg == msg) {
		dout("%s %p msg %p - was sending\n", __func__, con, msg);
		con->out_msg = NULL;
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Revoke a message that we may be reading data into
 */
void ceph_msg_revoke_incoming(struct ceph_msg *msg)
{
	struct ceph_connection *con;

	BUG_ON(msg == NULL);
	if (!msg->con) {
		dout("%s msg %p null con\n", __func__, msg);

		return;		/* Message not in our possession */
	}

	con = msg->con;
	mutex_lock(&con->mutex);
	if (con->in_msg == msg) {
		unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("%s %p msg %p revoked\n", __func__, con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
	} else {
		dout("%s %p in_msg %p msg %p no-op\n",
		     __func__, con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	dout("con_keepalive %p\n", con);
	mutex_lock(&con->mutex);
	clear_standby(con);
	mutex_unlock(&con->mutex);
	if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
	    con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);

static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
{
	struct ceph_msg_data *data;

	if (WARN_ON(!ceph_msg_data_type_valid(type)))
		return NULL;

	/* only touch the result if the allocation actually succeeded */
	data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS);
	if (data) {
		data->type = type;
		INIT_LIST_HEAD(&data->links);
	}

	return data;
}

static void ceph_msg_data_destroy(struct ceph_msg_data *data)
{
	if (!data)
		return;

	WARN_ON(!list_empty(&data->links));
	if (data->type == CEPH_MSG_DATA_PAGELIST) {
		ceph_pagelist_release(data->pagelist);
		kfree(data->pagelist);
	}
	kmem_cache_free(ceph_msg_data_cache, data);
}

void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
		size_t length, size_t alignment)
{
	struct ceph_msg_data *data;

	BUG_ON(!pages);
	BUG_ON(!length);

	data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES);
	BUG_ON(!data);
	data->pages = pages;
	data->length = length;
	data->alignment = alignment & ~PAGE_MASK;

	list_add_tail(&data->links, &msg->data);
	msg->data_length += length;
}
EXPORT_SYMBOL(ceph_msg_data_add_pages);

void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
				struct ceph_pagelist *pagelist)
{
	struct ceph_msg_data *data;

	BUG_ON(!pagelist);
	BUG_ON(!pagelist->length);

	data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST);
	BUG_ON(!data);
	data->pagelist = pagelist;

	list_add_tail(&data->links, &msg->data);
	msg->data_length += pagelist->length;
}
EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
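
/*
 * Example of attaching a payload (illustrative): a caller that already
 * owns a page array can hang it off a message with
 *
 *	ceph_msg_data_add_pages(msg, pages, length, offset);
 *
 * which bumps msg->data_length by length; the pages themselves are
 * only walked later, by the data cursor, as the message is written to
 * or read from the socket.
 */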

#ifdef CONFIG_BLOCK
void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
		size_t length)
{
	struct ceph_msg_data *data;

	BUG_ON(!bio);

	data = ceph_msg_data_create(CEPH_MSG_DATA_BIO);
	BUG_ON(!data);
	data->bio = bio;
	data->bio_length = length;

	list_add_tail(&data->links, &msg->data);
	msg->data_length += length;
}
EXPORT_SYMBOL(ceph_msg_data_add_bio);
#endif	/* CONFIG_BLOCK */

/*
 * Construct a new message with the given type and front length.
 * The new message has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
			      bool can_fail)
{
	struct ceph_msg *m;

	m = kmem_cache_zalloc(ceph_msg_cache, flags);
	if (m == NULL)
		goto out;

	m->hdr.type = cpu_to_le16(type);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->hdr.front_len = cpu_to_le32(front_len);

	INIT_LIST_HEAD(&m->list_head);
	kref_init(&m->kref);
	INIT_LIST_HEAD(&m->data);

	/* front */
	if (front_len) {
		m->front.iov_base = ceph_kvmalloc(front_len, flags);
		if (m->front.iov_base == NULL) {
			dout("ceph_msg_new can't allocate %d bytes\n",
			     front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front_alloc_len = m->front.iov_len = front_len;

	dout("ceph_msg_new %p front %d\n", m, front_len);
	return m;

out2:
	ceph_msg_put(m);
out:
	if (!can_fail) {
		pr_err("msg_new can't create type %d front %d\n", type,
		       front_len);
		WARN_ON(1);
	} else {
		dout("msg_new can't create type %d front %d\n", type,
		     front_len);
	}
	return NULL;
}
EXPORT_SYMBOL(ceph_msg_new);

/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}

/*
 * Allocate a message for receiving an incoming message on a
 * connection, and save the result in con->in_msg.  Uses the
 * connection's private alloc_msg op if available.
 *
 * Returns 0 on success, or a negative error code.
 *
 * On success, if we set *skip = 1:
 *  - the next message should be skipped and ignored.
 *  - con->in_msg == NULL
 * or if we set *skip = 0:
 *  - con->in_msg is non-null.
 * On error (ENOMEM, EAGAIN, ...),
 *  - con->in_msg == NULL
 */
static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
{
	struct ceph_msg_header *hdr = &con->in_hdr;
	int middle_len = le32_to_cpu(hdr->middle_len);
	struct ceph_msg *msg;
	int ret = 0;

	BUG_ON(con->in_msg != NULL);
	BUG_ON(!con->ops->alloc_msg);

	mutex_unlock(&con->mutex);
	msg = con->ops->alloc_msg(con, hdr, skip);
	mutex_lock(&con->mutex);
	if (con->state != CON_STATE_OPEN) {
		if (msg)
			ceph_msg_put(msg);
		return -EAGAIN;
	}
	if (msg) {
		BUG_ON(*skip);
		con->in_msg = msg;
		con->in_msg->con = con->ops->get(con);
		BUG_ON(con->in_msg->con == NULL);
	} else {
		/*
		 * Null message pointer means either we should skip
		 * this message or we couldn't allocate memory.  The
		 * former is not an error.
		 */
		if (*skip)
			return 0;
		con->error_msg = "error allocating memory for incoming message";

		return -ENOMEM;
	}
	memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !con->in_msg->middle) {
		ret = ceph_alloc_middle(con, con->in_msg);
		if (ret < 0) {
			ceph_msg_put(con->in_msg);
			con->in_msg = NULL;
		}
	}

	return ret;
}


/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	ceph_kvfree(m->front.iov_base);
	kmem_cache_free(ceph_msg_cache, m);
}

/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
	LIST_HEAD(data);
	struct list_head *links;
	struct list_head *next;

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}

	list_splice_init(&m->data, &data);
	list_for_each_safe(links, next, &data) {
		struct ceph_msg_data *data;

		data = list_entry(links, struct ceph_msg_data, links);
		list_del_init(links);
		ceph_msg_data_destroy(data);
	}
	m->data_length = 0;

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
EXPORT_SYMBOL(ceph_msg_last_put);

void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
		 msg->front_alloc_len, msg->data_length);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);