#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif	/* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

#define list_entry_next(pos, member)					\
	list_entry(pos->member.next, typeof(*pos), member)

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       ||                            \   \
 *       ||       -----------           \   \
 *       ||       | CLOSING |  socket event;  \  \
 *       ||       -----------  await close    \  \
 *       ||            ^                       \  |
 *       ||            |                        \ |
 *       ||            + con_sock_state_closing()\|
 *       ||           /  \                       | |
 *       ||          /    ---------------        | |
 *       ||         /                    \       v v
 *       ||        /                     --------------
 *       ||       /    -----------------| CONNECTING |  socket created, TCP
 *       ||       |   /                  --------------  connect initiated
 *       ||       |   | con_sock_state_connected()
 *       ||       |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

/*
 * connection states
 */
#define CON_STATE_CLOSED	1	/* -> PREOPEN */
#define CON_STATE_PREOPEN	2	/* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3	/* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4	/* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5	/* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6	/* -> PREOPEN, CLOSED */

/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX	   0	/* we can close channel or drop
					 * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING 1	/* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING	   2	/* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED	   3	/* socket state changed to closed */
#define CON_FLAG_BACKOFF	   4	/* need to retry queuing delayed work */

static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CON_FLAG_LOSSYTX:
	case CON_FLAG_KEEPALIVE_PENDING:
	case CON_FLAG_WRITE_PENDING:
	case CON_FLAG_SOCK_CLOSED:
	case CON_FLAG_BACKOFF:
		return true;
	default:
		return false;
	}
}

static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
					unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
					unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_set_bit(con_flag, &con->flags);
}

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*ceph_msg_cache;
static struct kmem_cache	*ceph_msg_data_cache;

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024

static void queue_con(struct ceph_connection *con);
static void cancel_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void con_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
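 * The returned pointer refers to one of ADDR_STR_COUNT rotating static
 * buffers, so it may be overwritten by a sufficiently later call.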
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;		/* used in certain error cases */

const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss->ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);

static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

static int ceph_msgr_slab_init(void)
{
	BUG_ON(ceph_msg_cache);
	ceph_msg_cache = kmem_cache_create("ceph_msg",
					sizeof (struct ceph_msg),
					__alignof__(struct ceph_msg), 0, NULL);

	if (!ceph_msg_cache)
		return -ENOMEM;

	BUG_ON(ceph_msg_data_cache);
	ceph_msg_data_cache = kmem_cache_create("ceph_msg_data",
					sizeof (struct ceph_msg_data),
					__alignof__(struct ceph_msg_data),
					0, NULL);
	if (ceph_msg_data_cache)
		return 0;

	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;

	return -ENOMEM;
}

static void ceph_msgr_slab_exit(void)
{
	BUG_ON(!ceph_msg_data_cache);
	kmem_cache_destroy(ceph_msg_data_cache);
	ceph_msg_data_cache = NULL;

	BUG_ON(!ceph_msg_cache);
	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;
}

static void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	ceph_msgr_slab_exit();

	BUG_ON(zero_page == NULL);
	kunmap(zero_page);
	page_cache_release(zero_page);
	zero_page = NULL;
}

int ceph_msgr_init(void)
{
	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	page_cache_get(zero_page);

	if (ceph_msgr_slab_init())
		return -ENOMEM;

	/*
	 * The number of active work items is limited by the number of
	 * connections, so leave @max_active at default.
	 */
	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;
	if (atomic_read(&con->msgr->stopping)) {
		return;
	}

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s on %p state = %lu, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.
	 * clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
	if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
		if (sk_stream_is_writeable(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		con_sock_state_closing(con);
		con_flag_set(con, CON_FLAG_SOCK_CLOSED);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}


/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

	con_sock_state_connecting(con);
	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->error_msg = "connect error";

		return ret;
	}

	if (con->msgr->tcp_nodelay) {
		int optval = 1;

		ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
					(char *)&optval, sizeof(optval));
		if (ret)
			pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d",
			       ret);
	}

	con->sock = sock;
	return 0;
}

static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
		     int page_offset, size_t length)
{
	void *kaddr;
	int ret;

	BUG_ON(page_offset + length > PAGE_SIZE);

	kaddr = kmap(page);
	BUG_ON(!kaddr);
	ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length);
	kunmap(page);

	return ret;
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
		     size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
		     int offset, size_t size, bool more)
{
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
	int ret;

	ret = kernel_sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
		     int offset, size_t size, bool more)
{
	int ret;
	struct kvec iov;

	/* sendpage cannot properly handle pages with page_count == 0,
	 * we need to fallback to sendmsg if that's the case */
	if (page_count(page) >= 1)
		return __ceph_tcp_sendpage(sock, page, offset, size, more);

	iov.iov_base = kmap(page) + offset;
	iov.iov_len = size;
	ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
	kunmap(page);

	return ret;
}

/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc = 0;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (con->sock) {
		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
		sock_release(con->sock);
		con->sock = NULL;
	}

	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
	con_flag_clear(con, CON_FLAG_SOCK_CLOSED);

	con_sock_state_closed(con);
	return rc;
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	BUG_ON(msg->con == NULL);
	msg->con->ops->put(msg->con);
	msg->con = NULL;

	ceph_msg_put(msg);
}
static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	dout("reset_connection %p\n", con);
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	con->state = CON_STATE_CLOSED;

	con_flag_clear(con, CON_FLAG_LOSSYTX);	/* so we retry next connect */
	con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
	con_flag_clear(con, CON_FLAG_WRITE_PENDING);
	con_flag_clear(con, CON_FLAG_BACKOFF);

	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_con(con);
	con_close_socket(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));

	WARN_ON(con->state != CON_STATE_CLOSED);
	con->state = CON_STATE_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
	const struct ceph_connection_operations *ops,
	struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);

	con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);


/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
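 * Callers pass @gt == 0 for an ordinary connection attempt, or the
 * peer's global_seq when it replied with RETRY_GLOBAL.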
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}

static void con_out_kvec_reset(struct ceph_connection *con)
{
	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}

static void con_out_kvec_add(struct ceph_connection *con,
				size_t size, void *data)
{
	int index;

	index = con->out_kvec_left;
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}

#ifdef CONFIG_BLOCK

/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
					size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio *bio;

	BUG_ON(data->type != CEPH_MSG_DATA_BIO);

	bio = data->bio;
	BUG_ON(!bio);

	cursor->resid = min(length, data->bio_length);
	cursor->bio = bio;
	cursor->bvec_iter = bio->bi_iter;
	cursor->last_piece =
		cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
}

static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
						size_t *page_offset,
						size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio *bio;
	struct bio_vec bio_vec;

	BUG_ON(data->type != CEPH_MSG_DATA_BIO);

	bio = cursor->bio;
	BUG_ON(!bio);

	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);

	*page_offset = (size_t) bio_vec.bv_offset;
	BUG_ON(*page_offset >= PAGE_SIZE);
	if (cursor->last_piece) /* pagelist offset is always 0 */
		*length = cursor->resid;
	else
		*length = (size_t) bio_vec.bv_len;
	BUG_ON(*length > cursor->resid);
	BUG_ON(*page_offset + *length > PAGE_SIZE);

	return bio_vec.bv_page;
}

static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	struct bio *bio;
	struct bio_vec bio_vec;

	BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);

	bio = cursor->bio;
	BUG_ON(!bio);

	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);

	/* Advance the cursor offset */

	BUG_ON(cursor->resid < bytes);
	cursor->resid -= bytes;

	bio_advance_iter(bio, &cursor->bvec_iter, bytes);

	if (bytes < bio_vec.bv_len)
		return false;	/* more bytes to process in this segment */

	/* Move on to the next segment, and possibly the next bio */

	if (!cursor->bvec_iter.bi_size) {
		bio = bio->bi_next;
		cursor->bio = bio;
		if (bio)
			cursor->bvec_iter = bio->bi_iter;
		else
			memset(&cursor->bvec_iter, 0,
			       sizeof(cursor->bvec_iter));
	}

	if (!cursor->last_piece) {
		BUG_ON(!cursor->resid);
		BUG_ON(!bio);
		/* A short read is OK, so use <= rather than == */
		if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
			cursor->last_piece = true;
	}

	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * For a page array, a piece comes from the first page in the array
 * that has not already been fully consumed.
 */
static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
					size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	int page_count;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(!data->pages);
	BUG_ON(!data->length);

	cursor->resid = min(length, data->length);
	page_count = calc_pages_for(data->alignment, (u64)data->length);
	cursor->page_offset = data->alignment & ~PAGE_MASK;
	cursor->page_index = 0;
	BUG_ON(page_count > (int)USHRT_MAX);
	cursor->page_count = (unsigned short)page_count;
	BUG_ON(length > SIZE_MAX - cursor->page_offset);
	cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
				size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_index >= cursor->page_count);
	BUG_ON(cursor->page_offset >= PAGE_SIZE);

	*page_offset = cursor->page_offset;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return data->pages[cursor->page_index];
}

static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
						size_t bytes)
{
	BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);

	/* Advance the cursor page offset */

	cursor->resid -= bytes;
	cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
	if (!bytes || cursor->page_offset)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;   /* no more data */

	/* Move on to the next page; offset is already at 0 */

	BUG_ON(cursor->page_index >= cursor->page_count);
	cursor->page_index++;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * For a pagelist, a piece is whatever remains to be consumed in the
 * first page in the list, or the front of the next page.
 */
static void
ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
					size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;
	struct page *page;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	if (!length)
		return;		/* pagelist can be assigned but empty */

	BUG_ON(list_empty(&pagelist->head));
	page = list_first_entry(&pagelist->head, struct page, lru);

	cursor->resid = min(length, pagelist->length);
	cursor->page = page;
	cursor->offset = 0;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
				size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(!cursor->page);
	BUG_ON(cursor->offset + cursor->resid != pagelist->length);

	/* offset of first page in pagelist is always 0 */
	*page_offset = cursor->offset & ~PAGE_MASK;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return cursor->page;
}

static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
						size_t bytes)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
	BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);

	/* Advance the cursor offset */

	cursor->resid -= bytes;
	cursor->offset += bytes;
	/* offset of first page in pagelist is always 0 */
	if (!bytes || cursor->offset & ~PAGE_MASK)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;   /* no more data */

	/* Move on to the next page */

	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
	cursor->page = list_entry_next(cursor->page, lru);
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * Message data is handled (sent or received) in pieces, where each
 * piece resides on a single page.  The network layer might not
 * consume an entire piece at once.  A data item's cursor keeps
 * track of which piece is next to process and how much remains to
 * be processed in that piece.  It also tracks whether the current
 * piece is the last one in the data item.
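 * The cursor helpers below simply dispatch on the data item type
 * (pagelist, page array, or bio).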
 */
static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
{
	size_t length = cursor->total_resid;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		ceph_msg_data_pagelist_cursor_init(cursor, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		ceph_msg_data_pages_cursor_init(cursor, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		ceph_msg_data_bio_cursor_init(cursor, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		/* BUG(); */
		break;
	}
	cursor->need_crc = true;
}

static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
{
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	struct ceph_msg_data *data;

	BUG_ON(!length);
	BUG_ON(length > msg->data_length);
	BUG_ON(list_empty(&msg->data));

	cursor->data_head = &msg->data;
	cursor->total_resid = length;
	data = list_first_entry(&msg->data, struct ceph_msg_data, links);
	cursor->data = data;

	__ceph_msg_data_cursor_init(cursor);
}

/*
 * Return the page containing the next piece to process for a given
 * data item, and supply the page offset and length of that piece.
 * Indicate whether this is the last piece in this data item.
 */
static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
					size_t *page_offset, size_t *length,
					bool *last_piece)
{
	struct page *page;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		page = ceph_msg_data_pages_next(cursor, page_offset, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		page = ceph_msg_data_bio_next(cursor, page_offset, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		page = NULL;
		break;
	}
	BUG_ON(!page);
	BUG_ON(*page_offset + *length > PAGE_SIZE);
	BUG_ON(!*length);
	if (last_piece)
		*last_piece = cursor->last_piece;

	return page;
}

/*
 * Returns true if the result moves the cursor on to the next piece
 * of the data item.
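 * @bytes is the number of bytes actually consumed; once the current
 * data item is exhausted the cursor moves on to the next item in the
 * message's data list.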
 */
static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
				size_t bytes)
{
	bool new_piece;

	BUG_ON(bytes > cursor->resid);
	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
		break;
	case CEPH_MSG_DATA_PAGES:
		new_piece = ceph_msg_data_pages_advance(cursor, bytes);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		new_piece = ceph_msg_data_bio_advance(cursor, bytes);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		BUG();
		break;
	}
	cursor->total_resid -= bytes;

	if (!cursor->resid && cursor->total_resid) {
		WARN_ON(!cursor->last_piece);
		BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
		cursor->data = list_entry_next(cursor->data, links);
		__ceph_msg_data_cursor_init(cursor);
		new_piece = true;
	}
	cursor->need_crc = new_piece;

	return new_piece;
}

static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
	BUG_ON(!msg);
	BUG_ON(!data_len);

	/* Initialize data cursor */

	ceph_msg_data_cursor_init(msg, (size_t)data_len);
}

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;
	int v = con->out_kvec_left;

	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
		if (con->ops->sign_message)
			con->ops->sign_message(con, m);
		else
			m->footer.sig = 0;
		con->out_kvec[v].iov_len = sizeof(m->footer);
		con->out_kvec_bytes += sizeof(m->footer);
	} else {
		m->old_footer.flags = m->footer.flags;
		con->out_kvec[v].iov_len = sizeof(m->old_footer);
		con->out_kvec_bytes += sizeof(m->old_footer);
	}
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}

/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	con_out_kvec_reset(con);
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
				 &con->out_temp_ack);
	}

	BUG_ON(list_empty(&con->out_queue));
	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;
	BUG_ON(m->con != con);

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;
	}
	WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     m->data_length);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		con_out_kvec_add(con, m->middle->vec.iov_len,
				 m->middle->vec.iov_base);

	/* fill in crc (except data pages), footer */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	con->out_msg->footer.flags = 0;

	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
			     m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	dout("%s front_crc %u middle_crc %u\n", __func__,
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	con->out_msg->footer.data_crc = 0;
	if (m->data_length) {
		prepare_message_data(con->out_msg, m->data_length);
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);
	}

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare to share the seq during handshake
 */
static void prepare_write_seq(struct ceph_connection *con)
{
	dout("prepare_write_seq %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con_out_kvec_reset(con);
	con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Connection negotiation.
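 * Note that get_connect_authorizer() has to drop con->mutex while
 * calling into the auth layer, so con->state is re-checked once the
 * mutex is retaken.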
 */

static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
						int *auth_proto)
{
	struct ceph_auth_handshake *auth;

	if (!con->ops->get_authorizer) {
		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
		con->out_connect.authorizer_len = 0;
		return NULL;
	}

	/* Can't hold the mutex while getting authorizer */
	mutex_unlock(&con->mutex);
	auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
	mutex_lock(&con->mutex);

	if (IS_ERR(auth))
		return auth;
	if (con->state != CON_STATE_NEGOTIATING)
		return ERR_PTR(-EAGAIN);

	con->auth_reply_buf = auth->authorizer_reply_buf;
	con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
	return auth;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
	con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
	con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
			 &con->msgr->my_enc_addr);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

static int prepare_write_connect(struct ceph_connection *con)
{
	unsigned int global_seq = get_global_seq(con->msgr, 0);
	int proto;
	int auth_proto;
	struct ceph_auth_handshake *auth;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	auth_proto = CEPH_AUTH_UNKNOWN;
	auth = get_connect_authorizer(con, &auth_proto);
	if (IS_ERR(auth))
		return PTR_ERR(auth);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
	con->out_connect.authorizer_len = auth ?
		cpu_to_le32(auth->authorizer_buf_len) : 0;

	con_out_kvec_add(con, sizeof (con->out_connect),
			 &con->out_connect);
	if (auth && auth->authorizer_buf_len)
		con_out_kvec_add(con, auth->authorizer_buf_len,
				 auth->authorizer_buf);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);

	return 0;
}

/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */

		/* account for full iov entries consumed */
		while (ret >= con->out_kvec_cur->iov_len) {
			BUG_ON(!con->out_kvec_left);
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		}
		/* and for a partially-consumed entry */
		if (ret) {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}

static u32 ceph_crc32c_page(u32 crc, struct page *page,
				unsigned int page_offset,
				unsigned int length)
{
	char *kaddr;

	kaddr = kmap(page);
	BUG_ON(kaddr == NULL);
	crc = crc32c(crc, kaddr + page_offset, length);
	kunmap(page);

	return crc;
}
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_message_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	bool do_datacrc = !con->msgr->nocrc;
	u32 crc;

	dout("%s %p msg %p\n", __func__, con, msg);

	if (list_empty(&msg->data))
		return -EINVAL;

	/*
	 * Iterate through each page that contains data to be
	 * written, and send as much as possible for each.
	 *
	 * If we are calculating the data crc (the default), we will
	 * need to map the page.  If we have no pages, they have
	 * been revoked, so use the zero page.
	 */
	crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
	while (cursor->resid) {
		struct page *page;
		size_t page_offset;
		size_t length;
		bool last_piece;
		bool need_crc;
		int ret;

		page = ceph_msg_data_next(&msg->cursor, &page_offset, &length,
					  &last_piece);
		ret = ceph_tcp_sendpage(con->sock, page, page_offset,
					length, last_piece);
		if (ret <= 0) {
			if (do_datacrc)
				msg->footer.data_crc = cpu_to_le32(crc);

			return ret;
		}
		if (do_datacrc && cursor->need_crc)
			crc = ceph_crc32c_page(crc, page, page_offset, length);
		need_crc = ceph_msg_data_advance(&msg->cursor, (size_t)ret);
	}

	dout("%s %p msg %p done\n", __func__, con, msg);

	/* prepare and queue up footer, too */
	if (do_datacrc)
		msg->footer.data_crc = cpu_to_le32(crc);
	else
		msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con_out_kvec_reset(con);
	prepare_write_message_footer(con);

	return 1;	/* must return > 0 to indicate success */
}

/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);

		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_seq(struct ceph_connection *con)
{
	dout("prepare_read_seq %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_SEQ;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
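 * in_base_pos tracks progress through the portion currently being read,
 * and the per-section CRC accumulators are reset here.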
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}


static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
	while (con->in_base_pos < end) {
		int left = end - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}


/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	size = strlen(CEPH_BANNER);
	end = size;
	ret = read_partial(con, end, size, con->in_banner);
	if (ret <= 0)
		goto out;

	size = sizeof (con->actual_peer_addr);
	end += size;
	ret = read_partial(con, end, size, &con->actual_peer_addr);
	if (ret <= 0)
		goto out;

	size = sizeof (con->peer_addr_for_me);
	end += size;
	ret = read_partial(con, end, size, &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;

out:
	return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	size = sizeof (con->in_reply);
	end = size;
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		goto out;

	size = le32_to_cpu(con->in_reply.authorizer_len);
	end += size;
	ret = read_partial(con, end, size, con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;

}

/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       ceph_pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}

/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
		char delim, const char **ipend)
{
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	memset(ss, 0, sizeof(*ss));

	if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
		ss->ss_family = AF_INET;
		return 0;
	}

	if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
		ss->ss_family = AF_INET6;
		return 0;
	}

	return -EINVAL;
}

/*
 * Extract hostname string and resolve using kernel DNS facility.
 */
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	const char *end, *delim_p;
	char *colon_p, *ip_addr = NULL;
	int ip_len, ret;

	/*
	 * The end of the hostname occurs immediately preceding the delimiter or
	 * the port marker (':') where the delimiter takes precedence.
	 */
	delim_p = memchr(name, delim, namelen);
	colon_p = memchr(name, ':', namelen);

	if (delim_p && colon_p)
		end = delim_p < colon_p ? delim_p : colon_p;
	else if (!delim_p && colon_p)
		end = colon_p;
	else {
		end = delim_p;
		if (!end) /* case: hostname:/ */
			end = name + namelen;
	}

	if (end <= name)
		return -EINVAL;

	/* do dns_resolve upcall */
	ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
	if (ip_len > 0)
		ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
	else
		ret = -ESRCH;

	kfree(ip_addr);

	*ipend = end;

	pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
		ret, ret ? "failed" : ceph_pr_addr(ss));

	return ret;
}
#else
static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	return -EINVAL;
}
#endif

/*
 * Parse a server name (IP or hostname). If a valid IP address is not found
 * then try to extract a hostname to resolve using userspace DNS upcall.
 */
static int ceph_parse_server_name(const char *name, size_t namelen,
			struct sockaddr_storage *ss, char delim, const char **ipend)
{
	int ret;

	ret = ceph_pton(name, namelen, ss, delim, ipend);
	if (ret)
		ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);

	return ret;
}

/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
		   struct ceph_entity_addr *addr,
		   int max_count, int *count)
{
	int i, ret = -EINVAL;
	const char *p = c;

	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
	for (i = 0; i < max_count; i++) {
		const char *ipend;
		struct sockaddr_storage *ss = &addr[i].in_addr;
		int port;
		char delim = ',';

		if (*p == '[') {
			delim = ']';
			p++;
		}

		ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
		if (ret)
			goto bad;
		ret = -EINVAL;

		p = ipend;

		if (delim == ']') {
			if (*p != ']') {
				dout("missing matching ']'\n");
				goto bad;
			}
			p++;
		}

		/* port? */
		if (p < end && *p == ':') {
			port = 0;
			p++;
			while (p < end && *p >= '0' && *p <= '9') {
				port = (port * 10) + (*p - '0');
				p++;
			}
			if (port == 0)
				port = CEPH_MON_PORT;
			else if (port > 65535)
				goto bad;
		} else {
			port = CEPH_MON_PORT;
		}

		addr_set_port(ss, port);

		dout("parse_ips got %s\n", ceph_pr_addr(ss));

		if (p == end)
			break;
		if (*p != ',')
			goto bad;
		p++;
	}

	if (p != end)
		goto bad;

	if (count)
		*count = i + 1;
	return 0;

bad:
	pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
	return ret;
}
EXPORT_SYMBOL(ceph_parse_ips);

static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warn("wrong peer, want %s/%d, got %s/%d\n",
			ceph_pr_addr(&con->peer_addr.in_addr),
			(int)le32_to_cpu(con->peer_addr.nonce),
			ceph_pr_addr(&con->actual_peer_addr.in_addr),
			(int)le32_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
	}

	return 0;
}

static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = con->msgr->supported_features;
	u64 req_feat = con->msgr->required_features;
	u64 server_feat = ceph_sanitize_features(
				le64_to_cpu(con->in_reply.features));
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		reset_connection(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		reset_connection(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_reply.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_NEGOTIATING)
			return -EAGAIN;
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
2073 */ 2074 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n", 2075 le32_to_cpu(con->out_connect.connect_seq), 2076 le32_to_cpu(con->in_reply.connect_seq)); 2077 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); 2078 con_out_kvec_reset(con); 2079 ret = prepare_write_connect(con); 2080 if (ret < 0) 2081 return ret; 2082 prepare_read_connect(con); 2083 break; 2084 2085 case CEPH_MSGR_TAG_RETRY_GLOBAL: 2086 /* 2087 * If we sent a smaller global_seq than the peer has, try 2088 * again with a larger value. 2089 */ 2090 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n", 2091 con->peer_global_seq, 2092 le32_to_cpu(con->in_reply.global_seq)); 2093 get_global_seq(con->msgr, 2094 le32_to_cpu(con->in_reply.global_seq)); 2095 con_out_kvec_reset(con); 2096 ret = prepare_write_connect(con); 2097 if (ret < 0) 2098 return ret; 2099 prepare_read_connect(con); 2100 break; 2101 2102 case CEPH_MSGR_TAG_SEQ: 2103 case CEPH_MSGR_TAG_READY: 2104 if (req_feat & ~server_feat) { 2105 pr_err("%s%lld %s protocol feature mismatch," 2106 " my required %llx > server's %llx, need %llx\n", 2107 ENTITY_NAME(con->peer_name), 2108 ceph_pr_addr(&con->peer_addr.in_addr), 2109 req_feat, server_feat, req_feat & ~server_feat); 2110 con->error_msg = "missing required protocol features"; 2111 reset_connection(con); 2112 return -1; 2113 } 2114 2115 WARN_ON(con->state != CON_STATE_NEGOTIATING); 2116 con->state = CON_STATE_OPEN; 2117 con->auth_retry = 0; /* we authenticated; clear flag */ 2118 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); 2119 con->connect_seq++; 2120 con->peer_features = server_feat; 2121 dout("process_connect got READY gseq %d cseq %d (%d)\n", 2122 con->peer_global_seq, 2123 le32_to_cpu(con->in_reply.connect_seq), 2124 con->connect_seq); 2125 WARN_ON(con->connect_seq != 2126 le32_to_cpu(con->in_reply.connect_seq)); 2127 2128 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) 2129 con_flag_set(con, CON_FLAG_LOSSYTX); 2130 2131 con->delay = 0; /* reset backoff memory */ 2132 2133 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) { 2134 prepare_write_seq(con); 2135 prepare_read_seq(con); 2136 } else { 2137 prepare_read_tag(con); 2138 } 2139 break; 2140 2141 case CEPH_MSGR_TAG_WAIT: 2142 /* 2143 * If there is a connection race (we are opening 2144 * connections to each other), one of us may just have 2145 * to WAIT. This shouldn't happen if we are the 2146 * client. 2147 */ 2148 pr_err("process_connect got WAIT as client\n"); 2149 con->error_msg = "protocol error, got WAIT as client"; 2150 return -1; 2151 2152 default: 2153 pr_err("connect protocol error, will retry\n"); 2154 con->error_msg = "protocol error, garbage tag during connect"; 2155 return -1; 2156 } 2157 return 0; 2158 } 2159 2160 2161 /* 2162 * read (part of) an ack 2163 */ 2164 static int read_partial_ack(struct ceph_connection *con) 2165 { 2166 int size = sizeof (con->in_temp_ack); 2167 int end = size; 2168 2169 return read_partial(con, end, size, &con->in_temp_ack); 2170 } 2171 2172 /* 2173 * We can finally discard anything that's been acked. 
2174 */ 2175 static void process_ack(struct ceph_connection *con) 2176 { 2177 struct ceph_msg *m; 2178 u64 ack = le64_to_cpu(con->in_temp_ack); 2179 u64 seq; 2180 2181 while (!list_empty(&con->out_sent)) { 2182 m = list_first_entry(&con->out_sent, struct ceph_msg, 2183 list_head); 2184 seq = le64_to_cpu(m->hdr.seq); 2185 if (seq > ack) 2186 break; 2187 dout("got ack for seq %llu type %d at %p\n", seq, 2188 le16_to_cpu(m->hdr.type), m); 2189 m->ack_stamp = jiffies; 2190 ceph_msg_remove(m); 2191 } 2192 prepare_read_tag(con); 2193 } 2194 2195 2196 static int read_partial_message_section(struct ceph_connection *con, 2197 struct kvec *section, 2198 unsigned int sec_len, u32 *crc) 2199 { 2200 int ret, left; 2201 2202 BUG_ON(!section); 2203 2204 while (section->iov_len < sec_len) { 2205 BUG_ON(section->iov_base == NULL); 2206 left = sec_len - section->iov_len; 2207 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base + 2208 section->iov_len, left); 2209 if (ret <= 0) 2210 return ret; 2211 section->iov_len += ret; 2212 } 2213 if (section->iov_len == sec_len) 2214 *crc = crc32c(0, section->iov_base, section->iov_len); 2215 2216 return 1; 2217 } 2218 2219 static int read_partial_msg_data(struct ceph_connection *con) 2220 { 2221 struct ceph_msg *msg = con->in_msg; 2222 struct ceph_msg_data_cursor *cursor = &msg->cursor; 2223 const bool do_datacrc = !con->msgr->nocrc; 2224 struct page *page; 2225 size_t page_offset; 2226 size_t length; 2227 u32 crc = 0; 2228 int ret; 2229 2230 BUG_ON(!msg); 2231 if (list_empty(&msg->data)) 2232 return -EIO; 2233 2234 if (do_datacrc) 2235 crc = con->in_data_crc; 2236 while (cursor->resid) { 2237 page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, 2238 NULL); 2239 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); 2240 if (ret <= 0) { 2241 if (do_datacrc) 2242 con->in_data_crc = crc; 2243 2244 return ret; 2245 } 2246 2247 if (do_datacrc) 2248 crc = ceph_crc32c_page(crc, page, page_offset, ret); 2249 (void) ceph_msg_data_advance(&msg->cursor, (size_t)ret); 2250 } 2251 if (do_datacrc) 2252 con->in_data_crc = crc; 2253 2254 return 1; /* must return > 0 to indicate success */ 2255 } 2256 2257 /* 2258 * read (part of) a message. 
2259 */ 2260 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); 2261 2262 static int read_partial_message(struct ceph_connection *con) 2263 { 2264 struct ceph_msg *m = con->in_msg; 2265 int size; 2266 int end; 2267 int ret; 2268 unsigned int front_len, middle_len, data_len; 2269 bool do_datacrc = !con->msgr->nocrc; 2270 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH); 2271 u64 seq; 2272 u32 crc; 2273 2274 dout("read_partial_message con %p msg %p\n", con, m); 2275 2276 /* header */ 2277 size = sizeof (con->in_hdr); 2278 end = size; 2279 ret = read_partial(con, end, size, &con->in_hdr); 2280 if (ret <= 0) 2281 return ret; 2282 2283 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc)); 2284 if (cpu_to_le32(crc) != con->in_hdr.crc) { 2285 pr_err("read_partial_message bad hdr " 2286 " crc %u != expected %u\n", 2287 crc, con->in_hdr.crc); 2288 return -EBADMSG; 2289 } 2290 2291 front_len = le32_to_cpu(con->in_hdr.front_len); 2292 if (front_len > CEPH_MSG_MAX_FRONT_LEN) 2293 return -EIO; 2294 middle_len = le32_to_cpu(con->in_hdr.middle_len); 2295 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN) 2296 return -EIO; 2297 data_len = le32_to_cpu(con->in_hdr.data_len); 2298 if (data_len > CEPH_MSG_MAX_DATA_LEN) 2299 return -EIO; 2300 2301 /* verify seq# */ 2302 seq = le64_to_cpu(con->in_hdr.seq); 2303 if ((s64)seq - (s64)con->in_seq < 1) { 2304 pr_info("skipping %s%lld %s seq %lld expected %lld\n", 2305 ENTITY_NAME(con->peer_name), 2306 ceph_pr_addr(&con->peer_addr.in_addr), 2307 seq, con->in_seq + 1); 2308 con->in_base_pos = -front_len - middle_len - data_len - 2309 sizeof(m->footer); 2310 con->in_tag = CEPH_MSGR_TAG_READY; 2311 return 0; 2312 } else if ((s64)seq - (s64)con->in_seq > 1) { 2313 pr_err("read_partial_message bad seq %lld expected %lld\n", 2314 seq, con->in_seq + 1); 2315 con->error_msg = "bad message sequence # for incoming message"; 2316 return -EBADMSG; 2317 } 2318 2319 /* allocate message? 
*/ 2320 if (!con->in_msg) { 2321 int skip = 0; 2322 2323 dout("got hdr type %d front %d data %d\n", con->in_hdr.type, 2324 front_len, data_len); 2325 ret = ceph_con_in_msg_alloc(con, &skip); 2326 if (ret < 0) 2327 return ret; 2328 2329 BUG_ON(!con->in_msg ^ skip); 2330 if (con->in_msg && data_len > con->in_msg->data_length) { 2331 pr_warn("%s skipping long message (%u > %zd)\n", 2332 __func__, data_len, con->in_msg->data_length); 2333 ceph_msg_put(con->in_msg); 2334 con->in_msg = NULL; 2335 skip = 1; 2336 } 2337 if (skip) { 2338 /* skip this message */ 2339 dout("alloc_msg said skip message\n"); 2340 con->in_base_pos = -front_len - middle_len - data_len - 2341 sizeof(m->footer); 2342 con->in_tag = CEPH_MSGR_TAG_READY; 2343 con->in_seq++; 2344 return 0; 2345 } 2346 2347 BUG_ON(!con->in_msg); 2348 BUG_ON(con->in_msg->con != con); 2349 m = con->in_msg; 2350 m->front.iov_len = 0; /* haven't read it yet */ 2351 if (m->middle) 2352 m->middle->vec.iov_len = 0; 2353 2354 /* prepare for data payload, if any */ 2355 2356 if (data_len) 2357 prepare_message_data(con->in_msg, data_len); 2358 } 2359 2360 /* front */ 2361 ret = read_partial_message_section(con, &m->front, front_len, 2362 &con->in_front_crc); 2363 if (ret <= 0) 2364 return ret; 2365 2366 /* middle */ 2367 if (m->middle) { 2368 ret = read_partial_message_section(con, &m->middle->vec, 2369 middle_len, 2370 &con->in_middle_crc); 2371 if (ret <= 0) 2372 return ret; 2373 } 2374 2375 /* (page) data */ 2376 if (data_len) { 2377 ret = read_partial_msg_data(con); 2378 if (ret <= 0) 2379 return ret; 2380 } 2381 2382 /* footer */ 2383 if (need_sign) 2384 size = sizeof(m->footer); 2385 else 2386 size = sizeof(m->old_footer); 2387 2388 end += size; 2389 ret = read_partial(con, end, size, &m->footer); 2390 if (ret <= 0) 2391 return ret; 2392 2393 if (!need_sign) { 2394 m->footer.flags = m->old_footer.flags; 2395 m->footer.sig = 0; 2396 } 2397 2398 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n", 2399 m, front_len, m->footer.front_crc, middle_len, 2400 m->footer.middle_crc, data_len, m->footer.data_crc); 2401 2402 /* crc ok? */ 2403 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) { 2404 pr_err("read_partial_message %p front crc %u != exp. %u\n", 2405 m, con->in_front_crc, m->footer.front_crc); 2406 return -EBADMSG; 2407 } 2408 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) { 2409 pr_err("read_partial_message %p middle crc %u != exp %u\n", 2410 m, con->in_middle_crc, m->footer.middle_crc); 2411 return -EBADMSG; 2412 } 2413 if (do_datacrc && 2414 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 && 2415 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) { 2416 pr_err("read_partial_message %p data crc %u != exp. %u\n", m, 2417 con->in_data_crc, le32_to_cpu(m->footer.data_crc)); 2418 return -EBADMSG; 2419 } 2420 2421 if (need_sign && con->ops->check_message_signature && 2422 con->ops->check_message_signature(con, m)) { 2423 pr_err("read_partial_message %p signature check failed\n", m); 2424 return -EBADMSG; 2425 } 2426 2427 return 1; /* done! */ 2428 } 2429 2430 /* 2431 * Process message. This happens in the worker thread. The callback should 2432 * be careful not to do anything that waits on other incoming messages or it 2433 * may deadlock. 
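 * (con->mutex is dropped around the dispatch callback below, so a callback
 * that waited for a later message on this same connection would stall the
 * one worker that reads it)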
2434 */ 2435 static void process_message(struct ceph_connection *con) 2436 { 2437 struct ceph_msg *msg; 2438 2439 BUG_ON(con->in_msg->con != con); 2440 con->in_msg->con = NULL; 2441 msg = con->in_msg; 2442 con->in_msg = NULL; 2443 con->ops->put(con); 2444 2445 /* if first message, set peer_name */ 2446 if (con->peer_name.type == 0) 2447 con->peer_name = msg->hdr.src; 2448 2449 con->in_seq++; 2450 mutex_unlock(&con->mutex); 2451 2452 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n", 2453 msg, le64_to_cpu(msg->hdr.seq), 2454 ENTITY_NAME(msg->hdr.src), 2455 le16_to_cpu(msg->hdr.type), 2456 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2457 le32_to_cpu(msg->hdr.front_len), 2458 le32_to_cpu(msg->hdr.data_len), 2459 con->in_front_crc, con->in_middle_crc, con->in_data_crc); 2460 con->ops->dispatch(con, msg); 2461 2462 mutex_lock(&con->mutex); 2463 } 2464 2465 2466 /* 2467 * Write something to the socket. Called in a worker thread when the 2468 * socket appears to be writeable and we have something ready to send. 2469 */ 2470 static int try_write(struct ceph_connection *con) 2471 { 2472 int ret = 1; 2473 2474 dout("try_write start %p state %lu\n", con, con->state); 2475 2476 more: 2477 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); 2478 2479 /* open the socket first? */ 2480 if (con->state == CON_STATE_PREOPEN) { 2481 BUG_ON(con->sock); 2482 con->state = CON_STATE_CONNECTING; 2483 2484 con_out_kvec_reset(con); 2485 prepare_write_banner(con); 2486 prepare_read_banner(con); 2487 2488 BUG_ON(con->in_msg); 2489 con->in_tag = CEPH_MSGR_TAG_READY; 2490 dout("try_write initiating connect on %p new state %lu\n", 2491 con, con->state); 2492 ret = ceph_tcp_connect(con); 2493 if (ret < 0) { 2494 con->error_msg = "connect error"; 2495 goto out; 2496 } 2497 } 2498 2499 more_kvec: 2500 /* kvec data queued? */ 2501 if (con->out_skip) { 2502 ret = write_partial_skip(con); 2503 if (ret <= 0) 2504 goto out; 2505 } 2506 if (con->out_kvec_left) { 2507 ret = write_partial_kvec(con); 2508 if (ret <= 0) 2509 goto out; 2510 } 2511 2512 /* msg pages? */ 2513 if (con->out_msg) { 2514 if (con->out_msg_done) { 2515 ceph_msg_put(con->out_msg); 2516 con->out_msg = NULL; /* we're done with this one */ 2517 goto do_next; 2518 } 2519 2520 ret = write_partial_message_data(con); 2521 if (ret == 1) 2522 goto more_kvec; /* we need to send the footer, too! */ 2523 if (ret == 0) 2524 goto out; 2525 if (ret < 0) { 2526 dout("try_write write_partial_message_data err %d\n", 2527 ret); 2528 goto out; 2529 } 2530 } 2531 2532 do_next: 2533 if (con->state == CON_STATE_OPEN) { 2534 /* is anything else pending? */ 2535 if (!list_empty(&con->out_queue)) { 2536 prepare_write_message(con); 2537 goto more; 2538 } 2539 if (con->in_seq > con->in_seq_acked) { 2540 prepare_write_ack(con); 2541 goto more; 2542 } 2543 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) { 2544 prepare_write_keepalive(con); 2545 goto more; 2546 } 2547 } 2548 2549 /* Nothing to do! */ 2550 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 2551 dout("try_write nothing else to write.\n"); 2552 ret = 0; 2553 out: 2554 dout("try_write done on %p ret %d\n", con, ret); 2555 return ret; 2556 } 2557 2558 2559 2560 /* 2561 * Read what we can from the socket. 
2562 */ 2563 static int try_read(struct ceph_connection *con) 2564 { 2565 int ret = -1; 2566 2567 more: 2568 dout("try_read start on %p state %lu\n", con, con->state); 2569 if (con->state != CON_STATE_CONNECTING && 2570 con->state != CON_STATE_NEGOTIATING && 2571 con->state != CON_STATE_OPEN) 2572 return 0; 2573 2574 BUG_ON(!con->sock); 2575 2576 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, 2577 con->in_base_pos); 2578 2579 if (con->state == CON_STATE_CONNECTING) { 2580 dout("try_read connecting\n"); 2581 ret = read_partial_banner(con); 2582 if (ret <= 0) 2583 goto out; 2584 ret = process_banner(con); 2585 if (ret < 0) 2586 goto out; 2587 2588 con->state = CON_STATE_NEGOTIATING; 2589 2590 /* 2591 * Received banner is good, exchange connection info. 2592 * Do not reset out_kvec, as sending our banner raced 2593 * with receiving peer banner after connect completed. 2594 */ 2595 ret = prepare_write_connect(con); 2596 if (ret < 0) 2597 goto out; 2598 prepare_read_connect(con); 2599 2600 /* Send connection info before awaiting response */ 2601 goto out; 2602 } 2603 2604 if (con->state == CON_STATE_NEGOTIATING) { 2605 dout("try_read negotiating\n"); 2606 ret = read_partial_connect(con); 2607 if (ret <= 0) 2608 goto out; 2609 ret = process_connect(con); 2610 if (ret < 0) 2611 goto out; 2612 goto more; 2613 } 2614 2615 WARN_ON(con->state != CON_STATE_OPEN); 2616 2617 if (con->in_base_pos < 0) { 2618 /* 2619 * skipping + discarding content. 2620 * 2621 * FIXME: there must be a better way to do this! 2622 */ 2623 static char buf[SKIP_BUF_SIZE]; 2624 int skip = min((int) sizeof (buf), -con->in_base_pos); 2625 2626 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); 2627 ret = ceph_tcp_recvmsg(con->sock, buf, skip); 2628 if (ret <= 0) 2629 goto out; 2630 con->in_base_pos += ret; 2631 if (con->in_base_pos) 2632 goto more; 2633 } 2634 if (con->in_tag == CEPH_MSGR_TAG_READY) { 2635 /* 2636 * what's next? 2637 */ 2638 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); 2639 if (ret <= 0) 2640 goto out; 2641 dout("try_read got tag %d\n", (int)con->in_tag); 2642 switch (con->in_tag) { 2643 case CEPH_MSGR_TAG_MSG: 2644 prepare_read_message(con); 2645 break; 2646 case CEPH_MSGR_TAG_ACK: 2647 prepare_read_ack(con); 2648 break; 2649 case CEPH_MSGR_TAG_CLOSE: 2650 con_close_socket(con); 2651 con->state = CON_STATE_CLOSED; 2652 goto out; 2653 default: 2654 goto bad_tag; 2655 } 2656 } 2657 if (con->in_tag == CEPH_MSGR_TAG_MSG) { 2658 ret = read_partial_message(con); 2659 if (ret <= 0) { 2660 switch (ret) { 2661 case -EBADMSG: 2662 con->error_msg = "bad crc"; 2663 ret = -EIO; 2664 break; 2665 case -EIO: 2666 con->error_msg = "io error"; 2667 break; 2668 } 2669 goto out; 2670 } 2671 if (con->in_tag == CEPH_MSGR_TAG_READY) 2672 goto more; 2673 process_message(con); 2674 if (con->state == CON_STATE_OPEN) 2675 prepare_read_tag(con); 2676 goto more; 2677 } 2678 if (con->in_tag == CEPH_MSGR_TAG_ACK || 2679 con->in_tag == CEPH_MSGR_TAG_SEQ) { 2680 /* 2681 * the final handshake seq exchange is semantically 2682 * equivalent to an ACK 2683 */ 2684 ret = read_partial_ack(con); 2685 if (ret <= 0) 2686 goto out; 2687 process_ack(con); 2688 goto more; 2689 } 2690 2691 out: 2692 dout("try_read done on %p ret %d\n", con, ret); 2693 return ret; 2694 2695 bad_tag: 2696 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag); 2697 con->error_msg = "protocol error, garbage tag"; 2698 ret = -1; 2699 goto out; 2700 } 2701 2702 2703 /* 2704 * Atomically queue work on a connection after the specified delay. 
2705 * Bump @con reference to avoid races with connection teardown. 2706 * Returns 0 if work was queued, or an error code otherwise. 2707 */ 2708 static int queue_con_delay(struct ceph_connection *con, unsigned long delay) 2709 { 2710 if (!con->ops->get(con)) { 2711 dout("%s %p ref count 0\n", __func__, con); 2712 return -ENOENT; 2713 } 2714 2715 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) { 2716 dout("%s %p - already queued\n", __func__, con); 2717 con->ops->put(con); 2718 return -EBUSY; 2719 } 2720 2721 dout("%s %p %lu\n", __func__, con, delay); 2722 return 0; 2723 } 2724 2725 static void queue_con(struct ceph_connection *con) 2726 { 2727 (void) queue_con_delay(con, 0); 2728 } 2729 2730 static void cancel_con(struct ceph_connection *con) 2731 { 2732 if (cancel_delayed_work(&con->work)) { 2733 dout("%s %p\n", __func__, con); 2734 con->ops->put(con); 2735 } 2736 } 2737 2738 static bool con_sock_closed(struct ceph_connection *con) 2739 { 2740 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED)) 2741 return false; 2742 2743 #define CASE(x) \ 2744 case CON_STATE_ ## x: \ 2745 con->error_msg = "socket closed (con state " #x ")"; \ 2746 break; 2747 2748 switch (con->state) { 2749 CASE(CLOSED); 2750 CASE(PREOPEN); 2751 CASE(CONNECTING); 2752 CASE(NEGOTIATING); 2753 CASE(OPEN); 2754 CASE(STANDBY); 2755 default: 2756 pr_warn("%s con %p unrecognized state %lu\n", 2757 __func__, con, con->state); 2758 con->error_msg = "unrecognized con state"; 2759 BUG(); 2760 break; 2761 } 2762 #undef CASE 2763 2764 return true; 2765 } 2766 2767 static bool con_backoff(struct ceph_connection *con) 2768 { 2769 int ret; 2770 2771 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF)) 2772 return false; 2773 2774 ret = queue_con_delay(con, round_jiffies_relative(con->delay)); 2775 if (ret) { 2776 dout("%s: con %p FAILED to back off %lu\n", __func__, 2777 con, con->delay); 2778 BUG_ON(ret == -ENOENT); 2779 con_flag_set(con, CON_FLAG_BACKOFF); 2780 } 2781 2782 return true; 2783 } 2784 2785 /* Finish fault handling; con->mutex must *not* be held here */ 2786 2787 static void con_fault_finish(struct ceph_connection *con) 2788 { 2789 /* 2790 * in case we faulted due to authentication, invalidate our 2791 * current tickets so that we can get new ones. 2792 */ 2793 if (con->auth_retry && con->ops->invalidate_authorizer) { 2794 dout("calling invalidate_authorizer()\n"); 2795 con->ops->invalidate_authorizer(con); 2796 } 2797 2798 if (con->ops->fault) 2799 con->ops->fault(con); 2800 } 2801 2802 /* 2803 * Do some work on a connection. Drop a connection ref when we're done. 
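 * (the reference dropped at the end of con_work() is the one taken by
 * queue_con_delay() when this work item was queued)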
2804 */ 2805 static void con_work(struct work_struct *work) 2806 { 2807 struct ceph_connection *con = container_of(work, struct ceph_connection, 2808 work.work); 2809 bool fault; 2810 2811 mutex_lock(&con->mutex); 2812 while (true) { 2813 int ret; 2814 2815 if ((fault = con_sock_closed(con))) { 2816 dout("%s: con %p SOCK_CLOSED\n", __func__, con); 2817 break; 2818 } 2819 if (con_backoff(con)) { 2820 dout("%s: con %p BACKOFF\n", __func__, con); 2821 break; 2822 } 2823 if (con->state == CON_STATE_STANDBY) { 2824 dout("%s: con %p STANDBY\n", __func__, con); 2825 break; 2826 } 2827 if (con->state == CON_STATE_CLOSED) { 2828 dout("%s: con %p CLOSED\n", __func__, con); 2829 BUG_ON(con->sock); 2830 break; 2831 } 2832 if (con->state == CON_STATE_PREOPEN) { 2833 dout("%s: con %p PREOPEN\n", __func__, con); 2834 BUG_ON(con->sock); 2835 } 2836 2837 ret = try_read(con); 2838 if (ret < 0) { 2839 if (ret == -EAGAIN) 2840 continue; 2841 con->error_msg = "socket error on read"; 2842 fault = true; 2843 break; 2844 } 2845 2846 ret = try_write(con); 2847 if (ret < 0) { 2848 if (ret == -EAGAIN) 2849 continue; 2850 con->error_msg = "socket error on write"; 2851 fault = true; 2852 } 2853 2854 break; /* If we make it to here, we're done */ 2855 } 2856 if (fault) 2857 con_fault(con); 2858 mutex_unlock(&con->mutex); 2859 2860 if (fault) 2861 con_fault_finish(con); 2862 2863 con->ops->put(con); 2864 } 2865 2866 /* 2867 * Generic error/fault handler. A retry mechanism is used with 2868 * exponential backoff 2869 */ 2870 static void con_fault(struct ceph_connection *con) 2871 { 2872 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), 2873 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); 2874 dout("fault %p state %lu to peer %s\n", 2875 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr)); 2876 2877 WARN_ON(con->state != CON_STATE_CONNECTING && 2878 con->state != CON_STATE_NEGOTIATING && 2879 con->state != CON_STATE_OPEN); 2880 2881 con_close_socket(con); 2882 2883 if (con_flag_test(con, CON_FLAG_LOSSYTX)) { 2884 dout("fault on LOSSYTX channel, marking CLOSED\n"); 2885 con->state = CON_STATE_CLOSED; 2886 return; 2887 } 2888 2889 if (con->in_msg) { 2890 BUG_ON(con->in_msg->con != con); 2891 con->in_msg->con = NULL; 2892 ceph_msg_put(con->in_msg); 2893 con->in_msg = NULL; 2894 con->ops->put(con); 2895 } 2896 2897 /* Requeue anything that hasn't been acked */ 2898 list_splice_init(&con->out_sent, &con->out_queue); 2899 2900 /* If there are no messages queued or keepalive pending, place 2901 * the connection in a STANDBY state */ 2902 if (list_empty(&con->out_queue) && 2903 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) { 2904 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); 2905 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 2906 con->state = CON_STATE_STANDBY; 2907 } else { 2908 /* retry after a delay. 
*/ 2909 con->state = CON_STATE_PREOPEN; 2910 if (con->delay == 0) 2911 con->delay = BASE_DELAY_INTERVAL; 2912 else if (con->delay < MAX_DELAY_INTERVAL) 2913 con->delay *= 2; 2914 con_flag_set(con, CON_FLAG_BACKOFF); 2915 queue_con(con); 2916 } 2917 } 2918 2919 2920 2921 /* 2922 * initialize a new messenger instance 2923 */ 2924 void ceph_messenger_init(struct ceph_messenger *msgr, 2925 struct ceph_entity_addr *myaddr, 2926 u64 supported_features, 2927 u64 required_features, 2928 bool nocrc, 2929 bool tcp_nodelay) 2930 { 2931 msgr->supported_features = supported_features; 2932 msgr->required_features = required_features; 2933 2934 spin_lock_init(&msgr->global_seq_lock); 2935 2936 if (myaddr) 2937 msgr->inst.addr = *myaddr; 2938 2939 /* select a random nonce */ 2940 msgr->inst.addr.type = 0; 2941 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); 2942 encode_my_addr(msgr); 2943 msgr->nocrc = nocrc; 2944 msgr->tcp_nodelay = tcp_nodelay; 2945 2946 atomic_set(&msgr->stopping, 0); 2947 2948 dout("%s %p\n", __func__, msgr); 2949 } 2950 EXPORT_SYMBOL(ceph_messenger_init); 2951 2952 static void clear_standby(struct ceph_connection *con) 2953 { 2954 /* come back from STANDBY? */ 2955 if (con->state == CON_STATE_STANDBY) { 2956 dout("clear_standby %p and ++connect_seq\n", con); 2957 con->state = CON_STATE_PREOPEN; 2958 con->connect_seq++; 2959 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING)); 2960 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)); 2961 } 2962 } 2963 2964 /* 2965 * Queue up an outgoing message on the given connection. 2966 */ 2967 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) 2968 { 2969 /* set src+dst */ 2970 msg->hdr.src = con->msgr->inst.name; 2971 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); 2972 msg->needs_out_seq = true; 2973 2974 mutex_lock(&con->mutex); 2975 2976 if (con->state == CON_STATE_CLOSED) { 2977 dout("con_send %p closed, dropping %p\n", con, msg); 2978 ceph_msg_put(msg); 2979 mutex_unlock(&con->mutex); 2980 return; 2981 } 2982 2983 BUG_ON(msg->con != NULL); 2984 msg->con = con->ops->get(con); 2985 BUG_ON(msg->con == NULL); 2986 2987 BUG_ON(!list_empty(&msg->list_head)); 2988 list_add_tail(&msg->list_head, &con->out_queue); 2989 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, 2990 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type), 2991 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2992 le32_to_cpu(msg->hdr.front_len), 2993 le32_to_cpu(msg->hdr.middle_len), 2994 le32_to_cpu(msg->hdr.data_len)); 2995 2996 clear_standby(con); 2997 mutex_unlock(&con->mutex); 2998 2999 /* if there wasn't anything waiting to send before, queue 3000 * new work */ 3001 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3002 queue_con(con); 3003 } 3004 EXPORT_SYMBOL(ceph_con_send); 3005 3006 /* 3007 * Revoke a message that was previously queued for send 3008 */ 3009 void ceph_msg_revoke(struct ceph_msg *msg) 3010 { 3011 struct ceph_connection *con = msg->con; 3012 3013 if (!con) 3014 return; /* Message not in our possession */ 3015 3016 mutex_lock(&con->mutex); 3017 if (!list_empty(&msg->list_head)) { 3018 dout("%s %p msg %p - was on queue\n", __func__, con, msg); 3019 list_del_init(&msg->list_head); 3020 BUG_ON(msg->con == NULL); 3021 msg->con->ops->put(msg->con); 3022 msg->con = NULL; 3023 msg->hdr.seq = 0; 3024 3025 ceph_msg_put(msg); 3026 } 3027 if (con->out_msg == msg) { 3028 dout("%s %p msg %p - was sending\n", __func__, con, msg); 3029 con->out_msg = NULL; 3030 if (con->out_kvec_is_msg) 
{ 3031 con->out_skip = con->out_kvec_bytes; 3032 con->out_kvec_is_msg = false; 3033 } 3034 msg->hdr.seq = 0; 3035 3036 ceph_msg_put(msg); 3037 } 3038 mutex_unlock(&con->mutex); 3039 } 3040 3041 /* 3042 * Revoke a message that we may be reading data into 3043 */ 3044 void ceph_msg_revoke_incoming(struct ceph_msg *msg) 3045 { 3046 struct ceph_connection *con; 3047 3048 BUG_ON(msg == NULL); 3049 if (!msg->con) { 3050 dout("%s msg %p null con\n", __func__, msg); 3051 3052 return; /* Message not in our possession */ 3053 } 3054 3055 con = msg->con; 3056 mutex_lock(&con->mutex); 3057 if (con->in_msg == msg) { 3058 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len); 3059 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len); 3060 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len); 3061 3062 /* skip rest of message */ 3063 dout("%s %p msg %p revoked\n", __func__, con, msg); 3064 con->in_base_pos = con->in_base_pos - 3065 sizeof(struct ceph_msg_header) - 3066 front_len - 3067 middle_len - 3068 data_len - 3069 sizeof(struct ceph_msg_footer); 3070 ceph_msg_put(con->in_msg); 3071 con->in_msg = NULL; 3072 con->in_tag = CEPH_MSGR_TAG_READY; 3073 con->in_seq++; 3074 } else { 3075 dout("%s %p in_msg %p msg %p no-op\n", 3076 __func__, con, con->in_msg, msg); 3077 } 3078 mutex_unlock(&con->mutex); 3079 } 3080 3081 /* 3082 * Queue a keepalive byte to ensure the tcp connection is alive. 3083 */ 3084 void ceph_con_keepalive(struct ceph_connection *con) 3085 { 3086 dout("con_keepalive %p\n", con); 3087 mutex_lock(&con->mutex); 3088 clear_standby(con); 3089 mutex_unlock(&con->mutex); 3090 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 && 3091 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3092 queue_con(con); 3093 } 3094 EXPORT_SYMBOL(ceph_con_keepalive); 3095 3096 static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type) 3097 { 3098 struct ceph_msg_data *data; 3099 3100 if (WARN_ON(!ceph_msg_data_type_valid(type))) 3101 return NULL; 3102 3103 data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS); 3104 if (data) 3105 data->type = type; 3106 INIT_LIST_HEAD(&data->links); 3107 3108 return data; 3109 } 3110 3111 static void ceph_msg_data_destroy(struct ceph_msg_data *data) 3112 { 3113 if (!data) 3114 return; 3115 3116 WARN_ON(!list_empty(&data->links)); 3117 if (data->type == CEPH_MSG_DATA_PAGELIST) 3118 ceph_pagelist_release(data->pagelist); 3119 kmem_cache_free(ceph_msg_data_cache, data); 3120 } 3121 3122 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, 3123 size_t length, size_t alignment) 3124 { 3125 struct ceph_msg_data *data; 3126 3127 BUG_ON(!pages); 3128 BUG_ON(!length); 3129 3130 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); 3131 BUG_ON(!data); 3132 data->pages = pages; 3133 data->length = length; 3134 data->alignment = alignment & ~PAGE_MASK; 3135 3136 list_add_tail(&data->links, &msg->data); 3137 msg->data_length += length; 3138 } 3139 EXPORT_SYMBOL(ceph_msg_data_add_pages); 3140 3141 void ceph_msg_data_add_pagelist(struct ceph_msg *msg, 3142 struct ceph_pagelist *pagelist) 3143 { 3144 struct ceph_msg_data *data; 3145 3146 BUG_ON(!pagelist); 3147 BUG_ON(!pagelist->length); 3148 3149 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); 3150 BUG_ON(!data); 3151 data->pagelist = pagelist; 3152 3153 list_add_tail(&data->links, &msg->data); 3154 msg->data_length += pagelist->length; 3155 } 3156 EXPORT_SYMBOL(ceph_msg_data_add_pagelist); 3157 3158 #ifdef CONFIG_BLOCK 3159 void ceph_msg_data_add_bio(struct 
ceph_msg *msg, struct bio *bio, 3160 size_t length) 3161 { 3162 struct ceph_msg_data *data; 3163 3164 BUG_ON(!bio); 3165 3166 data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); 3167 BUG_ON(!data); 3168 data->bio = bio; 3169 data->bio_length = length; 3170 3171 list_add_tail(&data->links, &msg->data); 3172 msg->data_length += length; 3173 } 3174 EXPORT_SYMBOL(ceph_msg_data_add_bio); 3175 #endif /* CONFIG_BLOCK */ 3176 3177 /* 3178 * construct a new message with given type, size 3179 * the new msg has a ref count of 1. 3180 */ 3181 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, 3182 bool can_fail) 3183 { 3184 struct ceph_msg *m; 3185 3186 m = kmem_cache_zalloc(ceph_msg_cache, flags); 3187 if (m == NULL) 3188 goto out; 3189 3190 m->hdr.type = cpu_to_le16(type); 3191 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); 3192 m->hdr.front_len = cpu_to_le32(front_len); 3193 3194 INIT_LIST_HEAD(&m->list_head); 3195 kref_init(&m->kref); 3196 INIT_LIST_HEAD(&m->data); 3197 3198 /* front */ 3199 if (front_len) { 3200 m->front.iov_base = ceph_kvmalloc(front_len, flags); 3201 if (m->front.iov_base == NULL) { 3202 dout("ceph_msg_new can't allocate %d bytes\n", 3203 front_len); 3204 goto out2; 3205 } 3206 } else { 3207 m->front.iov_base = NULL; 3208 } 3209 m->front_alloc_len = m->front.iov_len = front_len; 3210 3211 dout("ceph_msg_new %p front %d\n", m, front_len); 3212 return m; 3213 3214 out2: 3215 ceph_msg_put(m); 3216 out: 3217 if (!can_fail) { 3218 pr_err("msg_new can't create type %d front %d\n", type, 3219 front_len); 3220 WARN_ON(1); 3221 } else { 3222 dout("msg_new can't create type %d front %d\n", type, 3223 front_len); 3224 } 3225 return NULL; 3226 } 3227 EXPORT_SYMBOL(ceph_msg_new); 3228 3229 /* 3230 * Allocate "middle" portion of a message, if it is needed and wasn't 3231 * allocated by alloc_msg. This allows us to read a small fixed-size 3232 * per-type header in the front and then gracefully fail (i.e., 3233 * propagate the error to the caller based on info in the front) when 3234 * the middle is too large. 3235 */ 3236 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) 3237 { 3238 int type = le16_to_cpu(msg->hdr.type); 3239 int middle_len = le32_to_cpu(msg->hdr.middle_len); 3240 3241 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type, 3242 ceph_msg_type_name(type), middle_len); 3243 BUG_ON(!middle_len); 3244 BUG_ON(msg->middle); 3245 3246 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS); 3247 if (!msg->middle) 3248 return -ENOMEM; 3249 return 0; 3250 } 3251 3252 /* 3253 * Allocate a message for receiving an incoming message on a 3254 * connection, and save the result in con->in_msg. Uses the 3255 * connection's private alloc_msg op if available. 3256 * 3257 * Returns 0 on success, or a negative error code. 3258 * 3259 * On success, if we set *skip = 1: 3260 * - the next message should be skipped and ignored. 3261 * - con->in_msg == NULL 3262 * or if we set *skip = 0: 3263 * - con->in_msg is non-null. 
3264 * On error (ENOMEM, EAGAIN, ...), 3265 * - con->in_msg == NULL 3266 */ 3267 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip) 3268 { 3269 struct ceph_msg_header *hdr = &con->in_hdr; 3270 int middle_len = le32_to_cpu(hdr->middle_len); 3271 struct ceph_msg *msg; 3272 int ret = 0; 3273 3274 BUG_ON(con->in_msg != NULL); 3275 BUG_ON(!con->ops->alloc_msg); 3276 3277 mutex_unlock(&con->mutex); 3278 msg = con->ops->alloc_msg(con, hdr, skip); 3279 mutex_lock(&con->mutex); 3280 if (con->state != CON_STATE_OPEN) { 3281 if (msg) 3282 ceph_msg_put(msg); 3283 return -EAGAIN; 3284 } 3285 if (msg) { 3286 BUG_ON(*skip); 3287 con->in_msg = msg; 3288 con->in_msg->con = con->ops->get(con); 3289 BUG_ON(con->in_msg->con == NULL); 3290 } else { 3291 /* 3292 * Null message pointer means either we should skip 3293 * this message or we couldn't allocate memory. The 3294 * former is not an error. 3295 */ 3296 if (*skip) 3297 return 0; 3298 con->error_msg = "error allocating memory for incoming message"; 3299 3300 return -ENOMEM; 3301 } 3302 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); 3303 3304 if (middle_len && !con->in_msg->middle) { 3305 ret = ceph_alloc_middle(con, con->in_msg); 3306 if (ret < 0) { 3307 ceph_msg_put(con->in_msg); 3308 con->in_msg = NULL; 3309 } 3310 } 3311 3312 return ret; 3313 } 3314 3315 3316 /* 3317 * Free a generically kmalloc'd message. 3318 */ 3319 static void ceph_msg_free(struct ceph_msg *m) 3320 { 3321 dout("%s %p\n", __func__, m); 3322 kvfree(m->front.iov_base); 3323 kmem_cache_free(ceph_msg_cache, m); 3324 } 3325 3326 static void ceph_msg_release(struct kref *kref) 3327 { 3328 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref); 3329 LIST_HEAD(data); 3330 struct list_head *links; 3331 struct list_head *next; 3332 3333 dout("%s %p\n", __func__, m); 3334 WARN_ON(!list_empty(&m->list_head)); 3335 3336 /* drop middle, data, if any */ 3337 if (m->middle) { 3338 ceph_buffer_put(m->middle); 3339 m->middle = NULL; 3340 } 3341 3342 list_splice_init(&m->data, &data); 3343 list_for_each_safe(links, next, &data) { 3344 struct ceph_msg_data *data; 3345 3346 data = list_entry(links, struct ceph_msg_data, links); 3347 list_del_init(links); 3348 ceph_msg_data_destroy(data); 3349 } 3350 m->data_length = 0; 3351 3352 if (m->pool) 3353 ceph_msgpool_put(m->pool, m); 3354 else 3355 ceph_msg_free(m); 3356 } 3357 3358 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) 3359 { 3360 dout("%s %p (was %d)\n", __func__, msg, 3361 atomic_read(&msg->kref.refcount)); 3362 kref_get(&msg->kref); 3363 return msg; 3364 } 3365 EXPORT_SYMBOL(ceph_msg_get); 3366 3367 void ceph_msg_put(struct ceph_msg *msg) 3368 { 3369 dout("%s %p (was %d)\n", __func__, msg, 3370 atomic_read(&msg->kref.refcount)); 3371 kref_put(&msg->kref, ceph_msg_release); 3372 } 3373 EXPORT_SYMBOL(ceph_msg_put); 3374 3375 void ceph_msg_dump(struct ceph_msg *msg) 3376 { 3377 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg, 3378 msg->front_alloc_len, msg->data_length); 3379 print_hex_dump(KERN_DEBUG, "header: ", 3380 DUMP_PREFIX_OFFSET, 16, 1, 3381 &msg->hdr, sizeof(msg->hdr), true); 3382 print_hex_dump(KERN_DEBUG, " front: ", 3383 DUMP_PREFIX_OFFSET, 16, 1, 3384 msg->front.iov_base, msg->front.iov_len, true); 3385 if (msg->middle) 3386 print_hex_dump(KERN_DEBUG, "middle: ", 3387 DUMP_PREFIX_OFFSET, 16, 1, 3388 msg->middle->vec.iov_base, 3389 msg->middle->vec.iov_len, true); 3390 print_hex_dump(KERN_DEBUG, "footer: ", 3391 DUMP_PREFIX_OFFSET, 16, 1, 3392 &msg->footer, 
sizeof(msg->footer), true); 3393 } 3394 EXPORT_SYMBOL(ceph_msg_dump); 3395
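
/*
 * Illustrative sketch: one way a caller might build a message, attach a
 * page payload and queue it on an open connection using the helpers above.
 * The CEPH_MSG_OSD_OP type, the 128-byte front section and the
 * send_pages_example() name are illustrative assumptions, not part of the
 * messenger API.
 */
static int __maybe_unused send_pages_example(struct ceph_connection *con,
					     struct page **pages,
					     size_t length)
{
	struct ceph_msg *msg;

	/* allocate a message with a small front section; allow failure */
	msg = ceph_msg_new(CEPH_MSG_OSD_OP, 128, GFP_NOFS, true);
	if (!msg)
		return -ENOMEM;

	/* attach the page payload; only the in-page offset of the
	 * alignment argument is kept */
	ceph_msg_data_add_pages(msg, pages, length, 0);

	/* queue it; the messenger writes it out when the socket is writable */
	ceph_con_send(con, msg);

	return 0;
}

/*
 * A caller that may need to cancel the message later would typically take
 * an extra reference with ceph_msg_get() before ceph_con_send() and call
 * ceph_msg_revoke() on it if the request is aborted.
 */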