#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif	/* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

#define list_entry_next(pos, member)					\
	list_entry(pos->member.next, typeof(*pos), member)

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------                \    \
 *       |  | CLOSING |  socket event;  \    \
 *       |  -----------  await close     \    \
 *       |       ^                        \   |
 *       |       |                         \  |
 *       |       + con_sock_state_closing() \ |
 *       |      / \                         | |
 *       |     /   ---------------          | |
 *       |    /                   \         v v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       |  |   /                 --------------  connect initiated
 *       |  |   | con_sock_state_connected()
 *       |  |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
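 *
 * For example, a clean lifecycle walks NEW -> CLOSED -> CONNECTING ->
 * CONNECTED -> CLOSING -> CLOSED, while a connect() that fails outright
 * goes straight from CONNECTING back to CLOSED.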
 */

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

/*
 * connection states
 */
#define CON_STATE_CLOSED	1	/* -> PREOPEN */
#define CON_STATE_PREOPEN	2	/* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3	/* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4	/* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5	/* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6	/* -> PREOPEN, CLOSED */

/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX	   0	/* we can close channel or drop
					 * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING 1	/* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING	   2	/* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED	   3	/* socket state changed to closed */
#define CON_FLAG_BACKOFF	   4	/* need to retry queuing delayed work */

static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CON_FLAG_LOSSYTX:
	case CON_FLAG_KEEPALIVE_PENDING:
	case CON_FLAG_WRITE_PENDING:
	case CON_FLAG_SOCK_CLOSED:
	case CON_FLAG_BACKOFF:
		return true;
	default:
		return false;
	}
}

static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
					unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
					unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_set_bit(con_flag, &con->flags);
}

/* Slab caches for frequently-allocated structures */

static struct kmem_cache *ceph_msg_cache;
static struct kmem_cache *ceph_msg_data_cache;

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024

static void queue_con(struct ceph_connection *con);
static void cancel_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void con_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
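 *
 * Callers receive one of ADDR_STR_COUNT rotating buffers, so a handful
 * of concurrent users normally see stable output.  A hypothetical use:
 *
 *	pr_info("peer is %s\n", ceph_pr_addr(&con->peer_addr.in_addr));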
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;		/* used in certain error cases */

const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss->ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);

static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

static int ceph_msgr_slab_init(void)
{
	BUG_ON(ceph_msg_cache);
	ceph_msg_cache = kmem_cache_create("ceph_msg",
					sizeof (struct ceph_msg),
					__alignof__(struct ceph_msg), 0, NULL);

	if (!ceph_msg_cache)
		return -ENOMEM;

	BUG_ON(ceph_msg_data_cache);
	ceph_msg_data_cache = kmem_cache_create("ceph_msg_data",
					sizeof (struct ceph_msg_data),
					__alignof__(struct ceph_msg_data),
					0, NULL);
	if (ceph_msg_data_cache)
		return 0;

	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;

	return -ENOMEM;
}

static void ceph_msgr_slab_exit(void)
{
	BUG_ON(!ceph_msg_data_cache);
	kmem_cache_destroy(ceph_msg_data_cache);
	ceph_msg_data_cache = NULL;

	BUG_ON(!ceph_msg_cache);
	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;
}

static void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	ceph_msgr_slab_exit();

	BUG_ON(zero_page == NULL);
	/* no kunmap() here: zero_page is never kmap()ed in ceph_msgr_init() */
	page_cache_release(zero_page);
	zero_page = NULL;
}

int ceph_msgr_init(void)
{
	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	page_cache_get(zero_page);

	if (ceph_msgr_slab_init())
		return -ENOMEM;

	/*
	 * The number of active work items is limited by the number of
	 * connections, so leave @max_active at default.
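	 *
	 * WQ_MEM_RECLAIM still matters, though: this workqueue can sit
	 * under block I/O (rbd), so it needs a rescuer to guarantee
	 * forward progress under memory pressure.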
	 */
	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	if (atomic_read(&con->msgr->stopping)) {
		return;
	}

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s on %p state = %lu, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.
	 * clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
	if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
		if (sk_stream_is_writeable(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
		/* fall through */
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		con_sock_state_closing(con);
		con_flag_set(con, CON_FLAG_SOCK_CLOSED);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}


/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

	con_sock_state_connecting(con);
	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->error_msg = "connect error";

		return ret;
	}

	if (con->msgr->tcp_nodelay) {
		int optval = 1;

		ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
					(char *)&optval, sizeof(optval));
		if (ret)
			pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d\n",
			       ret);
	}

	sk_set_memalloc(sock->sk);

	con->sock = sock;
	return 0;
}

static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
			     int page_offset, size_t length)
{
	void *kaddr;
	int ret;

	BUG_ON(page_offset + length > PAGE_SIZE);

	kaddr = kmap(page);
	BUG_ON(!kaddr);
	ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length);
	kunmap(page);

	return ret;
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
			       int offset, size_t size, bool more)
{
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
	int ret;

	ret = kernel_sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, bool more)
{
	int ret;
	struct kvec iov;

	/* sendpage cannot properly handle pages with page_count == 0,
	 * we need to fall back to sendmsg if that's the case */
	if (page_count(page) >= 1)
		return __ceph_tcp_sendpage(sock, page, offset, size, more);

	iov.iov_base = kmap(page) + offset;
	iov.iov_len = size;
	ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
	kunmap(page);

	return ret;
}

/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc = 0;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (con->sock) {
		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
		sock_release(con->sock);
		con->sock = NULL;
	}

	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
	con_flag_clear(con, CON_FLAG_SOCK_CLOSED);

	con_sock_state_closed(con);
	return rc;
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
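 *
 * This runs both for an explicit ceph_con_close() and when the peer
 * answers RESETSESSION during negotiation (see process_connect()).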
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	BUG_ON(msg->con == NULL);
	msg->con->ops->put(msg->con);
	msg->con = NULL;

	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	dout("reset_connection %p\n", con);
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	con->state = CON_STATE_CLOSED;

	con_flag_clear(con, CON_FLAG_LOSSYTX);	/* so we retry next connect */
	con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
	con_flag_clear(con, CON_FLAG_WRITE_PENDING);
	con_flag_clear(con, CON_FLAG_BACKOFF);

	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_con(con);
	con_close_socket(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));

	WARN_ON(con->state != CON_STATE_CLOSED);
	con->state = CON_STATE_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
	const struct ceph_connection_operations *ops,
	struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);

	con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);


/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
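 *
 * E.g. if our global_seq is 5 and the peer rejects us with
 * RETRY_GLOBAL gseq 9, get_global_seq(msgr, 9) returns 10 and the
 * connect is retried with that.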
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}

static void con_out_kvec_reset(struct ceph_connection *con)
{
	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}

static void con_out_kvec_add(struct ceph_connection *con,
				size_t size, void *data)
{
	int index;

	index = con->out_kvec_left;
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}

#ifdef CONFIG_BLOCK

/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
					size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio *bio;

	BUG_ON(data->type != CEPH_MSG_DATA_BIO);

	bio = data->bio;
	BUG_ON(!bio);

	cursor->resid = min(length, data->bio_length);
	cursor->bio = bio;
	cursor->bvec_iter = bio->bi_iter;
	cursor->last_piece =
		cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
}

static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
						size_t *page_offset,
						size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio *bio;
	struct bio_vec bio_vec;

	BUG_ON(data->type != CEPH_MSG_DATA_BIO);

	bio = cursor->bio;
	BUG_ON(!bio);

	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);

	*page_offset = (size_t) bio_vec.bv_offset;
	BUG_ON(*page_offset >= PAGE_SIZE);
	if (cursor->last_piece) /* pagelist offset is always 0 */
		*length = cursor->resid;
	else
		*length = (size_t) bio_vec.bv_len;
	BUG_ON(*length > cursor->resid);
	BUG_ON(*page_offset + *length > PAGE_SIZE);

	return bio_vec.bv_page;
}

static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	struct bio *bio;
	struct bio_vec bio_vec;

	BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);

	bio = cursor->bio;
	BUG_ON(!bio);

	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);

	/* Advance the cursor offset */

	BUG_ON(cursor->resid < bytes);
	cursor->resid -= bytes;

	bio_advance_iter(bio, &cursor->bvec_iter, bytes);

	if (bytes < bio_vec.bv_len)
		return false;	/* more bytes to process in this segment */

	/* Move on to the next segment, and possibly the next bio */

	if (!cursor->bvec_iter.bi_size) {
		bio = bio->bi_next;
		cursor->bio = bio;
		if (bio)
			cursor->bvec_iter = bio->bi_iter;
		else
			memset(&cursor->bvec_iter, 0,
			       sizeof(cursor->bvec_iter));
	}

	if (!cursor->last_piece) {
		BUG_ON(!cursor->resid);
		BUG_ON(!bio);
		/* A short read is OK, so use <= rather than == */
		if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
			cursor->last_piece = true;
	}

	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * For a page array, a piece comes from the first page in the array
 * that has not already been fully consumed.
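 *
 * For example, with alignment 512 and length 8192 the data spans
 * calc_pages_for(512, 8192) = 3 pages (assuming 4 KiB pages), and the
 * first piece is page 0 at offset 512 with length 3584.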
 */
static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
					size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	int page_count;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(!data->pages);
	BUG_ON(!data->length);

	cursor->resid = min(length, data->length);
	page_count = calc_pages_for(data->alignment, (u64)data->length);
	cursor->page_offset = data->alignment & ~PAGE_MASK;
	cursor->page_index = 0;
	BUG_ON(page_count > (int)USHRT_MAX);
	cursor->page_count = (unsigned short)page_count;
	BUG_ON(length > SIZE_MAX - cursor->page_offset);
	cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
					size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_index >= cursor->page_count);
	BUG_ON(cursor->page_offset >= PAGE_SIZE);

	*page_offset = cursor->page_offset;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return data->pages[cursor->page_index];
}

static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
						size_t bytes)
{
	BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);

	/* Advance the cursor page offset */

	cursor->resid -= bytes;
	cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
	if (!bytes || cursor->page_offset)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;	/* no more data */

	/* Move on to the next page; offset is already at 0 */

	BUG_ON(cursor->page_index >= cursor->page_count);
	cursor->page_index++;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * For a pagelist, a piece is whatever remains to be consumed in the
 * first page in the list, or the front of the next page.
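 *
 * Unlike a page array, a pagelist is packed from offset 0, so only
 * its final piece can be shorter than PAGE_SIZE.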
 */
static void
ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
					size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;
	struct page *page;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	if (!length)
		return;		/* pagelist can be assigned but empty */

	BUG_ON(list_empty(&pagelist->head));
	page = list_first_entry(&pagelist->head, struct page, lru);

	cursor->resid = min(length, pagelist->length);
	cursor->page = page;
	cursor->offset = 0;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
				size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(!cursor->page);
	BUG_ON(cursor->offset + cursor->resid != pagelist->length);

	/* offset of first page in pagelist is always 0 */
	*page_offset = cursor->offset & ~PAGE_MASK;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return cursor->page;
}

static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
						size_t bytes)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
	BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);

	/* Advance the cursor offset */

	cursor->resid -= bytes;
	cursor->offset += bytes;
	/* offset of first page in pagelist is always 0 */
	if (!bytes || cursor->offset & ~PAGE_MASK)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;	/* no more data */

	/* Move on to the next page */

	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
	cursor->page = list_entry_next(cursor->page, lru);
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * Message data is handled (sent or received) in pieces, where each
 * piece resides on a single page.  The network layer might not
 * consume an entire piece at once.  A data item's cursor keeps
 * track of which piece is next to process and how much remains to
 * be processed in that piece.  It also tracks whether the current
 * piece is the last one in the data item.
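 *
 * A minimal sketch of the consumer pattern, mirroring what
 * write_partial_message_data() and read_partial_msg_data() below do
 * (do_io() is a hypothetical stand-in for the actual socket I/O,
 * and the local variable names are illustrative only):
 *
 *	ceph_msg_data_cursor_init(msg, data_len);
 *	while (cursor->resid) {
 *		page = ceph_msg_data_next(cursor, &off, &len, &last);
 *		ret = do_io(con->sock, page, off, len);
 *		if (ret <= 0)
 *			break;		// socket full; try again later
 *		ceph_msg_data_advance(cursor, (size_t)ret);
 *	}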
 */
static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
{
	size_t length = cursor->total_resid;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		ceph_msg_data_pagelist_cursor_init(cursor, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		ceph_msg_data_pages_cursor_init(cursor, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		ceph_msg_data_bio_cursor_init(cursor, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		/* BUG(); */
		break;
	}
	cursor->need_crc = true;
}

static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
{
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	struct ceph_msg_data *data;

	BUG_ON(!length);
	BUG_ON(length > msg->data_length);
	BUG_ON(list_empty(&msg->data));

	cursor->data_head = &msg->data;
	cursor->total_resid = length;
	data = list_first_entry(&msg->data, struct ceph_msg_data, links);
	cursor->data = data;

	__ceph_msg_data_cursor_init(cursor);
}

/*
 * Return the page containing the next piece to process for a given
 * data item, and supply the page offset and length of that piece.
 * Indicate whether this is the last piece in this data item.
 */
static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
					size_t *page_offset, size_t *length,
					bool *last_piece)
{
	struct page *page;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		page = ceph_msg_data_pages_next(cursor, page_offset, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		page = ceph_msg_data_bio_next(cursor, page_offset, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		page = NULL;
		break;
	}
	BUG_ON(!page);
	BUG_ON(*page_offset + *length > PAGE_SIZE);
	BUG_ON(!*length);
	if (last_piece)
		*last_piece = cursor->last_piece;

	return page;
}

/*
 * Returns true if the result moves the cursor on to the next piece
 * of the data item.
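 *
 * (A false return means we are still mid-piece, e.g. after a short
 * send; need_crc is then left false so a partially accumulated data
 * CRC is not redone for the same piece.)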
 */
static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
				size_t bytes)
{
	bool new_piece;

	BUG_ON(bytes > cursor->resid);
	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
		break;
	case CEPH_MSG_DATA_PAGES:
		new_piece = ceph_msg_data_pages_advance(cursor, bytes);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		new_piece = ceph_msg_data_bio_advance(cursor, bytes);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		BUG();
		break;
	}
	cursor->total_resid -= bytes;

	if (!cursor->resid && cursor->total_resid) {
		WARN_ON(!cursor->last_piece);
		BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
		cursor->data = list_entry_next(cursor->data, links);
		__ceph_msg_data_cursor_init(cursor);
		new_piece = true;
	}
	cursor->need_crc = new_piece;

	return new_piece;
}

static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
	BUG_ON(!msg);
	BUG_ON(!data_len);

	/* Initialize data cursor */

	ceph_msg_data_cursor_init(msg, (size_t)data_len);
}

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;
	int v = con->out_kvec_left;

	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
		if (con->ops->sign_message)
			con->ops->sign_message(con, m);
		else
			m->footer.sig = 0;
		con->out_kvec[v].iov_len = sizeof(m->footer);
		con->out_kvec_bytes += sizeof(m->footer);
	} else {
		m->old_footer.flags = m->footer.flags;
		con->out_kvec[v].iov_len = sizeof(m->old_footer);
		con->out_kvec_bytes += sizeof(m->old_footer);
	}
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}

/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	con_out_kvec_reset(con);
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
			&con->out_temp_ack);
	}

	BUG_ON(list_empty(&con->out_queue));
	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;
	BUG_ON(m->con != con);

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
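	 *
	 * (Requeueing happens on a fault: sent-but-unacked messages
	 * are spliced from out_sent back onto out_queue for the next
	 * attempt.)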
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;
	}
	WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     m->data_length);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		con_out_kvec_add(con, m->middle->vec.iov_len,
			m->middle->vec.iov_base);

	/* fill in crc (except data pages), footer */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	con->out_msg->footer.flags = 0;

	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
				m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	dout("%s front_crc %u middle_crc %u\n", __func__,
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	con->out_msg->footer.data_crc = 0;
	if (m->data_length) {
		prepare_message_data(con->out_msg, m->data_length);
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);
	}

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare to share the seq during handshake
 */
static void prepare_write_seq(struct ceph_connection *con)
{
	dout("prepare_write_seq %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con_out_kvec_reset(con);
	con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Connection negotiation.
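 *
 * Outline: the banner and encoded addresses are exchanged first, then
 * we send a ceph_msg_connect (plus any authorizer) and parse the
 * ceph_msg_connect_reply until the peer answers READY or SEQ.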
 */

static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
						int *auth_proto)
{
	struct ceph_auth_handshake *auth;

	if (!con->ops->get_authorizer) {
		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
		con->out_connect.authorizer_len = 0;
		return NULL;
	}

	/* Can't hold the mutex while getting authorizer */
	mutex_unlock(&con->mutex);
	auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
	mutex_lock(&con->mutex);

	if (IS_ERR(auth))
		return auth;
	if (con->state != CON_STATE_NEGOTIATING)
		return ERR_PTR(-EAGAIN);

	con->auth_reply_buf = auth->authorizer_reply_buf;
	con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
	return auth;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
	con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
	con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
					&con->msgr->my_enc_addr);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

static int prepare_write_connect(struct ceph_connection *con)
{
	unsigned int global_seq = get_global_seq(con->msgr, 0);
	int proto;
	int auth_proto;
	struct ceph_auth_handshake *auth;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	auth_proto = CEPH_AUTH_UNKNOWN;
	auth = get_connect_authorizer(con, &auth_proto);
	if (IS_ERR(auth))
		return PTR_ERR(auth);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
	con->out_connect.authorizer_len = auth ?
		cpu_to_le32(auth->authorizer_buf_len) : 0;

	con_out_kvec_add(con, sizeof (con->out_connect),
					&con->out_connect);
	if (auth && auth->authorizer_buf_len)
		con_out_kvec_add(con, auth->authorizer_buf_len,
					auth->authorizer_buf);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);

	return 0;
}

/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;		/* done */

		/* account for full iov entries consumed */
		while (ret >= con->out_kvec_cur->iov_len) {
			BUG_ON(!con->out_kvec_left);
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		}
		/* and for a partially-consumed entry */
		if (ret) {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}

static u32 ceph_crc32c_page(u32 crc, struct page *page,
				unsigned int page_offset,
				unsigned int length)
{
	char *kaddr;

	kaddr = kmap(page);
	BUG_ON(kaddr == NULL);
	crc = crc32c(crc, kaddr + page_offset, length);
	kunmap(page);

	return crc;
}

/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_message_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	bool do_datacrc = !con->msgr->nocrc;
	u32 crc;

	dout("%s %p msg %p\n", __func__, con, msg);

	if (list_empty(&msg->data))
		return -EINVAL;

	/*
	 * Iterate through each page that contains data to be
	 * written, and send as much as possible for each.
	 *
	 * If we are calculating the data crc (the default), we will
	 * need to map the page.  If we have no pages, they have
	 * been revoked, so use the zero page.
	 */
	crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
	while (cursor->resid) {
		struct page *page;
		size_t page_offset;
		size_t length;
		bool last_piece;
		bool need_crc;
		int ret;

		page = ceph_msg_data_next(&msg->cursor, &page_offset, &length,
					  &last_piece);
		ret = ceph_tcp_sendpage(con->sock, page, page_offset,
					length, last_piece);
		if (ret <= 0) {
			if (do_datacrc)
				msg->footer.data_crc = cpu_to_le32(crc);

			return ret;
		}
		if (do_datacrc && cursor->need_crc)
			crc = ceph_crc32c_page(crc, page, page_offset, length);
		need_crc = ceph_msg_data_advance(&msg->cursor, (size_t)ret);
	}

	dout("%s %p msg %p done\n", __func__, con, msg);

	/* prepare and queue up footer, too */
	if (do_datacrc)
		msg->footer.data_crc = cpu_to_le32(crc);
	else
		msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con_out_kvec_reset(con);
	prepare_write_message_footer(con);

	return 1;	/* must return > 0 to indicate success */
}

/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);

		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_seq(struct ceph_connection *con)
{
	dout("prepare_read_seq %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_SEQ;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
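 *
 * Only the incoming position and CRC accumulators are reset here; the
 * header itself is consumed later by the read path proper.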
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}


/*
 * Read up to @size bytes of @object, tracking progress in
 * con->in_base_pos.  @end is the stream offset just past @object,
 * so (size - left) is how much of it we already have.
 */
static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
	while (con->in_base_pos < end) {
		int left = end - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}


/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	size = strlen(CEPH_BANNER);
	end = size;
	ret = read_partial(con, end, size, con->in_banner);
	if (ret <= 0)
		goto out;

	size = sizeof (con->actual_peer_addr);
	end += size;
	ret = read_partial(con, end, size, &con->actual_peer_addr);
	if (ret <= 0)
		goto out;

	size = sizeof (con->peer_addr_for_me);
	end += size;
	ret = read_partial(con, end, size, &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;

out:
	return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	size = sizeof (con->in_reply);
	end = size;
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		goto out;

	size = le32_to_cpu(con->in_reply.authorizer_len);
	end += size;
	ret = read_partial(con, end, size, con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;

}

/*
 * Verify the hello banner looks okay.
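 *
 * The banner is the fixed CEPH_BANNER string ("ceph v027"); anything
 * else means the peer is not speaking the ceph wire protocol.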
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       ceph_pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}

/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
		char delim, const char **ipend)
{
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	memset(ss, 0, sizeof(*ss));

	if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
		ss->ss_family = AF_INET;
		return 0;
	}

	if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
		ss->ss_family = AF_INET6;
		return 0;
	}

	return -EINVAL;
}

/*
 * Extract hostname string and resolve using kernel DNS facility.
 */
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	const char *end, *delim_p;
	char *colon_p, *ip_addr = NULL;
	int ip_len, ret;

	/*
	 * The end of the hostname occurs immediately preceding the
	 * delimiter or the port marker (':'), where the delimiter
	 * takes precedence.
	 */
	delim_p = memchr(name, delim, namelen);
	colon_p = memchr(name, ':', namelen);

	if (delim_p && colon_p)
		end = delim_p < colon_p ? delim_p : colon_p;
	else if (!delim_p && colon_p)
		end = colon_p;
	else {
		end = delim_p;
		if (!end) /* case: hostname:/ */
			end = name + namelen;
	}

	if (end <= name)
		return -EINVAL;

	/* do dns_resolve upcall */
	ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
	if (ip_len > 0)
		ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
	else
		ret = -ESRCH;

	kfree(ip_addr);

	*ipend = end;

	pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
			ret, ret ? "failed" : ceph_pr_addr(ss));
"failed" : ceph_pr_addr(ss)); 1843 1844 return ret; 1845 } 1846 #else 1847 static inline int ceph_dns_resolve_name(const char *name, size_t namelen, 1848 struct sockaddr_storage *ss, char delim, const char **ipend) 1849 { 1850 return -EINVAL; 1851 } 1852 #endif 1853 1854 /* 1855 * Parse a server name (IP or hostname). If a valid IP address is not found 1856 * then try to extract a hostname to resolve using userspace DNS upcall. 1857 */ 1858 static int ceph_parse_server_name(const char *name, size_t namelen, 1859 struct sockaddr_storage *ss, char delim, const char **ipend) 1860 { 1861 int ret; 1862 1863 ret = ceph_pton(name, namelen, ss, delim, ipend); 1864 if (ret) 1865 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend); 1866 1867 return ret; 1868 } 1869 1870 /* 1871 * Parse an ip[:port] list into an addr array. Use the default 1872 * monitor port if a port isn't specified. 1873 */ 1874 int ceph_parse_ips(const char *c, const char *end, 1875 struct ceph_entity_addr *addr, 1876 int max_count, int *count) 1877 { 1878 int i, ret = -EINVAL; 1879 const char *p = c; 1880 1881 dout("parse_ips on '%.*s'\n", (int)(end-c), c); 1882 for (i = 0; i < max_count; i++) { 1883 const char *ipend; 1884 struct sockaddr_storage *ss = &addr[i].in_addr; 1885 int port; 1886 char delim = ','; 1887 1888 if (*p == '[') { 1889 delim = ']'; 1890 p++; 1891 } 1892 1893 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend); 1894 if (ret) 1895 goto bad; 1896 ret = -EINVAL; 1897 1898 p = ipend; 1899 1900 if (delim == ']') { 1901 if (*p != ']') { 1902 dout("missing matching ']'\n"); 1903 goto bad; 1904 } 1905 p++; 1906 } 1907 1908 /* port? */ 1909 if (p < end && *p == ':') { 1910 port = 0; 1911 p++; 1912 while (p < end && *p >= '0' && *p <= '9') { 1913 port = (port * 10) + (*p - '0'); 1914 p++; 1915 } 1916 if (port == 0) 1917 port = CEPH_MON_PORT; 1918 else if (port > 65535) 1919 goto bad; 1920 } else { 1921 port = CEPH_MON_PORT; 1922 } 1923 1924 addr_set_port(ss, port); 1925 1926 dout("parse_ips got %s\n", ceph_pr_addr(ss)); 1927 1928 if (p == end) 1929 break; 1930 if (*p != ',') 1931 goto bad; 1932 p++; 1933 } 1934 1935 if (p != end) 1936 goto bad; 1937 1938 if (count) 1939 *count = i + 1; 1940 return 0; 1941 1942 bad: 1943 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); 1944 return ret; 1945 } 1946 EXPORT_SYMBOL(ceph_parse_ips); 1947 1948 static int process_banner(struct ceph_connection *con) 1949 { 1950 dout("process_banner on %p\n", con); 1951 1952 if (verify_hello(con) < 0) 1953 return -1; 1954 1955 ceph_decode_addr(&con->actual_peer_addr); 1956 ceph_decode_addr(&con->peer_addr_for_me); 1957 1958 /* 1959 * Make sure the other end is who we wanted. note that the other 1960 * end may not yet know their ip address, so if it's 0.0.0.0, give 1961 * them the benefit of the doubt. 1962 */ 1963 if (memcmp(&con->peer_addr, &con->actual_peer_addr, 1964 sizeof(con->peer_addr)) != 0 && 1965 !(addr_is_blank(&con->actual_peer_addr.in_addr) && 1966 con->actual_peer_addr.nonce == con->peer_addr.nonce)) { 1967 pr_warn("wrong peer, want %s/%d, got %s/%d\n", 1968 ceph_pr_addr(&con->peer_addr.in_addr), 1969 (int)le32_to_cpu(con->peer_addr.nonce), 1970 ceph_pr_addr(&con->actual_peer_addr.in_addr), 1971 (int)le32_to_cpu(con->actual_peer_addr.nonce)); 1972 con->error_msg = "wrong peer at address"; 1973 return -1; 1974 } 1975 1976 /* 1977 * did we learn our address? 
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
	}

	return 0;
}

static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = con->msgr->supported_features;
	u64 req_feat = con->msgr->required_features;
	u64 server_feat = ceph_sanitize_features(
				le64_to_cpu(con->in_reply.features));
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		reset_connection(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		reset_connection(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to
		 * indicate that they must have reset their session, and
		 * may have dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_reply.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_NEGOTIATING)
			return -EAGAIN;
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
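		 *
		 * e.g. we sent connect_seq 2 but the peer remembers 5;
		 * we retry with the peer's value taken from
		 * in_reply.connect_seq.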
2075 */ 2076 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n", 2077 le32_to_cpu(con->out_connect.connect_seq), 2078 le32_to_cpu(con->in_reply.connect_seq)); 2079 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); 2080 con_out_kvec_reset(con); 2081 ret = prepare_write_connect(con); 2082 if (ret < 0) 2083 return ret; 2084 prepare_read_connect(con); 2085 break; 2086 2087 case CEPH_MSGR_TAG_RETRY_GLOBAL: 2088 /* 2089 * If we sent a smaller global_seq than the peer has, try 2090 * again with a larger value. 2091 */ 2092 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n", 2093 con->peer_global_seq, 2094 le32_to_cpu(con->in_reply.global_seq)); 2095 get_global_seq(con->msgr, 2096 le32_to_cpu(con->in_reply.global_seq)); 2097 con_out_kvec_reset(con); 2098 ret = prepare_write_connect(con); 2099 if (ret < 0) 2100 return ret; 2101 prepare_read_connect(con); 2102 break; 2103 2104 case CEPH_MSGR_TAG_SEQ: 2105 case CEPH_MSGR_TAG_READY: 2106 if (req_feat & ~server_feat) { 2107 pr_err("%s%lld %s protocol feature mismatch," 2108 " my required %llx > server's %llx, need %llx\n", 2109 ENTITY_NAME(con->peer_name), 2110 ceph_pr_addr(&con->peer_addr.in_addr), 2111 req_feat, server_feat, req_feat & ~server_feat); 2112 con->error_msg = "missing required protocol features"; 2113 reset_connection(con); 2114 return -1; 2115 } 2116 2117 WARN_ON(con->state != CON_STATE_NEGOTIATING); 2118 con->state = CON_STATE_OPEN; 2119 con->auth_retry = 0; /* we authenticated; clear flag */ 2120 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); 2121 con->connect_seq++; 2122 con->peer_features = server_feat; 2123 dout("process_connect got READY gseq %d cseq %d (%d)\n", 2124 con->peer_global_seq, 2125 le32_to_cpu(con->in_reply.connect_seq), 2126 con->connect_seq); 2127 WARN_ON(con->connect_seq != 2128 le32_to_cpu(con->in_reply.connect_seq)); 2129 2130 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) 2131 con_flag_set(con, CON_FLAG_LOSSYTX); 2132 2133 con->delay = 0; /* reset backoff memory */ 2134 2135 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) { 2136 prepare_write_seq(con); 2137 prepare_read_seq(con); 2138 } else { 2139 prepare_read_tag(con); 2140 } 2141 break; 2142 2143 case CEPH_MSGR_TAG_WAIT: 2144 /* 2145 * If there is a connection race (we are opening 2146 * connections to each other), one of us may just have 2147 * to WAIT. This shouldn't happen if we are the 2148 * client. 2149 */ 2150 pr_err("process_connect got WAIT as client\n"); 2151 con->error_msg = "protocol error, got WAIT as client"; 2152 return -1; 2153 2154 default: 2155 pr_err("connect protocol error, will retry\n"); 2156 con->error_msg = "protocol error, garbage tag during connect"; 2157 return -1; 2158 } 2159 return 0; 2160 } 2161 2162 2163 /* 2164 * read (part of) an ack 2165 */ 2166 static int read_partial_ack(struct ceph_connection *con) 2167 { 2168 int size = sizeof (con->in_temp_ack); 2169 int end = size; 2170 2171 return read_partial(con, end, size, &con->in_temp_ack); 2172 } 2173 2174 /* 2175 * We can finally discard anything that's been acked. 
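 *
 * For example (hypothetical seqs): if out_sent holds messages with
 * seqs 4, 5 and 6 and the peer acks 5, then 4 and 5 are unlinked and
 * freed below, while 6 stays on out_sent until a later ack covers it.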
2176 */ 2177 static void process_ack(struct ceph_connection *con) 2178 { 2179 struct ceph_msg *m; 2180 u64 ack = le64_to_cpu(con->in_temp_ack); 2181 u64 seq; 2182 2183 while (!list_empty(&con->out_sent)) { 2184 m = list_first_entry(&con->out_sent, struct ceph_msg, 2185 list_head); 2186 seq = le64_to_cpu(m->hdr.seq); 2187 if (seq > ack) 2188 break; 2189 dout("got ack for seq %llu type %d at %p\n", seq, 2190 le16_to_cpu(m->hdr.type), m); 2191 m->ack_stamp = jiffies; 2192 ceph_msg_remove(m); 2193 } 2194 prepare_read_tag(con); 2195 } 2196 2197 2198 static int read_partial_message_section(struct ceph_connection *con, 2199 struct kvec *section, 2200 unsigned int sec_len, u32 *crc) 2201 { 2202 int ret, left; 2203 2204 BUG_ON(!section); 2205 2206 while (section->iov_len < sec_len) { 2207 BUG_ON(section->iov_base == NULL); 2208 left = sec_len - section->iov_len; 2209 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base + 2210 section->iov_len, left); 2211 if (ret <= 0) 2212 return ret; 2213 section->iov_len += ret; 2214 } 2215 if (section->iov_len == sec_len) 2216 *crc = crc32c(0, section->iov_base, section->iov_len); 2217 2218 return 1; 2219 } 2220 2221 static int read_partial_msg_data(struct ceph_connection *con) 2222 { 2223 struct ceph_msg *msg = con->in_msg; 2224 struct ceph_msg_data_cursor *cursor = &msg->cursor; 2225 const bool do_datacrc = !con->msgr->nocrc; 2226 struct page *page; 2227 size_t page_offset; 2228 size_t length; 2229 u32 crc = 0; 2230 int ret; 2231 2232 BUG_ON(!msg); 2233 if (list_empty(&msg->data)) 2234 return -EIO; 2235 2236 if (do_datacrc) 2237 crc = con->in_data_crc; 2238 while (cursor->resid) { 2239 page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, 2240 NULL); 2241 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); 2242 if (ret <= 0) { 2243 if (do_datacrc) 2244 con->in_data_crc = crc; 2245 2246 return ret; 2247 } 2248 2249 if (do_datacrc) 2250 crc = ceph_crc32c_page(crc, page, page_offset, ret); 2251 (void) ceph_msg_data_advance(&msg->cursor, (size_t)ret); 2252 } 2253 if (do_datacrc) 2254 con->in_data_crc = crc; 2255 2256 return 1; /* must return > 0 to indicate success */ 2257 } 2258 2259 /* 2260 * read (part of) a message. 
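 *
 * "Part of" is literal: the socket may run dry at any byte.  Progress
 * through the fixed-size sections is tracked in con->in_base_pos by
 * the read_partial() helper earlier in this file; a return <= 0 from
 * any step below just means "come back later", and the worker will
 * re-enter at the same offset on the next socket-readable event.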
*/ 2262 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); 2263 2264 static int read_partial_message(struct ceph_connection *con) 2265 { 2266 struct ceph_msg *m = con->in_msg; 2267 int size; 2268 int end; 2269 int ret; 2270 unsigned int front_len, middle_len, data_len; 2271 bool do_datacrc = !con->msgr->nocrc; 2272 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH); 2273 u64 seq; 2274 u32 crc; 2275 2276 dout("read_partial_message con %p msg %p\n", con, m); 2277 2278 /* header */ 2279 size = sizeof (con->in_hdr); 2280 end = size; 2281 ret = read_partial(con, end, size, &con->in_hdr); 2282 if (ret <= 0) 2283 return ret; 2284 2285 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc)); 2286 if (cpu_to_le32(crc) != con->in_hdr.crc) { 2287 pr_err("read_partial_message bad hdr " 2288 "crc %u != expected %u\n", 2289 crc, con->in_hdr.crc); 2290 return -EBADMSG; 2291 } 2292 2293 front_len = le32_to_cpu(con->in_hdr.front_len); 2294 if (front_len > CEPH_MSG_MAX_FRONT_LEN) 2295 return -EIO; 2296 middle_len = le32_to_cpu(con->in_hdr.middle_len); 2297 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN) 2298 return -EIO; 2299 data_len = le32_to_cpu(con->in_hdr.data_len); 2300 if (data_len > CEPH_MSG_MAX_DATA_LEN) 2301 return -EIO; 2302 2303 /* verify seq# */ 2304 seq = le64_to_cpu(con->in_hdr.seq); 2305 if ((s64)seq - (s64)con->in_seq < 1) { 2306 pr_info("skipping %s%lld %s seq %lld expected %lld\n", 2307 ENTITY_NAME(con->peer_name), 2308 ceph_pr_addr(&con->peer_addr.in_addr), 2309 seq, con->in_seq + 1); 2310 con->in_base_pos = -front_len - middle_len - data_len - 2311 sizeof(m->footer); 2312 con->in_tag = CEPH_MSGR_TAG_READY; 2313 return 0; 2314 } else if ((s64)seq - (s64)con->in_seq > 1) { 2315 pr_err("read_partial_message bad seq %lld expected %lld\n", 2316 seq, con->in_seq + 1); 2317 con->error_msg = "bad message sequence # for incoming message"; 2318 return -EBADMSG; 2319 } 2320 2321 /* allocate message?
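 * The buffer comes from the connection's alloc_msg op via
 * ceph_con_in_msg_alloc(); if the op sets *skip, the message is
 * deliberately dropped: a negative in_base_pos makes try_read()
 * swallow front + middle + data + footer bytes and resynchronize.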
*/ 2322 if (!con->in_msg) { 2323 int skip = 0; 2324 2325 dout("got hdr type %d front %d data %d\n", con->in_hdr.type, 2326 front_len, data_len); 2327 ret = ceph_con_in_msg_alloc(con, &skip); 2328 if (ret < 0) 2329 return ret; 2330 2331 BUG_ON(!con->in_msg ^ skip); 2332 if (con->in_msg && data_len > con->in_msg->data_length) { 2333 pr_warn("%s skipping long message (%u > %zd)\n", 2334 __func__, data_len, con->in_msg->data_length); 2335 ceph_msg_put(con->in_msg); 2336 con->in_msg = NULL; 2337 skip = 1; 2338 } 2339 if (skip) { 2340 /* skip this message */ 2341 dout("alloc_msg said skip message\n"); 2342 con->in_base_pos = -front_len - middle_len - data_len - 2343 sizeof(m->footer); 2344 con->in_tag = CEPH_MSGR_TAG_READY; 2345 con->in_seq++; 2346 return 0; 2347 } 2348 2349 BUG_ON(!con->in_msg); 2350 BUG_ON(con->in_msg->con != con); 2351 m = con->in_msg; 2352 m->front.iov_len = 0; /* haven't read it yet */ 2353 if (m->middle) 2354 m->middle->vec.iov_len = 0; 2355 2356 /* prepare for data payload, if any */ 2357 2358 if (data_len) 2359 prepare_message_data(con->in_msg, data_len); 2360 } 2361 2362 /* front */ 2363 ret = read_partial_message_section(con, &m->front, front_len, 2364 &con->in_front_crc); 2365 if (ret <= 0) 2366 return ret; 2367 2368 /* middle */ 2369 if (m->middle) { 2370 ret = read_partial_message_section(con, &m->middle->vec, 2371 middle_len, 2372 &con->in_middle_crc); 2373 if (ret <= 0) 2374 return ret; 2375 } 2376 2377 /* (page) data */ 2378 if (data_len) { 2379 ret = read_partial_msg_data(con); 2380 if (ret <= 0) 2381 return ret; 2382 } 2383 2384 /* footer */ 2385 if (need_sign) 2386 size = sizeof(m->footer); 2387 else 2388 size = sizeof(m->old_footer); 2389 2390 end += size; 2391 ret = read_partial(con, end, size, &m->footer); 2392 if (ret <= 0) 2393 return ret; 2394 2395 if (!need_sign) { 2396 m->footer.flags = m->old_footer.flags; 2397 m->footer.sig = 0; 2398 } 2399 2400 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n", 2401 m, front_len, m->footer.front_crc, middle_len, 2402 m->footer.middle_crc, data_len, m->footer.data_crc); 2403 2404 /* crc ok? */ 2405 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) { 2406 pr_err("read_partial_message %p front crc %u != exp. %u\n", 2407 m, con->in_front_crc, m->footer.front_crc); 2408 return -EBADMSG; 2409 } 2410 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) { 2411 pr_err("read_partial_message %p middle crc %u != exp %u\n", 2412 m, con->in_middle_crc, m->footer.middle_crc); 2413 return -EBADMSG; 2414 } 2415 if (do_datacrc && 2416 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 && 2417 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) { 2418 pr_err("read_partial_message %p data crc %u != exp. %u\n", m, 2419 con->in_data_crc, le32_to_cpu(m->footer.data_crc)); 2420 return -EBADMSG; 2421 } 2422 2423 if (need_sign && con->ops->check_message_signature && 2424 con->ops->check_message_signature(con, m)) { 2425 pr_err("read_partial_message %p signature check failed\n", m); 2426 return -EBADMSG; 2427 } 2428 2429 return 1; /* done! */ 2430 } 2431 2432 /* 2433 * Process message. This happens in the worker thread. The callback should 2434 * be careful not to do anything that waits on other incoming messages or it 2435 * may deadlock. 
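 *
 * Concretely: a dispatch op that sent a request and then blocked
 * waiting for the reply on this same connection would deadlock,
 * because the reply can only be read by this very worker once
 * dispatch returns.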
2436 */ 2437 static void process_message(struct ceph_connection *con) 2438 { 2439 struct ceph_msg *msg; 2440 2441 BUG_ON(con->in_msg->con != con); 2442 con->in_msg->con = NULL; 2443 msg = con->in_msg; 2444 con->in_msg = NULL; 2445 con->ops->put(con); 2446 2447 /* if first message, set peer_name */ 2448 if (con->peer_name.type == 0) 2449 con->peer_name = msg->hdr.src; 2450 2451 con->in_seq++; 2452 mutex_unlock(&con->mutex); 2453 2454 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n", 2455 msg, le64_to_cpu(msg->hdr.seq), 2456 ENTITY_NAME(msg->hdr.src), 2457 le16_to_cpu(msg->hdr.type), 2458 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2459 le32_to_cpu(msg->hdr.front_len), 2460 le32_to_cpu(msg->hdr.data_len), 2461 con->in_front_crc, con->in_middle_crc, con->in_data_crc); 2462 con->ops->dispatch(con, msg); 2463 2464 mutex_lock(&con->mutex); 2465 } 2466 2467 2468 /* 2469 * Write something to the socket. Called in a worker thread when the 2470 * socket appears to be writeable and we have something ready to send. 2471 */ 2472 static int try_write(struct ceph_connection *con) 2473 { 2474 int ret = 1; 2475 2476 dout("try_write start %p state %lu\n", con, con->state); 2477 2478 more: 2479 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); 2480 2481 /* open the socket first? */ 2482 if (con->state == CON_STATE_PREOPEN) { 2483 BUG_ON(con->sock); 2484 con->state = CON_STATE_CONNECTING; 2485 2486 con_out_kvec_reset(con); 2487 prepare_write_banner(con); 2488 prepare_read_banner(con); 2489 2490 BUG_ON(con->in_msg); 2491 con->in_tag = CEPH_MSGR_TAG_READY; 2492 dout("try_write initiating connect on %p new state %lu\n", 2493 con, con->state); 2494 ret = ceph_tcp_connect(con); 2495 if (ret < 0) { 2496 con->error_msg = "connect error"; 2497 goto out; 2498 } 2499 } 2500 2501 more_kvec: 2502 /* kvec data queued? */ 2503 if (con->out_skip) { 2504 ret = write_partial_skip(con); 2505 if (ret <= 0) 2506 goto out; 2507 } 2508 if (con->out_kvec_left) { 2509 ret = write_partial_kvec(con); 2510 if (ret <= 0) 2511 goto out; 2512 } 2513 2514 /* msg pages? */ 2515 if (con->out_msg) { 2516 if (con->out_msg_done) { 2517 ceph_msg_put(con->out_msg); 2518 con->out_msg = NULL; /* we're done with this one */ 2519 goto do_next; 2520 } 2521 2522 ret = write_partial_message_data(con); 2523 if (ret == 1) 2524 goto more_kvec; /* we need to send the footer, too! */ 2525 if (ret == 0) 2526 goto out; 2527 if (ret < 0) { 2528 dout("try_write write_partial_message_data err %d\n", 2529 ret); 2530 goto out; 2531 } 2532 } 2533 2534 do_next: 2535 if (con->state == CON_STATE_OPEN) { 2536 /* is anything else pending? */ 2537 if (!list_empty(&con->out_queue)) { 2538 prepare_write_message(con); 2539 goto more; 2540 } 2541 if (con->in_seq > con->in_seq_acked) { 2542 prepare_write_ack(con); 2543 goto more; 2544 } 2545 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) { 2546 prepare_write_keepalive(con); 2547 goto more; 2548 } 2549 } 2550 2551 /* Nothing to do! */ 2552 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 2553 dout("try_write nothing else to write.\n"); 2554 ret = 0; 2555 out: 2556 dout("try_write done on %p ret %d\n", con, ret); 2557 return ret; 2558 } 2559 2560 2561 2562 /* 2563 * Read what we can from the socket. 
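 *
 * Return conventions, roughly: positive after making progress, 0 when
 * there is nothing (more) to read in the current state, negative on
 * error.  con_work() treats -EAGAIN specially and simply retries its
 * loop.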
2564 */ 2565 static int try_read(struct ceph_connection *con) 2566 { 2567 int ret = -1; 2568 2569 more: 2570 dout("try_read start on %p state %lu\n", con, con->state); 2571 if (con->state != CON_STATE_CONNECTING && 2572 con->state != CON_STATE_NEGOTIATING && 2573 con->state != CON_STATE_OPEN) 2574 return 0; 2575 2576 BUG_ON(!con->sock); 2577 2578 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, 2579 con->in_base_pos); 2580 2581 if (con->state == CON_STATE_CONNECTING) { 2582 dout("try_read connecting\n"); 2583 ret = read_partial_banner(con); 2584 if (ret <= 0) 2585 goto out; 2586 ret = process_banner(con); 2587 if (ret < 0) 2588 goto out; 2589 2590 con->state = CON_STATE_NEGOTIATING; 2591 2592 /* 2593 * Received banner is good, exchange connection info. 2594 * Do not reset out_kvec, as sending our banner raced 2595 * with receiving peer banner after connect completed. 2596 */ 2597 ret = prepare_write_connect(con); 2598 if (ret < 0) 2599 goto out; 2600 prepare_read_connect(con); 2601 2602 /* Send connection info before awaiting response */ 2603 goto out; 2604 } 2605 2606 if (con->state == CON_STATE_NEGOTIATING) { 2607 dout("try_read negotiating\n"); 2608 ret = read_partial_connect(con); 2609 if (ret <= 0) 2610 goto out; 2611 ret = process_connect(con); 2612 if (ret < 0) 2613 goto out; 2614 goto more; 2615 } 2616 2617 WARN_ON(con->state != CON_STATE_OPEN); 2618 2619 if (con->in_base_pos < 0) { 2620 /* 2621 * skipping + discarding content. 2622 * 2623 * FIXME: there must be a better way to do this! 2624 */ 2625 static char buf[SKIP_BUF_SIZE]; 2626 int skip = min((int) sizeof (buf), -con->in_base_pos); 2627 2628 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); 2629 ret = ceph_tcp_recvmsg(con->sock, buf, skip); 2630 if (ret <= 0) 2631 goto out; 2632 con->in_base_pos += ret; 2633 if (con->in_base_pos) 2634 goto more; 2635 } 2636 if (con->in_tag == CEPH_MSGR_TAG_READY) { 2637 /* 2638 * what's next? 2639 */ 2640 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); 2641 if (ret <= 0) 2642 goto out; 2643 dout("try_read got tag %d\n", (int)con->in_tag); 2644 switch (con->in_tag) { 2645 case CEPH_MSGR_TAG_MSG: 2646 prepare_read_message(con); 2647 break; 2648 case CEPH_MSGR_TAG_ACK: 2649 prepare_read_ack(con); 2650 break; 2651 case CEPH_MSGR_TAG_CLOSE: 2652 con_close_socket(con); 2653 con->state = CON_STATE_CLOSED; 2654 goto out; 2655 default: 2656 goto bad_tag; 2657 } 2658 } 2659 if (con->in_tag == CEPH_MSGR_TAG_MSG) { 2660 ret = read_partial_message(con); 2661 if (ret <= 0) { 2662 switch (ret) { 2663 case -EBADMSG: 2664 con->error_msg = "bad crc"; 2665 ret = -EIO; 2666 break; 2667 case -EIO: 2668 con->error_msg = "io error"; 2669 break; 2670 } 2671 goto out; 2672 } 2673 if (con->in_tag == CEPH_MSGR_TAG_READY) 2674 goto more; 2675 process_message(con); 2676 if (con->state == CON_STATE_OPEN) 2677 prepare_read_tag(con); 2678 goto more; 2679 } 2680 if (con->in_tag == CEPH_MSGR_TAG_ACK || 2681 con->in_tag == CEPH_MSGR_TAG_SEQ) { 2682 /* 2683 * the final handshake seq exchange is semantically 2684 * equivalent to an ACK 2685 */ 2686 ret = read_partial_ack(con); 2687 if (ret <= 0) 2688 goto out; 2689 process_ack(con); 2690 goto more; 2691 } 2692 2693 out: 2694 dout("try_read done on %p ret %d\n", con, ret); 2695 return ret; 2696 2697 bad_tag: 2698 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag); 2699 con->error_msg = "protocol error, garbage tag"; 2700 ret = -1; 2701 goto out; 2702 } 2703 2704 2705 /* 2706 * Atomically queue work on a connection after the specified delay. 
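 * The delay is in jiffies; con_backoff(), for instance, passes
 * round_jiffies_relative(con->delay) when retrying after a fault.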
2707 * Bump @con reference to avoid races with connection teardown. 2708 * Returns 0 if work was queued, or an error code otherwise. 2709 */ 2710 static int queue_con_delay(struct ceph_connection *con, unsigned long delay) 2711 { 2712 if (!con->ops->get(con)) { 2713 dout("%s %p ref count 0\n", __func__, con); 2714 return -ENOENT; 2715 } 2716 2717 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) { 2718 dout("%s %p - already queued\n", __func__, con); 2719 con->ops->put(con); 2720 return -EBUSY; 2721 } 2722 2723 dout("%s %p %lu\n", __func__, con, delay); 2724 return 0; 2725 } 2726 2727 static void queue_con(struct ceph_connection *con) 2728 { 2729 (void) queue_con_delay(con, 0); 2730 } 2731 2732 static void cancel_con(struct ceph_connection *con) 2733 { 2734 if (cancel_delayed_work(&con->work)) { 2735 dout("%s %p\n", __func__, con); 2736 con->ops->put(con); 2737 } 2738 } 2739 2740 static bool con_sock_closed(struct ceph_connection *con) 2741 { 2742 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED)) 2743 return false; 2744 2745 #define CASE(x) \ 2746 case CON_STATE_ ## x: \ 2747 con->error_msg = "socket closed (con state " #x ")"; \ 2748 break; 2749 2750 switch (con->state) { 2751 CASE(CLOSED); 2752 CASE(PREOPEN); 2753 CASE(CONNECTING); 2754 CASE(NEGOTIATING); 2755 CASE(OPEN); 2756 CASE(STANDBY); 2757 default: 2758 pr_warn("%s con %p unrecognized state %lu\n", 2759 __func__, con, con->state); 2760 con->error_msg = "unrecognized con state"; 2761 BUG(); 2762 break; 2763 } 2764 #undef CASE 2765 2766 return true; 2767 } 2768 2769 static bool con_backoff(struct ceph_connection *con) 2770 { 2771 int ret; 2772 2773 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF)) 2774 return false; 2775 2776 ret = queue_con_delay(con, round_jiffies_relative(con->delay)); 2777 if (ret) { 2778 dout("%s: con %p FAILED to back off %lu\n", __func__, 2779 con, con->delay); 2780 BUG_ON(ret == -ENOENT); 2781 con_flag_set(con, CON_FLAG_BACKOFF); 2782 } 2783 2784 return true; 2785 } 2786 2787 /* Finish fault handling; con->mutex must *not* be held here */ 2788 2789 static void con_fault_finish(struct ceph_connection *con) 2790 { 2791 /* 2792 * in case we faulted due to authentication, invalidate our 2793 * current tickets so that we can get new ones. 2794 */ 2795 if (con->auth_retry && con->ops->invalidate_authorizer) { 2796 dout("calling invalidate_authorizer()\n"); 2797 con->ops->invalidate_authorizer(con); 2798 } 2799 2800 if (con->ops->fault) 2801 con->ops->fault(con); 2802 } 2803 2804 /* 2805 * Do some work on a connection. Drop a connection ref when we're done. 
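 *
 * The reference being dropped here is the one taken by
 * queue_con_delay() through con->ops->get(); pairing them is what
 * keeps the connection alive while work is queued or running.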
2806 */ 2807 static void con_work(struct work_struct *work) 2808 { 2809 struct ceph_connection *con = container_of(work, struct ceph_connection, 2810 work.work); 2811 unsigned long pflags = current->flags; 2812 bool fault; 2813 2814 current->flags |= PF_MEMALLOC; 2815 2816 mutex_lock(&con->mutex); 2817 while (true) { 2818 int ret; 2819 2820 if ((fault = con_sock_closed(con))) { 2821 dout("%s: con %p SOCK_CLOSED\n", __func__, con); 2822 break; 2823 } 2824 if (con_backoff(con)) { 2825 dout("%s: con %p BACKOFF\n", __func__, con); 2826 break; 2827 } 2828 if (con->state == CON_STATE_STANDBY) { 2829 dout("%s: con %p STANDBY\n", __func__, con); 2830 break; 2831 } 2832 if (con->state == CON_STATE_CLOSED) { 2833 dout("%s: con %p CLOSED\n", __func__, con); 2834 BUG_ON(con->sock); 2835 break; 2836 } 2837 if (con->state == CON_STATE_PREOPEN) { 2838 dout("%s: con %p PREOPEN\n", __func__, con); 2839 BUG_ON(con->sock); 2840 } 2841 2842 ret = try_read(con); 2843 if (ret < 0) { 2844 if (ret == -EAGAIN) 2845 continue; 2846 con->error_msg = "socket error on read"; 2847 fault = true; 2848 break; 2849 } 2850 2851 ret = try_write(con); 2852 if (ret < 0) { 2853 if (ret == -EAGAIN) 2854 continue; 2855 con->error_msg = "socket error on write"; 2856 fault = true; 2857 } 2858 2859 break; /* If we make it to here, we're done */ 2860 } 2861 if (fault) 2862 con_fault(con); 2863 mutex_unlock(&con->mutex); 2864 2865 if (fault) 2866 con_fault_finish(con); 2867 2868 con->ops->put(con); 2869 2870 tsk_restore_flags(current, pflags, PF_MEMALLOC); 2871 } 2872 2873 /* 2874 * Generic error/fault handler. A retry mechanism is used with 2875 * exponential backoff 2876 */ 2877 static void con_fault(struct ceph_connection *con) 2878 { 2879 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), 2880 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); 2881 dout("fault %p state %lu to peer %s\n", 2882 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr)); 2883 2884 WARN_ON(con->state != CON_STATE_CONNECTING && 2885 con->state != CON_STATE_NEGOTIATING && 2886 con->state != CON_STATE_OPEN); 2887 2888 con_close_socket(con); 2889 2890 if (con_flag_test(con, CON_FLAG_LOSSYTX)) { 2891 dout("fault on LOSSYTX channel, marking CLOSED\n"); 2892 con->state = CON_STATE_CLOSED; 2893 return; 2894 } 2895 2896 if (con->in_msg) { 2897 BUG_ON(con->in_msg->con != con); 2898 con->in_msg->con = NULL; 2899 ceph_msg_put(con->in_msg); 2900 con->in_msg = NULL; 2901 con->ops->put(con); 2902 } 2903 2904 /* Requeue anything that hasn't been acked */ 2905 list_splice_init(&con->out_sent, &con->out_queue); 2906 2907 /* If there are no messages queued or keepalive pending, place 2908 * the connection in a STANDBY state */ 2909 if (list_empty(&con->out_queue) && 2910 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) { 2911 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); 2912 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 2913 con->state = CON_STATE_STANDBY; 2914 } else { 2915 /* retry after a delay. 
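 * The delay roughly doubles on each successive fault, giving the
 * usual exponential backoff (hypothetical timeline, in units of
 * BASE_DELAY_INTERVAL): 1, 2, 4, 8, ... with doubling stopping once
 * con->delay reaches MAX_DELAY_INTERVAL.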
*/ 2916 con->state = CON_STATE_PREOPEN; 2917 if (con->delay == 0) 2918 con->delay = BASE_DELAY_INTERVAL; 2919 else if (con->delay < MAX_DELAY_INTERVAL) 2920 con->delay *= 2; 2921 con_flag_set(con, CON_FLAG_BACKOFF); 2922 queue_con(con); 2923 } 2924 } 2925 2926 2927 2928 /* 2929 * initialize a new messenger instance 2930 */ 2931 void ceph_messenger_init(struct ceph_messenger *msgr, 2932 struct ceph_entity_addr *myaddr, 2933 u64 supported_features, 2934 u64 required_features, 2935 bool nocrc, 2936 bool tcp_nodelay) 2937 { 2938 msgr->supported_features = supported_features; 2939 msgr->required_features = required_features; 2940 2941 spin_lock_init(&msgr->global_seq_lock); 2942 2943 if (myaddr) 2944 msgr->inst.addr = *myaddr; 2945 2946 /* select a random nonce */ 2947 msgr->inst.addr.type = 0; 2948 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); 2949 encode_my_addr(msgr); 2950 msgr->nocrc = nocrc; 2951 msgr->tcp_nodelay = tcp_nodelay; 2952 2953 atomic_set(&msgr->stopping, 0); 2954 2955 dout("%s %p\n", __func__, msgr); 2956 } 2957 EXPORT_SYMBOL(ceph_messenger_init); 2958 2959 static void clear_standby(struct ceph_connection *con) 2960 { 2961 /* come back from STANDBY? */ 2962 if (con->state == CON_STATE_STANDBY) { 2963 dout("clear_standby %p and ++connect_seq\n", con); 2964 con->state = CON_STATE_PREOPEN; 2965 con->connect_seq++; 2966 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING)); 2967 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)); 2968 } 2969 } 2970 2971 /* 2972 * Queue up an outgoing message on the given connection. 2973 */ 2974 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) 2975 { 2976 /* set src+dst */ 2977 msg->hdr.src = con->msgr->inst.name; 2978 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); 2979 msg->needs_out_seq = true; 2980 2981 mutex_lock(&con->mutex); 2982 2983 if (con->state == CON_STATE_CLOSED) { 2984 dout("con_send %p closed, dropping %p\n", con, msg); 2985 ceph_msg_put(msg); 2986 mutex_unlock(&con->mutex); 2987 return; 2988 } 2989 2990 BUG_ON(msg->con != NULL); 2991 msg->con = con->ops->get(con); 2992 BUG_ON(msg->con == NULL); 2993 2994 BUG_ON(!list_empty(&msg->list_head)); 2995 list_add_tail(&msg->list_head, &con->out_queue); 2996 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, 2997 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type), 2998 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2999 le32_to_cpu(msg->hdr.front_len), 3000 le32_to_cpu(msg->hdr.middle_len), 3001 le32_to_cpu(msg->hdr.data_len)); 3002 3003 clear_standby(con); 3004 mutex_unlock(&con->mutex); 3005 3006 /* if there wasn't anything waiting to send before, queue 3007 * new work */ 3008 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3009 queue_con(con); 3010 } 3011 EXPORT_SYMBOL(ceph_con_send); 3012 3013 /* 3014 * Revoke a message that was previously queued for send 3015 */ 3016 void ceph_msg_revoke(struct ceph_msg *msg) 3017 { 3018 struct ceph_connection *con = msg->con; 3019 3020 if (!con) 3021 return; /* Message not in our possession */ 3022 3023 mutex_lock(&con->mutex); 3024 if (!list_empty(&msg->list_head)) { 3025 dout("%s %p msg %p - was on queue\n", __func__, con, msg); 3026 list_del_init(&msg->list_head); 3027 BUG_ON(msg->con == NULL); 3028 msg->con->ops->put(msg->con); 3029 msg->con = NULL; 3030 msg->hdr.seq = 0; 3031 3032 ceph_msg_put(msg); 3033 } 3034 if (con->out_msg == msg) { 3035 dout("%s %p msg %p - was sending\n", __func__, con, msg); 3036 con->out_msg = NULL; 3037 if (con->out_kvec_is_msg) 
{ 3038 con->out_skip = con->out_kvec_bytes; 3039 con->out_kvec_is_msg = false; 3040 } 3041 msg->hdr.seq = 0; 3042 3043 ceph_msg_put(msg); 3044 } 3045 mutex_unlock(&con->mutex); 3046 } 3047 3048 /* 3049 * Revoke a message that we may be reading data into 3050 */ 3051 void ceph_msg_revoke_incoming(struct ceph_msg *msg) 3052 { 3053 struct ceph_connection *con; 3054 3055 BUG_ON(msg == NULL); 3056 if (!msg->con) { 3057 dout("%s msg %p null con\n", __func__, msg); 3058 3059 return; /* Message not in our possession */ 3060 } 3061 3062 con = msg->con; 3063 mutex_lock(&con->mutex); 3064 if (con->in_msg == msg) { 3065 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len); 3066 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len); 3067 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len); 3068 3069 /* skip rest of message */ 3070 dout("%s %p msg %p revoked\n", __func__, con, msg); 3071 con->in_base_pos = con->in_base_pos - 3072 sizeof(struct ceph_msg_header) - 3073 front_len - 3074 middle_len - 3075 data_len - 3076 sizeof(struct ceph_msg_footer); 3077 ceph_msg_put(con->in_msg); 3078 con->in_msg = NULL; 3079 con->in_tag = CEPH_MSGR_TAG_READY; 3080 con->in_seq++; 3081 } else { 3082 dout("%s %p in_msg %p msg %p no-op\n", 3083 __func__, con, con->in_msg, msg); 3084 } 3085 mutex_unlock(&con->mutex); 3086 } 3087 3088 /* 3089 * Queue a keepalive byte to ensure the tcp connection is alive. 3090 */ 3091 void ceph_con_keepalive(struct ceph_connection *con) 3092 { 3093 dout("con_keepalive %p\n", con); 3094 mutex_lock(&con->mutex); 3095 clear_standby(con); 3096 mutex_unlock(&con->mutex); 3097 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 && 3098 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3099 queue_con(con); 3100 } 3101 EXPORT_SYMBOL(ceph_con_keepalive); 3102 3103 static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type) 3104 { 3105 struct ceph_msg_data *data; 3106 3107 if (WARN_ON(!ceph_msg_data_type_valid(type))) 3108 return NULL; 3109 3110 data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS); 3111 if (!data) 3112 return NULL; 3113 data->type = type; 3114 INIT_LIST_HEAD(&data->links); 3115 return data; 3116 } 3117 3118 static void ceph_msg_data_destroy(struct ceph_msg_data *data) 3119 { 3120 if (!data) 3121 return; 3122 3123 WARN_ON(!list_empty(&data->links)); 3124 if (data->type == CEPH_MSG_DATA_PAGELIST) 3125 ceph_pagelist_release(data->pagelist); 3126 kmem_cache_free(ceph_msg_data_cache, data); 3127 } 3128 3129 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, 3130 size_t length, size_t alignment) 3131 { 3132 struct ceph_msg_data *data; 3133 3134 BUG_ON(!pages); 3135 BUG_ON(!length); 3136 3137 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); 3138 BUG_ON(!data); 3139 data->pages = pages; 3140 data->length = length; 3141 data->alignment = alignment & ~PAGE_MASK; 3142 3143 list_add_tail(&data->links, &msg->data); 3144 msg->data_length += length; 3145 } 3146 EXPORT_SYMBOL(ceph_msg_data_add_pages); 3147 3148 void ceph_msg_data_add_pagelist(struct ceph_msg *msg, 3149 struct ceph_pagelist *pagelist) 3150 { 3151 struct ceph_msg_data *data; 3152 3153 BUG_ON(!pagelist); 3154 BUG_ON(!pagelist->length); 3155 3156 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); 3157 BUG_ON(!data); 3158 data->pagelist = pagelist; 3159 3160 list_add_tail(&data->links, &msg->data); 3161 msg->data_length += pagelist->length; 3162 } 3163 EXPORT_SYMBOL(ceph_msg_data_add_pagelist); 3164 3165 #ifdef CONFIG_BLOCK 3166 void ceph_msg_data_add_bio(struct
ceph_msg *msg, struct bio *bio, 3167 size_t length) 3168 { 3169 struct ceph_msg_data *data; 3170 3171 BUG_ON(!bio); 3172 3173 data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); 3174 BUG_ON(!data); 3175 data->bio = bio; 3176 data->bio_length = length; 3177 3178 list_add_tail(&data->links, &msg->data); 3179 msg->data_length += length; 3180 } 3181 EXPORT_SYMBOL(ceph_msg_data_add_bio); 3182 #endif /* CONFIG_BLOCK */ 3183 3184 /* 3185 * construct a new message with given type, size 3186 * the new msg has a ref count of 1. 3187 */ 3188 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, 3189 bool can_fail) 3190 { 3191 struct ceph_msg *m; 3192 3193 m = kmem_cache_zalloc(ceph_msg_cache, flags); 3194 if (m == NULL) 3195 goto out; 3196 3197 m->hdr.type = cpu_to_le16(type); 3198 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); 3199 m->hdr.front_len = cpu_to_le32(front_len); 3200 3201 INIT_LIST_HEAD(&m->list_head); 3202 kref_init(&m->kref); 3203 INIT_LIST_HEAD(&m->data); 3204 3205 /* front */ 3206 if (front_len) { 3207 m->front.iov_base = ceph_kvmalloc(front_len, flags); 3208 if (m->front.iov_base == NULL) { 3209 dout("ceph_msg_new can't allocate %d bytes\n", 3210 front_len); 3211 goto out2; 3212 } 3213 } else { 3214 m->front.iov_base = NULL; 3215 } 3216 m->front_alloc_len = m->front.iov_len = front_len; 3217 3218 dout("ceph_msg_new %p front %d\n", m, front_len); 3219 return m; 3220 3221 out2: 3222 ceph_msg_put(m); 3223 out: 3224 if (!can_fail) { 3225 pr_err("msg_new can't create type %d front %d\n", type, 3226 front_len); 3227 WARN_ON(1); 3228 } else { 3229 dout("msg_new can't create type %d front %d\n", type, 3230 front_len); 3231 } 3232 return NULL; 3233 } 3234 EXPORT_SYMBOL(ceph_msg_new); 3235 3236 /* 3237 * Allocate "middle" portion of a message, if it is needed and wasn't 3238 * allocated by alloc_msg. This allows us to read a small fixed-size 3239 * per-type header in the front and then gracefully fail (i.e., 3240 * propagate the error to the caller based on info in the front) when 3241 * the middle is too large. 3242 */ 3243 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) 3244 { 3245 int type = le16_to_cpu(msg->hdr.type); 3246 int middle_len = le32_to_cpu(msg->hdr.middle_len); 3247 3248 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type, 3249 ceph_msg_type_name(type), middle_len); 3250 BUG_ON(!middle_len); 3251 BUG_ON(msg->middle); 3252 3253 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS); 3254 if (!msg->middle) 3255 return -ENOMEM; 3256 return 0; 3257 } 3258 3259 /* 3260 * Allocate a message for receiving an incoming message on a 3261 * connection, and save the result in con->in_msg. Uses the 3262 * connection's private alloc_msg op if available. 3263 * 3264 * Returns 0 on success, or a negative error code. 3265 * 3266 * On success, if we set *skip = 1: 3267 * - the next message should be skipped and ignored. 3268 * - con->in_msg == NULL 3269 * or if we set *skip = 0: 3270 * - con->in_msg is non-null. 
3271 * On error (ENOMEM, EAGAIN, ...), 3272 * - con->in_msg == NULL 3273 */ 3274 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip) 3275 { 3276 struct ceph_msg_header *hdr = &con->in_hdr; 3277 int middle_len = le32_to_cpu(hdr->middle_len); 3278 struct ceph_msg *msg; 3279 int ret = 0; 3280 3281 BUG_ON(con->in_msg != NULL); 3282 BUG_ON(!con->ops->alloc_msg); 3283 3284 mutex_unlock(&con->mutex); 3285 msg = con->ops->alloc_msg(con, hdr, skip); 3286 mutex_lock(&con->mutex); 3287 if (con->state != CON_STATE_OPEN) { 3288 if (msg) 3289 ceph_msg_put(msg); 3290 return -EAGAIN; 3291 } 3292 if (msg) { 3293 BUG_ON(*skip); 3294 con->in_msg = msg; 3295 con->in_msg->con = con->ops->get(con); 3296 BUG_ON(con->in_msg->con == NULL); 3297 } else { 3298 /* 3299 * Null message pointer means either we should skip 3300 * this message or we couldn't allocate memory. The 3301 * former is not an error. 3302 */ 3303 if (*skip) 3304 return 0; 3305 con->error_msg = "error allocating memory for incoming message"; 3306 3307 return -ENOMEM; 3308 } 3309 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); 3310 3311 if (middle_len && !con->in_msg->middle) { 3312 ret = ceph_alloc_middle(con, con->in_msg); 3313 if (ret < 0) { 3314 ceph_msg_put(con->in_msg); 3315 con->in_msg = NULL; 3316 } 3317 } 3318 3319 return ret; 3320 } 3321 3322 3323 /* 3324 * Free a generically kmalloc'd message. 3325 */ 3326 static void ceph_msg_free(struct ceph_msg *m) 3327 { 3328 dout("%s %p\n", __func__, m); 3329 kvfree(m->front.iov_base); 3330 kmem_cache_free(ceph_msg_cache, m); 3331 } 3332 3333 static void ceph_msg_release(struct kref *kref) 3334 { 3335 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref); 3336 LIST_HEAD(data); 3337 struct list_head *links; 3338 struct list_head *next; 3339 3340 dout("%s %p\n", __func__, m); 3341 WARN_ON(!list_empty(&m->list_head)); 3342 3343 /* drop middle, data, if any */ 3344 if (m->middle) { 3345 ceph_buffer_put(m->middle); 3346 m->middle = NULL; 3347 } 3348 3349 list_splice_init(&m->data, &data); 3350 list_for_each_safe(links, next, &data) { 3351 struct ceph_msg_data *data; 3352 3353 data = list_entry(links, struct ceph_msg_data, links); 3354 list_del_init(links); 3355 ceph_msg_data_destroy(data); 3356 } 3357 m->data_length = 0; 3358 3359 if (m->pool) 3360 ceph_msgpool_put(m->pool, m); 3361 else 3362 ceph_msg_free(m); 3363 } 3364 3365 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) 3366 { 3367 dout("%s %p (was %d)\n", __func__, msg, 3368 atomic_read(&msg->kref.refcount)); 3369 kref_get(&msg->kref); 3370 return msg; 3371 } 3372 EXPORT_SYMBOL(ceph_msg_get); 3373 3374 void ceph_msg_put(struct ceph_msg *msg) 3375 { 3376 dout("%s %p (was %d)\n", __func__, msg, 3377 atomic_read(&msg->kref.refcount)); 3378 kref_put(&msg->kref, ceph_msg_release); 3379 } 3380 EXPORT_SYMBOL(ceph_msg_put); 3381 3382 void ceph_msg_dump(struct ceph_msg *msg) 3383 { 3384 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg, 3385 msg->front_alloc_len, msg->data_length); 3386 print_hex_dump(KERN_DEBUG, "header: ", 3387 DUMP_PREFIX_OFFSET, 16, 1, 3388 &msg->hdr, sizeof(msg->hdr), true); 3389 print_hex_dump(KERN_DEBUG, " front: ", 3390 DUMP_PREFIX_OFFSET, 16, 1, 3391 msg->front.iov_base, msg->front.iov_len, true); 3392 if (msg->middle) 3393 print_hex_dump(KERN_DEBUG, "middle: ", 3394 DUMP_PREFIX_OFFSET, 16, 1, 3395 msg->middle->vec.iov_base, 3396 msg->middle->vec.iov_len, true); 3397 print_hex_dump(KERN_DEBUG, "footer: ", 3398 DUMP_PREFIX_OFFSET, 16, 1, 3399 &msg->footer, 
sizeof(msg->footer), true); 3400 } 3401 EXPORT_SYMBOL(ceph_msg_dump); 3402
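
/*
 * Example usage (illustrative sketch only, not part of this file's
 * API): build a message, attach a page payload, and hand it to an
 * open connection.  The connection keeps the reference we pass in
 * until the peer acks the message (see process_ack()) or it is
 * revoked with ceph_msg_revoke().  CEPH_MSG_OSD_OP and the 128-byte
 * front length are arbitrary stand-ins.
 */
#if 0	/* not compiled; assumes an already-open struct ceph_connection */
static void example_send(struct ceph_connection *con,
			 struct page **pages, size_t length)
{
	struct ceph_msg *msg;

	msg = ceph_msg_new(CEPH_MSG_OSD_OP, 128, GFP_NOFS, true);
	if (!msg)
		return;		/* can_fail == true: no WARN on failure */

	ceph_msg_data_add_pages(msg, pages, length, 0);
	ceph_con_send(con, msg);	/* ownership of our ref passes here */
}
#endif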