#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif	/* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

#define list_entry_next(pos, member)					\
	list_entry(pos->member.next, typeof(*pos), member)

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------                \    \
 *       |  | CLOSING |  socket event;  \    \
 *       |  -----------  await close     \    \
 *       |       ^                        \   |
 *       |       |                         \  |
 *       |       + con_sock_state_closing() \ |
 *       |      / \                         | |
 *       |     /   ---------------          | |
 *       |    /                   \         v v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       |  |   /                 --------------  connect initiated
 *       |  |   | con_sock_state_connected()
 *       |  |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
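 *
 * Each con_sock_state_*() helper below installs the new state with
 * atomic_xchg() and WARNs (printing the old value) if the previous
 * state is not one it expects for that transition.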
 */

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

/*
 * connection states
 */
#define CON_STATE_CLOSED	1	/* -> PREOPEN */
#define CON_STATE_PREOPEN	2	/* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3	/* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4	/* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5	/* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6	/* -> PREOPEN, CLOSED */

/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX		0	/* we can close channel or drop
						 * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING	1	/* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING		2	/* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED		3	/* socket state changed to closed */
#define CON_FLAG_BACKOFF		4	/* need to retry queuing delayed work */

static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CON_FLAG_LOSSYTX:
	case CON_FLAG_KEEPALIVE_PENDING:
	case CON_FLAG_WRITE_PENDING:
	case CON_FLAG_SOCK_CLOSED:
	case CON_FLAG_BACKOFF:
		return true;
	default:
		return false;
	}
}

static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
				    unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
				  unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_set_bit(con_flag, &con->flags);
}

/* Slab caches for frequently-allocated structures */

static struct kmem_cache *ceph_msg_cache;
static struct kmem_cache *ceph_msg_data_cache;

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024

static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void con_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
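 * ceph_pr_addr() formats into one of ADDR_STR_COUNT static buffers,
 * picked round-robin via an atomic counter, so up to that many
 * recently returned strings remain usable at once.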
183 */ 184 #define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */ 185 #define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG) 186 #define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1) 187 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */ 188 189 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN]; 190 static atomic_t addr_str_seq = ATOMIC_INIT(0); 191 192 static struct page *zero_page; /* used in certain error cases */ 193 194 const char *ceph_pr_addr(const struct sockaddr_storage *ss) 195 { 196 int i; 197 char *s; 198 struct sockaddr_in *in4 = (struct sockaddr_in *) ss; 199 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss; 200 201 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK; 202 s = addr_str[i]; 203 204 switch (ss->ss_family) { 205 case AF_INET: 206 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr, 207 ntohs(in4->sin_port)); 208 break; 209 210 case AF_INET6: 211 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr, 212 ntohs(in6->sin6_port)); 213 break; 214 215 default: 216 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)", 217 ss->ss_family); 218 } 219 220 return s; 221 } 222 EXPORT_SYMBOL(ceph_pr_addr); 223 224 static void encode_my_addr(struct ceph_messenger *msgr) 225 { 226 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr)); 227 ceph_encode_addr(&msgr->my_enc_addr); 228 } 229 230 /* 231 * work queue for all reading and writing to/from the socket. 232 */ 233 static struct workqueue_struct *ceph_msgr_wq; 234 235 static int ceph_msgr_slab_init(void) 236 { 237 BUG_ON(ceph_msg_cache); 238 ceph_msg_cache = kmem_cache_create("ceph_msg", 239 sizeof (struct ceph_msg), 240 __alignof__(struct ceph_msg), 0, NULL); 241 242 if (!ceph_msg_cache) 243 return -ENOMEM; 244 245 BUG_ON(ceph_msg_data_cache); 246 ceph_msg_data_cache = kmem_cache_create("ceph_msg_data", 247 sizeof (struct ceph_msg_data), 248 __alignof__(struct ceph_msg_data), 249 0, NULL); 250 if (ceph_msg_data_cache) 251 return 0; 252 253 kmem_cache_destroy(ceph_msg_cache); 254 ceph_msg_cache = NULL; 255 256 return -ENOMEM; 257 } 258 259 static void ceph_msgr_slab_exit(void) 260 { 261 BUG_ON(!ceph_msg_data_cache); 262 kmem_cache_destroy(ceph_msg_data_cache); 263 ceph_msg_data_cache = NULL; 264 265 BUG_ON(!ceph_msg_cache); 266 kmem_cache_destroy(ceph_msg_cache); 267 ceph_msg_cache = NULL; 268 } 269 270 static void _ceph_msgr_exit(void) 271 { 272 if (ceph_msgr_wq) { 273 destroy_workqueue(ceph_msgr_wq); 274 ceph_msgr_wq = NULL; 275 } 276 277 ceph_msgr_slab_exit(); 278 279 BUG_ON(zero_page == NULL); 280 kunmap(zero_page); 281 page_cache_release(zero_page); 282 zero_page = NULL; 283 } 284 285 int ceph_msgr_init(void) 286 { 287 BUG_ON(zero_page != NULL); 288 zero_page = ZERO_PAGE(0); 289 page_cache_get(zero_page); 290 291 if (ceph_msgr_slab_init()) 292 return -ENOMEM; 293 294 ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0); 295 if (ceph_msgr_wq) 296 return 0; 297 298 pr_err("msgr_init failed to create workqueue\n"); 299 _ceph_msgr_exit(); 300 301 return -ENOMEM; 302 } 303 EXPORT_SYMBOL(ceph_msgr_init); 304 305 void ceph_msgr_exit(void) 306 { 307 BUG_ON(ceph_msgr_wq == NULL); 308 309 _ceph_msgr_exit(); 310 } 311 EXPORT_SYMBOL(ceph_msgr_exit); 312 313 void ceph_msgr_flush(void) 314 { 315 flush_workqueue(ceph_msgr_wq); 316 } 317 EXPORT_SYMBOL(ceph_msgr_flush); 318 319 /* Connection socket state transition functions */ 320 321 static void con_sock_state_init(struct ceph_connection *con) 322 { 323 int old_state; 324 325 old_state = atomic_xchg(&con->sock_state, 
CON_SOCK_STATE_CLOSED); 326 if (WARN_ON(old_state != CON_SOCK_STATE_NEW)) 327 printk("%s: unexpected old state %d\n", __func__, old_state); 328 dout("%s con %p sock %d -> %d\n", __func__, con, old_state, 329 CON_SOCK_STATE_CLOSED); 330 } 331 332 static void con_sock_state_connecting(struct ceph_connection *con) 333 { 334 int old_state; 335 336 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING); 337 if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED)) 338 printk("%s: unexpected old state %d\n", __func__, old_state); 339 dout("%s con %p sock %d -> %d\n", __func__, con, old_state, 340 CON_SOCK_STATE_CONNECTING); 341 } 342 343 static void con_sock_state_connected(struct ceph_connection *con) 344 { 345 int old_state; 346 347 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED); 348 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING)) 349 printk("%s: unexpected old state %d\n", __func__, old_state); 350 dout("%s con %p sock %d -> %d\n", __func__, con, old_state, 351 CON_SOCK_STATE_CONNECTED); 352 } 353 354 static void con_sock_state_closing(struct ceph_connection *con) 355 { 356 int old_state; 357 358 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING); 359 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING && 360 old_state != CON_SOCK_STATE_CONNECTED && 361 old_state != CON_SOCK_STATE_CLOSING)) 362 printk("%s: unexpected old state %d\n", __func__, old_state); 363 dout("%s con %p sock %d -> %d\n", __func__, con, old_state, 364 CON_SOCK_STATE_CLOSING); 365 } 366 367 static void con_sock_state_closed(struct ceph_connection *con) 368 { 369 int old_state; 370 371 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED); 372 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED && 373 old_state != CON_SOCK_STATE_CLOSING && 374 old_state != CON_SOCK_STATE_CONNECTING && 375 old_state != CON_SOCK_STATE_CLOSED)) 376 printk("%s: unexpected old state %d\n", __func__, old_state); 377 dout("%s con %p sock %d -> %d\n", __func__, con, old_state, 378 CON_SOCK_STATE_CLOSED); 379 } 380 381 /* 382 * socket callback functions 383 */ 384 385 /* data available on socket, or listen socket received a connect */ 386 static void ceph_sock_data_ready(struct sock *sk) 387 { 388 struct ceph_connection *con = sk->sk_user_data; 389 if (atomic_read(&con->msgr->stopping)) { 390 return; 391 } 392 393 if (sk->sk_state != TCP_CLOSE_WAIT) { 394 dout("%s on %p state = %lu, queueing work\n", __func__, 395 con, con->state); 396 queue_con(con); 397 } 398 } 399 400 /* socket has buffer space for writing */ 401 static void ceph_sock_write_space(struct sock *sk) 402 { 403 struct ceph_connection *con = sk->sk_user_data; 404 405 /* only queue to workqueue if there is data we want to write, 406 * and there is sufficient space in the socket buffer to accept 407 * more data. clear SOCK_NOSPACE so that ceph_sock_write_space() 408 * doesn't get called again until try_write() fills the socket 409 * buffer. See net/ipv4/tcp_input.c:tcp_check_space() 410 * and net/core/stream.c:sk_stream_write_space(). 
411 */ 412 if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) { 413 if (sk_stream_is_writeable(sk)) { 414 dout("%s %p queueing write work\n", __func__, con); 415 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 416 queue_con(con); 417 } 418 } else { 419 dout("%s %p nothing to write\n", __func__, con); 420 } 421 } 422 423 /* socket's state has changed */ 424 static void ceph_sock_state_change(struct sock *sk) 425 { 426 struct ceph_connection *con = sk->sk_user_data; 427 428 dout("%s %p state = %lu sk_state = %u\n", __func__, 429 con, con->state, sk->sk_state); 430 431 switch (sk->sk_state) { 432 case TCP_CLOSE: 433 dout("%s TCP_CLOSE\n", __func__); 434 case TCP_CLOSE_WAIT: 435 dout("%s TCP_CLOSE_WAIT\n", __func__); 436 con_sock_state_closing(con); 437 con_flag_set(con, CON_FLAG_SOCK_CLOSED); 438 queue_con(con); 439 break; 440 case TCP_ESTABLISHED: 441 dout("%s TCP_ESTABLISHED\n", __func__); 442 con_sock_state_connected(con); 443 queue_con(con); 444 break; 445 default: /* Everything else is uninteresting */ 446 break; 447 } 448 } 449 450 /* 451 * set up socket callbacks 452 */ 453 static void set_sock_callbacks(struct socket *sock, 454 struct ceph_connection *con) 455 { 456 struct sock *sk = sock->sk; 457 sk->sk_user_data = con; 458 sk->sk_data_ready = ceph_sock_data_ready; 459 sk->sk_write_space = ceph_sock_write_space; 460 sk->sk_state_change = ceph_sock_state_change; 461 } 462 463 464 /* 465 * socket helpers 466 */ 467 468 /* 469 * initiate connection to a remote socket. 470 */ 471 static int ceph_tcp_connect(struct ceph_connection *con) 472 { 473 struct sockaddr_storage *paddr = &con->peer_addr.in_addr; 474 struct socket *sock; 475 int ret; 476 477 BUG_ON(con->sock); 478 ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM, 479 IPPROTO_TCP, &sock); 480 if (ret) 481 return ret; 482 sock->sk->sk_allocation = GFP_NOFS; 483 484 #ifdef CONFIG_LOCKDEP 485 lockdep_set_class(&sock->sk->sk_lock, &socket_class); 486 #endif 487 488 set_sock_callbacks(sock, con); 489 490 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr)); 491 492 con_sock_state_connecting(con); 493 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr), 494 O_NONBLOCK); 495 if (ret == -EINPROGRESS) { 496 dout("connect %s EINPROGRESS sk_state = %u\n", 497 ceph_pr_addr(&con->peer_addr.in_addr), 498 sock->sk->sk_state); 499 } else if (ret < 0) { 500 pr_err("connect %s error %d\n", 501 ceph_pr_addr(&con->peer_addr.in_addr), ret); 502 sock_release(sock); 503 con->error_msg = "connect error"; 504 505 return ret; 506 } 507 con->sock = sock; 508 return 0; 509 } 510 511 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) 512 { 513 struct kvec iov = {buf, len}; 514 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 515 int r; 516 517 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); 518 if (r == -EAGAIN) 519 r = 0; 520 return r; 521 } 522 523 static int ceph_tcp_recvpage(struct socket *sock, struct page *page, 524 int page_offset, size_t length) 525 { 526 void *kaddr; 527 int ret; 528 529 BUG_ON(page_offset + length > PAGE_SIZE); 530 531 kaddr = kmap(page); 532 BUG_ON(!kaddr); 533 ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length); 534 kunmap(page); 535 536 return ret; 537 } 538 539 /* 540 * write something. @more is true if caller will be sending more data 541 * shortly. 
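 * MSG_MORE is passed down when @more is set (MSG_EOR otherwise), and
 * an -EAGAIN from the socket is reported to the caller as 0.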
542 */ 543 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, 544 size_t kvlen, size_t len, int more) 545 { 546 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 547 int r; 548 549 if (more) 550 msg.msg_flags |= MSG_MORE; 551 else 552 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ 553 554 r = kernel_sendmsg(sock, &msg, iov, kvlen, len); 555 if (r == -EAGAIN) 556 r = 0; 557 return r; 558 } 559 560 static int ceph_tcp_sendpage(struct socket *sock, struct page *page, 561 int offset, size_t size, bool more) 562 { 563 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR); 564 int ret; 565 566 ret = kernel_sendpage(sock, page, offset, size, flags); 567 if (ret == -EAGAIN) 568 ret = 0; 569 570 return ret; 571 } 572 573 574 /* 575 * Shutdown/close the socket for the given connection. 576 */ 577 static int con_close_socket(struct ceph_connection *con) 578 { 579 int rc = 0; 580 581 dout("con_close_socket on %p sock %p\n", con, con->sock); 582 if (con->sock) { 583 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR); 584 sock_release(con->sock); 585 con->sock = NULL; 586 } 587 588 /* 589 * Forcibly clear the SOCK_CLOSED flag. It gets set 590 * independent of the connection mutex, and we could have 591 * received a socket close event before we had the chance to 592 * shut the socket down. 593 */ 594 con_flag_clear(con, CON_FLAG_SOCK_CLOSED); 595 596 con_sock_state_closed(con); 597 return rc; 598 } 599 600 /* 601 * Reset a connection. Discard all incoming and outgoing messages 602 * and clear *_seq state. 603 */ 604 static void ceph_msg_remove(struct ceph_msg *msg) 605 { 606 list_del_init(&msg->list_head); 607 BUG_ON(msg->con == NULL); 608 msg->con->ops->put(msg->con); 609 msg->con = NULL; 610 611 ceph_msg_put(msg); 612 } 613 static void ceph_msg_remove_list(struct list_head *head) 614 { 615 while (!list_empty(head)) { 616 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg, 617 list_head); 618 ceph_msg_remove(msg); 619 } 620 } 621 622 static void reset_connection(struct ceph_connection *con) 623 { 624 /* reset connection, out_queue, msg_ and connect_seq */ 625 /* discard existing out_queue and msg_seq */ 626 dout("reset_connection %p\n", con); 627 ceph_msg_remove_list(&con->out_queue); 628 ceph_msg_remove_list(&con->out_sent); 629 630 if (con->in_msg) { 631 BUG_ON(con->in_msg->con != con); 632 con->in_msg->con = NULL; 633 ceph_msg_put(con->in_msg); 634 con->in_msg = NULL; 635 con->ops->put(con); 636 } 637 638 con->connect_seq = 0; 639 con->out_seq = 0; 640 if (con->out_msg) { 641 ceph_msg_put(con->out_msg); 642 con->out_msg = NULL; 643 } 644 con->in_seq = 0; 645 con->in_seq_acked = 0; 646 } 647 648 /* 649 * mark a peer down. drop any open connections. 650 */ 651 void ceph_con_close(struct ceph_connection *con) 652 { 653 mutex_lock(&con->mutex); 654 dout("con_close %p peer %s\n", con, 655 ceph_pr_addr(&con->peer_addr.in_addr)); 656 con->state = CON_STATE_CLOSED; 657 658 con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */ 659 con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING); 660 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 661 con_flag_clear(con, CON_FLAG_BACKOFF); 662 663 reset_connection(con); 664 con->peer_global_seq = 0; 665 cancel_delayed_work(&con->work); 666 con_close_socket(con); 667 mutex_unlock(&con->mutex); 668 } 669 EXPORT_SYMBOL(ceph_con_close); 670 671 /* 672 * Reopen a closed connection, with a new peer address. 
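 * The connection must currently be CLOSED; it is moved to PREOPEN,
 * the peer name and address are recorded, and work is queued to
 * establish the session.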
673 */ 674 void ceph_con_open(struct ceph_connection *con, 675 __u8 entity_type, __u64 entity_num, 676 struct ceph_entity_addr *addr) 677 { 678 mutex_lock(&con->mutex); 679 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr)); 680 681 WARN_ON(con->state != CON_STATE_CLOSED); 682 con->state = CON_STATE_PREOPEN; 683 684 con->peer_name.type = (__u8) entity_type; 685 con->peer_name.num = cpu_to_le64(entity_num); 686 687 memcpy(&con->peer_addr, addr, sizeof(*addr)); 688 con->delay = 0; /* reset backoff memory */ 689 mutex_unlock(&con->mutex); 690 queue_con(con); 691 } 692 EXPORT_SYMBOL(ceph_con_open); 693 694 /* 695 * return true if this connection ever successfully opened 696 */ 697 bool ceph_con_opened(struct ceph_connection *con) 698 { 699 return con->connect_seq > 0; 700 } 701 702 /* 703 * initialize a new connection. 704 */ 705 void ceph_con_init(struct ceph_connection *con, void *private, 706 const struct ceph_connection_operations *ops, 707 struct ceph_messenger *msgr) 708 { 709 dout("con_init %p\n", con); 710 memset(con, 0, sizeof(*con)); 711 con->private = private; 712 con->ops = ops; 713 con->msgr = msgr; 714 715 con_sock_state_init(con); 716 717 mutex_init(&con->mutex); 718 INIT_LIST_HEAD(&con->out_queue); 719 INIT_LIST_HEAD(&con->out_sent); 720 INIT_DELAYED_WORK(&con->work, con_work); 721 722 con->state = CON_STATE_CLOSED; 723 } 724 EXPORT_SYMBOL(ceph_con_init); 725 726 727 /* 728 * We maintain a global counter to order connection attempts. Get 729 * a unique seq greater than @gt. 730 */ 731 static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt) 732 { 733 u32 ret; 734 735 spin_lock(&msgr->global_seq_lock); 736 if (msgr->global_seq < gt) 737 msgr->global_seq = gt; 738 ret = ++msgr->global_seq; 739 spin_unlock(&msgr->global_seq_lock); 740 return ret; 741 } 742 743 static void con_out_kvec_reset(struct ceph_connection *con) 744 { 745 con->out_kvec_left = 0; 746 con->out_kvec_bytes = 0; 747 con->out_kvec_cur = &con->out_kvec[0]; 748 } 749 750 static void con_out_kvec_add(struct ceph_connection *con, 751 size_t size, void *data) 752 { 753 int index; 754 755 index = con->out_kvec_left; 756 BUG_ON(index >= ARRAY_SIZE(con->out_kvec)); 757 758 con->out_kvec[index].iov_len = size; 759 con->out_kvec[index].iov_base = data; 760 con->out_kvec_left++; 761 con->out_kvec_bytes += size; 762 } 763 764 #ifdef CONFIG_BLOCK 765 766 /* 767 * For a bio data item, a piece is whatever remains of the next 768 * entry in the current bio iovec, or the first entry in the next 769 * bio in the list. 
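 * The cursor records the current bio and a bvec_iter within it;
 * advancing walks the iterator and follows bi_next to the next bio
 * in the chain when the current one is exhausted.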
770 */ 771 static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor, 772 size_t length) 773 { 774 struct ceph_msg_data *data = cursor->data; 775 struct bio *bio; 776 777 BUG_ON(data->type != CEPH_MSG_DATA_BIO); 778 779 bio = data->bio; 780 BUG_ON(!bio); 781 782 cursor->resid = min(length, data->bio_length); 783 cursor->bio = bio; 784 cursor->bvec_iter = bio->bi_iter; 785 cursor->last_piece = 786 cursor->resid <= bio_iter_len(bio, cursor->bvec_iter); 787 } 788 789 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor, 790 size_t *page_offset, 791 size_t *length) 792 { 793 struct ceph_msg_data *data = cursor->data; 794 struct bio *bio; 795 struct bio_vec bio_vec; 796 797 BUG_ON(data->type != CEPH_MSG_DATA_BIO); 798 799 bio = cursor->bio; 800 BUG_ON(!bio); 801 802 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter); 803 804 *page_offset = (size_t) bio_vec.bv_offset; 805 BUG_ON(*page_offset >= PAGE_SIZE); 806 if (cursor->last_piece) /* pagelist offset is always 0 */ 807 *length = cursor->resid; 808 else 809 *length = (size_t) bio_vec.bv_len; 810 BUG_ON(*length > cursor->resid); 811 BUG_ON(*page_offset + *length > PAGE_SIZE); 812 813 return bio_vec.bv_page; 814 } 815 816 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor, 817 size_t bytes) 818 { 819 struct bio *bio; 820 struct bio_vec bio_vec; 821 822 BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO); 823 824 bio = cursor->bio; 825 BUG_ON(!bio); 826 827 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter); 828 829 /* Advance the cursor offset */ 830 831 BUG_ON(cursor->resid < bytes); 832 cursor->resid -= bytes; 833 834 bio_advance_iter(bio, &cursor->bvec_iter, bytes); 835 836 if (bytes < bio_vec.bv_len) 837 return false; /* more bytes to process in this segment */ 838 839 /* Move on to the next segment, and possibly the next bio */ 840 841 if (!cursor->bvec_iter.bi_size) { 842 bio = bio->bi_next; 843 cursor->bio = bio; 844 if (bio) 845 cursor->bvec_iter = bio->bi_iter; 846 else 847 memset(&cursor->bvec_iter, 0, 848 sizeof(cursor->bvec_iter)); 849 } 850 851 if (!cursor->last_piece) { 852 BUG_ON(!cursor->resid); 853 BUG_ON(!bio); 854 /* A short read is OK, so use <= rather than == */ 855 if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter)) 856 cursor->last_piece = true; 857 } 858 859 return true; 860 } 861 #endif /* CONFIG_BLOCK */ 862 863 /* 864 * For a page array, a piece comes from the first page in the array 865 * that has not already been fully consumed. 
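 * data->alignment gives the offset of the data within the first
 * page, and calc_pages_for() determines how many pages the array
 * spans.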
866 */ 867 static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor, 868 size_t length) 869 { 870 struct ceph_msg_data *data = cursor->data; 871 int page_count; 872 873 BUG_ON(data->type != CEPH_MSG_DATA_PAGES); 874 875 BUG_ON(!data->pages); 876 BUG_ON(!data->length); 877 878 cursor->resid = min(length, data->length); 879 page_count = calc_pages_for(data->alignment, (u64)data->length); 880 cursor->page_offset = data->alignment & ~PAGE_MASK; 881 cursor->page_index = 0; 882 BUG_ON(page_count > (int)USHRT_MAX); 883 cursor->page_count = (unsigned short)page_count; 884 BUG_ON(length > SIZE_MAX - cursor->page_offset); 885 cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE; 886 } 887 888 static struct page * 889 ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor, 890 size_t *page_offset, size_t *length) 891 { 892 struct ceph_msg_data *data = cursor->data; 893 894 BUG_ON(data->type != CEPH_MSG_DATA_PAGES); 895 896 BUG_ON(cursor->page_index >= cursor->page_count); 897 BUG_ON(cursor->page_offset >= PAGE_SIZE); 898 899 *page_offset = cursor->page_offset; 900 if (cursor->last_piece) 901 *length = cursor->resid; 902 else 903 *length = PAGE_SIZE - *page_offset; 904 905 return data->pages[cursor->page_index]; 906 } 907 908 static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor, 909 size_t bytes) 910 { 911 BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES); 912 913 BUG_ON(cursor->page_offset + bytes > PAGE_SIZE); 914 915 /* Advance the cursor page offset */ 916 917 cursor->resid -= bytes; 918 cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK; 919 if (!bytes || cursor->page_offset) 920 return false; /* more bytes to process in the current page */ 921 922 if (!cursor->resid) 923 return false; /* no more data */ 924 925 /* Move on to the next page; offset is already at 0 */ 926 927 BUG_ON(cursor->page_index >= cursor->page_count); 928 cursor->page_index++; 929 cursor->last_piece = cursor->resid <= PAGE_SIZE; 930 931 return true; 932 } 933 934 /* 935 * For a pagelist, a piece is whatever remains to be consumed in the 936 * first page in the list, or the front of the next page. 
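 * The first page of a pagelist always starts at offset 0, so the
 * in-page offset is simply cursor->offset modulo PAGE_SIZE.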
937 */ 938 static void 939 ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor, 940 size_t length) 941 { 942 struct ceph_msg_data *data = cursor->data; 943 struct ceph_pagelist *pagelist; 944 struct page *page; 945 946 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); 947 948 pagelist = data->pagelist; 949 BUG_ON(!pagelist); 950 951 if (!length) 952 return; /* pagelist can be assigned but empty */ 953 954 BUG_ON(list_empty(&pagelist->head)); 955 page = list_first_entry(&pagelist->head, struct page, lru); 956 957 cursor->resid = min(length, pagelist->length); 958 cursor->page = page; 959 cursor->offset = 0; 960 cursor->last_piece = cursor->resid <= PAGE_SIZE; 961 } 962 963 static struct page * 964 ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor, 965 size_t *page_offset, size_t *length) 966 { 967 struct ceph_msg_data *data = cursor->data; 968 struct ceph_pagelist *pagelist; 969 970 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); 971 972 pagelist = data->pagelist; 973 BUG_ON(!pagelist); 974 975 BUG_ON(!cursor->page); 976 BUG_ON(cursor->offset + cursor->resid != pagelist->length); 977 978 /* offset of first page in pagelist is always 0 */ 979 *page_offset = cursor->offset & ~PAGE_MASK; 980 if (cursor->last_piece) 981 *length = cursor->resid; 982 else 983 *length = PAGE_SIZE - *page_offset; 984 985 return cursor->page; 986 } 987 988 static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor, 989 size_t bytes) 990 { 991 struct ceph_msg_data *data = cursor->data; 992 struct ceph_pagelist *pagelist; 993 994 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); 995 996 pagelist = data->pagelist; 997 BUG_ON(!pagelist); 998 999 BUG_ON(cursor->offset + cursor->resid != pagelist->length); 1000 BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE); 1001 1002 /* Advance the cursor offset */ 1003 1004 cursor->resid -= bytes; 1005 cursor->offset += bytes; 1006 /* offset of first page in pagelist is always 0 */ 1007 if (!bytes || cursor->offset & ~PAGE_MASK) 1008 return false; /* more bytes to process in the current page */ 1009 1010 if (!cursor->resid) 1011 return false; /* no more data */ 1012 1013 /* Move on to the next page */ 1014 1015 BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head)); 1016 cursor->page = list_entry_next(cursor->page, lru); 1017 cursor->last_piece = cursor->resid <= PAGE_SIZE; 1018 1019 return true; 1020 } 1021 1022 /* 1023 * Message data is handled (sent or received) in pieces, where each 1024 * piece resides on a single page. The network layer might not 1025 * consume an entire piece at once. A data item's cursor keeps 1026 * track of which piece is next to process and how much remains to 1027 * be processed in that piece. It also tracks whether the current 1028 * piece is the last one in the data item. 
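 * For example (hypothetical sizes): a 10000-byte page-array item is
 * handed out roughly one page-sized piece at a time; need_crc is set
 * whenever a new piece begins, so each piece's CRC contribution is
 * computed only once even if the socket accepts it across several
 * writes.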
1029 */ 1030 static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor) 1031 { 1032 size_t length = cursor->total_resid; 1033 1034 switch (cursor->data->type) { 1035 case CEPH_MSG_DATA_PAGELIST: 1036 ceph_msg_data_pagelist_cursor_init(cursor, length); 1037 break; 1038 case CEPH_MSG_DATA_PAGES: 1039 ceph_msg_data_pages_cursor_init(cursor, length); 1040 break; 1041 #ifdef CONFIG_BLOCK 1042 case CEPH_MSG_DATA_BIO: 1043 ceph_msg_data_bio_cursor_init(cursor, length); 1044 break; 1045 #endif /* CONFIG_BLOCK */ 1046 case CEPH_MSG_DATA_NONE: 1047 default: 1048 /* BUG(); */ 1049 break; 1050 } 1051 cursor->need_crc = true; 1052 } 1053 1054 static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length) 1055 { 1056 struct ceph_msg_data_cursor *cursor = &msg->cursor; 1057 struct ceph_msg_data *data; 1058 1059 BUG_ON(!length); 1060 BUG_ON(length > msg->data_length); 1061 BUG_ON(list_empty(&msg->data)); 1062 1063 cursor->data_head = &msg->data; 1064 cursor->total_resid = length; 1065 data = list_first_entry(&msg->data, struct ceph_msg_data, links); 1066 cursor->data = data; 1067 1068 __ceph_msg_data_cursor_init(cursor); 1069 } 1070 1071 /* 1072 * Return the page containing the next piece to process for a given 1073 * data item, and supply the page offset and length of that piece. 1074 * Indicate whether this is the last piece in this data item. 1075 */ 1076 static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor, 1077 size_t *page_offset, size_t *length, 1078 bool *last_piece) 1079 { 1080 struct page *page; 1081 1082 switch (cursor->data->type) { 1083 case CEPH_MSG_DATA_PAGELIST: 1084 page = ceph_msg_data_pagelist_next(cursor, page_offset, length); 1085 break; 1086 case CEPH_MSG_DATA_PAGES: 1087 page = ceph_msg_data_pages_next(cursor, page_offset, length); 1088 break; 1089 #ifdef CONFIG_BLOCK 1090 case CEPH_MSG_DATA_BIO: 1091 page = ceph_msg_data_bio_next(cursor, page_offset, length); 1092 break; 1093 #endif /* CONFIG_BLOCK */ 1094 case CEPH_MSG_DATA_NONE: 1095 default: 1096 page = NULL; 1097 break; 1098 } 1099 BUG_ON(!page); 1100 BUG_ON(*page_offset + *length > PAGE_SIZE); 1101 BUG_ON(!*length); 1102 if (last_piece) 1103 *last_piece = cursor->last_piece; 1104 1105 return page; 1106 } 1107 1108 /* 1109 * Returns true if the result moves the cursor on to the next piece 1110 * of the data item. 
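 * Consuming only part of the current piece (or finishing the final
 * piece of the data item) returns false; otherwise the cursor steps
 * to the next page or bio segment and returns true.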
 */
static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
				  size_t bytes)
{
	bool new_piece;

	BUG_ON(bytes > cursor->resid);
	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
		break;
	case CEPH_MSG_DATA_PAGES:
		new_piece = ceph_msg_data_pages_advance(cursor, bytes);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		new_piece = ceph_msg_data_bio_advance(cursor, bytes);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		BUG();
		break;
	}
	cursor->total_resid -= bytes;

	if (!cursor->resid && cursor->total_resid) {
		WARN_ON(!cursor->last_piece);
		BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
		cursor->data = list_entry_next(cursor->data, links);
		__ceph_msg_data_cursor_init(cursor);
		new_piece = true;
	}
	cursor->need_crc = new_piece;

	return new_piece;
}

static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
	BUG_ON(!msg);
	BUG_ON(!data_len);

	/* Initialize data cursor */

	ceph_msg_data_cursor_init(msg, (size_t)data_len);
}

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid; we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;
	int v = con->out_kvec_left;

	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}

/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	con_out_kvec_reset(con);
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
				 &con->out_temp_ack);
	}

	BUG_ON(list_empty(&con->out_queue));
	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;
	BUG_ON(m->con != con);

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
1214 */ 1215 if (m->needs_out_seq) { 1216 m->hdr.seq = cpu_to_le64(++con->out_seq); 1217 m->needs_out_seq = false; 1218 } 1219 WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len)); 1220 1221 dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n", 1222 m, con->out_seq, le16_to_cpu(m->hdr.type), 1223 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len), 1224 m->data_length); 1225 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len); 1226 1227 /* tag + hdr + front + middle */ 1228 con_out_kvec_add(con, sizeof (tag_msg), &tag_msg); 1229 con_out_kvec_add(con, sizeof (m->hdr), &m->hdr); 1230 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base); 1231 1232 if (m->middle) 1233 con_out_kvec_add(con, m->middle->vec.iov_len, 1234 m->middle->vec.iov_base); 1235 1236 /* fill in crc (except data pages), footer */ 1237 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc)); 1238 con->out_msg->hdr.crc = cpu_to_le32(crc); 1239 con->out_msg->footer.flags = 0; 1240 1241 crc = crc32c(0, m->front.iov_base, m->front.iov_len); 1242 con->out_msg->footer.front_crc = cpu_to_le32(crc); 1243 if (m->middle) { 1244 crc = crc32c(0, m->middle->vec.iov_base, 1245 m->middle->vec.iov_len); 1246 con->out_msg->footer.middle_crc = cpu_to_le32(crc); 1247 } else 1248 con->out_msg->footer.middle_crc = 0; 1249 dout("%s front_crc %u middle_crc %u\n", __func__, 1250 le32_to_cpu(con->out_msg->footer.front_crc), 1251 le32_to_cpu(con->out_msg->footer.middle_crc)); 1252 1253 /* is there a data payload? */ 1254 con->out_msg->footer.data_crc = 0; 1255 if (m->data_length) { 1256 prepare_message_data(con->out_msg, m->data_length); 1257 con->out_more = 1; /* data + footer will follow */ 1258 } else { 1259 /* no, queue up footer too and be done */ 1260 prepare_write_message_footer(con); 1261 } 1262 1263 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1264 } 1265 1266 /* 1267 * Prepare an ack. 1268 */ 1269 static void prepare_write_ack(struct ceph_connection *con) 1270 { 1271 dout("prepare_write_ack %p %llu -> %llu\n", con, 1272 con->in_seq_acked, con->in_seq); 1273 con->in_seq_acked = con->in_seq; 1274 1275 con_out_kvec_reset(con); 1276 1277 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack); 1278 1279 con->out_temp_ack = cpu_to_le64(con->in_seq_acked); 1280 con_out_kvec_add(con, sizeof (con->out_temp_ack), 1281 &con->out_temp_ack); 1282 1283 con->out_more = 1; /* more will follow.. eventually.. */ 1284 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1285 } 1286 1287 /* 1288 * Prepare to share the seq during handshake 1289 */ 1290 static void prepare_write_seq(struct ceph_connection *con) 1291 { 1292 dout("prepare_write_seq %p %llu -> %llu\n", con, 1293 con->in_seq_acked, con->in_seq); 1294 con->in_seq_acked = con->in_seq; 1295 1296 con_out_kvec_reset(con); 1297 1298 con->out_temp_ack = cpu_to_le64(con->in_seq_acked); 1299 con_out_kvec_add(con, sizeof (con->out_temp_ack), 1300 &con->out_temp_ack); 1301 1302 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1303 } 1304 1305 /* 1306 * Prepare to write keepalive byte. 1307 */ 1308 static void prepare_write_keepalive(struct ceph_connection *con) 1309 { 1310 dout("prepare_write_keepalive %p\n", con); 1311 con_out_kvec_reset(con); 1312 con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive); 1313 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1314 } 1315 1316 /* 1317 * Connection negotiation. 
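 * The client writes its banner and encoded address, then a
 * ceph_msg_connect (plus any authorizer); it reads back the peer's
 * banner, addresses and ceph_msg_connect_reply, and reacts to the
 * reply tag in process_connect().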
1318 */ 1319 1320 static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con, 1321 int *auth_proto) 1322 { 1323 struct ceph_auth_handshake *auth; 1324 1325 if (!con->ops->get_authorizer) { 1326 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN; 1327 con->out_connect.authorizer_len = 0; 1328 return NULL; 1329 } 1330 1331 /* Can't hold the mutex while getting authorizer */ 1332 mutex_unlock(&con->mutex); 1333 auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry); 1334 mutex_lock(&con->mutex); 1335 1336 if (IS_ERR(auth)) 1337 return auth; 1338 if (con->state != CON_STATE_NEGOTIATING) 1339 return ERR_PTR(-EAGAIN); 1340 1341 con->auth_reply_buf = auth->authorizer_reply_buf; 1342 con->auth_reply_buf_len = auth->authorizer_reply_buf_len; 1343 return auth; 1344 } 1345 1346 /* 1347 * We connected to a peer and are saying hello. 1348 */ 1349 static void prepare_write_banner(struct ceph_connection *con) 1350 { 1351 con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER); 1352 con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr), 1353 &con->msgr->my_enc_addr); 1354 1355 con->out_more = 0; 1356 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1357 } 1358 1359 static int prepare_write_connect(struct ceph_connection *con) 1360 { 1361 unsigned int global_seq = get_global_seq(con->msgr, 0); 1362 int proto; 1363 int auth_proto; 1364 struct ceph_auth_handshake *auth; 1365 1366 switch (con->peer_name.type) { 1367 case CEPH_ENTITY_TYPE_MON: 1368 proto = CEPH_MONC_PROTOCOL; 1369 break; 1370 case CEPH_ENTITY_TYPE_OSD: 1371 proto = CEPH_OSDC_PROTOCOL; 1372 break; 1373 case CEPH_ENTITY_TYPE_MDS: 1374 proto = CEPH_MDSC_PROTOCOL; 1375 break; 1376 default: 1377 BUG(); 1378 } 1379 1380 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, 1381 con->connect_seq, global_seq, proto); 1382 1383 con->out_connect.features = cpu_to_le64(con->msgr->supported_features); 1384 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); 1385 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); 1386 con->out_connect.global_seq = cpu_to_le32(global_seq); 1387 con->out_connect.protocol_version = cpu_to_le32(proto); 1388 con->out_connect.flags = 0; 1389 1390 auth_proto = CEPH_AUTH_UNKNOWN; 1391 auth = get_connect_authorizer(con, &auth_proto); 1392 if (IS_ERR(auth)) 1393 return PTR_ERR(auth); 1394 1395 con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto); 1396 con->out_connect.authorizer_len = auth ? 1397 cpu_to_le32(auth->authorizer_buf_len) : 0; 1398 1399 con_out_kvec_add(con, sizeof (con->out_connect), 1400 &con->out_connect); 1401 if (auth && auth->authorizer_buf_len) 1402 con_out_kvec_add(con, auth->authorizer_buf_len, 1403 auth->authorizer_buf); 1404 1405 con->out_more = 0; 1406 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1407 1408 return 0; 1409 } 1410 1411 /* 1412 * write as much of pending kvecs to the socket as we can. 
1413 * 1 -> done 1414 * 0 -> socket full, but more to do 1415 * <0 -> error 1416 */ 1417 static int write_partial_kvec(struct ceph_connection *con) 1418 { 1419 int ret; 1420 1421 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes); 1422 while (con->out_kvec_bytes > 0) { 1423 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur, 1424 con->out_kvec_left, con->out_kvec_bytes, 1425 con->out_more); 1426 if (ret <= 0) 1427 goto out; 1428 con->out_kvec_bytes -= ret; 1429 if (con->out_kvec_bytes == 0) 1430 break; /* done */ 1431 1432 /* account for full iov entries consumed */ 1433 while (ret >= con->out_kvec_cur->iov_len) { 1434 BUG_ON(!con->out_kvec_left); 1435 ret -= con->out_kvec_cur->iov_len; 1436 con->out_kvec_cur++; 1437 con->out_kvec_left--; 1438 } 1439 /* and for a partially-consumed entry */ 1440 if (ret) { 1441 con->out_kvec_cur->iov_len -= ret; 1442 con->out_kvec_cur->iov_base += ret; 1443 } 1444 } 1445 con->out_kvec_left = 0; 1446 con->out_kvec_is_msg = false; 1447 ret = 1; 1448 out: 1449 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con, 1450 con->out_kvec_bytes, con->out_kvec_left, ret); 1451 return ret; /* done! */ 1452 } 1453 1454 static u32 ceph_crc32c_page(u32 crc, struct page *page, 1455 unsigned int page_offset, 1456 unsigned int length) 1457 { 1458 char *kaddr; 1459 1460 kaddr = kmap(page); 1461 BUG_ON(kaddr == NULL); 1462 crc = crc32c(crc, kaddr + page_offset, length); 1463 kunmap(page); 1464 1465 return crc; 1466 } 1467 /* 1468 * Write as much message data payload as we can. If we finish, queue 1469 * up the footer. 1470 * 1 -> done, footer is now queued in out_kvec[]. 1471 * 0 -> socket full, but more to do 1472 * <0 -> error 1473 */ 1474 static int write_partial_message_data(struct ceph_connection *con) 1475 { 1476 struct ceph_msg *msg = con->out_msg; 1477 struct ceph_msg_data_cursor *cursor = &msg->cursor; 1478 bool do_datacrc = !con->msgr->nocrc; 1479 u32 crc; 1480 1481 dout("%s %p msg %p\n", __func__, con, msg); 1482 1483 if (list_empty(&msg->data)) 1484 return -EINVAL; 1485 1486 /* 1487 * Iterate through each page that contains data to be 1488 * written, and send as much as possible for each. 1489 * 1490 * If we are calculating the data crc (the default), we will 1491 * need to map the page. If we have no pages, they have 1492 * been revoked, so use the zero page. 1493 */ 1494 crc = do_datacrc ? 
le32_to_cpu(msg->footer.data_crc) : 0; 1495 while (cursor->resid) { 1496 struct page *page; 1497 size_t page_offset; 1498 size_t length; 1499 bool last_piece; 1500 bool need_crc; 1501 int ret; 1502 1503 page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, 1504 &last_piece); 1505 ret = ceph_tcp_sendpage(con->sock, page, page_offset, 1506 length, last_piece); 1507 if (ret <= 0) { 1508 if (do_datacrc) 1509 msg->footer.data_crc = cpu_to_le32(crc); 1510 1511 return ret; 1512 } 1513 if (do_datacrc && cursor->need_crc) 1514 crc = ceph_crc32c_page(crc, page, page_offset, length); 1515 need_crc = ceph_msg_data_advance(&msg->cursor, (size_t)ret); 1516 } 1517 1518 dout("%s %p msg %p done\n", __func__, con, msg); 1519 1520 /* prepare and queue up footer, too */ 1521 if (do_datacrc) 1522 msg->footer.data_crc = cpu_to_le32(crc); 1523 else 1524 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC; 1525 con_out_kvec_reset(con); 1526 prepare_write_message_footer(con); 1527 1528 return 1; /* must return > 0 to indicate success */ 1529 } 1530 1531 /* 1532 * write some zeros 1533 */ 1534 static int write_partial_skip(struct ceph_connection *con) 1535 { 1536 int ret; 1537 1538 while (con->out_skip > 0) { 1539 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE); 1540 1541 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true); 1542 if (ret <= 0) 1543 goto out; 1544 con->out_skip -= ret; 1545 } 1546 ret = 1; 1547 out: 1548 return ret; 1549 } 1550 1551 /* 1552 * Prepare to read connection handshake, or an ack. 1553 */ 1554 static void prepare_read_banner(struct ceph_connection *con) 1555 { 1556 dout("prepare_read_banner %p\n", con); 1557 con->in_base_pos = 0; 1558 } 1559 1560 static void prepare_read_connect(struct ceph_connection *con) 1561 { 1562 dout("prepare_read_connect %p\n", con); 1563 con->in_base_pos = 0; 1564 } 1565 1566 static void prepare_read_ack(struct ceph_connection *con) 1567 { 1568 dout("prepare_read_ack %p\n", con); 1569 con->in_base_pos = 0; 1570 } 1571 1572 static void prepare_read_seq(struct ceph_connection *con) 1573 { 1574 dout("prepare_read_seq %p\n", con); 1575 con->in_base_pos = 0; 1576 con->in_tag = CEPH_MSGR_TAG_SEQ; 1577 } 1578 1579 static void prepare_read_tag(struct ceph_connection *con) 1580 { 1581 dout("prepare_read_tag %p\n", con); 1582 con->in_base_pos = 0; 1583 con->in_tag = CEPH_MSGR_TAG_READY; 1584 } 1585 1586 /* 1587 * Prepare to read a message. 
1588 */ 1589 static int prepare_read_message(struct ceph_connection *con) 1590 { 1591 dout("prepare_read_message %p\n", con); 1592 BUG_ON(con->in_msg != NULL); 1593 con->in_base_pos = 0; 1594 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0; 1595 return 0; 1596 } 1597 1598 1599 static int read_partial(struct ceph_connection *con, 1600 int end, int size, void *object) 1601 { 1602 while (con->in_base_pos < end) { 1603 int left = end - con->in_base_pos; 1604 int have = size - left; 1605 int ret = ceph_tcp_recvmsg(con->sock, object + have, left); 1606 if (ret <= 0) 1607 return ret; 1608 con->in_base_pos += ret; 1609 } 1610 return 1; 1611 } 1612 1613 1614 /* 1615 * Read all or part of the connect-side handshake on a new connection 1616 */ 1617 static int read_partial_banner(struct ceph_connection *con) 1618 { 1619 int size; 1620 int end; 1621 int ret; 1622 1623 dout("read_partial_banner %p at %d\n", con, con->in_base_pos); 1624 1625 /* peer's banner */ 1626 size = strlen(CEPH_BANNER); 1627 end = size; 1628 ret = read_partial(con, end, size, con->in_banner); 1629 if (ret <= 0) 1630 goto out; 1631 1632 size = sizeof (con->actual_peer_addr); 1633 end += size; 1634 ret = read_partial(con, end, size, &con->actual_peer_addr); 1635 if (ret <= 0) 1636 goto out; 1637 1638 size = sizeof (con->peer_addr_for_me); 1639 end += size; 1640 ret = read_partial(con, end, size, &con->peer_addr_for_me); 1641 if (ret <= 0) 1642 goto out; 1643 1644 out: 1645 return ret; 1646 } 1647 1648 static int read_partial_connect(struct ceph_connection *con) 1649 { 1650 int size; 1651 int end; 1652 int ret; 1653 1654 dout("read_partial_connect %p at %d\n", con, con->in_base_pos); 1655 1656 size = sizeof (con->in_reply); 1657 end = size; 1658 ret = read_partial(con, end, size, &con->in_reply); 1659 if (ret <= 0) 1660 goto out; 1661 1662 size = le32_to_cpu(con->in_reply.authorizer_len); 1663 end += size; 1664 ret = read_partial(con, end, size, con->auth_reply_buf); 1665 if (ret <= 0) 1666 goto out; 1667 1668 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n", 1669 con, (int)con->in_reply.tag, 1670 le32_to_cpu(con->in_reply.connect_seq), 1671 le32_to_cpu(con->in_reply.global_seq)); 1672 out: 1673 return ret; 1674 1675 } 1676 1677 /* 1678 * Verify the hello banner looks okay. 
1679 */ 1680 static int verify_hello(struct ceph_connection *con) 1681 { 1682 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) { 1683 pr_err("connect to %s got bad banner\n", 1684 ceph_pr_addr(&con->peer_addr.in_addr)); 1685 con->error_msg = "protocol error, bad banner"; 1686 return -1; 1687 } 1688 return 0; 1689 } 1690 1691 static bool addr_is_blank(struct sockaddr_storage *ss) 1692 { 1693 switch (ss->ss_family) { 1694 case AF_INET: 1695 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0; 1696 case AF_INET6: 1697 return 1698 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 && 1699 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 && 1700 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 && 1701 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0; 1702 } 1703 return false; 1704 } 1705 1706 static int addr_port(struct sockaddr_storage *ss) 1707 { 1708 switch (ss->ss_family) { 1709 case AF_INET: 1710 return ntohs(((struct sockaddr_in *)ss)->sin_port); 1711 case AF_INET6: 1712 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port); 1713 } 1714 return 0; 1715 } 1716 1717 static void addr_set_port(struct sockaddr_storage *ss, int p) 1718 { 1719 switch (ss->ss_family) { 1720 case AF_INET: 1721 ((struct sockaddr_in *)ss)->sin_port = htons(p); 1722 break; 1723 case AF_INET6: 1724 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p); 1725 break; 1726 } 1727 } 1728 1729 /* 1730 * Unlike other *_pton function semantics, zero indicates success. 1731 */ 1732 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss, 1733 char delim, const char **ipend) 1734 { 1735 struct sockaddr_in *in4 = (struct sockaddr_in *) ss; 1736 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss; 1737 1738 memset(ss, 0, sizeof(*ss)); 1739 1740 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) { 1741 ss->ss_family = AF_INET; 1742 return 0; 1743 } 1744 1745 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) { 1746 ss->ss_family = AF_INET6; 1747 return 0; 1748 } 1749 1750 return -EINVAL; 1751 } 1752 1753 /* 1754 * Extract hostname string and resolve using kernel DNS facility. 1755 */ 1756 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER 1757 static int ceph_dns_resolve_name(const char *name, size_t namelen, 1758 struct sockaddr_storage *ss, char delim, const char **ipend) 1759 { 1760 const char *end, *delim_p; 1761 char *colon_p, *ip_addr = NULL; 1762 int ip_len, ret; 1763 1764 /* 1765 * The end of the hostname occurs immediately preceding the delimiter or 1766 * the port marker (':') where the delimiter takes precedence. 1767 */ 1768 delim_p = memchr(name, delim, namelen); 1769 colon_p = memchr(name, ':', namelen); 1770 1771 if (delim_p && colon_p) 1772 end = delim_p < colon_p ? delim_p : colon_p; 1773 else if (!delim_p && colon_p) 1774 end = colon_p; 1775 else { 1776 end = delim_p; 1777 if (!end) /* case: hostname:/ */ 1778 end = name + namelen; 1779 } 1780 1781 if (end <= name) 1782 return -EINVAL; 1783 1784 /* do dns_resolve upcall */ 1785 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL); 1786 if (ip_len > 0) 1787 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL); 1788 else 1789 ret = -ESRCH; 1790 1791 kfree(ip_addr); 1792 1793 *ipend = end; 1794 1795 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name, 1796 ret, ret ? 
"failed" : ceph_pr_addr(ss)); 1797 1798 return ret; 1799 } 1800 #else 1801 static inline int ceph_dns_resolve_name(const char *name, size_t namelen, 1802 struct sockaddr_storage *ss, char delim, const char **ipend) 1803 { 1804 return -EINVAL; 1805 } 1806 #endif 1807 1808 /* 1809 * Parse a server name (IP or hostname). If a valid IP address is not found 1810 * then try to extract a hostname to resolve using userspace DNS upcall. 1811 */ 1812 static int ceph_parse_server_name(const char *name, size_t namelen, 1813 struct sockaddr_storage *ss, char delim, const char **ipend) 1814 { 1815 int ret; 1816 1817 ret = ceph_pton(name, namelen, ss, delim, ipend); 1818 if (ret) 1819 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend); 1820 1821 return ret; 1822 } 1823 1824 /* 1825 * Parse an ip[:port] list into an addr array. Use the default 1826 * monitor port if a port isn't specified. 1827 */ 1828 int ceph_parse_ips(const char *c, const char *end, 1829 struct ceph_entity_addr *addr, 1830 int max_count, int *count) 1831 { 1832 int i, ret = -EINVAL; 1833 const char *p = c; 1834 1835 dout("parse_ips on '%.*s'\n", (int)(end-c), c); 1836 for (i = 0; i < max_count; i++) { 1837 const char *ipend; 1838 struct sockaddr_storage *ss = &addr[i].in_addr; 1839 int port; 1840 char delim = ','; 1841 1842 if (*p == '[') { 1843 delim = ']'; 1844 p++; 1845 } 1846 1847 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend); 1848 if (ret) 1849 goto bad; 1850 ret = -EINVAL; 1851 1852 p = ipend; 1853 1854 if (delim == ']') { 1855 if (*p != ']') { 1856 dout("missing matching ']'\n"); 1857 goto bad; 1858 } 1859 p++; 1860 } 1861 1862 /* port? */ 1863 if (p < end && *p == ':') { 1864 port = 0; 1865 p++; 1866 while (p < end && *p >= '0' && *p <= '9') { 1867 port = (port * 10) + (*p - '0'); 1868 p++; 1869 } 1870 if (port == 0) 1871 port = CEPH_MON_PORT; 1872 else if (port > 65535) 1873 goto bad; 1874 } else { 1875 port = CEPH_MON_PORT; 1876 } 1877 1878 addr_set_port(ss, port); 1879 1880 dout("parse_ips got %s\n", ceph_pr_addr(ss)); 1881 1882 if (p == end) 1883 break; 1884 if (*p != ',') 1885 goto bad; 1886 p++; 1887 } 1888 1889 if (p != end) 1890 goto bad; 1891 1892 if (count) 1893 *count = i + 1; 1894 return 0; 1895 1896 bad: 1897 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); 1898 return ret; 1899 } 1900 EXPORT_SYMBOL(ceph_parse_ips); 1901 1902 static int process_banner(struct ceph_connection *con) 1903 { 1904 dout("process_banner on %p\n", con); 1905 1906 if (verify_hello(con) < 0) 1907 return -1; 1908 1909 ceph_decode_addr(&con->actual_peer_addr); 1910 ceph_decode_addr(&con->peer_addr_for_me); 1911 1912 /* 1913 * Make sure the other end is who we wanted. note that the other 1914 * end may not yet know their ip address, so if it's 0.0.0.0, give 1915 * them the benefit of the doubt. 1916 */ 1917 if (memcmp(&con->peer_addr, &con->actual_peer_addr, 1918 sizeof(con->peer_addr)) != 0 && 1919 !(addr_is_blank(&con->actual_peer_addr.in_addr) && 1920 con->actual_peer_addr.nonce == con->peer_addr.nonce)) { 1921 pr_warning("wrong peer, want %s/%d, got %s/%d\n", 1922 ceph_pr_addr(&con->peer_addr.in_addr), 1923 (int)le32_to_cpu(con->peer_addr.nonce), 1924 ceph_pr_addr(&con->actual_peer_addr.in_addr), 1925 (int)le32_to_cpu(con->actual_peer_addr.nonce)); 1926 con->error_msg = "wrong peer at address"; 1927 return -1; 1928 } 1929 1930 /* 1931 * did we learn our address? 
 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
	}

	return 0;
}

static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = con->msgr->supported_features;
	u64 req_feat = con->msgr->required_features;
	u64 server_feat = ceph_sanitize_features(
				le64_to_cpu(con->in_reply.features));
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		reset_connection(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		reset_connection(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_reply.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_NEGOTIATING)
			return -EAGAIN;
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
2029 */ 2030 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n", 2031 le32_to_cpu(con->out_connect.connect_seq), 2032 le32_to_cpu(con->in_reply.connect_seq)); 2033 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); 2034 con_out_kvec_reset(con); 2035 ret = prepare_write_connect(con); 2036 if (ret < 0) 2037 return ret; 2038 prepare_read_connect(con); 2039 break; 2040 2041 case CEPH_MSGR_TAG_RETRY_GLOBAL: 2042 /* 2043 * If we sent a smaller global_seq than the peer has, try 2044 * again with a larger value. 2045 */ 2046 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n", 2047 con->peer_global_seq, 2048 le32_to_cpu(con->in_reply.global_seq)); 2049 get_global_seq(con->msgr, 2050 le32_to_cpu(con->in_reply.global_seq)); 2051 con_out_kvec_reset(con); 2052 ret = prepare_write_connect(con); 2053 if (ret < 0) 2054 return ret; 2055 prepare_read_connect(con); 2056 break; 2057 2058 case CEPH_MSGR_TAG_SEQ: 2059 case CEPH_MSGR_TAG_READY: 2060 if (req_feat & ~server_feat) { 2061 pr_err("%s%lld %s protocol feature mismatch," 2062 " my required %llx > server's %llx, need %llx\n", 2063 ENTITY_NAME(con->peer_name), 2064 ceph_pr_addr(&con->peer_addr.in_addr), 2065 req_feat, server_feat, req_feat & ~server_feat); 2066 con->error_msg = "missing required protocol features"; 2067 reset_connection(con); 2068 return -1; 2069 } 2070 2071 WARN_ON(con->state != CON_STATE_NEGOTIATING); 2072 con->state = CON_STATE_OPEN; 2073 con->auth_retry = 0; /* we authenticated; clear flag */ 2074 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); 2075 con->connect_seq++; 2076 con->peer_features = server_feat; 2077 dout("process_connect got READY gseq %d cseq %d (%d)\n", 2078 con->peer_global_seq, 2079 le32_to_cpu(con->in_reply.connect_seq), 2080 con->connect_seq); 2081 WARN_ON(con->connect_seq != 2082 le32_to_cpu(con->in_reply.connect_seq)); 2083 2084 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) 2085 con_flag_set(con, CON_FLAG_LOSSYTX); 2086 2087 con->delay = 0; /* reset backoff memory */ 2088 2089 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) { 2090 prepare_write_seq(con); 2091 prepare_read_seq(con); 2092 } else { 2093 prepare_read_tag(con); 2094 } 2095 break; 2096 2097 case CEPH_MSGR_TAG_WAIT: 2098 /* 2099 * If there is a connection race (we are opening 2100 * connections to each other), one of us may just have 2101 * to WAIT. This shouldn't happen if we are the 2102 * client. 2103 */ 2104 pr_err("process_connect got WAIT as client\n"); 2105 con->error_msg = "protocol error, got WAIT as client"; 2106 return -1; 2107 2108 default: 2109 pr_err("connect protocol error, will retry\n"); 2110 con->error_msg = "protocol error, garbage tag during connect"; 2111 return -1; 2112 } 2113 return 0; 2114 } 2115 2116 2117 /* 2118 * read (part of) an ack 2119 */ 2120 static int read_partial_ack(struct ceph_connection *con) 2121 { 2122 int size = sizeof (con->in_temp_ack); 2123 int end = size; 2124 2125 return read_partial(con, end, size, &con->in_temp_ack); 2126 } 2127 2128 /* 2129 * We can finally discard anything that's been acked. 
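 *
 * out_sent holds sent-but-unacked messages in sequence order, so a
 * single ack value covers a prefix of the list.  Illustrative example:
 * with seqs 3, 4 and 5 outstanding and an ack of 4, messages 3 and 4
 * are dropped below and 5 stays queued until a later ack arrives.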
2130 */ 2131 static void process_ack(struct ceph_connection *con) 2132 { 2133 struct ceph_msg *m; 2134 u64 ack = le64_to_cpu(con->in_temp_ack); 2135 u64 seq; 2136 2137 while (!list_empty(&con->out_sent)) { 2138 m = list_first_entry(&con->out_sent, struct ceph_msg, 2139 list_head); 2140 seq = le64_to_cpu(m->hdr.seq); 2141 if (seq > ack) 2142 break; 2143 dout("got ack for seq %llu type %d at %p\n", seq, 2144 le16_to_cpu(m->hdr.type), m); 2145 m->ack_stamp = jiffies; 2146 ceph_msg_remove(m); 2147 } 2148 prepare_read_tag(con); 2149 } 2150 2151 2152 static int read_partial_message_section(struct ceph_connection *con, 2153 struct kvec *section, 2154 unsigned int sec_len, u32 *crc) 2155 { 2156 int ret, left; 2157 2158 BUG_ON(!section); 2159 2160 while (section->iov_len < sec_len) { 2161 BUG_ON(section->iov_base == NULL); 2162 left = sec_len - section->iov_len; 2163 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base + 2164 section->iov_len, left); 2165 if (ret <= 0) 2166 return ret; 2167 section->iov_len += ret; 2168 } 2169 if (section->iov_len == sec_len) 2170 *crc = crc32c(0, section->iov_base, section->iov_len); 2171 2172 return 1; 2173 } 2174 2175 static int read_partial_msg_data(struct ceph_connection *con) 2176 { 2177 struct ceph_msg *msg = con->in_msg; 2178 struct ceph_msg_data_cursor *cursor = &msg->cursor; 2179 const bool do_datacrc = !con->msgr->nocrc; 2180 struct page *page; 2181 size_t page_offset; 2182 size_t length; 2183 u32 crc = 0; 2184 int ret; 2185 2186 BUG_ON(!msg); 2187 if (list_empty(&msg->data)) 2188 return -EIO; 2189 2190 if (do_datacrc) 2191 crc = con->in_data_crc; 2192 while (cursor->resid) { 2193 page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, 2194 NULL); 2195 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); 2196 if (ret <= 0) { 2197 if (do_datacrc) 2198 con->in_data_crc = crc; 2199 2200 return ret; 2201 } 2202 2203 if (do_datacrc) 2204 crc = ceph_crc32c_page(crc, page, page_offset, ret); 2205 (void) ceph_msg_data_advance(&msg->cursor, (size_t)ret); 2206 } 2207 if (do_datacrc) 2208 con->in_data_crc = crc; 2209 2210 return 1; /* must return > 0 to indicate success */ 2211 } 2212 2213 /* 2214 * read (part of) a message. 
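 *
 * Reads are resumable: each pass consumes whatever the socket has and
 * bails out early when it would block, so the worker simply calls in
 * again later.  A message is assembled in stages -- header, front,
 * middle, data payload, footer -- with a CRC accumulated per section
 * as it arrives.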
2215 */ 2216 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); 2217 2218 static int read_partial_message(struct ceph_connection *con) 2219 { 2220 struct ceph_msg *m = con->in_msg; 2221 int size; 2222 int end; 2223 int ret; 2224 unsigned int front_len, middle_len, data_len; 2225 bool do_datacrc = !con->msgr->nocrc; 2226 u64 seq; 2227 u32 crc; 2228 2229 dout("read_partial_message con %p msg %p\n", con, m); 2230 2231 /* header */ 2232 size = sizeof (con->in_hdr); 2233 end = size; 2234 ret = read_partial(con, end, size, &con->in_hdr); 2235 if (ret <= 0) 2236 return ret; 2237 2238 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc)); 2239 if (cpu_to_le32(crc) != con->in_hdr.crc) { 2240 pr_err("read_partial_message bad hdr " 2241 " crc %u != expected %u\n", 2242 crc, con->in_hdr.crc); 2243 return -EBADMSG; 2244 } 2245 2246 front_len = le32_to_cpu(con->in_hdr.front_len); 2247 if (front_len > CEPH_MSG_MAX_FRONT_LEN) 2248 return -EIO; 2249 middle_len = le32_to_cpu(con->in_hdr.middle_len); 2250 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN) 2251 return -EIO; 2252 data_len = le32_to_cpu(con->in_hdr.data_len); 2253 if (data_len > CEPH_MSG_MAX_DATA_LEN) 2254 return -EIO; 2255 2256 /* verify seq# */ 2257 seq = le64_to_cpu(con->in_hdr.seq); 2258 if ((s64)seq - (s64)con->in_seq < 1) { 2259 pr_info("skipping %s%lld %s seq %lld expected %lld\n", 2260 ENTITY_NAME(con->peer_name), 2261 ceph_pr_addr(&con->peer_addr.in_addr), 2262 seq, con->in_seq + 1); 2263 con->in_base_pos = -front_len - middle_len - data_len - 2264 sizeof(m->footer); 2265 con->in_tag = CEPH_MSGR_TAG_READY; 2266 return 0; 2267 } else if ((s64)seq - (s64)con->in_seq > 1) { 2268 pr_err("read_partial_message bad seq %lld expected %lld\n", 2269 seq, con->in_seq + 1); 2270 con->error_msg = "bad message sequence # for incoming message"; 2271 return -EBADMSG; 2272 } 2273 2274 /* allocate message? 
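	 * (The connection's alloc_msg op may ask us to skip this message;
	 * in that case in_base_pos goes negative so the remaining payload
	 * and footer are read and discarded, and in_seq still advances.)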
*/ 2275 if (!con->in_msg) { 2276 int skip = 0; 2277 2278 dout("got hdr type %d front %d data %d\n", con->in_hdr.type, 2279 front_len, data_len); 2280 ret = ceph_con_in_msg_alloc(con, &skip); 2281 if (ret < 0) 2282 return ret; 2283 2284 BUG_ON(!con->in_msg ^ skip); 2285 if (con->in_msg && data_len > con->in_msg->data_length) { 2286 pr_warning("%s skipping long message (%u > %zd)\n", 2287 __func__, data_len, con->in_msg->data_length); 2288 ceph_msg_put(con->in_msg); 2289 con->in_msg = NULL; 2290 skip = 1; 2291 } 2292 if (skip) { 2293 /* skip this message */ 2294 dout("alloc_msg said skip message\n"); 2295 con->in_base_pos = -front_len - middle_len - data_len - 2296 sizeof(m->footer); 2297 con->in_tag = CEPH_MSGR_TAG_READY; 2298 con->in_seq++; 2299 return 0; 2300 } 2301 2302 BUG_ON(!con->in_msg); 2303 BUG_ON(con->in_msg->con != con); 2304 m = con->in_msg; 2305 m->front.iov_len = 0; /* haven't read it yet */ 2306 if (m->middle) 2307 m->middle->vec.iov_len = 0; 2308 2309 /* prepare for data payload, if any */ 2310 2311 if (data_len) 2312 prepare_message_data(con->in_msg, data_len); 2313 } 2314 2315 /* front */ 2316 ret = read_partial_message_section(con, &m->front, front_len, 2317 &con->in_front_crc); 2318 if (ret <= 0) 2319 return ret; 2320 2321 /* middle */ 2322 if (m->middle) { 2323 ret = read_partial_message_section(con, &m->middle->vec, 2324 middle_len, 2325 &con->in_middle_crc); 2326 if (ret <= 0) 2327 return ret; 2328 } 2329 2330 /* (page) data */ 2331 if (data_len) { 2332 ret = read_partial_msg_data(con); 2333 if (ret <= 0) 2334 return ret; 2335 } 2336 2337 /* footer */ 2338 size = sizeof (m->footer); 2339 end += size; 2340 ret = read_partial(con, end, size, &m->footer); 2341 if (ret <= 0) 2342 return ret; 2343 2344 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n", 2345 m, front_len, m->footer.front_crc, middle_len, 2346 m->footer.middle_crc, data_len, m->footer.data_crc); 2347 2348 /* crc ok? */ 2349 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) { 2350 pr_err("read_partial_message %p front crc %u != exp. %u\n", 2351 m, con->in_front_crc, m->footer.front_crc); 2352 return -EBADMSG; 2353 } 2354 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) { 2355 pr_err("read_partial_message %p middle crc %u != exp %u\n", 2356 m, con->in_middle_crc, m->footer.middle_crc); 2357 return -EBADMSG; 2358 } 2359 if (do_datacrc && 2360 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 && 2361 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) { 2362 pr_err("read_partial_message %p data crc %u != exp. %u\n", m, 2363 con->in_data_crc, le32_to_cpu(m->footer.data_crc)); 2364 return -EBADMSG; 2365 } 2366 2367 return 1; /* done! */ 2368 } 2369 2370 /* 2371 * Process message. This happens in the worker thread. The callback should 2372 * be careful not to do anything that waits on other incoming messages or it 2373 * may deadlock. 
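 *
 * A minimal sketch of a deadlock-safe dispatch callback (hypothetical
 * names, not part of this file): record the reply and wake the waiter
 * instead of sleeping here for another message.
 *
 *	static void my_dispatch(struct ceph_connection *con,
 *				struct ceph_msg *msg)
 *	{
 *		struct my_request *req = my_lookup_request(msg);  // hypothetical
 *
 *		if (req)
 *			complete(&req->done);	// wake the original caller
 *		ceph_msg_put(msg);		// dispatch owns this reference
 *	}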
2374 */ 2375 static void process_message(struct ceph_connection *con) 2376 { 2377 struct ceph_msg *msg; 2378 2379 BUG_ON(con->in_msg->con != con); 2380 con->in_msg->con = NULL; 2381 msg = con->in_msg; 2382 con->in_msg = NULL; 2383 con->ops->put(con); 2384 2385 /* if first message, set peer_name */ 2386 if (con->peer_name.type == 0) 2387 con->peer_name = msg->hdr.src; 2388 2389 con->in_seq++; 2390 mutex_unlock(&con->mutex); 2391 2392 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n", 2393 msg, le64_to_cpu(msg->hdr.seq), 2394 ENTITY_NAME(msg->hdr.src), 2395 le16_to_cpu(msg->hdr.type), 2396 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2397 le32_to_cpu(msg->hdr.front_len), 2398 le32_to_cpu(msg->hdr.data_len), 2399 con->in_front_crc, con->in_middle_crc, con->in_data_crc); 2400 con->ops->dispatch(con, msg); 2401 2402 mutex_lock(&con->mutex); 2403 } 2404 2405 2406 /* 2407 * Write something to the socket. Called in a worker thread when the 2408 * socket appears to be writeable and we have something ready to send. 2409 */ 2410 static int try_write(struct ceph_connection *con) 2411 { 2412 int ret = 1; 2413 2414 dout("try_write start %p state %lu\n", con, con->state); 2415 2416 more: 2417 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); 2418 2419 /* open the socket first? */ 2420 if (con->state == CON_STATE_PREOPEN) { 2421 BUG_ON(con->sock); 2422 con->state = CON_STATE_CONNECTING; 2423 2424 con_out_kvec_reset(con); 2425 prepare_write_banner(con); 2426 prepare_read_banner(con); 2427 2428 BUG_ON(con->in_msg); 2429 con->in_tag = CEPH_MSGR_TAG_READY; 2430 dout("try_write initiating connect on %p new state %lu\n", 2431 con, con->state); 2432 ret = ceph_tcp_connect(con); 2433 if (ret < 0) { 2434 con->error_msg = "connect error"; 2435 goto out; 2436 } 2437 } 2438 2439 more_kvec: 2440 /* kvec data queued? */ 2441 if (con->out_skip) { 2442 ret = write_partial_skip(con); 2443 if (ret <= 0) 2444 goto out; 2445 } 2446 if (con->out_kvec_left) { 2447 ret = write_partial_kvec(con); 2448 if (ret <= 0) 2449 goto out; 2450 } 2451 2452 /* msg pages? */ 2453 if (con->out_msg) { 2454 if (con->out_msg_done) { 2455 ceph_msg_put(con->out_msg); 2456 con->out_msg = NULL; /* we're done with this one */ 2457 goto do_next; 2458 } 2459 2460 ret = write_partial_message_data(con); 2461 if (ret == 1) 2462 goto more_kvec; /* we need to send the footer, too! */ 2463 if (ret == 0) 2464 goto out; 2465 if (ret < 0) { 2466 dout("try_write write_partial_message_data err %d\n", 2467 ret); 2468 goto out; 2469 } 2470 } 2471 2472 do_next: 2473 if (con->state == CON_STATE_OPEN) { 2474 /* is anything else pending? */ 2475 if (!list_empty(&con->out_queue)) { 2476 prepare_write_message(con); 2477 goto more; 2478 } 2479 if (con->in_seq > con->in_seq_acked) { 2480 prepare_write_ack(con); 2481 goto more; 2482 } 2483 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) { 2484 prepare_write_keepalive(con); 2485 goto more; 2486 } 2487 } 2488 2489 /* Nothing to do! */ 2490 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 2491 dout("try_write nothing else to write.\n"); 2492 ret = 0; 2493 out: 2494 dout("try_write done on %p ret %d\n", con, ret); 2495 return ret; 2496 } 2497 2498 2499 2500 /* 2501 * Read what we can from the socket. 
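 *
 * Progress is driven by con->state: while CONNECTING we read the
 * peer's banner, while NEGOTIATING the connect reply, and once OPEN
 * we loop on tag bytes (message, ack, close).  A return of 0 means
 * the socket has nothing more for us right now; a negative value is
 * reported as a fault by the caller.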
2502 */ 2503 static int try_read(struct ceph_connection *con) 2504 { 2505 int ret = -1; 2506 2507 more: 2508 dout("try_read start on %p state %lu\n", con, con->state); 2509 if (con->state != CON_STATE_CONNECTING && 2510 con->state != CON_STATE_NEGOTIATING && 2511 con->state != CON_STATE_OPEN) 2512 return 0; 2513 2514 BUG_ON(!con->sock); 2515 2516 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, 2517 con->in_base_pos); 2518 2519 if (con->state == CON_STATE_CONNECTING) { 2520 dout("try_read connecting\n"); 2521 ret = read_partial_banner(con); 2522 if (ret <= 0) 2523 goto out; 2524 ret = process_banner(con); 2525 if (ret < 0) 2526 goto out; 2527 2528 con->state = CON_STATE_NEGOTIATING; 2529 2530 /* 2531 * Received banner is good, exchange connection info. 2532 * Do not reset out_kvec, as sending our banner raced 2533 * with receiving peer banner after connect completed. 2534 */ 2535 ret = prepare_write_connect(con); 2536 if (ret < 0) 2537 goto out; 2538 prepare_read_connect(con); 2539 2540 /* Send connection info before awaiting response */ 2541 goto out; 2542 } 2543 2544 if (con->state == CON_STATE_NEGOTIATING) { 2545 dout("try_read negotiating\n"); 2546 ret = read_partial_connect(con); 2547 if (ret <= 0) 2548 goto out; 2549 ret = process_connect(con); 2550 if (ret < 0) 2551 goto out; 2552 goto more; 2553 } 2554 2555 WARN_ON(con->state != CON_STATE_OPEN); 2556 2557 if (con->in_base_pos < 0) { 2558 /* 2559 * skipping + discarding content. 2560 * 2561 * FIXME: there must be a better way to do this! 2562 */ 2563 static char buf[SKIP_BUF_SIZE]; 2564 int skip = min((int) sizeof (buf), -con->in_base_pos); 2565 2566 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); 2567 ret = ceph_tcp_recvmsg(con->sock, buf, skip); 2568 if (ret <= 0) 2569 goto out; 2570 con->in_base_pos += ret; 2571 if (con->in_base_pos) 2572 goto more; 2573 } 2574 if (con->in_tag == CEPH_MSGR_TAG_READY) { 2575 /* 2576 * what's next? 2577 */ 2578 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); 2579 if (ret <= 0) 2580 goto out; 2581 dout("try_read got tag %d\n", (int)con->in_tag); 2582 switch (con->in_tag) { 2583 case CEPH_MSGR_TAG_MSG: 2584 prepare_read_message(con); 2585 break; 2586 case CEPH_MSGR_TAG_ACK: 2587 prepare_read_ack(con); 2588 break; 2589 case CEPH_MSGR_TAG_CLOSE: 2590 con_close_socket(con); 2591 con->state = CON_STATE_CLOSED; 2592 goto out; 2593 default: 2594 goto bad_tag; 2595 } 2596 } 2597 if (con->in_tag == CEPH_MSGR_TAG_MSG) { 2598 ret = read_partial_message(con); 2599 if (ret <= 0) { 2600 switch (ret) { 2601 case -EBADMSG: 2602 con->error_msg = "bad crc"; 2603 ret = -EIO; 2604 break; 2605 case -EIO: 2606 con->error_msg = "io error"; 2607 break; 2608 } 2609 goto out; 2610 } 2611 if (con->in_tag == CEPH_MSGR_TAG_READY) 2612 goto more; 2613 process_message(con); 2614 if (con->state == CON_STATE_OPEN) 2615 prepare_read_tag(con); 2616 goto more; 2617 } 2618 if (con->in_tag == CEPH_MSGR_TAG_ACK || 2619 con->in_tag == CEPH_MSGR_TAG_SEQ) { 2620 /* 2621 * the final handshake seq exchange is semantically 2622 * equivalent to an ACK 2623 */ 2624 ret = read_partial_ack(con); 2625 if (ret <= 0) 2626 goto out; 2627 process_ack(con); 2628 goto more; 2629 } 2630 2631 out: 2632 dout("try_read done on %p ret %d\n", con, ret); 2633 return ret; 2634 2635 bad_tag: 2636 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag); 2637 con->error_msg = "protocol error, garbage tag"; 2638 ret = -1; 2639 goto out; 2640 } 2641 2642 2643 /* 2644 * Atomically queue work on a connection after the specified delay. 
2645 * Bump @con reference to avoid races with connection teardown. 2646 * Returns 0 if work was queued, or an error code otherwise. 2647 */ 2648 static int queue_con_delay(struct ceph_connection *con, unsigned long delay) 2649 { 2650 if (!con->ops->get(con)) { 2651 dout("%s %p ref count 0\n", __func__, con); 2652 2653 return -ENOENT; 2654 } 2655 2656 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) { 2657 dout("%s %p - already queued\n", __func__, con); 2658 con->ops->put(con); 2659 2660 return -EBUSY; 2661 } 2662 2663 dout("%s %p %lu\n", __func__, con, delay); 2664 2665 return 0; 2666 } 2667 2668 static void queue_con(struct ceph_connection *con) 2669 { 2670 (void) queue_con_delay(con, 0); 2671 } 2672 2673 static bool con_sock_closed(struct ceph_connection *con) 2674 { 2675 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED)) 2676 return false; 2677 2678 #define CASE(x) \ 2679 case CON_STATE_ ## x: \ 2680 con->error_msg = "socket closed (con state " #x ")"; \ 2681 break; 2682 2683 switch (con->state) { 2684 CASE(CLOSED); 2685 CASE(PREOPEN); 2686 CASE(CONNECTING); 2687 CASE(NEGOTIATING); 2688 CASE(OPEN); 2689 CASE(STANDBY); 2690 default: 2691 pr_warning("%s con %p unrecognized state %lu\n", 2692 __func__, con, con->state); 2693 con->error_msg = "unrecognized con state"; 2694 BUG(); 2695 break; 2696 } 2697 #undef CASE 2698 2699 return true; 2700 } 2701 2702 static bool con_backoff(struct ceph_connection *con) 2703 { 2704 int ret; 2705 2706 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF)) 2707 return false; 2708 2709 ret = queue_con_delay(con, round_jiffies_relative(con->delay)); 2710 if (ret) { 2711 dout("%s: con %p FAILED to back off %lu\n", __func__, 2712 con, con->delay); 2713 BUG_ON(ret == -ENOENT); 2714 con_flag_set(con, CON_FLAG_BACKOFF); 2715 } 2716 2717 return true; 2718 } 2719 2720 /* Finish fault handling; con->mutex must *not* be held here */ 2721 2722 static void con_fault_finish(struct ceph_connection *con) 2723 { 2724 /* 2725 * in case we faulted due to authentication, invalidate our 2726 * current tickets so that we can get new ones. 2727 */ 2728 if (con->auth_retry && con->ops->invalidate_authorizer) { 2729 dout("calling invalidate_authorizer()\n"); 2730 con->ops->invalidate_authorizer(con); 2731 } 2732 2733 if (con->ops->fault) 2734 con->ops->fault(con); 2735 } 2736 2737 /* 2738 * Do some work on a connection. Drop a connection ref when we're done. 
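 *
 * Roughly: deal with a closed socket or a pending backoff first, skip
 * STANDBY and CLOSED connections, then run try_read() and try_write();
 * -EAGAIN from either restarts the loop, while any other error marks
 * the connection faulted so con_fault()/con_fault_finish() run before
 * the reference taken when the work was queued is dropped.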
2739 */ 2740 static void con_work(struct work_struct *work) 2741 { 2742 struct ceph_connection *con = container_of(work, struct ceph_connection, 2743 work.work); 2744 bool fault; 2745 2746 mutex_lock(&con->mutex); 2747 while (true) { 2748 int ret; 2749 2750 if ((fault = con_sock_closed(con))) { 2751 dout("%s: con %p SOCK_CLOSED\n", __func__, con); 2752 break; 2753 } 2754 if (con_backoff(con)) { 2755 dout("%s: con %p BACKOFF\n", __func__, con); 2756 break; 2757 } 2758 if (con->state == CON_STATE_STANDBY) { 2759 dout("%s: con %p STANDBY\n", __func__, con); 2760 break; 2761 } 2762 if (con->state == CON_STATE_CLOSED) { 2763 dout("%s: con %p CLOSED\n", __func__, con); 2764 BUG_ON(con->sock); 2765 break; 2766 } 2767 if (con->state == CON_STATE_PREOPEN) { 2768 dout("%s: con %p PREOPEN\n", __func__, con); 2769 BUG_ON(con->sock); 2770 } 2771 2772 ret = try_read(con); 2773 if (ret < 0) { 2774 if (ret == -EAGAIN) 2775 continue; 2776 con->error_msg = "socket error on read"; 2777 fault = true; 2778 break; 2779 } 2780 2781 ret = try_write(con); 2782 if (ret < 0) { 2783 if (ret == -EAGAIN) 2784 continue; 2785 con->error_msg = "socket error on write"; 2786 fault = true; 2787 } 2788 2789 break; /* If we make it to here, we're done */ 2790 } 2791 if (fault) 2792 con_fault(con); 2793 mutex_unlock(&con->mutex); 2794 2795 if (fault) 2796 con_fault_finish(con); 2797 2798 con->ops->put(con); 2799 } 2800 2801 /* 2802 * Generic error/fault handler. A retry mechanism is used with 2803 * exponential backoff 2804 */ 2805 static void con_fault(struct ceph_connection *con) 2806 { 2807 pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), 2808 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); 2809 dout("fault %p state %lu to peer %s\n", 2810 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr)); 2811 2812 WARN_ON(con->state != CON_STATE_CONNECTING && 2813 con->state != CON_STATE_NEGOTIATING && 2814 con->state != CON_STATE_OPEN); 2815 2816 con_close_socket(con); 2817 2818 if (con_flag_test(con, CON_FLAG_LOSSYTX)) { 2819 dout("fault on LOSSYTX channel, marking CLOSED\n"); 2820 con->state = CON_STATE_CLOSED; 2821 return; 2822 } 2823 2824 if (con->in_msg) { 2825 BUG_ON(con->in_msg->con != con); 2826 con->in_msg->con = NULL; 2827 ceph_msg_put(con->in_msg); 2828 con->in_msg = NULL; 2829 con->ops->put(con); 2830 } 2831 2832 /* Requeue anything that hasn't been acked */ 2833 list_splice_init(&con->out_sent, &con->out_queue); 2834 2835 /* If there are no messages queued or keepalive pending, place 2836 * the connection in a STANDBY state */ 2837 if (list_empty(&con->out_queue) && 2838 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) { 2839 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); 2840 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 2841 con->state = CON_STATE_STANDBY; 2842 } else { 2843 /* retry after a delay. 
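		 * (Exponential backoff: the first fault waits
		 * BASE_DELAY_INTERVAL and each further fault doubles the
		 * delay while it is still below MAX_DELAY_INTERVAL, so
		 * retries wait roughly 1x, 2x, 4x, ... the base interval.
		 * The BACKOFF flag makes con_work() requeue itself with
		 * that delay.)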
*/ 2844 con->state = CON_STATE_PREOPEN; 2845 if (con->delay == 0) 2846 con->delay = BASE_DELAY_INTERVAL; 2847 else if (con->delay < MAX_DELAY_INTERVAL) 2848 con->delay *= 2; 2849 con_flag_set(con, CON_FLAG_BACKOFF); 2850 queue_con(con); 2851 } 2852 } 2853 2854 2855 2856 /* 2857 * initialize a new messenger instance 2858 */ 2859 void ceph_messenger_init(struct ceph_messenger *msgr, 2860 struct ceph_entity_addr *myaddr, 2861 u64 supported_features, 2862 u64 required_features, 2863 bool nocrc) 2864 { 2865 msgr->supported_features = supported_features; 2866 msgr->required_features = required_features; 2867 2868 spin_lock_init(&msgr->global_seq_lock); 2869 2870 if (myaddr) 2871 msgr->inst.addr = *myaddr; 2872 2873 /* select a random nonce */ 2874 msgr->inst.addr.type = 0; 2875 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); 2876 encode_my_addr(msgr); 2877 msgr->nocrc = nocrc; 2878 2879 atomic_set(&msgr->stopping, 0); 2880 2881 dout("%s %p\n", __func__, msgr); 2882 } 2883 EXPORT_SYMBOL(ceph_messenger_init); 2884 2885 static void clear_standby(struct ceph_connection *con) 2886 { 2887 /* come back from STANDBY? */ 2888 if (con->state == CON_STATE_STANDBY) { 2889 dout("clear_standby %p and ++connect_seq\n", con); 2890 con->state = CON_STATE_PREOPEN; 2891 con->connect_seq++; 2892 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING)); 2893 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)); 2894 } 2895 } 2896 2897 /* 2898 * Queue up an outgoing message on the given connection. 2899 */ 2900 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) 2901 { 2902 /* set src+dst */ 2903 msg->hdr.src = con->msgr->inst.name; 2904 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); 2905 msg->needs_out_seq = true; 2906 2907 mutex_lock(&con->mutex); 2908 2909 if (con->state == CON_STATE_CLOSED) { 2910 dout("con_send %p closed, dropping %p\n", con, msg); 2911 ceph_msg_put(msg); 2912 mutex_unlock(&con->mutex); 2913 return; 2914 } 2915 2916 BUG_ON(msg->con != NULL); 2917 msg->con = con->ops->get(con); 2918 BUG_ON(msg->con == NULL); 2919 2920 BUG_ON(!list_empty(&msg->list_head)); 2921 list_add_tail(&msg->list_head, &con->out_queue); 2922 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, 2923 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type), 2924 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2925 le32_to_cpu(msg->hdr.front_len), 2926 le32_to_cpu(msg->hdr.middle_len), 2927 le32_to_cpu(msg->hdr.data_len)); 2928 2929 clear_standby(con); 2930 mutex_unlock(&con->mutex); 2931 2932 /* if there wasn't anything waiting to send before, queue 2933 * new work */ 2934 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 2935 queue_con(con); 2936 } 2937 EXPORT_SYMBOL(ceph_con_send); 2938 2939 /* 2940 * Revoke a message that was previously queued for send 2941 */ 2942 void ceph_msg_revoke(struct ceph_msg *msg) 2943 { 2944 struct ceph_connection *con = msg->con; 2945 2946 if (!con) 2947 return; /* Message not in our possession */ 2948 2949 mutex_lock(&con->mutex); 2950 if (!list_empty(&msg->list_head)) { 2951 dout("%s %p msg %p - was on queue\n", __func__, con, msg); 2952 list_del_init(&msg->list_head); 2953 BUG_ON(msg->con == NULL); 2954 msg->con->ops->put(msg->con); 2955 msg->con = NULL; 2956 msg->hdr.seq = 0; 2957 2958 ceph_msg_put(msg); 2959 } 2960 if (con->out_msg == msg) { 2961 dout("%s %p msg %p - was sending\n", __func__, con, msg); 2962 con->out_msg = NULL; 2963 if (con->out_kvec_is_msg) { 2964 con->out_skip = con->out_kvec_bytes; 2965 
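			/* try_write() handles out_skip via write_partial_skip()
			 * before it queues any further kvec data */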
con->out_kvec_is_msg = false; 2966 } 2967 msg->hdr.seq = 0; 2968 2969 ceph_msg_put(msg); 2970 } 2971 mutex_unlock(&con->mutex); 2972 } 2973 2974 /* 2975 * Revoke a message that we may be reading data into 2976 */ 2977 void ceph_msg_revoke_incoming(struct ceph_msg *msg) 2978 { 2979 struct ceph_connection *con; 2980 2981 BUG_ON(msg == NULL); 2982 if (!msg->con) { 2983 dout("%s msg %p null con\n", __func__, msg); 2984 2985 return; /* Message not in our possession */ 2986 } 2987 2988 con = msg->con; 2989 mutex_lock(&con->mutex); 2990 if (con->in_msg == msg) { 2991 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len); 2992 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len); 2993 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len); 2994 2995 /* skip rest of message */ 2996 dout("%s %p msg %p revoked\n", __func__, con, msg); 2997 con->in_base_pos = con->in_base_pos - 2998 sizeof(struct ceph_msg_header) - 2999 front_len - 3000 middle_len - 3001 data_len - 3002 sizeof(struct ceph_msg_footer); 3003 ceph_msg_put(con->in_msg); 3004 con->in_msg = NULL; 3005 con->in_tag = CEPH_MSGR_TAG_READY; 3006 con->in_seq++; 3007 } else { 3008 dout("%s %p in_msg %p msg %p no-op\n", 3009 __func__, con, con->in_msg, msg); 3010 } 3011 mutex_unlock(&con->mutex); 3012 } 3013 3014 /* 3015 * Queue a keepalive byte to ensure the tcp connection is alive. 3016 */ 3017 void ceph_con_keepalive(struct ceph_connection *con) 3018 { 3019 dout("con_keepalive %p\n", con); 3020 mutex_lock(&con->mutex); 3021 clear_standby(con); 3022 mutex_unlock(&con->mutex); 3023 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 && 3024 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3025 queue_con(con); 3026 } 3027 EXPORT_SYMBOL(ceph_con_keepalive); 3028 3029 static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type) 3030 { 3031 struct ceph_msg_data *data; 3032 3033 if (WARN_ON(!ceph_msg_data_type_valid(type))) 3034 return NULL; 3035 3036 data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS); 3037 if (data) 3038 data->type = type; 3039 INIT_LIST_HEAD(&data->links); 3040 3041 return data; 3042 } 3043 3044 static void ceph_msg_data_destroy(struct ceph_msg_data *data) 3045 { 3046 if (!data) 3047 return; 3048 3049 WARN_ON(!list_empty(&data->links)); 3050 if (data->type == CEPH_MSG_DATA_PAGELIST) { 3051 ceph_pagelist_release(data->pagelist); 3052 kfree(data->pagelist); 3053 } 3054 kmem_cache_free(ceph_msg_data_cache, data); 3055 } 3056 3057 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, 3058 size_t length, size_t alignment) 3059 { 3060 struct ceph_msg_data *data; 3061 3062 BUG_ON(!pages); 3063 BUG_ON(!length); 3064 3065 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); 3066 BUG_ON(!data); 3067 data->pages = pages; 3068 data->length = length; 3069 data->alignment = alignment & ~PAGE_MASK; 3070 3071 list_add_tail(&data->links, &msg->data); 3072 msg->data_length += length; 3073 } 3074 EXPORT_SYMBOL(ceph_msg_data_add_pages); 3075 3076 void ceph_msg_data_add_pagelist(struct ceph_msg *msg, 3077 struct ceph_pagelist *pagelist) 3078 { 3079 struct ceph_msg_data *data; 3080 3081 BUG_ON(!pagelist); 3082 BUG_ON(!pagelist->length); 3083 3084 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); 3085 BUG_ON(!data); 3086 data->pagelist = pagelist; 3087 3088 list_add_tail(&data->links, &msg->data); 3089 msg->data_length += pagelist->length; 3090 } 3091 EXPORT_SYMBOL(ceph_msg_data_add_pagelist); 3092 3093 #ifdef CONFIG_BLOCK 3094 void ceph_msg_data_add_bio(struct ceph_msg *msg, 
struct bio *bio, 3095 size_t length) 3096 { 3097 struct ceph_msg_data *data; 3098 3099 BUG_ON(!bio); 3100 3101 data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); 3102 BUG_ON(!data); 3103 data->bio = bio; 3104 data->bio_length = length; 3105 3106 list_add_tail(&data->links, &msg->data); 3107 msg->data_length += length; 3108 } 3109 EXPORT_SYMBOL(ceph_msg_data_add_bio); 3110 #endif /* CONFIG_BLOCK */ 3111 3112 /* 3113 * construct a new message with given type, size 3114 * the new msg has a ref count of 1. 3115 */ 3116 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, 3117 bool can_fail) 3118 { 3119 struct ceph_msg *m; 3120 3121 m = kmem_cache_zalloc(ceph_msg_cache, flags); 3122 if (m == NULL) 3123 goto out; 3124 3125 m->hdr.type = cpu_to_le16(type); 3126 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); 3127 m->hdr.front_len = cpu_to_le32(front_len); 3128 3129 INIT_LIST_HEAD(&m->list_head); 3130 kref_init(&m->kref); 3131 INIT_LIST_HEAD(&m->data); 3132 3133 /* front */ 3134 if (front_len) { 3135 m->front.iov_base = ceph_kvmalloc(front_len, flags); 3136 if (m->front.iov_base == NULL) { 3137 dout("ceph_msg_new can't allocate %d bytes\n", 3138 front_len); 3139 goto out2; 3140 } 3141 } else { 3142 m->front.iov_base = NULL; 3143 } 3144 m->front_alloc_len = m->front.iov_len = front_len; 3145 3146 dout("ceph_msg_new %p front %d\n", m, front_len); 3147 return m; 3148 3149 out2: 3150 ceph_msg_put(m); 3151 out: 3152 if (!can_fail) { 3153 pr_err("msg_new can't create type %d front %d\n", type, 3154 front_len); 3155 WARN_ON(1); 3156 } else { 3157 dout("msg_new can't create type %d front %d\n", type, 3158 front_len); 3159 } 3160 return NULL; 3161 } 3162 EXPORT_SYMBOL(ceph_msg_new); 3163 3164 /* 3165 * Allocate "middle" portion of a message, if it is needed and wasn't 3166 * allocated by alloc_msg. This allows us to read a small fixed-size 3167 * per-type header in the front and then gracefully fail (i.e., 3168 * propagate the error to the caller based on info in the front) when 3169 * the middle is too large. 3170 */ 3171 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) 3172 { 3173 int type = le16_to_cpu(msg->hdr.type); 3174 int middle_len = le32_to_cpu(msg->hdr.middle_len); 3175 3176 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type, 3177 ceph_msg_type_name(type), middle_len); 3178 BUG_ON(!middle_len); 3179 BUG_ON(msg->middle); 3180 3181 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS); 3182 if (!msg->middle) 3183 return -ENOMEM; 3184 return 0; 3185 } 3186 3187 /* 3188 * Allocate a message for receiving an incoming message on a 3189 * connection, and save the result in con->in_msg. Uses the 3190 * connection's private alloc_msg op if available. 3191 * 3192 * Returns 0 on success, or a negative error code. 3193 * 3194 * On success, if we set *skip = 1: 3195 * - the next message should be skipped and ignored. 3196 * - con->in_msg == NULL 3197 * or if we set *skip = 0: 3198 * - con->in_msg is non-null. 
3199 * On error (ENOMEM, EAGAIN, ...), 3200 * - con->in_msg == NULL 3201 */ 3202 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip) 3203 { 3204 struct ceph_msg_header *hdr = &con->in_hdr; 3205 int middle_len = le32_to_cpu(hdr->middle_len); 3206 struct ceph_msg *msg; 3207 int ret = 0; 3208 3209 BUG_ON(con->in_msg != NULL); 3210 BUG_ON(!con->ops->alloc_msg); 3211 3212 mutex_unlock(&con->mutex); 3213 msg = con->ops->alloc_msg(con, hdr, skip); 3214 mutex_lock(&con->mutex); 3215 if (con->state != CON_STATE_OPEN) { 3216 if (msg) 3217 ceph_msg_put(msg); 3218 return -EAGAIN; 3219 } 3220 if (msg) { 3221 BUG_ON(*skip); 3222 con->in_msg = msg; 3223 con->in_msg->con = con->ops->get(con); 3224 BUG_ON(con->in_msg->con == NULL); 3225 } else { 3226 /* 3227 * Null message pointer means either we should skip 3228 * this message or we couldn't allocate memory. The 3229 * former is not an error. 3230 */ 3231 if (*skip) 3232 return 0; 3233 con->error_msg = "error allocating memory for incoming message"; 3234 3235 return -ENOMEM; 3236 } 3237 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); 3238 3239 if (middle_len && !con->in_msg->middle) { 3240 ret = ceph_alloc_middle(con, con->in_msg); 3241 if (ret < 0) { 3242 ceph_msg_put(con->in_msg); 3243 con->in_msg = NULL; 3244 } 3245 } 3246 3247 return ret; 3248 } 3249 3250 3251 /* 3252 * Free a generically kmalloc'd message. 3253 */ 3254 void ceph_msg_kfree(struct ceph_msg *m) 3255 { 3256 dout("msg_kfree %p\n", m); 3257 ceph_kvfree(m->front.iov_base); 3258 kmem_cache_free(ceph_msg_cache, m); 3259 } 3260 3261 /* 3262 * Drop a msg ref. Destroy as needed. 3263 */ 3264 void ceph_msg_last_put(struct kref *kref) 3265 { 3266 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref); 3267 LIST_HEAD(data); 3268 struct list_head *links; 3269 struct list_head *next; 3270 3271 dout("ceph_msg_put last one on %p\n", m); 3272 WARN_ON(!list_empty(&m->list_head)); 3273 3274 /* drop middle, data, if any */ 3275 if (m->middle) { 3276 ceph_buffer_put(m->middle); 3277 m->middle = NULL; 3278 } 3279 3280 list_splice_init(&m->data, &data); 3281 list_for_each_safe(links, next, &data) { 3282 struct ceph_msg_data *data; 3283 3284 data = list_entry(links, struct ceph_msg_data, links); 3285 list_del_init(links); 3286 ceph_msg_data_destroy(data); 3287 } 3288 m->data_length = 0; 3289 3290 if (m->pool) 3291 ceph_msgpool_put(m->pool, m); 3292 else 3293 ceph_msg_kfree(m); 3294 } 3295 EXPORT_SYMBOL(ceph_msg_last_put); 3296 3297 void ceph_msg_dump(struct ceph_msg *msg) 3298 { 3299 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg, 3300 msg->front_alloc_len, msg->data_length); 3301 print_hex_dump(KERN_DEBUG, "header: ", 3302 DUMP_PREFIX_OFFSET, 16, 1, 3303 &msg->hdr, sizeof(msg->hdr), true); 3304 print_hex_dump(KERN_DEBUG, " front: ", 3305 DUMP_PREFIX_OFFSET, 16, 1, 3306 msg->front.iov_base, msg->front.iov_len, true); 3307 if (msg->middle) 3308 print_hex_dump(KERN_DEBUG, "middle: ", 3309 DUMP_PREFIX_OFFSET, 16, 1, 3310 msg->middle->vec.iov_base, 3311 msg->middle->vec.iov_len, true); 3312 print_hex_dump(KERN_DEBUG, "footer: ", 3313 DUMP_PREFIX_OFFSET, 16, 1, 3314 &msg->footer, sizeof(msg->footer), true); 3315 } 3316 EXPORT_SYMBOL(ceph_msg_dump); 3317
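
/*
 * Illustrative usage sketch (not part of the original file): how a
 * messenger client might build and queue an outgoing message with the
 * helpers exported above.  "con", "pages", "len", "type" and
 * "front_len" are hypothetical placeholders supplied by the caller.
 *
 *	struct ceph_msg *m;
 *
 *	m = ceph_msg_new(type, front_len, GFP_NOFS, false);
 *	if (!m)
 *		return -ENOMEM;
 *	ceph_msg_data_add_pages(m, pages, len, 0);	// optional payload
 *	ceph_con_send(con, m);	// the messenger drops the queued reference
 *				// once it is done with the message
 */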