// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif	/* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------                \    \
 *       |  | CLOSING |  socket event;  \    \
 *       |  -----------  await close     \    \
 *       |       ^                        \   |
 *       |       |                         \  |
 *       |       + con_sock_state_closing() \ |
 *       |      / \                         | |
 *       |     /   ---------------          | |
 *       |    /                   \         v v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       |  |   /                 --------------  connect initiated
 *       |  |   | con_sock_state_connected()
 *       |  |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

/*
 * connection states
 */
#define CON_STATE_CLOSED	1  /* -> PREOPEN */
#define CON_STATE_PREOPEN	2  /* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3  /* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4  /* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5  /* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6  /* -> PREOPEN, CLOSED */

/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX	   0  /* we can close channel or drop
				       * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING 1  /* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING	   2  /* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED	   3  /* socket state changed to closed */
#define CON_FLAG_BACKOFF	   4  /* need to retry queuing delayed work */

static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CON_FLAG_LOSSYTX:
	case CON_FLAG_KEEPALIVE_PENDING:
	case CON_FLAG_WRITE_PENDING:
	case CON_FLAG_SOCK_CLOSED:
	case CON_FLAG_BACKOFF:
		return true;
	default:
		return false;
	}
}

static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
				    unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
				  unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_set_bit(con_flag, &con->flags);
}

/* Slab caches for frequently-allocated structures */

static struct kmem_cache *ceph_msg_cache;

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

static void queue_con(struct ceph_connection *con);
static void cancel_con(struct ceph_connection *con);
static void ceph_con_workfn(struct work_struct *);
static void con_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;		/* used in certain error cases */

const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss->ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);

static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

static int ceph_msgr_slab_init(void)
{
	BUG_ON(ceph_msg_cache);
	ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
	if (!ceph_msg_cache)
		return -ENOMEM;

	return 0;
}

static void ceph_msgr_slab_exit(void)
{
	BUG_ON(!ceph_msg_cache);
	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;
}

static void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	BUG_ON(zero_page == NULL);
	put_page(zero_page);
	zero_page = NULL;

	ceph_msgr_slab_exit();
}

int __init ceph_msgr_init(void)
{
	if (ceph_msgr_slab_init())
		return -ENOMEM;

	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	get_page(zero_page);

	/*
	 * The number of active work items is limited by the number of
	 * connections, so leave @max_active at default.
	 */
	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}

void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;
	if (atomic_read(&con->msgr->stopping)) {
		return;
	}

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s on %p state = %lu, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.
	 * clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
	if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
		if (sk_stream_is_writeable(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
		/* fall through */
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		con_sock_state_closing(con);
		con_flag_set(con, CON_FLAG_SOCK_CLOSED);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}


/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	unsigned int noio_flag;
	int ret;

	BUG_ON(con->sock);

	/* sock_create_kern() allocates with GFP_KERNEL */
	noio_flag = memalloc_noio_save();
	ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	memalloc_noio_restore(noio_flag);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

	con_sock_state_connecting(con);
	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		return ret;
	}

	if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY)) {
		int optval = 1;

		ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
					(char *)&optval, sizeof(optval));
		if (ret)
			pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d",
			       ret);
	}

	con->sock = sock;
	return 0;
}

/*
 * If @buf is NULL, discard up to @len bytes.
 */
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (!buf)
		msg.msg_flags |= MSG_TRUNC;

	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len);
	r = sock_recvmsg(sock, &msg, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
			     int page_offset, size_t length)
{
	struct bio_vec bvec = {
		.bv_page = page,
		.bv_offset = page_offset,
		.bv_len = length
	};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	BUG_ON(page_offset + length > PAGE_SIZE);
	iov_iter_bvec(&msg.msg_iter, READ, &bvec, 1, length);
	r = sock_recvmsg(sock, &msg, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
			       int offset, size_t size, bool more)
{
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
	int ret;

	ret = kernel_sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, bool more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	struct bio_vec bvec;
	int ret;

	/*
	 * sendpage cannot properly handle pages with page_count == 0,
	 * we need to fall back to sendmsg if that's the case.
	 *
	 * Same goes for slab pages: skb_can_coalesce() allows
	 * coalescing neighboring slab objects into a single frag which
	 * triggers one of hardened usercopy checks.
	 */
	if (page_count(page) >= 1 && !PageSlab(page))
		return __ceph_tcp_sendpage(sock, page, offset, size, more);

	bvec.bv_page = page;
	bvec.bv_offset = offset;
	bvec.bv_len = size;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	iov_iter_bvec(&msg.msg_iter, WRITE, &bvec, 1, size);
	ret = sock_sendmsg(sock, &msg);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}

/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc = 0;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (con->sock) {
		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
		sock_release(con->sock);
		con->sock = NULL;
	}

	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
	con_flag_clear(con, CON_FLAG_SOCK_CLOSED);

	con_sock_state_closed(con);
	return rc;
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);

	ceph_msg_put(msg);
}
static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	dout("reset_connection %p\n", con);
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		BUG_ON(con->out_msg->con != con);
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;

	con->out_skip = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	con->state = CON_STATE_CLOSED;

	con_flag_clear(con, CON_FLAG_LOSSYTX);	/* so we retry next connect */
	con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
	con_flag_clear(con, CON_FLAG_WRITE_PENDING);
	con_flag_clear(con, CON_FLAG_BACKOFF);

	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_con(con);
	con_close_socket(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));

	WARN_ON(con->state != CON_STATE_CLOSED);
	con->state = CON_STATE_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
		   const struct ceph_connection_operations *ops,
		   struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, ceph_con_workfn);

	con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);

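/*
 * Typical caller usage, for illustration only.  The ops table, the
 * containing structure and the variable names below are hypothetical
 * stand-ins for what the mon/osd/mds clients actually define:
 *
 *	static const struct ceph_connection_operations my_con_ops = {
 *		// get/put, dispatch, alloc_msg, fault, ... as needed
 *	};
 *
 *	ceph_con_init(&s->con, s, &my_con_ops, &client->msgr);
 *	ceph_con_open(&s->con, CEPH_ENTITY_TYPE_OSD, osd_num, &peer_addr);
 *	...
 *	ceph_con_close(&s->con);	// drop the connection when done
 *
 * ceph_con_init() zeroes the structure and leaves it in CON_STATE_CLOSED;
 * ceph_con_open() records the peer and queues work to connect.
 */
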
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}

static void con_out_kvec_reset(struct ceph_connection *con)
{
	BUG_ON(con->out_skip);

	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}

static void con_out_kvec_add(struct ceph_connection *con,
			     size_t size, void *data)
{
	int index = con->out_kvec_left;

	BUG_ON(con->out_skip);
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}

/*
 * Chop off a kvec from the end.  Return residual number of bytes for
 * that kvec, i.e. how many bytes would have been written if the kvec
 * hadn't been nuked.
 */
static int con_out_kvec_skip(struct ceph_connection *con)
{
	int off = con->out_kvec_cur - con->out_kvec;
	int skip = 0;

	if (con->out_kvec_bytes > 0) {
		skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
		BUG_ON(con->out_kvec_bytes < skip);
		BUG_ON(!con->out_kvec_left);
		con->out_kvec_bytes -= skip;
		con->out_kvec_left--;
	}

	return skip;
}

#ifdef CONFIG_BLOCK

/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
					  size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_bio_iter *it = &cursor->bio_iter;

	cursor->resid = min_t(size_t, length, data->bio_length);
	*it = data->bio_pos;
	if (cursor->resid < it->iter.bi_size)
		it->iter.bi_size = cursor->resid;

	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
}

static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
					   size_t *page_offset,
					   size_t *length)
{
	struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
					   cursor->bio_iter.iter);

	*page_offset = bv.bv_offset;
	*length = bv.bv_len;
	return bv.bv_page;
}

static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
				      size_t bytes)
{
	struct ceph_bio_iter *it = &cursor->bio_iter;

	BUG_ON(bytes > cursor->resid);
	BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
	cursor->resid -= bytes;
	bio_advance_iter(it->bio, &it->iter, bytes);

	if (!cursor->resid) {
		BUG_ON(!cursor->last_piece);
		return false;   /* no more data */
	}

	if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done))
		return false;	/* more bytes to process in this segment */

	if (!it->iter.bi_size) {
		it->bio = it->bio->bi_next;
		it->iter = it->bio->bi_iter;
		if (cursor->resid < it->iter.bi_size)
			it->iter.bi_size = cursor->resid;
	}

	BUG_ON(cursor->last_piece);
	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
	cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter);
	return true;
}
#endif /* CONFIG_BLOCK */

static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor,
					    size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio_vec *bvecs = data->bvec_pos.bvecs;

	cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
	cursor->bvec_iter = data->bvec_pos.iter;
	cursor->bvec_iter.bi_size = cursor->resid;

	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
	cursor->last_piece =
	    cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter);
}

static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor,
					     size_t *page_offset,
					     size_t *length)
{
	struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs,
					   cursor->bvec_iter);

	*page_offset = bv.bv_offset;
	*length = bv.bv_len;
	return bv.bv_page;
}

static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;

	BUG_ON(bytes > cursor->resid);
	BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
	cursor->resid -= bytes;
	bvec_iter_advance(bvecs, &cursor->bvec_iter, bytes);

	if (!cursor->resid) {
		BUG_ON(!cursor->last_piece);
		return false;   /* no more data */
	}

	if (!bytes || cursor->bvec_iter.bi_bvec_done)
		return false;	/* more bytes to process in this segment */

	BUG_ON(cursor->last_piece);
	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
	cursor->last_piece =
	    cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter);
	return true;
}

/*
 * For a page array, a piece comes from the first page in the array
 * that has not already been fully consumed.
 */
static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
					    size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	int page_count;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(!data->pages);
	BUG_ON(!data->length);

	cursor->resid = min(length, data->length);
	page_count = calc_pages_for(data->alignment, (u64)data->length);
	cursor->page_offset = data->alignment & ~PAGE_MASK;
	cursor->page_index = 0;
	BUG_ON(page_count > (int)USHRT_MAX);
	cursor->page_count = (unsigned short)page_count;
	BUG_ON(length > SIZE_MAX - cursor->page_offset);
	cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
			 size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_index >= cursor->page_count);
	BUG_ON(cursor->page_offset >= PAGE_SIZE);

	*page_offset = cursor->page_offset;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return data->pages[cursor->page_index];
}

static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);

	/* Advance the cursor page offset */

	cursor->resid -= bytes;
	cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
	if (!bytes || cursor->page_offset)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;   /* no more data */

	/* Move on to the next page; offset is already at 0 */

	BUG_ON(cursor->page_index >= cursor->page_count);
	cursor->page_index++;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * For a pagelist, a piece is whatever remains to be consumed in the
 * first page in the list, or the front of the next page.
 */
static void
ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
				   size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;
	struct page *page;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	if (!length)
		return;		/* pagelist can be assigned but empty */

	BUG_ON(list_empty(&pagelist->head));
	page = list_first_entry(&pagelist->head, struct page, lru);

	cursor->resid = min(length, pagelist->length);
	cursor->page = page;
	cursor->offset = 0;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
			    size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(!cursor->page);
	BUG_ON(cursor->offset + cursor->resid != pagelist->length);

	/* offset of first page in pagelist is always 0 */
	*page_offset = cursor->offset & ~PAGE_MASK;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return cursor->page;
}

static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
					   size_t bytes)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
	BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);

	/* Advance the cursor offset */

	cursor->resid -= bytes;
	cursor->offset += bytes;
	/* offset of first page in pagelist is always 0 */
	if (!bytes || cursor->offset & ~PAGE_MASK)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;   /* no more data */

	/* Move on to the next page */

	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
	cursor->page = list_next_entry(cursor->page, lru);
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * Message data is handled (sent or received) in pieces, where each
 * piece resides on a single page.  The network layer might not
 * consume an entire piece at once.  A data item's cursor keeps
 * track of which piece is next to process and how much remains to
 * be processed in that piece.  It also tracks whether the current
 * piece is the last one in the data item.
 */
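/*
 * A rough sketch of how the send path below drives a cursor (see
 * write_partial_message_data() for the real loop; "consumed" stands in
 * for however many bytes the socket actually accepted):
 *
 *	ceph_msg_data_cursor_init(msg, data_len);
 *	while (cursor->total_resid) {
 *		page = ceph_msg_data_next(cursor, &off, &len, &last_piece);
 *		consumed = <send up to len bytes of page starting at off>;
 *		ceph_msg_data_advance(cursor, consumed);
 *	}
 */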
static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
{
	size_t length = cursor->total_resid;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		ceph_msg_data_pagelist_cursor_init(cursor, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		ceph_msg_data_pages_cursor_init(cursor, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		ceph_msg_data_bio_cursor_init(cursor, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_BVECS:
		ceph_msg_data_bvecs_cursor_init(cursor, length);
		break;
	case CEPH_MSG_DATA_NONE:
	default:
		/* BUG(); */
		break;
	}
	cursor->need_crc = true;
}

static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
{
	struct ceph_msg_data_cursor *cursor = &msg->cursor;

	BUG_ON(!length);
	BUG_ON(length > msg->data_length);
	BUG_ON(!msg->num_data_items);

	cursor->total_resid = length;
	cursor->data = msg->data;

	__ceph_msg_data_cursor_init(cursor);
}

/*
 * Return the page containing the next piece to process for a given
 * data item, and supply the page offset and length of that piece.
 * Indicate whether this is the last piece in this data item.
 */
static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
				       size_t *page_offset, size_t *length,
				       bool *last_piece)
{
	struct page *page;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		page = ceph_msg_data_pages_next(cursor, page_offset, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		page = ceph_msg_data_bio_next(cursor, page_offset, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_BVECS:
		page = ceph_msg_data_bvecs_next(cursor, page_offset, length);
		break;
	case CEPH_MSG_DATA_NONE:
	default:
		page = NULL;
		break;
	}

	BUG_ON(!page);
	BUG_ON(*page_offset + *length > PAGE_SIZE);
	BUG_ON(!*length);
	BUG_ON(*length > cursor->resid);
	if (last_piece)
		*last_piece = cursor->last_piece;

	return page;
}

/*
 * Returns true if the result moves the cursor on to the next piece
 * of the data item.
 */
static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
				  size_t bytes)
{
	bool new_piece;

	BUG_ON(bytes > cursor->resid);
	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
		break;
	case CEPH_MSG_DATA_PAGES:
		new_piece = ceph_msg_data_pages_advance(cursor, bytes);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		new_piece = ceph_msg_data_bio_advance(cursor, bytes);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_BVECS:
		new_piece = ceph_msg_data_bvecs_advance(cursor, bytes);
		break;
	case CEPH_MSG_DATA_NONE:
	default:
		BUG();
		break;
	}
	cursor->total_resid -= bytes;

	if (!cursor->resid && cursor->total_resid) {
		WARN_ON(!cursor->last_piece);
		cursor->data++;
		__ceph_msg_data_cursor_init(cursor);
		new_piece = true;
	}
	cursor->need_crc = new_piece;
}

static size_t sizeof_footer(struct ceph_connection *con)
{
	return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
	    sizeof(struct ceph_msg_footer) :
	    sizeof(struct ceph_msg_footer_old);
}

static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
	/* Initialize data cursor */

	ceph_msg_data_cursor_init(msg, (size_t)data_len);
}

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;

	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

	dout("prepare_write_message_footer %p\n", con);
	con_out_kvec_add(con, sizeof_footer(con), &m->footer);
	if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
		if (con->ops->sign_message)
			con->ops->sign_message(m);
		else
			m->footer.sig = 0;
	} else {
		m->old_footer.flags = m->footer.flags;
	}
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}

/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	con_out_kvec_reset(con);
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
				 &con->out_temp_ack);
	}

	BUG_ON(list_empty(&con->out_queue));
	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;
	BUG_ON(m->con != con);

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;

		if (con->ops->reencode_message)
			con->ops->reencode_message(m);
	}

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     m->data_length);
	WARN_ON(m->front.iov_len != le32_to_cpu(m->hdr.front_len));
	WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));

	/* tag + hdr + front + middle */
	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		con_out_kvec_add(con, m->middle->vec.iov_len,
				 m->middle->vec.iov_base);

	/* fill in hdr crc and finalize hdr */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));

	/* fill in front and middle crc, footer */
	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
			     m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	dout("%s front_crc %u middle_crc %u\n", __func__,
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));
	con->out_msg->footer.flags = 0;

	/* is there a data payload? */
	con->out_msg->footer.data_crc = 0;
	if (m->data_length) {
		prepare_message_data(con->out_msg, m->data_length);
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);
	}

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare to share the seq during handshake
 */
static void prepare_write_seq(struct ceph_connection *con)
{
	dout("prepare_write_seq %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

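/*
 * For reference, the kvecs queued by prepare_write_message() and
 * prepare_write_message_footer() above put an outgoing message on the
 * wire in this order:
 *
 *	[ tag_ack + le64 ack seq ]	(only if an ack can be piggybacked)
 *	tag_msg				(1 byte)
 *	struct ceph_msg_header		(copied into con->out_hdr, crc filled)
 *	front				(front_len bytes, crc in footer)
 *	[ middle ]			(middle_len bytes, if present)
 *	[ data payload ]		(sent page by page, see below)
 *	struct ceph_msg_footer		(or ceph_msg_footer_old without
 *					 CEPH_FEATURE_MSG_AUTH)
 */
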
/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con_out_kvec_reset(con);
	if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
		struct timespec64 now;

		ktime_get_real_ts64(&now);
		con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
		ceph_encode_timespec64(&con->out_temp_keepalive2, &now);
		con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
				 &con->out_temp_keepalive2);
	} else {
		con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
	}
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Connection negotiation.
 */

static int get_connect_authorizer(struct ceph_connection *con)
{
	struct ceph_auth_handshake *auth;
	int auth_proto;

	if (!con->ops->get_authorizer) {
		con->auth = NULL;
		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
		con->out_connect.authorizer_len = 0;
		return 0;
	}

	auth = con->ops->get_authorizer(con, &auth_proto, con->auth_retry);
	if (IS_ERR(auth))
		return PTR_ERR(auth);

	con->auth = auth;
	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
	con->out_connect.authorizer_len = cpu_to_le32(auth->authorizer_buf_len);
	return 0;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
	con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
	con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
			 &con->msgr->my_enc_addr);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

static void __prepare_write_connect(struct ceph_connection *con)
{
	con_out_kvec_add(con, sizeof(con->out_connect), &con->out_connect);
	if (con->auth)
		con_out_kvec_add(con, con->auth->authorizer_buf_len,
				 con->auth->authorizer_buf);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

static int prepare_write_connect(struct ceph_connection *con)
{
	unsigned int global_seq = get_global_seq(con->msgr, 0);
	int proto;
	int ret;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features =
	    cpu_to_le64(from_msgr(con->msgr)->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	ret = get_connect_authorizer(con);
	if (ret)
		return ret;

	__prepare_write_connect(con);
	return 0;
}

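/*
 * Outbound handshake, roughly: prepare_write_banner() queues our banner
 * and encoded address, and prepare_write_connect() queues the
 * ceph_msg_connect plus any authorizer.  The peer's side is consumed by
 * read_partial_banner() and read_partial_connect() further down, and the
 * reply tag (READY, SEQ, RETRY_*, BADAUTHORIZER, ...) is acted on in
 * process_connect().
 */
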
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */

		/* account for full iov entries consumed */
		while (ret >= con->out_kvec_cur->iov_len) {
			BUG_ON(!con->out_kvec_left);
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		}
		/* and for a partially-consumed entry */
		if (ret) {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}

static u32 ceph_crc32c_page(u32 crc, struct page *page,
			    unsigned int page_offset,
			    unsigned int length)
{
	char *kaddr;

	kaddr = kmap(page);
	BUG_ON(kaddr == NULL);
	crc = crc32c(crc, kaddr + page_offset, length);
	kunmap(page);

	return crc;
}
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_message_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
	u32 crc;

	dout("%s %p msg %p\n", __func__, con, msg);

	if (!msg->num_data_items)
		return -EINVAL;

	/*
	 * Iterate through each page that contains data to be
	 * written, and send as much as possible for each.
	 *
	 * If we are calculating the data crc (the default), we will
	 * need to map the page.  If we have no pages, they have
	 * been revoked, so use the zero page.
	 */
	crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
	while (cursor->total_resid) {
		struct page *page;
		size_t page_offset;
		size_t length;
		bool last_piece;
		int ret;

		if (!cursor->resid) {
			ceph_msg_data_advance(cursor, 0);
			continue;
		}

		page = ceph_msg_data_next(cursor, &page_offset, &length,
					  &last_piece);
		ret = ceph_tcp_sendpage(con->sock, page, page_offset,
					length, !last_piece);
		if (ret <= 0) {
			if (do_datacrc)
				msg->footer.data_crc = cpu_to_le32(crc);

			return ret;
		}
		if (do_datacrc && cursor->need_crc)
			crc = ceph_crc32c_page(crc, page, page_offset, length);
		ceph_msg_data_advance(cursor, (size_t)ret);
	}

	dout("%s %p msg %p done\n", __func__, con, msg);

	/* prepare and queue up footer, too */
	if (do_datacrc)
		msg->footer.data_crc = cpu_to_le32(crc);
	else
		msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con_out_kvec_reset(con);
	prepare_write_message_footer(con);

	return 1;	/* must return > 0 to indicate success */
}

/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	dout("%s %p %d left\n", __func__, con, con->out_skip);
	while (con->out_skip > 0) {
		size_t size = min(con->out_skip, (int) PAGE_SIZE);

		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_seq(struct ceph_connection *con)
{
	dout("prepare_read_seq %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_SEQ;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

static void prepare_read_keepalive_ack(struct ceph_connection *con)
{
	dout("prepare_read_keepalive_ack %p\n", con);
	con->in_base_pos = 0;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}


static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
	while (con->in_base_pos < end) {
		int left = end - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}


/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	size = strlen(CEPH_BANNER);
	end = size;
	ret = read_partial(con, end, size, con->in_banner);
	if (ret <= 0)
		goto out;

	size = sizeof (con->actual_peer_addr);
	end += size;
	ret = read_partial(con, end, size, &con->actual_peer_addr);
	if (ret <= 0)
		goto out;

	size = sizeof (con->peer_addr_for_me);
	end += size;
	ret = read_partial(con, end, size, &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;

out:
	return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	size = sizeof (con->in_reply);
	end = size;
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		goto out;

	if (con->auth) {
		size = le32_to_cpu(con->in_reply.authorizer_len);
		if (size > con->auth->authorizer_reply_buf_len) {
			pr_err("authorizer reply too big: %d > %zu\n", size,
			       con->auth->authorizer_reply_buf_len);
			ret = -EINVAL;
			goto out;
		}

		end += size;
		ret = read_partial(con, end, size,
				   con->auth->authorizer_reply_buf);
		if (ret <= 0)
			goto out;
	}

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}

/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       ceph_pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
	struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr;
	struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr;

	switch (ss->ss_family) {
	case AF_INET:
		return addr->s_addr == htonl(INADDR_ANY);
	case AF_INET6:
		return ipv6_addr_any(addr6);
	default:
		return true;
	}
}

static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}

/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
		     char delim, const char **ipend)
{
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	memset(ss, 0, sizeof(*ss));

	if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
		ss->ss_family = AF_INET;
		return 0;
	}

	if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
		ss->ss_family = AF_INET6;
		return 0;
	}

	return -EINVAL;
}

/*
 * Extract hostname string and resolve using kernel DNS facility.
 */
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	const char *end, *delim_p;
	char *colon_p, *ip_addr = NULL;
	int ip_len, ret;

	/*
	 * The end of the hostname occurs immediately preceding the delimiter or
	 * the port marker (':') where the delimiter takes precedence.
	 */
	delim_p = memchr(name, delim, namelen);
	colon_p = memchr(name, ':', namelen);

	if (delim_p && colon_p)
		end = delim_p < colon_p ? delim_p : colon_p;
	else if (!delim_p && colon_p)
		end = colon_p;
	else {
		end = delim_p;
		if (!end) /* case: hostname:/ */
			end = name + namelen;
	}

	if (end <= name)
		return -EINVAL;

	/* do dns_resolve upcall */
	ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
	if (ip_len > 0)
		ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
	else
		ret = -ESRCH;

	kfree(ip_addr);

	*ipend = end;

	pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
		ret, ret ? "failed" : ceph_pr_addr(ss));

	return ret;
}
#else
static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	return -EINVAL;
}
#endif

/*
 * Parse a server name (IP or hostname). If a valid IP address is not found
 * then try to extract a hostname to resolve using userspace DNS upcall.
 */
static int ceph_parse_server_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	int ret;

	ret = ceph_pton(name, namelen, ss, delim, ipend);
	if (ret)
		ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);

	return ret;
}

/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
		   struct ceph_entity_addr *addr,
		   int max_count, int *count)
{
	int i, ret = -EINVAL;
	const char *p = c;

	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
	for (i = 0; i < max_count; i++) {
		const char *ipend;
		struct sockaddr_storage *ss = &addr[i].in_addr;
		int port;
		char delim = ',';

		if (*p == '[') {
			delim = ']';
			p++;
		}

		ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
		if (ret)
			goto bad;
		ret = -EINVAL;

		p = ipend;

		if (delim == ']') {
			if (*p != ']') {
				dout("missing matching ']'\n");
				goto bad;
			}
			p++;
		}

		/* port? */
		if (p < end && *p == ':') {
			port = 0;
			p++;
			while (p < end && *p >= '0' && *p <= '9') {
				port = (port * 10) + (*p - '0');
				p++;
			}
			if (port == 0)
				port = CEPH_MON_PORT;
			else if (port > 65535)
				goto bad;
		} else {
			port = CEPH_MON_PORT;
		}

		addr_set_port(ss, port);

		dout("parse_ips got %s\n", ceph_pr_addr(ss));

		if (p == end)
			break;
		if (*p != ',')
			goto bad;
		p++;
	}

	if (p != end)
		goto bad;

	if (count)
		*count = i + 1;
	return 0;

bad:
	pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
	return ret;
}
EXPORT_SYMBOL(ceph_parse_ips);


static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  Note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warn("wrong peer, want %s/%d, got %s/%d\n",
			ceph_pr_addr(&con->peer_addr.in_addr),
			(int)le32_to_cpu(con->peer_addr.nonce),
			ceph_pr_addr(&con->actual_peer_addr.in_addr),
			(int)le32_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * Did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
	}

	return 0;
}

static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = from_msgr(con->msgr)->supported_features;
	u64 req_feat = from_msgr(con->msgr)->required_features;
	u64 server_feat = le64_to_cpu(con->in_reply.features);
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	if (con->auth) {
		/*
		 * Any connection that defines ->get_authorizer()
		 * should also define ->add_authorizer_challenge() and
		 * ->verify_authorizer_reply().
		 *
		 * See get_connect_authorizer().
		 */
		if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
			ret = con->ops->add_authorizer_challenge(
				    con, con->auth->authorizer_reply_buf,
				    le32_to_cpu(con->in_reply.authorizer_len));
			if (ret < 0)
				return ret;

			con_out_kvec_reset(con);
			__prepare_write_connect(con);
			prepare_read_connect(con);
			return 0;
		}

		ret = con->ops->verify_authorizer_reply(con);
		if (ret < 0) {
			con->error_msg = "bad authorize reply";
			return ret;
		}
	}

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		reset_connection(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		reset_connection(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_reply.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_NEGOTIATING)
			return -EAGAIN;
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_reply.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_reply.global_seq));
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_SEQ:
	case CEPH_MSGR_TAG_READY:
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       ceph_pr_addr(&con->peer_addr.in_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			reset_connection(con);
			return -1;
		}

		WARN_ON(con->state != CON_STATE_NEGOTIATING);
		con->state = CON_STATE_OPEN;
		con->auth_retry = 0;    /* we authenticated; clear flag */
		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		con->peer_features = server_feat;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			con_flag_set(con, CON_FLAG_LOSSYTX);

		con->delay = 0;      /* reset backoff memory */

		if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) {
			prepare_write_seq(con);
			prepare_read_seq(con);
		} else {
			prepare_read_tag(con);
		}
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		con->error_msg = "protocol error, got WAIT as client";
		return -1;

	default:
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}
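
/*
 * Sketch of the feature-mask arithmetic in process_connect() above
 * (the helper itself is hypothetical): a connect fails either when
 * the server requires bits we lack (TAG_FEATURES, reported as
 * server_feat & ~sup_feat) or when we require bits the server lacks
 * (checked on TAG_READY/TAG_SEQ as req_feat & ~server_feat).
 */
static u64 __maybe_unused example_missing_features(u64 sup_feat, u64 req_feat,
						   u64 server_feat)
{
	/* bits the server insists on that we cannot speak */
	u64 server_needs = server_feat & ~sup_feat;

	/* bits we insist on that the server lacks */
	u64 we_need = req_feat & ~server_feat;

	/* e.g. sup=0x3, req=0x1, server=0x7 -> server_needs=0x4, we_need=0 */
	return server_needs | we_need;	/* nonzero means the connect fails */
}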

/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int size = sizeof (con->in_temp_ack);
	int end = size;

	return read_partial(con, end, size, &con->in_temp_ack);
}

/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;
	bool reconnect = (con->in_tag == CEPH_MSGR_TAG_SEQ);
	struct list_head *list = reconnect ? &con->out_queue : &con->out_sent;

	/*
	 * In the reconnect case, con_fault() has requeued messages
	 * in out_sent.  We should clean up old messages according to
	 * the reconnect seq.
	 */
	while (!list_empty(list)) {
		m = list_first_entry(list, struct ceph_msg, list_head);
		if (reconnect && m->needs_out_seq)
			break;
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		m->ack_stamp = jiffies;
		ceph_msg_remove(m);
	}

	prepare_read_tag(con);
}
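
/*
 * Illustrative sketch of the ack bookkeeping above, using a plain
 * array of sequence numbers instead of con->out_sent (everything
 * here is hypothetical): an ack of N lets the sender drop every
 * message with seq <= N.
 */
static int __maybe_unused example_prune_acked(u64 *seqs, int n, u64 ack)
{
	int kept = 0;
	int i;

	for (i = 0; i < n; i++)
		if (seqs[i] > ack)		/* still unacked: keep */
			seqs[kept++] = seqs[i];

	/* e.g. {5, 6, 7} with ack == 6 keeps only {7} */
	return kept;
}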

static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section,
					unsigned int sec_len, u32 *crc)
{
	int ret, left;

	BUG_ON(!section);

	while (section->iov_len < sec_len) {
		BUG_ON(section->iov_base == NULL);
		left = sec_len - section->iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
				       section->iov_len, left);
		if (ret <= 0)
			return ret;
		section->iov_len += ret;
	}
	if (section->iov_len == sec_len)
		*crc = crc32c(0, section->iov_base, section->iov_len);

	return 1;
}

static int read_partial_msg_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->in_msg;
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
	struct page *page;
	size_t page_offset;
	size_t length;
	u32 crc = 0;
	int ret;

	if (!msg->num_data_items)
		return -EIO;

	if (do_datacrc)
		crc = con->in_data_crc;
	while (cursor->total_resid) {
		if (!cursor->resid) {
			ceph_msg_data_advance(cursor, 0);
			continue;
		}

		page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
		ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
		if (ret <= 0) {
			if (do_datacrc)
				con->in_data_crc = crc;

			return ret;
		}

		if (do_datacrc)
			crc = ceph_crc32c_page(crc, page, page_offset, ret);
		ceph_msg_data_advance(cursor, (size_t)ret);
	}
	if (do_datacrc)
		con->in_data_crc = crc;

	return 1;	/* must return > 0 to indicate success */
}

/*
 * read (part of) a message.
 */
static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);

static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	int size;
	int end;
	int ret;
	unsigned int front_len, middle_len, data_len;
	bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
	bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
	u64 seq;
	u32 crc;

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	size = sizeof (con->in_hdr);
	end = size;
	ret = read_partial(con, end, size, &con->in_hdr);
	if (ret <= 0)
		return ret;

	crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
	if (cpu_to_le32(crc) != con->in_hdr.crc) {
		pr_err("read_partial_message bad hdr crc %u != expected %u\n",
		       crc, con->in_hdr.crc);
		return -EBADMSG;
	}

	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;

	/* verify seq# */
	seq = le64_to_cpu(con->in_hdr.seq);
	if ((s64)seq - (s64)con->in_seq < 1) {
		pr_info("skipping %s%lld %s seq %lld expected %lld\n",
			ENTITY_NAME(con->peer_name),
			ceph_pr_addr(&con->peer_addr.in_addr),
			seq, con->in_seq + 1);
		con->in_base_pos = -front_len - middle_len - data_len -
			sizeof_footer(con);
		con->in_tag = CEPH_MSGR_TAG_READY;
		return 1;
	} else if ((s64)seq - (s64)con->in_seq > 1) {
		pr_err("read_partial_message bad seq %lld expected %lld\n",
		       seq, con->in_seq + 1);
		con->error_msg = "bad message sequence # for incoming message";
		return -EBADE;
	}

	/* allocate message? */
	if (!con->in_msg) {
		int skip = 0;

		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     front_len, data_len);
		ret = ceph_con_in_msg_alloc(con, &skip);
		if (ret < 0)
			return ret;

		BUG_ON(!con->in_msg ^ skip);
		if (skip) {
			/* skip this message */
			dout("alloc_msg said skip message\n");
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof_footer(con);
			con->in_tag = CEPH_MSGR_TAG_READY;
			con->in_seq++;
			return 1;
		}

		BUG_ON(!con->in_msg);
		BUG_ON(con->in_msg->con != con);
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		if (m->middle)
			m->middle->vec.iov_len = 0;

		/* prepare for data payload, if any */

		if (data_len)
			prepare_message_data(con->in_msg, data_len);
	}

	/* front */
	ret = read_partial_message_section(con, &m->front, front_len,
					   &con->in_front_crc);
	if (ret <= 0)
		return ret;

	/* middle */
	if (m->middle) {
		ret = read_partial_message_section(con, &m->middle->vec,
						   middle_len,
						   &con->in_middle_crc);
		if (ret <= 0)
			return ret;
	}

	/* (page) data */
	if (data_len) {
		ret = read_partial_msg_data(con);
		if (ret <= 0)
			return ret;
	}

	/* footer */
	size = sizeof_footer(con);
	end += size;
	ret = read_partial(con, end, size, &m->footer);
	if (ret <= 0)
		return ret;

	if (!need_sign) {
		m->footer.flags = m->old_footer.flags;
		m->footer.sig = 0;
	}

	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc, m->footer.front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc, m->footer.middle_crc);
		return -EBADMSG;
	}
	if (do_datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	if (need_sign && con->ops->check_message_signature &&
	    con->ops->check_message_signature(m)) {
		pr_err("read_partial_message %p signature check failed\n", m);
		return -EBADMSG;
	}

	return 1; /* done! */
}
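
/*
 * Sketch of the header crc check performed in read_partial_message()
 * (the helper name is hypothetical): the crc is computed over the
 * header up to, but not including, the crc field itself, then
 * compared against the little-endian value the peer sent.
 */
static bool __maybe_unused example_hdr_crc_ok(const struct ceph_msg_header *hdr)
{
	u32 crc = crc32c(0, hdr, offsetof(struct ceph_msg_header, crc));

	return cpu_to_le32(crc) == hdr->crc;
}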

/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->in_msg;

	BUG_ON(con->in_msg->con != con);
	con->in_msg = NULL;

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
}
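
/*
 * Sketch of the lock-drop pattern used by process_message() above
 * (the helper name is hypothetical): con->mutex is released around
 * ->dispatch() because the callback may re-enter the messenger, e.g.
 * call ceph_con_send(), which takes con->mutex, and would otherwise
 * deadlock.
 */
static void __maybe_unused example_dispatch_unlocked(struct ceph_connection *con,
						     struct ceph_msg *msg)
{
	lockdep_assert_held(&con->mutex);

	mutex_unlock(&con->mutex);
	con->ops->dispatch(con, msg);	/* may call back into the messenger */
	mutex_lock(&con->mutex);
}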

static int read_keepalive_ack(struct ceph_connection *con)
{
	struct ceph_timespec ceph_ts;
	size_t size = sizeof(ceph_ts);
	int ret = read_partial(con, size, size, &ceph_ts);

	if (ret <= 0)
		return ret;
	ceph_decode_timespec64(&con->last_keepalive_ack, &ceph_ts);
	prepare_read_tag(con);
	return 1;
}

/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	int ret = 1;

	dout("try_write start %p state %lu\n", con, con->state);
	if (con->state != CON_STATE_PREOPEN &&
	    con->state != CON_STATE_CONNECTING &&
	    con->state != CON_STATE_NEGOTIATING &&
	    con->state != CON_STATE_OPEN)
		return 0;

	/* open the socket first? */
	if (con->state == CON_STATE_PREOPEN) {
		BUG_ON(con->sock);
		con->state = CON_STATE_CONNECTING;

		con_out_kvec_reset(con);
		prepare_write_banner(con);
		prepare_read_banner(con);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		ret = ceph_tcp_connect(con);
		if (ret < 0) {
			con->error_msg = "connect error";
			goto out;
		}
	}

more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
	BUG_ON(!con->sock);

	/* kvec data queued? */
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto out;
	}
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret <= 0)
			goto out;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_message_data(con);
		if (ret == 1)
			goto more;  /* we need to send the footer, too! */
		if (ret == 0)
			goto out;
		if (ret < 0) {
			dout("try_write write_partial_message_data err %d\n",
			     ret);
			goto out;
		}
	}

do_next:
	if (con->state == CON_STATE_OPEN) {
		if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
			prepare_write_keepalive(con);
			goto more;
		}
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
	}

	/* Nothing to do! */
	con_flag_clear(con, CON_FLAG_WRITE_PENDING);
	dout("try_write nothing else to write.\n");
	ret = 0;
out:
	dout("try_write done on %p ret %d\n", con, ret);
	return ret;
}

/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	int ret = -1;

more:
	dout("try_read start on %p state %lu\n", con, con->state);
	if (con->state != CON_STATE_CONNECTING &&
	    con->state != CON_STATE_NEGOTIATING &&
	    con->state != CON_STATE_OPEN)
		return 0;

	BUG_ON(!con->sock);

	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);

	if (con->state == CON_STATE_CONNECTING) {
		dout("try_read connecting\n");
		ret = read_partial_banner(con);
		if (ret <= 0)
			goto out;
		ret = process_banner(con);
		if (ret < 0)
			goto out;

		con->state = CON_STATE_NEGOTIATING;

		/*
		 * Received banner is good, exchange connection info.
		 * Do not reset out_kvec, as sending our banner raced
		 * with receiving peer banner after connect completed.
		 */
		ret = prepare_write_connect(con);
		if (ret < 0)
			goto out;
		prepare_read_connect(con);

		/* Send connection info before awaiting response */
		goto out;
	}

	if (con->state == CON_STATE_NEGOTIATING) {
		dout("try_read negotiating\n");
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto out;
		ret = process_connect(con);
		if (ret < 0)
			goto out;
		goto more;
	}

	WARN_ON(con->state != CON_STATE_OPEN);

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 */
		ret = ceph_tcp_recvmsg(con->sock, NULL, -con->in_base_pos);
		if (ret <= 0)
			goto out;
		dout("skipped %d / %d bytes\n", ret, -con->in_base_pos);
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto out;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_KEEPALIVE2_ACK:
			prepare_read_keepalive_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			con_close_socket(con);
			con->state = CON_STATE_CLOSED;
			goto out;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc/signature";
				/* fall through */
			case -EBADE:
				ret = -EIO;
				break;
			case -EIO:
				con->error_msg = "io error";
				break;
			}
			goto out;
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		if (con->state == CON_STATE_OPEN)
			prepare_read_tag(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK ||
	    con->in_tag == CEPH_MSGR_TAG_SEQ) {
		/*
		 * the final handshake seq exchange is semantically
		 * equivalent to an ACK
		 */
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto out;
		process_ack(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
		ret = read_keepalive_ack(con);
		if (ret <= 0)
			goto out;
		goto more;
	}

out:
	dout("try_read done on %p ret %d\n", con, ret);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}

/*
 * Atomically queue work on a connection after the specified delay.
 * Bump @con reference to avoid races with connection teardown.
 * Returns 0 if work was queued, or an error code otherwise.
 */
static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
{
	if (!con->ops->get(con)) {
		dout("%s %p ref count 0\n", __func__, con);
		return -ENOENT;
	}

	if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
		dout("%s %p - already queued\n", __func__, con);
		con->ops->put(con);
		return -EBUSY;
	}

	dout("%s %p %lu\n", __func__, con, delay);
	return 0;
}

static void queue_con(struct ceph_connection *con)
{
	(void) queue_con_delay(con, 0);
}

static void cancel_con(struct ceph_connection *con)
{
	if (cancel_delayed_work(&con->work)) {
		dout("%s %p\n", __func__, con);
		con->ops->put(con);
	}
}
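
/*
 * Sketch of how the helpers above are used (hypothetical caller,
 * mirroring con_backoff() below): queue_con() is just
 * queue_con_delay() with no delay, and a failure to queue is only
 * tolerable when the work is already pending or the con is gone.
 */
static void __maybe_unused example_requeue_backoff(struct ceph_connection *con)
{
	int ret = queue_con_delay(con, round_jiffies_relative(con->delay));

	if (ret == -EBUSY)
		return;		/* already queued; the worker will run */
	if (ret == -ENOENT)
		return;		/* connection is being torn down */
}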

static bool con_sock_closed(struct ceph_connection *con)
{
	if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
		return false;

#define CASE(x)								\
	case CON_STATE_ ## x:						\
		con->error_msg = "socket closed (con state " #x ")";	\
		break;

	switch (con->state) {
	CASE(CLOSED);
	CASE(PREOPEN);
	CASE(CONNECTING);
	CASE(NEGOTIATING);
	CASE(OPEN);
	CASE(STANDBY);
	default:
		pr_warn("%s con %p unrecognized state %lu\n",
			__func__, con, con->state);
		con->error_msg = "unrecognized con state";
		BUG();
		break;
	}
#undef CASE

	return true;
}

static bool con_backoff(struct ceph_connection *con)
{
	int ret;

	if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
		return false;

	ret = queue_con_delay(con, round_jiffies_relative(con->delay));
	if (ret) {
		dout("%s: con %p FAILED to back off %lu\n", __func__,
		     con, con->delay);
		BUG_ON(ret == -ENOENT);
		con_flag_set(con, CON_FLAG_BACKOFF);
	}

	return true;
}

/* Finish fault handling; con->mutex must *not* be held here */

static void con_fault_finish(struct ceph_connection *con)
{
	dout("%s %p\n", __func__, con);

	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
	if (con->auth_retry) {
		dout("auth_retry %d, invalidating\n", con->auth_retry);
		if (con->ops->invalidate_authorizer)
			con->ops->invalidate_authorizer(con);
		con->auth_retry = 0;
	}

	if (con->ops->fault)
		con->ops->fault(con);
}

/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void ceph_con_workfn(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	bool fault;

	mutex_lock(&con->mutex);
	while (true) {
		int ret;

		if ((fault = con_sock_closed(con))) {
			dout("%s: con %p SOCK_CLOSED\n", __func__, con);
			break;
		}
		if (con_backoff(con)) {
			dout("%s: con %p BACKOFF\n", __func__, con);
			break;
		}
		if (con->state == CON_STATE_STANDBY) {
			dout("%s: con %p STANDBY\n", __func__, con);
			break;
		}
		if (con->state == CON_STATE_CLOSED) {
			dout("%s: con %p CLOSED\n", __func__, con);
			BUG_ON(con->sock);
			break;
		}
		if (con->state == CON_STATE_PREOPEN) {
			dout("%s: con %p PREOPEN\n", __func__, con);
			BUG_ON(con->sock);
		}

		ret = try_read(con);
		if (ret < 0) {
			if (ret == -EAGAIN)
				continue;
			if (!con->error_msg)
				con->error_msg = "socket error on read";
			fault = true;
			break;
		}

		ret = try_write(con);
		if (ret < 0) {
			if (ret == -EAGAIN)
				continue;
			if (!con->error_msg)
				con->error_msg = "socket error on write";
			fault = true;
		}

		break;	/* If we make it to here, we're done */
	}
	if (fault)
		con_fault(con);
	mutex_unlock(&con->mutex);

	if (fault)
		con_fault_finish(con);

	con->ops->put(con);
}

/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff.
 */
static void con_fault(struct ceph_connection *con)
{
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));

	pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
		ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
	con->error_msg = NULL;

	WARN_ON(con->state != CON_STATE_CONNECTING &&
		con->state != CON_STATE_NEGOTIATING &&
		con->state != CON_STATE_OPEN);

	con_close_socket(con);

	if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
		dout("fault on LOSSYTX channel, marking CLOSED\n");
		con->state = CON_STATE_CLOSED;
		return;
	}

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages queued or keepalive pending, place
	 * the connection in a STANDBY state */
	if (list_empty(&con->out_queue) &&
	    !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
		con_flag_clear(con, CON_FLAG_WRITE_PENDING);
		con->state = CON_STATE_STANDBY;
	} else {
		/* retry after a delay. */
		con->state = CON_STATE_PREOPEN;
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		con_flag_set(con, CON_FLAG_BACKOFF);
		queue_con(con);
	}
}
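
/*
 * Worked example of the delay doubling in con_fault() above, as a
 * hypothetical helper: starting from BASE_DELAY_INTERVAL the delay
 * doubles on every consecutive fault until it saturates at
 * MAX_DELAY_INTERVAL; a successful handshake resets it to 0.
 */
static unsigned long __maybe_unused example_next_delay(unsigned long delay)
{
	if (delay == 0)
		return BASE_DELAY_INTERVAL;
	if (delay < MAX_DELAY_INTERVAL)
		return delay * 2;
	return delay;	/* capped: retries continue at the max interval */
}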


/*
 * initialize a new messenger instance
 */
void ceph_messenger_init(struct ceph_messenger *msgr,
			 struct ceph_entity_addr *myaddr)
{
	spin_lock_init(&msgr->global_seq_lock);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);

	atomic_set(&msgr->stopping, 0);
	write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));

	dout("%s %p\n", __func__, msgr);
}
EXPORT_SYMBOL(ceph_messenger_init);

void ceph_messenger_fini(struct ceph_messenger *msgr)
{
	put_net(read_pnet(&msgr->net));
}
EXPORT_SYMBOL(ceph_messenger_fini);

static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
{
	if (msg->con)
		msg->con->ops->put(msg->con);

	msg->con = con ? con->ops->get(con) : NULL;
	BUG_ON(msg->con != con);
}

static void clear_standby(struct ceph_connection *con)
{
	/* come back from STANDBY? */
	if (con->state == CON_STATE_STANDBY) {
		dout("clear_standby %p and ++connect_seq\n", con);
		con->state = CON_STATE_PREOPEN;
		con->connect_seq++;
		WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
		WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
	}
}

/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	/* set src+dst */
	msg->hdr.src = con->msgr->inst.name;
	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
	msg->needs_out_seq = true;

	mutex_lock(&con->mutex);

	if (con->state == CON_STATE_CLOSED) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		mutex_unlock(&con->mutex);
		return;
	}

	msg_con_set(msg, con);

	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));

	clear_standby(con);
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);

/*
 * Revoke a message that was previously queued for send
 */
void ceph_msg_revoke(struct ceph_msg *msg)
{
	struct ceph_connection *con = msg->con;

	if (!con) {
		dout("%s msg %p null con\n", __func__, msg);
		return;		/* Message not in our possession */
	}

	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("%s %p msg %p - was on queue\n", __func__, con, msg);
		list_del_init(&msg->list_head);
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	if (con->out_msg == msg) {
		BUG_ON(con->out_skip);
		/* footer */
		if (con->out_msg_done) {
			con->out_skip += con_out_kvec_skip(con);
		} else {
			BUG_ON(!msg->data_length);
			con->out_skip += sizeof_footer(con);
		}
		/* data, middle, front */
		if (msg->data_length)
			con->out_skip += msg->cursor.total_resid;
		if (msg->middle)
			con->out_skip += con_out_kvec_skip(con);
		con->out_skip += con_out_kvec_skip(con);

		dout("%s %p msg %p - was sending, will write %d skip %d\n",
		     __func__, con, msg, con->out_kvec_bytes, con->out_skip);
		msg->hdr.seq = 0;
		con->out_msg = NULL;
		ceph_msg_put(msg);
	}

	mutex_unlock(&con->mutex);
}

/*
 * Revoke a message that we may be reading data into
 */
void ceph_msg_revoke_incoming(struct ceph_msg *msg)
{
	struct ceph_connection *con = msg->con;

	if (!con) {
		dout("%s msg %p null con\n", __func__, msg);
		return;		/* Message not in our possession */
	}

	mutex_lock(&con->mutex);
	if (con->in_msg == msg) {
		unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("%s %p msg %p revoked\n", __func__, con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
	} else {
		dout("%s %p in_msg %p msg %p no-op\n",
		     __func__, con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	dout("con_keepalive %p\n", con);
	mutex_lock(&con->mutex);
	clear_standby(con);
	mutex_unlock(&con->mutex);
	if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
	    con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);

bool ceph_con_keepalive_expired(struct ceph_connection *con,
				unsigned long interval)
{
	if (interval > 0 &&
	    (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
		struct timespec64 now;
		struct timespec64 ts;

		ktime_get_real_ts64(&now);
		jiffies_to_timespec64(interval, &ts);
		ts = timespec64_add(con->last_keepalive_ack, ts);
		return timespec64_compare(&now, &ts) >= 0;
	}
	return false;
}

static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
{
	BUG_ON(msg->num_data_items >= msg->max_data_items);
	return &msg->data[msg->num_data_items++];
}

static void ceph_msg_data_destroy(struct ceph_msg_data *data)
{
	if (data->type == CEPH_MSG_DATA_PAGELIST)
		ceph_pagelist_release(data->pagelist);
}

void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
			     size_t length, size_t alignment)
{
	struct ceph_msg_data *data;

	BUG_ON(!pages);
	BUG_ON(!length);

	data = ceph_msg_data_add(msg);
	data->type = CEPH_MSG_DATA_PAGES;
	data->pages = pages;
	data->length = length;
	data->alignment = alignment & ~PAGE_MASK;

	msg->data_length += length;
}
EXPORT_SYMBOL(ceph_msg_data_add_pages);

void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
				struct ceph_pagelist *pagelist)
{
	struct ceph_msg_data *data;

	BUG_ON(!pagelist);
	BUG_ON(!pagelist->length);

	data = ceph_msg_data_add(msg);
	data->type = CEPH_MSG_DATA_PAGELIST;
	refcount_inc(&pagelist->refcnt);
	data->pagelist = pagelist;

	msg->data_length += pagelist->length;
}
EXPORT_SYMBOL(ceph_msg_data_add_pagelist);

#ifdef CONFIG_BLOCK
void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
			   u32 length)
{
	struct ceph_msg_data *data;

	data = ceph_msg_data_add(msg);
	data->type = CEPH_MSG_DATA_BIO;
	data->bio_pos = *bio_pos;
	data->bio_length = length;

	msg->data_length += length;
}
EXPORT_SYMBOL(ceph_msg_data_add_bio);
#endif	/* CONFIG_BLOCK */

void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
			     struct ceph_bvec_iter *bvec_pos)
{
	struct ceph_msg_data *data;

	data = ceph_msg_data_add(msg);
	data->type = CEPH_MSG_DATA_BVECS;
	data->bvec_pos = *bvec_pos;

	msg->data_length += bvec_pos->iter.bi_size;
}
EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
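
/*
 * Illustrative sketch (hypothetical caller): attaching a page array
 * as message payload.  Only the offset within the first page is kept
 * -- ceph_msg_data_add_pages() masks the alignment with ~PAGE_MASK --
 * so passing a full buffer offset is fine.
 */
static void __maybe_unused example_attach_pages(struct ceph_msg *msg,
						struct page **pages,
						size_t length, size_t offset)
{
	/* msg must have been allocated with a free data slot */
	ceph_msg_data_add_pages(msg, pages, length, offset);
	/* msg->data_length grew by length; pages must outlive msg */
}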

/*
 * Construct a new message with given type and size.
 * The new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
			       gfp_t flags, bool can_fail)
{
	struct ceph_msg *m;

	m = kmem_cache_zalloc(ceph_msg_cache, flags);
	if (m == NULL)
		goto out;

	m->hdr.type = cpu_to_le16(type);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->hdr.front_len = cpu_to_le32(front_len);

	INIT_LIST_HEAD(&m->list_head);
	kref_init(&m->kref);

	/* front */
	if (front_len) {
		m->front.iov_base = ceph_kvmalloc(front_len, flags);
		if (m->front.iov_base == NULL) {
			dout("ceph_msg_new can't allocate %d bytes\n",
			     front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front_alloc_len = m->front.iov_len = front_len;

	if (max_data_items) {
		m->data = kmalloc_array(max_data_items, sizeof(*m->data),
					flags);
		if (!m->data)
			goto out2;

		m->max_data_items = max_data_items;
	}

	dout("ceph_msg_new %p front %d\n", m, front_len);
	return m;

out2:
	ceph_msg_put(m);
out:
	if (!can_fail) {
		pr_err("msg_new can't create type %d front %d\n", type,
		       front_len);
		WARN_ON(1);
	} else {
		dout("msg_new can't create type %d front %d\n", type,
		     front_len);
	}
	return NULL;
}
EXPORT_SYMBOL(ceph_msg_new2);

struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
			      bool can_fail)
{
	return ceph_msg_new2(type, front_len, 0, flags, can_fail);
}
EXPORT_SYMBOL(ceph_msg_new);
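
/*
 * Sketch (hypothetical caller): allocating a message with a 128-byte
 * front section and room for one data item, tolerating failure.
 * With can_fail == false a NULL return would instead WARN.
 */
static struct ceph_msg * __maybe_unused example_msg_alloc(void)
{
	struct ceph_msg *msg;

	msg = ceph_msg_new2(CEPH_MSG_PING, 128, 1, GFP_NOFS, true);
	if (!msg)
		return NULL;	/* allocation failed; caller handles it */

	return msg;
}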

/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}

/*
 * Allocate a message for receiving an incoming message on a
 * connection, and save the result in con->in_msg.  Uses the
 * connection's private alloc_msg op if available.
 *
 * Returns 0 on success, or a negative error code.
 *
 * On success, if we set *skip = 1:
 *  - the next message should be skipped and ignored.
 *  - con->in_msg == NULL
 * or if we set *skip = 0:
 *  - con->in_msg is non-null.
 * On error (ENOMEM, EAGAIN, ...),
 *  - con->in_msg == NULL
 */
static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
{
	struct ceph_msg_header *hdr = &con->in_hdr;
	int middle_len = le32_to_cpu(hdr->middle_len);
	struct ceph_msg *msg;
	int ret = 0;

	BUG_ON(con->in_msg != NULL);
	BUG_ON(!con->ops->alloc_msg);

	mutex_unlock(&con->mutex);
	msg = con->ops->alloc_msg(con, hdr, skip);
	mutex_lock(&con->mutex);
	if (con->state != CON_STATE_OPEN) {
		if (msg)
			ceph_msg_put(msg);
		return -EAGAIN;
	}
	if (msg) {
		BUG_ON(*skip);
		msg_con_set(msg, con);
		con->in_msg = msg;
	} else {
		/*
		 * Null message pointer means either we should skip
		 * this message or we couldn't allocate memory.  The
		 * former is not an error.
		 */
		if (*skip)
			return 0;

		con->error_msg = "error allocating memory for incoming message";
		return -ENOMEM;
	}
	memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !con->in_msg->middle) {
		ret = ceph_alloc_middle(con, con->in_msg);
		if (ret < 0) {
			ceph_msg_put(con->in_msg);
			con->in_msg = NULL;
		}
	}

	return ret;
}


/*
 * Free a generically kmalloc'd message.
 */
static void ceph_msg_free(struct ceph_msg *m)
{
	dout("%s %p\n", __func__, m);
	kvfree(m->front.iov_base);
	kfree(m->data);
	kmem_cache_free(ceph_msg_cache, m);
}

static void ceph_msg_release(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
	int i;

	dout("%s %p\n", __func__, m);
	WARN_ON(!list_empty(&m->list_head));

	msg_con_set(m, NULL);

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}

	for (i = 0; i < m->num_data_items; i++)
		ceph_msg_data_destroy(&m->data[i]);

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_free(m);
}

struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
{
	dout("%s %p (was %d)\n", __func__, msg,
	     kref_read(&msg->kref));
	kref_get(&msg->kref);
	return msg;
}
EXPORT_SYMBOL(ceph_msg_get);

void ceph_msg_put(struct ceph_msg *msg)
{
	dout("%s %p (was %d)\n", __func__, msg,
	     kref_read(&msg->kref));
	kref_put(&msg->kref, ceph_msg_release);
}
EXPORT_SYMBOL(ceph_msg_put);

void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
		 msg->front_alloc_len, msg->data_length);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);
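
/*
 * Sketch of the message refcounting contract (hypothetical caller):
 * ceph_msg_new*() returns a message holding one reference; each
 * ceph_msg_get() must be paired with a ceph_msg_put(), and the final
 * put frees the message (or returns it to its msgpool).
 */
static void __maybe_unused example_msg_lifetime(void)
{
	struct ceph_msg *msg = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, true);

	if (!msg)
		return;

	ceph_msg_get(msg);	/* e.g. hand a ref to another context */
	ceph_msg_put(msg);	/* that context is done */
	ceph_msg_put(msg);	/* drop the original ref; msg is freed */
}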