// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif	/* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------                \    \
 *       |  | CLOSING |  socket event;  \    \
 *       |  -----------  await close     \    \
 *       |       ^                        \   |
 *       |       |                         \  |
 *       |       + con_sock_state_closing() \ |
 *       |      / \                         | |
 *       |     /   ---------------          | |
 *       |    /                   \         v v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       |  |   /                 --------------  connect initiated
 *       |  |   | con_sock_state_connected()
 *       |  |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
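 *
 * For illustration, a clean connect-and-teardown walks the diagram as
 *
 *   NEW -> CLOSED            con_sock_state_init()
 *   CLOSED -> CONNECTING     con_sock_state_connecting()
 *   CONNECTING -> CONNECTED  con_sock_state_connected()
 *   CONNECTED -> CLOSING     con_sock_state_closing() (socket event)
 *   CLOSING -> CLOSED        con_sock_state_closed()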
 */

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

/*
 * connection states
 */
#define CON_STATE_CLOSED	1	/* -> PREOPEN */
#define CON_STATE_PREOPEN	2	/* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3	/* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4	/* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5	/* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6	/* -> PREOPEN, CLOSED */

/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX	   0	/* we can close channel or drop
					 * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING 1	/* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING	   2	/* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED	   3	/* socket state changed to closed */
#define CON_FLAG_BACKOFF	   4	/* need to retry queuing delayed work */

static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CON_FLAG_LOSSYTX:
	case CON_FLAG_KEEPALIVE_PENDING:
	case CON_FLAG_WRITE_PENDING:
	case CON_FLAG_SOCK_CLOSED:
	case CON_FLAG_BACKOFF:
		return true;
	default:
		return false;
	}
}

static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
				    unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
				  unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_set_bit(con_flag, &con->flags);
}

/* Slab caches for frequently-allocated structures */

static struct kmem_cache *ceph_msg_cache;

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

static void queue_con(struct ceph_connection *con);
static void cancel_con(struct ceph_connection *con);
static void ceph_con_workfn(struct work_struct *);
static void con_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
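 * Each call hands back one of ADDR_STR_COUNT static buffers, chosen
 * round-robin via an atomic counter, so several recent results remain
 * usable at once.  It is only an approximation: once more than
 * ADDR_STR_COUNT strings are in flight, the oldest gets overwritten.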
178 */ 179 #define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */ 180 #define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG) 181 #define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1) 182 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */ 183 184 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN]; 185 static atomic_t addr_str_seq = ATOMIC_INIT(0); 186 187 static struct page *zero_page; /* used in certain error cases */ 188 189 const char *ceph_pr_addr(const struct sockaddr_storage *ss) 190 { 191 int i; 192 char *s; 193 struct sockaddr_in *in4 = (struct sockaddr_in *) ss; 194 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss; 195 196 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK; 197 s = addr_str[i]; 198 199 switch (ss->ss_family) { 200 case AF_INET: 201 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr, 202 ntohs(in4->sin_port)); 203 break; 204 205 case AF_INET6: 206 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr, 207 ntohs(in6->sin6_port)); 208 break; 209 210 default: 211 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)", 212 ss->ss_family); 213 } 214 215 return s; 216 } 217 EXPORT_SYMBOL(ceph_pr_addr); 218 219 static void encode_my_addr(struct ceph_messenger *msgr) 220 { 221 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr)); 222 ceph_encode_addr(&msgr->my_enc_addr); 223 } 224 225 /* 226 * work queue for all reading and writing to/from the socket. 227 */ 228 static struct workqueue_struct *ceph_msgr_wq; 229 230 static int ceph_msgr_slab_init(void) 231 { 232 BUG_ON(ceph_msg_cache); 233 ceph_msg_cache = KMEM_CACHE(ceph_msg, 0); 234 if (!ceph_msg_cache) 235 return -ENOMEM; 236 237 return 0; 238 } 239 240 static void ceph_msgr_slab_exit(void) 241 { 242 BUG_ON(!ceph_msg_cache); 243 kmem_cache_destroy(ceph_msg_cache); 244 ceph_msg_cache = NULL; 245 } 246 247 static void _ceph_msgr_exit(void) 248 { 249 if (ceph_msgr_wq) { 250 destroy_workqueue(ceph_msgr_wq); 251 ceph_msgr_wq = NULL; 252 } 253 254 BUG_ON(zero_page == NULL); 255 put_page(zero_page); 256 zero_page = NULL; 257 258 ceph_msgr_slab_exit(); 259 } 260 261 int __init ceph_msgr_init(void) 262 { 263 if (ceph_msgr_slab_init()) 264 return -ENOMEM; 265 266 BUG_ON(zero_page != NULL); 267 zero_page = ZERO_PAGE(0); 268 get_page(zero_page); 269 270 /* 271 * The number of active work items is limited by the number of 272 * connections, so leave @max_active at default. 
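 * WQ_MEM_RECLAIM is still needed, though: socket work queued here may
 * have to make forward progress while the system is reclaiming memory
 * (e.g. when the messenger is carrying writeback traffic), so the
 * workqueue must have a rescuer thread.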
273 */ 274 ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0); 275 if (ceph_msgr_wq) 276 return 0; 277 278 pr_err("msgr_init failed to create workqueue\n"); 279 _ceph_msgr_exit(); 280 281 return -ENOMEM; 282 } 283 284 void ceph_msgr_exit(void) 285 { 286 BUG_ON(ceph_msgr_wq == NULL); 287 288 _ceph_msgr_exit(); 289 } 290 291 void ceph_msgr_flush(void) 292 { 293 flush_workqueue(ceph_msgr_wq); 294 } 295 EXPORT_SYMBOL(ceph_msgr_flush); 296 297 /* Connection socket state transition functions */ 298 299 static void con_sock_state_init(struct ceph_connection *con) 300 { 301 int old_state; 302 303 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED); 304 if (WARN_ON(old_state != CON_SOCK_STATE_NEW)) 305 printk("%s: unexpected old state %d\n", __func__, old_state); 306 dout("%s con %p sock %d -> %d\n", __func__, con, old_state, 307 CON_SOCK_STATE_CLOSED); 308 } 309 310 static void con_sock_state_connecting(struct ceph_connection *con) 311 { 312 int old_state; 313 314 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING); 315 if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED)) 316 printk("%s: unexpected old state %d\n", __func__, old_state); 317 dout("%s con %p sock %d -> %d\n", __func__, con, old_state, 318 CON_SOCK_STATE_CONNECTING); 319 } 320 321 static void con_sock_state_connected(struct ceph_connection *con) 322 { 323 int old_state; 324 325 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED); 326 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING)) 327 printk("%s: unexpected old state %d\n", __func__, old_state); 328 dout("%s con %p sock %d -> %d\n", __func__, con, old_state, 329 CON_SOCK_STATE_CONNECTED); 330 } 331 332 static void con_sock_state_closing(struct ceph_connection *con) 333 { 334 int old_state; 335 336 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING); 337 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING && 338 old_state != CON_SOCK_STATE_CONNECTED && 339 old_state != CON_SOCK_STATE_CLOSING)) 340 printk("%s: unexpected old state %d\n", __func__, old_state); 341 dout("%s con %p sock %d -> %d\n", __func__, con, old_state, 342 CON_SOCK_STATE_CLOSING); 343 } 344 345 static void con_sock_state_closed(struct ceph_connection *con) 346 { 347 int old_state; 348 349 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED); 350 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED && 351 old_state != CON_SOCK_STATE_CLOSING && 352 old_state != CON_SOCK_STATE_CONNECTING && 353 old_state != CON_SOCK_STATE_CLOSED)) 354 printk("%s: unexpected old state %d\n", __func__, old_state); 355 dout("%s con %p sock %d -> %d\n", __func__, con, old_state, 356 CON_SOCK_STATE_CLOSED); 357 } 358 359 /* 360 * socket callback functions 361 */ 362 363 /* data available on socket, or listen socket received a connect */ 364 static void ceph_sock_data_ready(struct sock *sk) 365 { 366 struct ceph_connection *con = sk->sk_user_data; 367 if (atomic_read(&con->msgr->stopping)) { 368 return; 369 } 370 371 if (sk->sk_state != TCP_CLOSE_WAIT) { 372 dout("%s on %p state = %lu, queueing work\n", __func__, 373 con, con->state); 374 queue_con(con); 375 } 376 } 377 378 /* socket has buffer space for writing */ 379 static void ceph_sock_write_space(struct sock *sk) 380 { 381 struct ceph_connection *con = sk->sk_user_data; 382 383 /* only queue to workqueue if there is data we want to write, 384 * and there is sufficient space in the socket buffer to accept 385 * more data. 
clear SOCK_NOSPACE so that ceph_sock_write_space() 386 * doesn't get called again until try_write() fills the socket 387 * buffer. See net/ipv4/tcp_input.c:tcp_check_space() 388 * and net/core/stream.c:sk_stream_write_space(). 389 */ 390 if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) { 391 if (sk_stream_is_writeable(sk)) { 392 dout("%s %p queueing write work\n", __func__, con); 393 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 394 queue_con(con); 395 } 396 } else { 397 dout("%s %p nothing to write\n", __func__, con); 398 } 399 } 400 401 /* socket's state has changed */ 402 static void ceph_sock_state_change(struct sock *sk) 403 { 404 struct ceph_connection *con = sk->sk_user_data; 405 406 dout("%s %p state = %lu sk_state = %u\n", __func__, 407 con, con->state, sk->sk_state); 408 409 switch (sk->sk_state) { 410 case TCP_CLOSE: 411 dout("%s TCP_CLOSE\n", __func__); 412 /* fall through */ 413 case TCP_CLOSE_WAIT: 414 dout("%s TCP_CLOSE_WAIT\n", __func__); 415 con_sock_state_closing(con); 416 con_flag_set(con, CON_FLAG_SOCK_CLOSED); 417 queue_con(con); 418 break; 419 case TCP_ESTABLISHED: 420 dout("%s TCP_ESTABLISHED\n", __func__); 421 con_sock_state_connected(con); 422 queue_con(con); 423 break; 424 default: /* Everything else is uninteresting */ 425 break; 426 } 427 } 428 429 /* 430 * set up socket callbacks 431 */ 432 static void set_sock_callbacks(struct socket *sock, 433 struct ceph_connection *con) 434 { 435 struct sock *sk = sock->sk; 436 sk->sk_user_data = con; 437 sk->sk_data_ready = ceph_sock_data_ready; 438 sk->sk_write_space = ceph_sock_write_space; 439 sk->sk_state_change = ceph_sock_state_change; 440 } 441 442 443 /* 444 * socket helpers 445 */ 446 447 /* 448 * initiate connection to a remote socket. 449 */ 450 static int ceph_tcp_connect(struct ceph_connection *con) 451 { 452 struct sockaddr_storage *paddr = &con->peer_addr.in_addr; 453 struct socket *sock; 454 unsigned int noio_flag; 455 int ret; 456 457 BUG_ON(con->sock); 458 459 /* sock_create_kern() allocates with GFP_KERNEL */ 460 noio_flag = memalloc_noio_save(); 461 ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family, 462 SOCK_STREAM, IPPROTO_TCP, &sock); 463 memalloc_noio_restore(noio_flag); 464 if (ret) 465 return ret; 466 sock->sk->sk_allocation = GFP_NOFS; 467 468 #ifdef CONFIG_LOCKDEP 469 lockdep_set_class(&sock->sk->sk_lock, &socket_class); 470 #endif 471 472 set_sock_callbacks(sock, con); 473 474 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr)); 475 476 con_sock_state_connecting(con); 477 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr), 478 O_NONBLOCK); 479 if (ret == -EINPROGRESS) { 480 dout("connect %s EINPROGRESS sk_state = %u\n", 481 ceph_pr_addr(&con->peer_addr.in_addr), 482 sock->sk->sk_state); 483 } else if (ret < 0) { 484 pr_err("connect %s error %d\n", 485 ceph_pr_addr(&con->peer_addr.in_addr), ret); 486 sock_release(sock); 487 return ret; 488 } 489 490 if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY)) { 491 int optval = 1; 492 493 ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, 494 (char *)&optval, sizeof(optval)); 495 if (ret) 496 pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d", 497 ret); 498 } 499 500 con->sock = sock; 501 return 0; 502 } 503 504 /* 505 * If @buf is NULL, discard up to @len bytes. 
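 * Discarding works by leaving the kvec base NULL and setting
 * MSG_TRUNC, so the transport consumes the bytes without copying them
 * anywhere.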
506 */ 507 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) 508 { 509 struct kvec iov = {buf, len}; 510 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 511 int r; 512 513 if (!buf) 514 msg.msg_flags |= MSG_TRUNC; 515 516 iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len); 517 r = sock_recvmsg(sock, &msg, msg.msg_flags); 518 if (r == -EAGAIN) 519 r = 0; 520 return r; 521 } 522 523 static int ceph_tcp_recvpage(struct socket *sock, struct page *page, 524 int page_offset, size_t length) 525 { 526 struct bio_vec bvec = { 527 .bv_page = page, 528 .bv_offset = page_offset, 529 .bv_len = length 530 }; 531 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 532 int r; 533 534 BUG_ON(page_offset + length > PAGE_SIZE); 535 iov_iter_bvec(&msg.msg_iter, READ, &bvec, 1, length); 536 r = sock_recvmsg(sock, &msg, msg.msg_flags); 537 if (r == -EAGAIN) 538 r = 0; 539 return r; 540 } 541 542 /* 543 * write something. @more is true if caller will be sending more data 544 * shortly. 545 */ 546 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, 547 size_t kvlen, size_t len, bool more) 548 { 549 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 550 int r; 551 552 if (more) 553 msg.msg_flags |= MSG_MORE; 554 else 555 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ 556 557 r = kernel_sendmsg(sock, &msg, iov, kvlen, len); 558 if (r == -EAGAIN) 559 r = 0; 560 return r; 561 } 562 563 /* 564 * @more: either or both of MSG_MORE and MSG_SENDPAGE_NOTLAST 565 */ 566 static int ceph_tcp_sendpage(struct socket *sock, struct page *page, 567 int offset, size_t size, int more) 568 { 569 ssize_t (*sendpage)(struct socket *sock, struct page *page, 570 int offset, size_t size, int flags); 571 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more; 572 int ret; 573 574 /* 575 * sendpage cannot properly handle pages with page_count == 0, 576 * we need to fall back to sendmsg if that's the case. 577 * 578 * Same goes for slab pages: skb_can_coalesce() allows 579 * coalescing neighboring slab objects into a single frag which 580 * triggers one of hardened usercopy checks. 581 */ 582 if (page_count(page) >= 1 && !PageSlab(page)) 583 sendpage = sock->ops->sendpage; 584 else 585 sendpage = sock_no_sendpage; 586 587 ret = sendpage(sock, page, offset, size, flags); 588 if (ret == -EAGAIN) 589 ret = 0; 590 591 return ret; 592 } 593 594 /* 595 * Shutdown/close the socket for the given connection. 596 */ 597 static int con_close_socket(struct ceph_connection *con) 598 { 599 int rc = 0; 600 601 dout("con_close_socket on %p sock %p\n", con, con->sock); 602 if (con->sock) { 603 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR); 604 sock_release(con->sock); 605 con->sock = NULL; 606 } 607 608 /* 609 * Forcibly clear the SOCK_CLOSED flag. It gets set 610 * independent of the connection mutex, and we could have 611 * received a socket close event before we had the chance to 612 * shut the socket down. 613 */ 614 con_flag_clear(con, CON_FLAG_SOCK_CLOSED); 615 616 con_sock_state_closed(con); 617 return rc; 618 } 619 620 /* 621 * Reset a connection. Discard all incoming and outgoing messages 622 * and clear *_seq state. 
623 */ 624 static void ceph_msg_remove(struct ceph_msg *msg) 625 { 626 list_del_init(&msg->list_head); 627 628 ceph_msg_put(msg); 629 } 630 static void ceph_msg_remove_list(struct list_head *head) 631 { 632 while (!list_empty(head)) { 633 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg, 634 list_head); 635 ceph_msg_remove(msg); 636 } 637 } 638 639 static void reset_connection(struct ceph_connection *con) 640 { 641 /* reset connection, out_queue, msg_ and connect_seq */ 642 /* discard existing out_queue and msg_seq */ 643 dout("reset_connection %p\n", con); 644 ceph_msg_remove_list(&con->out_queue); 645 ceph_msg_remove_list(&con->out_sent); 646 647 if (con->in_msg) { 648 BUG_ON(con->in_msg->con != con); 649 ceph_msg_put(con->in_msg); 650 con->in_msg = NULL; 651 } 652 653 con->connect_seq = 0; 654 con->out_seq = 0; 655 if (con->out_msg) { 656 BUG_ON(con->out_msg->con != con); 657 ceph_msg_put(con->out_msg); 658 con->out_msg = NULL; 659 } 660 con->in_seq = 0; 661 con->in_seq_acked = 0; 662 663 con->out_skip = 0; 664 } 665 666 /* 667 * mark a peer down. drop any open connections. 668 */ 669 void ceph_con_close(struct ceph_connection *con) 670 { 671 mutex_lock(&con->mutex); 672 dout("con_close %p peer %s\n", con, 673 ceph_pr_addr(&con->peer_addr.in_addr)); 674 con->state = CON_STATE_CLOSED; 675 676 con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */ 677 con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING); 678 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 679 con_flag_clear(con, CON_FLAG_BACKOFF); 680 681 reset_connection(con); 682 con->peer_global_seq = 0; 683 cancel_con(con); 684 con_close_socket(con); 685 mutex_unlock(&con->mutex); 686 } 687 EXPORT_SYMBOL(ceph_con_close); 688 689 /* 690 * Reopen a closed connection, with a new peer address. 691 */ 692 void ceph_con_open(struct ceph_connection *con, 693 __u8 entity_type, __u64 entity_num, 694 struct ceph_entity_addr *addr) 695 { 696 mutex_lock(&con->mutex); 697 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr)); 698 699 WARN_ON(con->state != CON_STATE_CLOSED); 700 con->state = CON_STATE_PREOPEN; 701 702 con->peer_name.type = (__u8) entity_type; 703 con->peer_name.num = cpu_to_le64(entity_num); 704 705 memcpy(&con->peer_addr, addr, sizeof(*addr)); 706 con->delay = 0; /* reset backoff memory */ 707 mutex_unlock(&con->mutex); 708 queue_con(con); 709 } 710 EXPORT_SYMBOL(ceph_con_open); 711 712 /* 713 * return true if this connection ever successfully opened 714 */ 715 bool ceph_con_opened(struct ceph_connection *con) 716 { 717 return con->connect_seq > 0; 718 } 719 720 /* 721 * initialize a new connection. 722 */ 723 void ceph_con_init(struct ceph_connection *con, void *private, 724 const struct ceph_connection_operations *ops, 725 struct ceph_messenger *msgr) 726 { 727 dout("con_init %p\n", con); 728 memset(con, 0, sizeof(*con)); 729 con->private = private; 730 con->ops = ops; 731 con->msgr = msgr; 732 733 con_sock_state_init(con); 734 735 mutex_init(&con->mutex); 736 INIT_LIST_HEAD(&con->out_queue); 737 INIT_LIST_HEAD(&con->out_sent); 738 INIT_DELAYED_WORK(&con->work, ceph_con_workfn); 739 740 con->state = CON_STATE_CLOSED; 741 } 742 EXPORT_SYMBOL(ceph_con_init); 743 744 745 /* 746 * We maintain a global counter to order connection attempts. Get 747 * a unique seq greater than @gt. 
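 *
 * For example, if msgr->global_seq is currently 5 and a peer reports
 * having seen 9, get_global_seq(msgr, 9) first raises the counter to
 * 9 and then returns the incremented value, 10.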
748 */ 749 static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt) 750 { 751 u32 ret; 752 753 spin_lock(&msgr->global_seq_lock); 754 if (msgr->global_seq < gt) 755 msgr->global_seq = gt; 756 ret = ++msgr->global_seq; 757 spin_unlock(&msgr->global_seq_lock); 758 return ret; 759 } 760 761 static void con_out_kvec_reset(struct ceph_connection *con) 762 { 763 BUG_ON(con->out_skip); 764 765 con->out_kvec_left = 0; 766 con->out_kvec_bytes = 0; 767 con->out_kvec_cur = &con->out_kvec[0]; 768 } 769 770 static void con_out_kvec_add(struct ceph_connection *con, 771 size_t size, void *data) 772 { 773 int index = con->out_kvec_left; 774 775 BUG_ON(con->out_skip); 776 BUG_ON(index >= ARRAY_SIZE(con->out_kvec)); 777 778 con->out_kvec[index].iov_len = size; 779 con->out_kvec[index].iov_base = data; 780 con->out_kvec_left++; 781 con->out_kvec_bytes += size; 782 } 783 784 /* 785 * Chop off a kvec from the end. Return residual number of bytes for 786 * that kvec, i.e. how many bytes would have been written if the kvec 787 * hadn't been nuked. 788 */ 789 static int con_out_kvec_skip(struct ceph_connection *con) 790 { 791 int off = con->out_kvec_cur - con->out_kvec; 792 int skip = 0; 793 794 if (con->out_kvec_bytes > 0) { 795 skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len; 796 BUG_ON(con->out_kvec_bytes < skip); 797 BUG_ON(!con->out_kvec_left); 798 con->out_kvec_bytes -= skip; 799 con->out_kvec_left--; 800 } 801 802 return skip; 803 } 804 805 #ifdef CONFIG_BLOCK 806 807 /* 808 * For a bio data item, a piece is whatever remains of the next 809 * entry in the current bio iovec, or the first entry in the next 810 * bio in the list. 811 */ 812 static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor, 813 size_t length) 814 { 815 struct ceph_msg_data *data = cursor->data; 816 struct ceph_bio_iter *it = &cursor->bio_iter; 817 818 cursor->resid = min_t(size_t, length, data->bio_length); 819 *it = data->bio_pos; 820 if (cursor->resid < it->iter.bi_size) 821 it->iter.bi_size = cursor->resid; 822 823 BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter)); 824 cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter); 825 } 826 827 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor, 828 size_t *page_offset, 829 size_t *length) 830 { 831 struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio, 832 cursor->bio_iter.iter); 833 834 *page_offset = bv.bv_offset; 835 *length = bv.bv_len; 836 return bv.bv_page; 837 } 838 839 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor, 840 size_t bytes) 841 { 842 struct ceph_bio_iter *it = &cursor->bio_iter; 843 struct page *page = bio_iter_page(it->bio, it->iter); 844 845 BUG_ON(bytes > cursor->resid); 846 BUG_ON(bytes > bio_iter_len(it->bio, it->iter)); 847 cursor->resid -= bytes; 848 bio_advance_iter(it->bio, &it->iter, bytes); 849 850 if (!cursor->resid) { 851 BUG_ON(!cursor->last_piece); 852 return false; /* no more data */ 853 } 854 855 if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done && 856 page == bio_iter_page(it->bio, it->iter))) 857 return false; /* more bytes to process in this segment */ 858 859 if (!it->iter.bi_size) { 860 it->bio = it->bio->bi_next; 861 it->iter = it->bio->bi_iter; 862 if (cursor->resid < it->iter.bi_size) 863 it->iter.bi_size = cursor->resid; 864 } 865 866 BUG_ON(cursor->last_piece); 867 BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter)); 868 cursor->last_piece = cursor->resid == bio_iter_len(it->bio, it->iter); 869 return true; 870 } 871 
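/*
 * Note: as with the other *_advance() helpers, the bio variant above
 * returns true only when the cursor has moved on to a new piece (a
 * different page, or the next bio in the chain).
 * ceph_msg_data_advance() uses that to decide whether a fresh data
 * CRC needs to be started for the next piece.
 */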
#endif /* CONFIG_BLOCK */ 872 873 static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor, 874 size_t length) 875 { 876 struct ceph_msg_data *data = cursor->data; 877 struct bio_vec *bvecs = data->bvec_pos.bvecs; 878 879 cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size); 880 cursor->bvec_iter = data->bvec_pos.iter; 881 cursor->bvec_iter.bi_size = cursor->resid; 882 883 BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter)); 884 cursor->last_piece = 885 cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter); 886 } 887 888 static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor, 889 size_t *page_offset, 890 size_t *length) 891 { 892 struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs, 893 cursor->bvec_iter); 894 895 *page_offset = bv.bv_offset; 896 *length = bv.bv_len; 897 return bv.bv_page; 898 } 899 900 static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor, 901 size_t bytes) 902 { 903 struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs; 904 struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter); 905 906 BUG_ON(bytes > cursor->resid); 907 BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter)); 908 cursor->resid -= bytes; 909 bvec_iter_advance(bvecs, &cursor->bvec_iter, bytes); 910 911 if (!cursor->resid) { 912 BUG_ON(!cursor->last_piece); 913 return false; /* no more data */ 914 } 915 916 if (!bytes || (cursor->bvec_iter.bi_bvec_done && 917 page == bvec_iter_page(bvecs, cursor->bvec_iter))) 918 return false; /* more bytes to process in this segment */ 919 920 BUG_ON(cursor->last_piece); 921 BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter)); 922 cursor->last_piece = 923 cursor->resid == bvec_iter_len(bvecs, cursor->bvec_iter); 924 return true; 925 } 926 927 /* 928 * For a page array, a piece comes from the first page in the array 929 * that has not already been fully consumed. 
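 *
 * The first piece honours the data alignment.  For illustration,
 * assuming 4K pages, 10000 bytes of data with alignment 512 yields a
 * first piece of 3584 bytes at offset 512, then one full 4096-byte
 * page, then a final 2320-byte piece.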
930 */ 931 static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor, 932 size_t length) 933 { 934 struct ceph_msg_data *data = cursor->data; 935 int page_count; 936 937 BUG_ON(data->type != CEPH_MSG_DATA_PAGES); 938 939 BUG_ON(!data->pages); 940 BUG_ON(!data->length); 941 942 cursor->resid = min(length, data->length); 943 page_count = calc_pages_for(data->alignment, (u64)data->length); 944 cursor->page_offset = data->alignment & ~PAGE_MASK; 945 cursor->page_index = 0; 946 BUG_ON(page_count > (int)USHRT_MAX); 947 cursor->page_count = (unsigned short)page_count; 948 BUG_ON(length > SIZE_MAX - cursor->page_offset); 949 cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE; 950 } 951 952 static struct page * 953 ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor, 954 size_t *page_offset, size_t *length) 955 { 956 struct ceph_msg_data *data = cursor->data; 957 958 BUG_ON(data->type != CEPH_MSG_DATA_PAGES); 959 960 BUG_ON(cursor->page_index >= cursor->page_count); 961 BUG_ON(cursor->page_offset >= PAGE_SIZE); 962 963 *page_offset = cursor->page_offset; 964 if (cursor->last_piece) 965 *length = cursor->resid; 966 else 967 *length = PAGE_SIZE - *page_offset; 968 969 return data->pages[cursor->page_index]; 970 } 971 972 static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor, 973 size_t bytes) 974 { 975 BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES); 976 977 BUG_ON(cursor->page_offset + bytes > PAGE_SIZE); 978 979 /* Advance the cursor page offset */ 980 981 cursor->resid -= bytes; 982 cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK; 983 if (!bytes || cursor->page_offset) 984 return false; /* more bytes to process in the current page */ 985 986 if (!cursor->resid) 987 return false; /* no more data */ 988 989 /* Move on to the next page; offset is already at 0 */ 990 991 BUG_ON(cursor->page_index >= cursor->page_count); 992 cursor->page_index++; 993 cursor->last_piece = cursor->resid <= PAGE_SIZE; 994 995 return true; 996 } 997 998 /* 999 * For a pagelist, a piece is whatever remains to be consumed in the 1000 * first page in the list, or the front of the next page. 
1001 */ 1002 static void 1003 ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor, 1004 size_t length) 1005 { 1006 struct ceph_msg_data *data = cursor->data; 1007 struct ceph_pagelist *pagelist; 1008 struct page *page; 1009 1010 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); 1011 1012 pagelist = data->pagelist; 1013 BUG_ON(!pagelist); 1014 1015 if (!length) 1016 return; /* pagelist can be assigned but empty */ 1017 1018 BUG_ON(list_empty(&pagelist->head)); 1019 page = list_first_entry(&pagelist->head, struct page, lru); 1020 1021 cursor->resid = min(length, pagelist->length); 1022 cursor->page = page; 1023 cursor->offset = 0; 1024 cursor->last_piece = cursor->resid <= PAGE_SIZE; 1025 } 1026 1027 static struct page * 1028 ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor, 1029 size_t *page_offset, size_t *length) 1030 { 1031 struct ceph_msg_data *data = cursor->data; 1032 struct ceph_pagelist *pagelist; 1033 1034 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); 1035 1036 pagelist = data->pagelist; 1037 BUG_ON(!pagelist); 1038 1039 BUG_ON(!cursor->page); 1040 BUG_ON(cursor->offset + cursor->resid != pagelist->length); 1041 1042 /* offset of first page in pagelist is always 0 */ 1043 *page_offset = cursor->offset & ~PAGE_MASK; 1044 if (cursor->last_piece) 1045 *length = cursor->resid; 1046 else 1047 *length = PAGE_SIZE - *page_offset; 1048 1049 return cursor->page; 1050 } 1051 1052 static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor, 1053 size_t bytes) 1054 { 1055 struct ceph_msg_data *data = cursor->data; 1056 struct ceph_pagelist *pagelist; 1057 1058 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); 1059 1060 pagelist = data->pagelist; 1061 BUG_ON(!pagelist); 1062 1063 BUG_ON(cursor->offset + cursor->resid != pagelist->length); 1064 BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE); 1065 1066 /* Advance the cursor offset */ 1067 1068 cursor->resid -= bytes; 1069 cursor->offset += bytes; 1070 /* offset of first page in pagelist is always 0 */ 1071 if (!bytes || cursor->offset & ~PAGE_MASK) 1072 return false; /* more bytes to process in the current page */ 1073 1074 if (!cursor->resid) 1075 return false; /* no more data */ 1076 1077 /* Move on to the next page */ 1078 1079 BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head)); 1080 cursor->page = list_next_entry(cursor->page, lru); 1081 cursor->last_piece = cursor->resid <= PAGE_SIZE; 1082 1083 return true; 1084 } 1085 1086 /* 1087 * Message data is handled (sent or received) in pieces, where each 1088 * piece resides on a single page. The network layer might not 1089 * consume an entire piece at once. A data item's cursor keeps 1090 * track of which piece is next to process and how much remains to 1091 * be processed in that piece. It also tracks whether the current 1092 * piece is the last one in the data item. 
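 *
 * The write path drives a cursor roughly like this (see
 * write_partial_message_data() below for the real loop):
 *
 *	ceph_msg_data_cursor_init(msg, data_len);
 *	while (cursor->total_resid) {
 *		page = ceph_msg_data_next(cursor, &off, &len, NULL);
 *		ret = send up to @len bytes of @page starting at @off;
 *		ceph_msg_data_advance(cursor, ret);
 *	}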
1093 */ 1094 static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor) 1095 { 1096 size_t length = cursor->total_resid; 1097 1098 switch (cursor->data->type) { 1099 case CEPH_MSG_DATA_PAGELIST: 1100 ceph_msg_data_pagelist_cursor_init(cursor, length); 1101 break; 1102 case CEPH_MSG_DATA_PAGES: 1103 ceph_msg_data_pages_cursor_init(cursor, length); 1104 break; 1105 #ifdef CONFIG_BLOCK 1106 case CEPH_MSG_DATA_BIO: 1107 ceph_msg_data_bio_cursor_init(cursor, length); 1108 break; 1109 #endif /* CONFIG_BLOCK */ 1110 case CEPH_MSG_DATA_BVECS: 1111 ceph_msg_data_bvecs_cursor_init(cursor, length); 1112 break; 1113 case CEPH_MSG_DATA_NONE: 1114 default: 1115 /* BUG(); */ 1116 break; 1117 } 1118 cursor->need_crc = true; 1119 } 1120 1121 static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length) 1122 { 1123 struct ceph_msg_data_cursor *cursor = &msg->cursor; 1124 1125 BUG_ON(!length); 1126 BUG_ON(length > msg->data_length); 1127 BUG_ON(!msg->num_data_items); 1128 1129 cursor->total_resid = length; 1130 cursor->data = msg->data; 1131 1132 __ceph_msg_data_cursor_init(cursor); 1133 } 1134 1135 /* 1136 * Return the page containing the next piece to process for a given 1137 * data item, and supply the page offset and length of that piece. 1138 * Indicate whether this is the last piece in this data item. 1139 */ 1140 static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor, 1141 size_t *page_offset, size_t *length, 1142 bool *last_piece) 1143 { 1144 struct page *page; 1145 1146 switch (cursor->data->type) { 1147 case CEPH_MSG_DATA_PAGELIST: 1148 page = ceph_msg_data_pagelist_next(cursor, page_offset, length); 1149 break; 1150 case CEPH_MSG_DATA_PAGES: 1151 page = ceph_msg_data_pages_next(cursor, page_offset, length); 1152 break; 1153 #ifdef CONFIG_BLOCK 1154 case CEPH_MSG_DATA_BIO: 1155 page = ceph_msg_data_bio_next(cursor, page_offset, length); 1156 break; 1157 #endif /* CONFIG_BLOCK */ 1158 case CEPH_MSG_DATA_BVECS: 1159 page = ceph_msg_data_bvecs_next(cursor, page_offset, length); 1160 break; 1161 case CEPH_MSG_DATA_NONE: 1162 default: 1163 page = NULL; 1164 break; 1165 } 1166 1167 BUG_ON(!page); 1168 BUG_ON(*page_offset + *length > PAGE_SIZE); 1169 BUG_ON(!*length); 1170 BUG_ON(*length > cursor->resid); 1171 if (last_piece) 1172 *last_piece = cursor->last_piece; 1173 1174 return page; 1175 } 1176 1177 /* 1178 * Returns true if the result moves the cursor on to the next piece 1179 * of the data item. 
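 * (That indication comes back from the per-type *_advance() helpers;
 * ceph_msg_data_advance() itself returns nothing and simply records
 * it in cursor->need_crc so the sender knows when to start a fresh
 * CRC for the new piece.)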
1180 */ 1181 static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, 1182 size_t bytes) 1183 { 1184 bool new_piece; 1185 1186 BUG_ON(bytes > cursor->resid); 1187 switch (cursor->data->type) { 1188 case CEPH_MSG_DATA_PAGELIST: 1189 new_piece = ceph_msg_data_pagelist_advance(cursor, bytes); 1190 break; 1191 case CEPH_MSG_DATA_PAGES: 1192 new_piece = ceph_msg_data_pages_advance(cursor, bytes); 1193 break; 1194 #ifdef CONFIG_BLOCK 1195 case CEPH_MSG_DATA_BIO: 1196 new_piece = ceph_msg_data_bio_advance(cursor, bytes); 1197 break; 1198 #endif /* CONFIG_BLOCK */ 1199 case CEPH_MSG_DATA_BVECS: 1200 new_piece = ceph_msg_data_bvecs_advance(cursor, bytes); 1201 break; 1202 case CEPH_MSG_DATA_NONE: 1203 default: 1204 BUG(); 1205 break; 1206 } 1207 cursor->total_resid -= bytes; 1208 1209 if (!cursor->resid && cursor->total_resid) { 1210 WARN_ON(!cursor->last_piece); 1211 cursor->data++; 1212 __ceph_msg_data_cursor_init(cursor); 1213 new_piece = true; 1214 } 1215 cursor->need_crc = new_piece; 1216 } 1217 1218 static size_t sizeof_footer(struct ceph_connection *con) 1219 { 1220 return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ? 1221 sizeof(struct ceph_msg_footer) : 1222 sizeof(struct ceph_msg_footer_old); 1223 } 1224 1225 static void prepare_message_data(struct ceph_msg *msg, u32 data_len) 1226 { 1227 /* Initialize data cursor */ 1228 1229 ceph_msg_data_cursor_init(msg, (size_t)data_len); 1230 } 1231 1232 /* 1233 * Prepare footer for currently outgoing message, and finish things 1234 * off. Assumes out_kvec* are already valid.. we just add on to the end. 1235 */ 1236 static void prepare_write_message_footer(struct ceph_connection *con) 1237 { 1238 struct ceph_msg *m = con->out_msg; 1239 1240 m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE; 1241 1242 dout("prepare_write_message_footer %p\n", con); 1243 con_out_kvec_add(con, sizeof_footer(con), &m->footer); 1244 if (con->peer_features & CEPH_FEATURE_MSG_AUTH) { 1245 if (con->ops->sign_message) 1246 con->ops->sign_message(m); 1247 else 1248 m->footer.sig = 0; 1249 } else { 1250 m->old_footer.flags = m->footer.flags; 1251 } 1252 con->out_more = m->more_to_follow; 1253 con->out_msg_done = true; 1254 } 1255 1256 /* 1257 * Prepare headers for the next outgoing message. 1258 */ 1259 static void prepare_write_message(struct ceph_connection *con) 1260 { 1261 struct ceph_msg *m; 1262 u32 crc; 1263 1264 con_out_kvec_reset(con); 1265 con->out_msg_done = false; 1266 1267 /* Sneak an ack in there first? If we can get it into the same 1268 * TCP packet that's a good thing. */ 1269 if (con->in_seq > con->in_seq_acked) { 1270 con->in_seq_acked = con->in_seq; 1271 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack); 1272 con->out_temp_ack = cpu_to_le64(con->in_seq_acked); 1273 con_out_kvec_add(con, sizeof (con->out_temp_ack), 1274 &con->out_temp_ack); 1275 } 1276 1277 BUG_ON(list_empty(&con->out_queue)); 1278 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head); 1279 con->out_msg = m; 1280 BUG_ON(m->con != con); 1281 1282 /* put message on sent list */ 1283 ceph_msg_get(m); 1284 list_move_tail(&m->list_head, &con->out_sent); 1285 1286 /* 1287 * only assign outgoing seq # if we haven't sent this message 1288 * yet. if it is requeued, resend with it's original seq. 
1289 */ 1290 if (m->needs_out_seq) { 1291 m->hdr.seq = cpu_to_le64(++con->out_seq); 1292 m->needs_out_seq = false; 1293 1294 if (con->ops->reencode_message) 1295 con->ops->reencode_message(m); 1296 } 1297 1298 dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n", 1299 m, con->out_seq, le16_to_cpu(m->hdr.type), 1300 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len), 1301 m->data_length); 1302 WARN_ON(m->front.iov_len != le32_to_cpu(m->hdr.front_len)); 1303 WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len)); 1304 1305 /* tag + hdr + front + middle */ 1306 con_out_kvec_add(con, sizeof (tag_msg), &tag_msg); 1307 con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr); 1308 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base); 1309 1310 if (m->middle) 1311 con_out_kvec_add(con, m->middle->vec.iov_len, 1312 m->middle->vec.iov_base); 1313 1314 /* fill in hdr crc and finalize hdr */ 1315 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc)); 1316 con->out_msg->hdr.crc = cpu_to_le32(crc); 1317 memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr)); 1318 1319 /* fill in front and middle crc, footer */ 1320 crc = crc32c(0, m->front.iov_base, m->front.iov_len); 1321 con->out_msg->footer.front_crc = cpu_to_le32(crc); 1322 if (m->middle) { 1323 crc = crc32c(0, m->middle->vec.iov_base, 1324 m->middle->vec.iov_len); 1325 con->out_msg->footer.middle_crc = cpu_to_le32(crc); 1326 } else 1327 con->out_msg->footer.middle_crc = 0; 1328 dout("%s front_crc %u middle_crc %u\n", __func__, 1329 le32_to_cpu(con->out_msg->footer.front_crc), 1330 le32_to_cpu(con->out_msg->footer.middle_crc)); 1331 con->out_msg->footer.flags = 0; 1332 1333 /* is there a data payload? */ 1334 con->out_msg->footer.data_crc = 0; 1335 if (m->data_length) { 1336 prepare_message_data(con->out_msg, m->data_length); 1337 con->out_more = 1; /* data + footer will follow */ 1338 } else { 1339 /* no, queue up footer too and be done */ 1340 prepare_write_message_footer(con); 1341 } 1342 1343 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1344 } 1345 1346 /* 1347 * Prepare an ack. 1348 */ 1349 static void prepare_write_ack(struct ceph_connection *con) 1350 { 1351 dout("prepare_write_ack %p %llu -> %llu\n", con, 1352 con->in_seq_acked, con->in_seq); 1353 con->in_seq_acked = con->in_seq; 1354 1355 con_out_kvec_reset(con); 1356 1357 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack); 1358 1359 con->out_temp_ack = cpu_to_le64(con->in_seq_acked); 1360 con_out_kvec_add(con, sizeof (con->out_temp_ack), 1361 &con->out_temp_ack); 1362 1363 con->out_more = 1; /* more will follow.. eventually.. */ 1364 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1365 } 1366 1367 /* 1368 * Prepare to share the seq during handshake 1369 */ 1370 static void prepare_write_seq(struct ceph_connection *con) 1371 { 1372 dout("prepare_write_seq %p %llu -> %llu\n", con, 1373 con->in_seq_acked, con->in_seq); 1374 con->in_seq_acked = con->in_seq; 1375 1376 con_out_kvec_reset(con); 1377 1378 con->out_temp_ack = cpu_to_le64(con->in_seq_acked); 1379 con_out_kvec_add(con, sizeof (con->out_temp_ack), 1380 &con->out_temp_ack); 1381 1382 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1383 } 1384 1385 /* 1386 * Prepare to write keepalive byte. 
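 *
 * If the peer supports MSGR_KEEPALIVE2, the current wall-clock time
 * is sent along with the tag and the peer acknowledges it; older
 * peers just get the bare KEEPALIVE tag byte.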
1387 */ 1388 static void prepare_write_keepalive(struct ceph_connection *con) 1389 { 1390 dout("prepare_write_keepalive %p\n", con); 1391 con_out_kvec_reset(con); 1392 if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) { 1393 struct timespec64 now; 1394 1395 ktime_get_real_ts64(&now); 1396 con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2); 1397 ceph_encode_timespec64(&con->out_temp_keepalive2, &now); 1398 con_out_kvec_add(con, sizeof(con->out_temp_keepalive2), 1399 &con->out_temp_keepalive2); 1400 } else { 1401 con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive); 1402 } 1403 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1404 } 1405 1406 /* 1407 * Connection negotiation. 1408 */ 1409 1410 static int get_connect_authorizer(struct ceph_connection *con) 1411 { 1412 struct ceph_auth_handshake *auth; 1413 int auth_proto; 1414 1415 if (!con->ops->get_authorizer) { 1416 con->auth = NULL; 1417 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN; 1418 con->out_connect.authorizer_len = 0; 1419 return 0; 1420 } 1421 1422 auth = con->ops->get_authorizer(con, &auth_proto, con->auth_retry); 1423 if (IS_ERR(auth)) 1424 return PTR_ERR(auth); 1425 1426 con->auth = auth; 1427 con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto); 1428 con->out_connect.authorizer_len = cpu_to_le32(auth->authorizer_buf_len); 1429 return 0; 1430 } 1431 1432 /* 1433 * We connected to a peer and are saying hello. 1434 */ 1435 static void prepare_write_banner(struct ceph_connection *con) 1436 { 1437 con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER); 1438 con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr), 1439 &con->msgr->my_enc_addr); 1440 1441 con->out_more = 0; 1442 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1443 } 1444 1445 static void __prepare_write_connect(struct ceph_connection *con) 1446 { 1447 con_out_kvec_add(con, sizeof(con->out_connect), &con->out_connect); 1448 if (con->auth) 1449 con_out_kvec_add(con, con->auth->authorizer_buf_len, 1450 con->auth->authorizer_buf); 1451 1452 con->out_more = 0; 1453 con_flag_set(con, CON_FLAG_WRITE_PENDING); 1454 } 1455 1456 static int prepare_write_connect(struct ceph_connection *con) 1457 { 1458 unsigned int global_seq = get_global_seq(con->msgr, 0); 1459 int proto; 1460 int ret; 1461 1462 switch (con->peer_name.type) { 1463 case CEPH_ENTITY_TYPE_MON: 1464 proto = CEPH_MONC_PROTOCOL; 1465 break; 1466 case CEPH_ENTITY_TYPE_OSD: 1467 proto = CEPH_OSDC_PROTOCOL; 1468 break; 1469 case CEPH_ENTITY_TYPE_MDS: 1470 proto = CEPH_MDSC_PROTOCOL; 1471 break; 1472 default: 1473 BUG(); 1474 } 1475 1476 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, 1477 con->connect_seq, global_seq, proto); 1478 1479 con->out_connect.features = 1480 cpu_to_le64(from_msgr(con->msgr)->supported_features); 1481 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); 1482 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); 1483 con->out_connect.global_seq = cpu_to_le32(global_seq); 1484 con->out_connect.protocol_version = cpu_to_le32(proto); 1485 con->out_connect.flags = 0; 1486 1487 ret = get_connect_authorizer(con); 1488 if (ret) 1489 return ret; 1490 1491 __prepare_write_connect(con); 1492 return 0; 1493 } 1494 1495 /* 1496 * write as much of pending kvecs to the socket as we can. 
1497 * 1 -> done 1498 * 0 -> socket full, but more to do 1499 * <0 -> error 1500 */ 1501 static int write_partial_kvec(struct ceph_connection *con) 1502 { 1503 int ret; 1504 1505 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes); 1506 while (con->out_kvec_bytes > 0) { 1507 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur, 1508 con->out_kvec_left, con->out_kvec_bytes, 1509 con->out_more); 1510 if (ret <= 0) 1511 goto out; 1512 con->out_kvec_bytes -= ret; 1513 if (con->out_kvec_bytes == 0) 1514 break; /* done */ 1515 1516 /* account for full iov entries consumed */ 1517 while (ret >= con->out_kvec_cur->iov_len) { 1518 BUG_ON(!con->out_kvec_left); 1519 ret -= con->out_kvec_cur->iov_len; 1520 con->out_kvec_cur++; 1521 con->out_kvec_left--; 1522 } 1523 /* and for a partially-consumed entry */ 1524 if (ret) { 1525 con->out_kvec_cur->iov_len -= ret; 1526 con->out_kvec_cur->iov_base += ret; 1527 } 1528 } 1529 con->out_kvec_left = 0; 1530 ret = 1; 1531 out: 1532 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con, 1533 con->out_kvec_bytes, con->out_kvec_left, ret); 1534 return ret; /* done! */ 1535 } 1536 1537 static u32 ceph_crc32c_page(u32 crc, struct page *page, 1538 unsigned int page_offset, 1539 unsigned int length) 1540 { 1541 char *kaddr; 1542 1543 kaddr = kmap(page); 1544 BUG_ON(kaddr == NULL); 1545 crc = crc32c(crc, kaddr + page_offset, length); 1546 kunmap(page); 1547 1548 return crc; 1549 } 1550 /* 1551 * Write as much message data payload as we can. If we finish, queue 1552 * up the footer. 1553 * 1 -> done, footer is now queued in out_kvec[]. 1554 * 0 -> socket full, but more to do 1555 * <0 -> error 1556 */ 1557 static int write_partial_message_data(struct ceph_connection *con) 1558 { 1559 struct ceph_msg *msg = con->out_msg; 1560 struct ceph_msg_data_cursor *cursor = &msg->cursor; 1561 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC); 1562 int more = MSG_MORE | MSG_SENDPAGE_NOTLAST; 1563 u32 crc; 1564 1565 dout("%s %p msg %p\n", __func__, con, msg); 1566 1567 if (!msg->num_data_items) 1568 return -EINVAL; 1569 1570 /* 1571 * Iterate through each page that contains data to be 1572 * written, and send as much as possible for each. 1573 * 1574 * If we are calculating the data crc (the default), we will 1575 * need to map the page. If we have no pages, they have 1576 * been revoked, so use the zero page. 1577 */ 1578 crc = do_datacrc ? 
le32_to_cpu(msg->footer.data_crc) : 0; 1579 while (cursor->total_resid) { 1580 struct page *page; 1581 size_t page_offset; 1582 size_t length; 1583 int ret; 1584 1585 if (!cursor->resid) { 1586 ceph_msg_data_advance(cursor, 0); 1587 continue; 1588 } 1589 1590 page = ceph_msg_data_next(cursor, &page_offset, &length, NULL); 1591 if (length == cursor->total_resid) 1592 more = MSG_MORE; 1593 ret = ceph_tcp_sendpage(con->sock, page, page_offset, length, 1594 more); 1595 if (ret <= 0) { 1596 if (do_datacrc) 1597 msg->footer.data_crc = cpu_to_le32(crc); 1598 1599 return ret; 1600 } 1601 if (do_datacrc && cursor->need_crc) 1602 crc = ceph_crc32c_page(crc, page, page_offset, length); 1603 ceph_msg_data_advance(cursor, (size_t)ret); 1604 } 1605 1606 dout("%s %p msg %p done\n", __func__, con, msg); 1607 1608 /* prepare and queue up footer, too */ 1609 if (do_datacrc) 1610 msg->footer.data_crc = cpu_to_le32(crc); 1611 else 1612 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC; 1613 con_out_kvec_reset(con); 1614 prepare_write_message_footer(con); 1615 1616 return 1; /* must return > 0 to indicate success */ 1617 } 1618 1619 /* 1620 * write some zeros 1621 */ 1622 static int write_partial_skip(struct ceph_connection *con) 1623 { 1624 int more = MSG_MORE | MSG_SENDPAGE_NOTLAST; 1625 int ret; 1626 1627 dout("%s %p %d left\n", __func__, con, con->out_skip); 1628 while (con->out_skip > 0) { 1629 size_t size = min(con->out_skip, (int) PAGE_SIZE); 1630 1631 if (size == con->out_skip) 1632 more = MSG_MORE; 1633 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, more); 1634 if (ret <= 0) 1635 goto out; 1636 con->out_skip -= ret; 1637 } 1638 ret = 1; 1639 out: 1640 return ret; 1641 } 1642 1643 /* 1644 * Prepare to read connection handshake, or an ack. 1645 */ 1646 static void prepare_read_banner(struct ceph_connection *con) 1647 { 1648 dout("prepare_read_banner %p\n", con); 1649 con->in_base_pos = 0; 1650 } 1651 1652 static void prepare_read_connect(struct ceph_connection *con) 1653 { 1654 dout("prepare_read_connect %p\n", con); 1655 con->in_base_pos = 0; 1656 } 1657 1658 static void prepare_read_ack(struct ceph_connection *con) 1659 { 1660 dout("prepare_read_ack %p\n", con); 1661 con->in_base_pos = 0; 1662 } 1663 1664 static void prepare_read_seq(struct ceph_connection *con) 1665 { 1666 dout("prepare_read_seq %p\n", con); 1667 con->in_base_pos = 0; 1668 con->in_tag = CEPH_MSGR_TAG_SEQ; 1669 } 1670 1671 static void prepare_read_tag(struct ceph_connection *con) 1672 { 1673 dout("prepare_read_tag %p\n", con); 1674 con->in_base_pos = 0; 1675 con->in_tag = CEPH_MSGR_TAG_READY; 1676 } 1677 1678 static void prepare_read_keepalive_ack(struct ceph_connection *con) 1679 { 1680 dout("prepare_read_keepalive_ack %p\n", con); 1681 con->in_base_pos = 0; 1682 } 1683 1684 /* 1685 * Prepare to read a message. 
1686 */ 1687 static int prepare_read_message(struct ceph_connection *con) 1688 { 1689 dout("prepare_read_message %p\n", con); 1690 BUG_ON(con->in_msg != NULL); 1691 con->in_base_pos = 0; 1692 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0; 1693 return 0; 1694 } 1695 1696 1697 static int read_partial(struct ceph_connection *con, 1698 int end, int size, void *object) 1699 { 1700 while (con->in_base_pos < end) { 1701 int left = end - con->in_base_pos; 1702 int have = size - left; 1703 int ret = ceph_tcp_recvmsg(con->sock, object + have, left); 1704 if (ret <= 0) 1705 return ret; 1706 con->in_base_pos += ret; 1707 } 1708 return 1; 1709 } 1710 1711 1712 /* 1713 * Read all or part of the connect-side handshake on a new connection 1714 */ 1715 static int read_partial_banner(struct ceph_connection *con) 1716 { 1717 int size; 1718 int end; 1719 int ret; 1720 1721 dout("read_partial_banner %p at %d\n", con, con->in_base_pos); 1722 1723 /* peer's banner */ 1724 size = strlen(CEPH_BANNER); 1725 end = size; 1726 ret = read_partial(con, end, size, con->in_banner); 1727 if (ret <= 0) 1728 goto out; 1729 1730 size = sizeof (con->actual_peer_addr); 1731 end += size; 1732 ret = read_partial(con, end, size, &con->actual_peer_addr); 1733 if (ret <= 0) 1734 goto out; 1735 1736 size = sizeof (con->peer_addr_for_me); 1737 end += size; 1738 ret = read_partial(con, end, size, &con->peer_addr_for_me); 1739 if (ret <= 0) 1740 goto out; 1741 1742 out: 1743 return ret; 1744 } 1745 1746 static int read_partial_connect(struct ceph_connection *con) 1747 { 1748 int size; 1749 int end; 1750 int ret; 1751 1752 dout("read_partial_connect %p at %d\n", con, con->in_base_pos); 1753 1754 size = sizeof (con->in_reply); 1755 end = size; 1756 ret = read_partial(con, end, size, &con->in_reply); 1757 if (ret <= 0) 1758 goto out; 1759 1760 if (con->auth) { 1761 size = le32_to_cpu(con->in_reply.authorizer_len); 1762 if (size > con->auth->authorizer_reply_buf_len) { 1763 pr_err("authorizer reply too big: %d > %zu\n", size, 1764 con->auth->authorizer_reply_buf_len); 1765 ret = -EINVAL; 1766 goto out; 1767 } 1768 1769 end += size; 1770 ret = read_partial(con, end, size, 1771 con->auth->authorizer_reply_buf); 1772 if (ret <= 0) 1773 goto out; 1774 } 1775 1776 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n", 1777 con, (int)con->in_reply.tag, 1778 le32_to_cpu(con->in_reply.connect_seq), 1779 le32_to_cpu(con->in_reply.global_seq)); 1780 out: 1781 return ret; 1782 } 1783 1784 /* 1785 * Verify the hello banner looks okay. 
1786 */ 1787 static int verify_hello(struct ceph_connection *con) 1788 { 1789 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) { 1790 pr_err("connect to %s got bad banner\n", 1791 ceph_pr_addr(&con->peer_addr.in_addr)); 1792 con->error_msg = "protocol error, bad banner"; 1793 return -1; 1794 } 1795 return 0; 1796 } 1797 1798 static bool addr_is_blank(struct sockaddr_storage *ss) 1799 { 1800 struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr; 1801 struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr; 1802 1803 switch (ss->ss_family) { 1804 case AF_INET: 1805 return addr->s_addr == htonl(INADDR_ANY); 1806 case AF_INET6: 1807 return ipv6_addr_any(addr6); 1808 default: 1809 return true; 1810 } 1811 } 1812 1813 static int addr_port(struct sockaddr_storage *ss) 1814 { 1815 switch (ss->ss_family) { 1816 case AF_INET: 1817 return ntohs(((struct sockaddr_in *)ss)->sin_port); 1818 case AF_INET6: 1819 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port); 1820 } 1821 return 0; 1822 } 1823 1824 static void addr_set_port(struct sockaddr_storage *ss, int p) 1825 { 1826 switch (ss->ss_family) { 1827 case AF_INET: 1828 ((struct sockaddr_in *)ss)->sin_port = htons(p); 1829 break; 1830 case AF_INET6: 1831 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p); 1832 break; 1833 } 1834 } 1835 1836 /* 1837 * Unlike other *_pton function semantics, zero indicates success. 1838 */ 1839 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss, 1840 char delim, const char **ipend) 1841 { 1842 struct sockaddr_in *in4 = (struct sockaddr_in *) ss; 1843 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss; 1844 1845 memset(ss, 0, sizeof(*ss)); 1846 1847 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) { 1848 ss->ss_family = AF_INET; 1849 return 0; 1850 } 1851 1852 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) { 1853 ss->ss_family = AF_INET6; 1854 return 0; 1855 } 1856 1857 return -EINVAL; 1858 } 1859 1860 /* 1861 * Extract hostname string and resolve using kernel DNS facility. 1862 */ 1863 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER 1864 static int ceph_dns_resolve_name(const char *name, size_t namelen, 1865 struct sockaddr_storage *ss, char delim, const char **ipend) 1866 { 1867 const char *end, *delim_p; 1868 char *colon_p, *ip_addr = NULL; 1869 int ip_len, ret; 1870 1871 /* 1872 * The end of the hostname occurs immediately preceding the delimiter or 1873 * the port marker (':') where the delimiter takes precedence. 1874 */ 1875 delim_p = memchr(name, delim, namelen); 1876 colon_p = memchr(name, ':', namelen); 1877 1878 if (delim_p && colon_p) 1879 end = delim_p < colon_p ? delim_p : colon_p; 1880 else if (!delim_p && colon_p) 1881 end = colon_p; 1882 else { 1883 end = delim_p; 1884 if (!end) /* case: hostname:/ */ 1885 end = name + namelen; 1886 } 1887 1888 if (end <= name) 1889 return -EINVAL; 1890 1891 /* do dns_resolve upcall */ 1892 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL); 1893 if (ip_len > 0) 1894 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL); 1895 else 1896 ret = -ESRCH; 1897 1898 kfree(ip_addr); 1899 1900 *ipend = end; 1901 1902 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name, 1903 ret, ret ? 
"failed" : ceph_pr_addr(ss)); 1904 1905 return ret; 1906 } 1907 #else 1908 static inline int ceph_dns_resolve_name(const char *name, size_t namelen, 1909 struct sockaddr_storage *ss, char delim, const char **ipend) 1910 { 1911 return -EINVAL; 1912 } 1913 #endif 1914 1915 /* 1916 * Parse a server name (IP or hostname). If a valid IP address is not found 1917 * then try to extract a hostname to resolve using userspace DNS upcall. 1918 */ 1919 static int ceph_parse_server_name(const char *name, size_t namelen, 1920 struct sockaddr_storage *ss, char delim, const char **ipend) 1921 { 1922 int ret; 1923 1924 ret = ceph_pton(name, namelen, ss, delim, ipend); 1925 if (ret) 1926 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend); 1927 1928 return ret; 1929 } 1930 1931 /* 1932 * Parse an ip[:port] list into an addr array. Use the default 1933 * monitor port if a port isn't specified. 1934 */ 1935 int ceph_parse_ips(const char *c, const char *end, 1936 struct ceph_entity_addr *addr, 1937 int max_count, int *count) 1938 { 1939 int i, ret = -EINVAL; 1940 const char *p = c; 1941 1942 dout("parse_ips on '%.*s'\n", (int)(end-c), c); 1943 for (i = 0; i < max_count; i++) { 1944 const char *ipend; 1945 struct sockaddr_storage *ss = &addr[i].in_addr; 1946 int port; 1947 char delim = ','; 1948 1949 if (*p == '[') { 1950 delim = ']'; 1951 p++; 1952 } 1953 1954 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend); 1955 if (ret) 1956 goto bad; 1957 ret = -EINVAL; 1958 1959 p = ipend; 1960 1961 if (delim == ']') { 1962 if (*p != ']') { 1963 dout("missing matching ']'\n"); 1964 goto bad; 1965 } 1966 p++; 1967 } 1968 1969 /* port? */ 1970 if (p < end && *p == ':') { 1971 port = 0; 1972 p++; 1973 while (p < end && *p >= '0' && *p <= '9') { 1974 port = (port * 10) + (*p - '0'); 1975 p++; 1976 } 1977 if (port == 0) 1978 port = CEPH_MON_PORT; 1979 else if (port > 65535) 1980 goto bad; 1981 } else { 1982 port = CEPH_MON_PORT; 1983 } 1984 1985 addr_set_port(ss, port); 1986 1987 dout("parse_ips got %s\n", ceph_pr_addr(ss)); 1988 1989 if (p == end) 1990 break; 1991 if (*p != ',') 1992 goto bad; 1993 p++; 1994 } 1995 1996 if (p != end) 1997 goto bad; 1998 1999 if (count) 2000 *count = i + 1; 2001 return 0; 2002 2003 bad: 2004 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); 2005 return ret; 2006 } 2007 EXPORT_SYMBOL(ceph_parse_ips); 2008 2009 static int process_banner(struct ceph_connection *con) 2010 { 2011 dout("process_banner on %p\n", con); 2012 2013 if (verify_hello(con) < 0) 2014 return -1; 2015 2016 ceph_decode_addr(&con->actual_peer_addr); 2017 ceph_decode_addr(&con->peer_addr_for_me); 2018 2019 /* 2020 * Make sure the other end is who we wanted. note that the other 2021 * end may not yet know their ip address, so if it's 0.0.0.0, give 2022 * them the benefit of the doubt. 2023 */ 2024 if (memcmp(&con->peer_addr, &con->actual_peer_addr, 2025 sizeof(con->peer_addr)) != 0 && 2026 !(addr_is_blank(&con->actual_peer_addr.in_addr) && 2027 con->actual_peer_addr.nonce == con->peer_addr.nonce)) { 2028 pr_warn("wrong peer, want %s/%d, got %s/%d\n", 2029 ceph_pr_addr(&con->peer_addr.in_addr), 2030 (int)le32_to_cpu(con->peer_addr.nonce), 2031 ceph_pr_addr(&con->actual_peer_addr.in_addr), 2032 (int)le32_to_cpu(con->actual_peer_addr.nonce)); 2033 con->error_msg = "wrong peer at address"; 2034 return -1; 2035 } 2036 2037 /* 2038 * did we learn our address? 
2039 */ 2040 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) { 2041 int port = addr_port(&con->msgr->inst.addr.in_addr); 2042 2043 memcpy(&con->msgr->inst.addr.in_addr, 2044 &con->peer_addr_for_me.in_addr, 2045 sizeof(con->peer_addr_for_me.in_addr)); 2046 addr_set_port(&con->msgr->inst.addr.in_addr, port); 2047 encode_my_addr(con->msgr); 2048 dout("process_banner learned my addr is %s\n", 2049 ceph_pr_addr(&con->msgr->inst.addr.in_addr)); 2050 } 2051 2052 return 0; 2053 } 2054 2055 static int process_connect(struct ceph_connection *con) 2056 { 2057 u64 sup_feat = from_msgr(con->msgr)->supported_features; 2058 u64 req_feat = from_msgr(con->msgr)->required_features; 2059 u64 server_feat = le64_to_cpu(con->in_reply.features); 2060 int ret; 2061 2062 dout("process_connect on %p tag %d\n", con, (int)con->in_tag); 2063 2064 if (con->auth) { 2065 int len = le32_to_cpu(con->in_reply.authorizer_len); 2066 2067 /* 2068 * Any connection that defines ->get_authorizer() 2069 * should also define ->add_authorizer_challenge() and 2070 * ->verify_authorizer_reply(). 2071 * 2072 * See get_connect_authorizer(). 2073 */ 2074 if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) { 2075 ret = con->ops->add_authorizer_challenge( 2076 con, con->auth->authorizer_reply_buf, len); 2077 if (ret < 0) 2078 return ret; 2079 2080 con_out_kvec_reset(con); 2081 __prepare_write_connect(con); 2082 prepare_read_connect(con); 2083 return 0; 2084 } 2085 2086 if (len) { 2087 ret = con->ops->verify_authorizer_reply(con); 2088 if (ret < 0) { 2089 con->error_msg = "bad authorize reply"; 2090 return ret; 2091 } 2092 } 2093 } 2094 2095 switch (con->in_reply.tag) { 2096 case CEPH_MSGR_TAG_FEATURES: 2097 pr_err("%s%lld %s feature set mismatch," 2098 " my %llx < server's %llx, missing %llx\n", 2099 ENTITY_NAME(con->peer_name), 2100 ceph_pr_addr(&con->peer_addr.in_addr), 2101 sup_feat, server_feat, server_feat & ~sup_feat); 2102 con->error_msg = "missing required protocol features"; 2103 reset_connection(con); 2104 return -1; 2105 2106 case CEPH_MSGR_TAG_BADPROTOVER: 2107 pr_err("%s%lld %s protocol version mismatch," 2108 " my %d != server's %d\n", 2109 ENTITY_NAME(con->peer_name), 2110 ceph_pr_addr(&con->peer_addr.in_addr), 2111 le32_to_cpu(con->out_connect.protocol_version), 2112 le32_to_cpu(con->in_reply.protocol_version)); 2113 con->error_msg = "protocol version mismatch"; 2114 reset_connection(con); 2115 return -1; 2116 2117 case CEPH_MSGR_TAG_BADAUTHORIZER: 2118 con->auth_retry++; 2119 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con, 2120 con->auth_retry); 2121 if (con->auth_retry == 2) { 2122 con->error_msg = "connect authorization failure"; 2123 return -1; 2124 } 2125 con_out_kvec_reset(con); 2126 ret = prepare_write_connect(con); 2127 if (ret < 0) 2128 return ret; 2129 prepare_read_connect(con); 2130 break; 2131 2132 case CEPH_MSGR_TAG_RESETSESSION: 2133 /* 2134 * If we connected with a large connect_seq but the peer 2135 * has no record of a session with us (no connection, or 2136 * connect_seq == 0), they will send RESETSESSION to indicate 2137 * that they must have reset their session, and may have 2138 * dropped messages.
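 *
 * The handler below drops our local session state (reset_connection()),
 * re-sends our connect message, and then, with con->mutex dropped, calls
 * the upper layer's ->peer_reset() hook so it can resubmit or discard
 * whatever it had in flight.  A minimal, hypothetical hook, just to show
 * the shape of the callback (the client structure and helper are made up):
 *
 *	static void my_peer_reset(struct ceph_connection *con)
 *	{
 *		struct my_client *client = con->private;
 *
 *		my_client_requeue_outstanding(client);
 *	}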
2139 */ 2140 dout("process_connect got RESET peer seq %u\n", 2141 le32_to_cpu(con->in_reply.connect_seq)); 2142 pr_err("%s%lld %s connection reset\n", 2143 ENTITY_NAME(con->peer_name), 2144 ceph_pr_addr(&con->peer_addr.in_addr)); 2145 reset_connection(con); 2146 con_out_kvec_reset(con); 2147 ret = prepare_write_connect(con); 2148 if (ret < 0) 2149 return ret; 2150 prepare_read_connect(con); 2151 2152 /* Tell ceph about it. */ 2153 mutex_unlock(&con->mutex); 2154 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name)); 2155 if (con->ops->peer_reset) 2156 con->ops->peer_reset(con); 2157 mutex_lock(&con->mutex); 2158 if (con->state != CON_STATE_NEGOTIATING) 2159 return -EAGAIN; 2160 break; 2161 2162 case CEPH_MSGR_TAG_RETRY_SESSION: 2163 /* 2164 * If we sent a smaller connect_seq than the peer has, try 2165 * again with a larger value. 2166 */ 2167 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n", 2168 le32_to_cpu(con->out_connect.connect_seq), 2169 le32_to_cpu(con->in_reply.connect_seq)); 2170 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); 2171 con_out_kvec_reset(con); 2172 ret = prepare_write_connect(con); 2173 if (ret < 0) 2174 return ret; 2175 prepare_read_connect(con); 2176 break; 2177 2178 case CEPH_MSGR_TAG_RETRY_GLOBAL: 2179 /* 2180 * If we sent a smaller global_seq than the peer has, try 2181 * again with a larger value. 2182 */ 2183 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n", 2184 con->peer_global_seq, 2185 le32_to_cpu(con->in_reply.global_seq)); 2186 get_global_seq(con->msgr, 2187 le32_to_cpu(con->in_reply.global_seq)); 2188 con_out_kvec_reset(con); 2189 ret = prepare_write_connect(con); 2190 if (ret < 0) 2191 return ret; 2192 prepare_read_connect(con); 2193 break; 2194 2195 case CEPH_MSGR_TAG_SEQ: 2196 case CEPH_MSGR_TAG_READY: 2197 if (req_feat & ~server_feat) { 2198 pr_err("%s%lld %s protocol feature mismatch," 2199 " my required %llx > server's %llx, need %llx\n", 2200 ENTITY_NAME(con->peer_name), 2201 ceph_pr_addr(&con->peer_addr.in_addr), 2202 req_feat, server_feat, req_feat & ~server_feat); 2203 con->error_msg = "missing required protocol features"; 2204 reset_connection(con); 2205 return -1; 2206 } 2207 2208 WARN_ON(con->state != CON_STATE_NEGOTIATING); 2209 con->state = CON_STATE_OPEN; 2210 con->auth_retry = 0; /* we authenticated; clear flag */ 2211 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); 2212 con->connect_seq++; 2213 con->peer_features = server_feat; 2214 dout("process_connect got READY gseq %d cseq %d (%d)\n", 2215 con->peer_global_seq, 2216 le32_to_cpu(con->in_reply.connect_seq), 2217 con->connect_seq); 2218 WARN_ON(con->connect_seq != 2219 le32_to_cpu(con->in_reply.connect_seq)); 2220 2221 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) 2222 con_flag_set(con, CON_FLAG_LOSSYTX); 2223 2224 con->delay = 0; /* reset backoff memory */ 2225 2226 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) { 2227 prepare_write_seq(con); 2228 prepare_read_seq(con); 2229 } else { 2230 prepare_read_tag(con); 2231 } 2232 break; 2233 2234 case CEPH_MSGR_TAG_WAIT: 2235 /* 2236 * If there is a connection race (we are opening 2237 * connections to each other), one of us may just have 2238 * to WAIT. This shouldn't happen if we are the 2239 * client. 
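 *
 * (In that race each side has dialed the other; the peer resolves it by
 * keeping one of the two connections and telling the other end to WAIT
 * for it.  The kernel messenger only ever acts as the connecting client,
 * so it should never be the one asked to WAIT, and the tag is treated as
 * a protocol error below.)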
2240 */ 2241 con->error_msg = "protocol error, got WAIT as client"; 2242 return -1; 2243 2244 default: 2245 con->error_msg = "protocol error, garbage tag during connect"; 2246 return -1; 2247 } 2248 return 0; 2249 } 2250 2251 2252 /* 2253 * read (part of) an ack 2254 */ 2255 static int read_partial_ack(struct ceph_connection *con) 2256 { 2257 int size = sizeof (con->in_temp_ack); 2258 int end = size; 2259 2260 return read_partial(con, end, size, &con->in_temp_ack); 2261 } 2262 2263 /* 2264 * We can finally discard anything that's been acked. 2265 */ 2266 static void process_ack(struct ceph_connection *con) 2267 { 2268 struct ceph_msg *m; 2269 u64 ack = le64_to_cpu(con->in_temp_ack); 2270 u64 seq; 2271 bool reconnect = (con->in_tag == CEPH_MSGR_TAG_SEQ); 2272 struct list_head *list = reconnect ? &con->out_queue : &con->out_sent; 2273 2274 /* 2275 * In the reconnect case, con_fault() has requeued messages 2276 * in out_sent. We should cleanup old messages according to 2277 * the reconnect seq. 2278 */ 2279 while (!list_empty(list)) { 2280 m = list_first_entry(list, struct ceph_msg, list_head); 2281 if (reconnect && m->needs_out_seq) 2282 break; 2283 seq = le64_to_cpu(m->hdr.seq); 2284 if (seq > ack) 2285 break; 2286 dout("got ack for seq %llu type %d at %p\n", seq, 2287 le16_to_cpu(m->hdr.type), m); 2288 m->ack_stamp = jiffies; 2289 ceph_msg_remove(m); 2290 } 2291 2292 prepare_read_tag(con); 2293 } 2294 2295 2296 static int read_partial_message_section(struct ceph_connection *con, 2297 struct kvec *section, 2298 unsigned int sec_len, u32 *crc) 2299 { 2300 int ret, left; 2301 2302 BUG_ON(!section); 2303 2304 while (section->iov_len < sec_len) { 2305 BUG_ON(section->iov_base == NULL); 2306 left = sec_len - section->iov_len; 2307 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base + 2308 section->iov_len, left); 2309 if (ret <= 0) 2310 return ret; 2311 section->iov_len += ret; 2312 } 2313 if (section->iov_len == sec_len) 2314 *crc = crc32c(0, section->iov_base, section->iov_len); 2315 2316 return 1; 2317 } 2318 2319 static int read_partial_msg_data(struct ceph_connection *con) 2320 { 2321 struct ceph_msg *msg = con->in_msg; 2322 struct ceph_msg_data_cursor *cursor = &msg->cursor; 2323 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC); 2324 struct page *page; 2325 size_t page_offset; 2326 size_t length; 2327 u32 crc = 0; 2328 int ret; 2329 2330 if (!msg->num_data_items) 2331 return -EIO; 2332 2333 if (do_datacrc) 2334 crc = con->in_data_crc; 2335 while (cursor->total_resid) { 2336 if (!cursor->resid) { 2337 ceph_msg_data_advance(cursor, 0); 2338 continue; 2339 } 2340 2341 page = ceph_msg_data_next(cursor, &page_offset, &length, NULL); 2342 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); 2343 if (ret <= 0) { 2344 if (do_datacrc) 2345 con->in_data_crc = crc; 2346 2347 return ret; 2348 } 2349 2350 if (do_datacrc) 2351 crc = ceph_crc32c_page(crc, page, page_offset, ret); 2352 ceph_msg_data_advance(cursor, (size_t)ret); 2353 } 2354 if (do_datacrc) 2355 con->in_data_crc = crc; 2356 2357 return 1; /* must return > 0 to indicate success */ 2358 } 2359 2360 /* 2361 * read (part of) a message. 
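 *
 * On the wire a message is laid out as
 *
 *	ceph_msg_header   (fixed size; carries front/middle/data lengths,
 *	                   seq, type, and its own crc)
 *	front             (front_len bytes)
 *	middle            (middle_len bytes, optional)
 *	data              (data_len bytes, optional, read page by page)
 *	ceph_msg_footer   (front/middle/data crcs, flags, optional signature)
 *
 * read_partial_message() below consumes these pieces in order, and may be
 * re-entered after a short read, picking up wherever it left off.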
2362 */ 2363 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); 2364 2365 static int read_partial_message(struct ceph_connection *con) 2366 { 2367 struct ceph_msg *m = con->in_msg; 2368 int size; 2369 int end; 2370 int ret; 2371 unsigned int front_len, middle_len, data_len; 2372 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC); 2373 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH); 2374 u64 seq; 2375 u32 crc; 2376 2377 dout("read_partial_message con %p msg %p\n", con, m); 2378 2379 /* header */ 2380 size = sizeof (con->in_hdr); 2381 end = size; 2382 ret = read_partial(con, end, size, &con->in_hdr); 2383 if (ret <= 0) 2384 return ret; 2385 2386 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc)); 2387 if (cpu_to_le32(crc) != con->in_hdr.crc) { 2388 pr_err("read_partial_message bad hdr crc %u != expected %u\n", 2389 crc, con->in_hdr.crc); 2390 return -EBADMSG; 2391 } 2392 2393 front_len = le32_to_cpu(con->in_hdr.front_len); 2394 if (front_len > CEPH_MSG_MAX_FRONT_LEN) 2395 return -EIO; 2396 middle_len = le32_to_cpu(con->in_hdr.middle_len); 2397 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN) 2398 return -EIO; 2399 data_len = le32_to_cpu(con->in_hdr.data_len); 2400 if (data_len > CEPH_MSG_MAX_DATA_LEN) 2401 return -EIO; 2402 2403 /* verify seq# */ 2404 seq = le64_to_cpu(con->in_hdr.seq); 2405 if ((s64)seq - (s64)con->in_seq < 1) { 2406 pr_info("skipping %s%lld %s seq %lld expected %lld\n", 2407 ENTITY_NAME(con->peer_name), 2408 ceph_pr_addr(&con->peer_addr.in_addr), 2409 seq, con->in_seq + 1); 2410 con->in_base_pos = -front_len - middle_len - data_len - 2411 sizeof_footer(con); 2412 con->in_tag = CEPH_MSGR_TAG_READY; 2413 return 1; 2414 } else if ((s64)seq - (s64)con->in_seq > 1) { 2415 pr_err("read_partial_message bad seq %lld expected %lld\n", 2416 seq, con->in_seq + 1); 2417 con->error_msg = "bad message sequence # for incoming message"; 2418 return -EBADE; 2419 } 2420 2421 /* allocate message? 
*/ 2422 if (!con->in_msg) { 2423 int skip = 0; 2424 2425 dout("got hdr type %d front %d data %d\n", con->in_hdr.type, 2426 front_len, data_len); 2427 ret = ceph_con_in_msg_alloc(con, &skip); 2428 if (ret < 0) 2429 return ret; 2430 2431 BUG_ON(!con->in_msg ^ skip); 2432 if (skip) { 2433 /* skip this message */ 2434 dout("alloc_msg said skip message\n"); 2435 con->in_base_pos = -front_len - middle_len - data_len - 2436 sizeof_footer(con); 2437 con->in_tag = CEPH_MSGR_TAG_READY; 2438 con->in_seq++; 2439 return 1; 2440 } 2441 2442 BUG_ON(!con->in_msg); 2443 BUG_ON(con->in_msg->con != con); 2444 m = con->in_msg; 2445 m->front.iov_len = 0; /* haven't read it yet */ 2446 if (m->middle) 2447 m->middle->vec.iov_len = 0; 2448 2449 /* prepare for data payload, if any */ 2450 2451 if (data_len) 2452 prepare_message_data(con->in_msg, data_len); 2453 } 2454 2455 /* front */ 2456 ret = read_partial_message_section(con, &m->front, front_len, 2457 &con->in_front_crc); 2458 if (ret <= 0) 2459 return ret; 2460 2461 /* middle */ 2462 if (m->middle) { 2463 ret = read_partial_message_section(con, &m->middle->vec, 2464 middle_len, 2465 &con->in_middle_crc); 2466 if (ret <= 0) 2467 return ret; 2468 } 2469 2470 /* (page) data */ 2471 if (data_len) { 2472 ret = read_partial_msg_data(con); 2473 if (ret <= 0) 2474 return ret; 2475 } 2476 2477 /* footer */ 2478 size = sizeof_footer(con); 2479 end += size; 2480 ret = read_partial(con, end, size, &m->footer); 2481 if (ret <= 0) 2482 return ret; 2483 2484 if (!need_sign) { 2485 m->footer.flags = m->old_footer.flags; 2486 m->footer.sig = 0; 2487 } 2488 2489 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n", 2490 m, front_len, m->footer.front_crc, middle_len, 2491 m->footer.middle_crc, data_len, m->footer.data_crc); 2492 2493 /* crc ok? */ 2494 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) { 2495 pr_err("read_partial_message %p front crc %u != exp. %u\n", 2496 m, con->in_front_crc, m->footer.front_crc); 2497 return -EBADMSG; 2498 } 2499 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) { 2500 pr_err("read_partial_message %p middle crc %u != exp %u\n", 2501 m, con->in_middle_crc, m->footer.middle_crc); 2502 return -EBADMSG; 2503 } 2504 if (do_datacrc && 2505 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 && 2506 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) { 2507 pr_err("read_partial_message %p data crc %u != exp. %u\n", m, 2508 con->in_data_crc, le32_to_cpu(m->footer.data_crc)); 2509 return -EBADMSG; 2510 } 2511 2512 if (need_sign && con->ops->check_message_signature && 2513 con->ops->check_message_signature(m)) { 2514 pr_err("read_partial_message %p signature check failed\n", m); 2515 return -EBADMSG; 2516 } 2517 2518 return 1; /* done! */ 2519 } 2520 2521 /* 2522 * Process message. This happens in the worker thread. The callback should 2523 * be careful not to do anything that waits on other incoming messages or it 2524 * may deadlock. 
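 *
 * Dispatch runs from the connection's own work item, so no further reads
 * happen on this connection until the callback returns.  A dispatch hook
 * that needs to wait on another message should hand the work off instead.
 * A rough, hypothetical sketch (client structure and helpers are made up;
 * the callback owns the message reference handed to it):
 *
 *	static void my_dispatch(struct ceph_connection *con,
 *				struct ceph_msg *msg)
 *	{
 *		struct my_client *client = con->private;
 *
 *		my_client_stash_reply(client, msg);	// takes over the ref
 *		queue_work(client->wq, &client->work);	// handle it later
 *	}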
2525 */ 2526 static void process_message(struct ceph_connection *con) 2527 { 2528 struct ceph_msg *msg = con->in_msg; 2529 2530 BUG_ON(con->in_msg->con != con); 2531 con->in_msg = NULL; 2532 2533 /* if first message, set peer_name */ 2534 if (con->peer_name.type == 0) 2535 con->peer_name = msg->hdr.src; 2536 2537 con->in_seq++; 2538 mutex_unlock(&con->mutex); 2539 2540 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n", 2541 msg, le64_to_cpu(msg->hdr.seq), 2542 ENTITY_NAME(msg->hdr.src), 2543 le16_to_cpu(msg->hdr.type), 2544 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2545 le32_to_cpu(msg->hdr.front_len), 2546 le32_to_cpu(msg->hdr.data_len), 2547 con->in_front_crc, con->in_middle_crc, con->in_data_crc); 2548 con->ops->dispatch(con, msg); 2549 2550 mutex_lock(&con->mutex); 2551 } 2552 2553 static int read_keepalive_ack(struct ceph_connection *con) 2554 { 2555 struct ceph_timespec ceph_ts; 2556 size_t size = sizeof(ceph_ts); 2557 int ret = read_partial(con, size, size, &ceph_ts); 2558 if (ret <= 0) 2559 return ret; 2560 ceph_decode_timespec64(&con->last_keepalive_ack, &ceph_ts); 2561 prepare_read_tag(con); 2562 return 1; 2563 } 2564 2565 /* 2566 * Write something to the socket. Called in a worker thread when the 2567 * socket appears to be writeable and we have something ready to send. 2568 */ 2569 static int try_write(struct ceph_connection *con) 2570 { 2571 int ret = 1; 2572 2573 dout("try_write start %p state %lu\n", con, con->state); 2574 if (con->state != CON_STATE_PREOPEN && 2575 con->state != CON_STATE_CONNECTING && 2576 con->state != CON_STATE_NEGOTIATING && 2577 con->state != CON_STATE_OPEN) 2578 return 0; 2579 2580 /* open the socket first? */ 2581 if (con->state == CON_STATE_PREOPEN) { 2582 BUG_ON(con->sock); 2583 con->state = CON_STATE_CONNECTING; 2584 2585 con_out_kvec_reset(con); 2586 prepare_write_banner(con); 2587 prepare_read_banner(con); 2588 2589 BUG_ON(con->in_msg); 2590 con->in_tag = CEPH_MSGR_TAG_READY; 2591 dout("try_write initiating connect on %p new state %lu\n", 2592 con, con->state); 2593 ret = ceph_tcp_connect(con); 2594 if (ret < 0) { 2595 con->error_msg = "connect error"; 2596 goto out; 2597 } 2598 } 2599 2600 more: 2601 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); 2602 BUG_ON(!con->sock); 2603 2604 /* kvec data queued? */ 2605 if (con->out_kvec_left) { 2606 ret = write_partial_kvec(con); 2607 if (ret <= 0) 2608 goto out; 2609 } 2610 if (con->out_skip) { 2611 ret = write_partial_skip(con); 2612 if (ret <= 0) 2613 goto out; 2614 } 2615 2616 /* msg pages? */ 2617 if (con->out_msg) { 2618 if (con->out_msg_done) { 2619 ceph_msg_put(con->out_msg); 2620 con->out_msg = NULL; /* we're done with this one */ 2621 goto do_next; 2622 } 2623 2624 ret = write_partial_message_data(con); 2625 if (ret == 1) 2626 goto more; /* we need to send the footer, too! */ 2627 if (ret == 0) 2628 goto out; 2629 if (ret < 0) { 2630 dout("try_write write_partial_message_data err %d\n", 2631 ret); 2632 goto out; 2633 } 2634 } 2635 2636 do_next: 2637 if (con->state == CON_STATE_OPEN) { 2638 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) { 2639 prepare_write_keepalive(con); 2640 goto more; 2641 } 2642 /* is anything else pending? */ 2643 if (!list_empty(&con->out_queue)) { 2644 prepare_write_message(con); 2645 goto more; 2646 } 2647 if (con->in_seq > con->in_seq_acked) { 2648 prepare_write_ack(con); 2649 goto more; 2650 } 2651 } 2652 2653 /* Nothing to do! 
*/ 2654 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 2655 dout("try_write nothing else to write.\n"); 2656 ret = 0; 2657 out: 2658 dout("try_write done on %p ret %d\n", con, ret); 2659 return ret; 2660 } 2661 2662 /* 2663 * Read what we can from the socket. 2664 */ 2665 static int try_read(struct ceph_connection *con) 2666 { 2667 int ret = -1; 2668 2669 more: 2670 dout("try_read start on %p state %lu\n", con, con->state); 2671 if (con->state != CON_STATE_CONNECTING && 2672 con->state != CON_STATE_NEGOTIATING && 2673 con->state != CON_STATE_OPEN) 2674 return 0; 2675 2676 BUG_ON(!con->sock); 2677 2678 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, 2679 con->in_base_pos); 2680 2681 if (con->state == CON_STATE_CONNECTING) { 2682 dout("try_read connecting\n"); 2683 ret = read_partial_banner(con); 2684 if (ret <= 0) 2685 goto out; 2686 ret = process_banner(con); 2687 if (ret < 0) 2688 goto out; 2689 2690 con->state = CON_STATE_NEGOTIATING; 2691 2692 /* 2693 * Received banner is good, exchange connection info. 2694 * Do not reset out_kvec, as sending our banner raced 2695 * with receiving peer banner after connect completed. 2696 */ 2697 ret = prepare_write_connect(con); 2698 if (ret < 0) 2699 goto out; 2700 prepare_read_connect(con); 2701 2702 /* Send connection info before awaiting response */ 2703 goto out; 2704 } 2705 2706 if (con->state == CON_STATE_NEGOTIATING) { 2707 dout("try_read negotiating\n"); 2708 ret = read_partial_connect(con); 2709 if (ret <= 0) 2710 goto out; 2711 ret = process_connect(con); 2712 if (ret < 0) 2713 goto out; 2714 goto more; 2715 } 2716 2717 WARN_ON(con->state != CON_STATE_OPEN); 2718 2719 if (con->in_base_pos < 0) { 2720 /* 2721 * skipping + discarding content. 2722 */ 2723 ret = ceph_tcp_recvmsg(con->sock, NULL, -con->in_base_pos); 2724 if (ret <= 0) 2725 goto out; 2726 dout("skipped %d / %d bytes\n", ret, -con->in_base_pos); 2727 con->in_base_pos += ret; 2728 if (con->in_base_pos) 2729 goto more; 2730 } 2731 if (con->in_tag == CEPH_MSGR_TAG_READY) { 2732 /* 2733 * what's next? 
2734 */ 2735 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); 2736 if (ret <= 0) 2737 goto out; 2738 dout("try_read got tag %d\n", (int)con->in_tag); 2739 switch (con->in_tag) { 2740 case CEPH_MSGR_TAG_MSG: 2741 prepare_read_message(con); 2742 break; 2743 case CEPH_MSGR_TAG_ACK: 2744 prepare_read_ack(con); 2745 break; 2746 case CEPH_MSGR_TAG_KEEPALIVE2_ACK: 2747 prepare_read_keepalive_ack(con); 2748 break; 2749 case CEPH_MSGR_TAG_CLOSE: 2750 con_close_socket(con); 2751 con->state = CON_STATE_CLOSED; 2752 goto out; 2753 default: 2754 goto bad_tag; 2755 } 2756 } 2757 if (con->in_tag == CEPH_MSGR_TAG_MSG) { 2758 ret = read_partial_message(con); 2759 if (ret <= 0) { 2760 switch (ret) { 2761 case -EBADMSG: 2762 con->error_msg = "bad crc/signature"; 2763 /* fall through */ 2764 case -EBADE: 2765 ret = -EIO; 2766 break; 2767 case -EIO: 2768 con->error_msg = "io error"; 2769 break; 2770 } 2771 goto out; 2772 } 2773 if (con->in_tag == CEPH_MSGR_TAG_READY) 2774 goto more; 2775 process_message(con); 2776 if (con->state == CON_STATE_OPEN) 2777 prepare_read_tag(con); 2778 goto more; 2779 } 2780 if (con->in_tag == CEPH_MSGR_TAG_ACK || 2781 con->in_tag == CEPH_MSGR_TAG_SEQ) { 2782 /* 2783 * the final handshake seq exchange is semantically 2784 * equivalent to an ACK 2785 */ 2786 ret = read_partial_ack(con); 2787 if (ret <= 0) 2788 goto out; 2789 process_ack(con); 2790 goto more; 2791 } 2792 if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) { 2793 ret = read_keepalive_ack(con); 2794 if (ret <= 0) 2795 goto out; 2796 goto more; 2797 } 2798 2799 out: 2800 dout("try_read done on %p ret %d\n", con, ret); 2801 return ret; 2802 2803 bad_tag: 2804 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag); 2805 con->error_msg = "protocol error, garbage tag"; 2806 ret = -1; 2807 goto out; 2808 } 2809 2810 2811 /* 2812 * Atomically queue work on a connection after the specified delay. 2813 * Bump @con reference to avoid races with connection teardown. 2814 * Returns 0 if work was queued, or an error code otherwise. 
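 *
 * -ENOENT means we could not take a reference because the connection is
 * already being torn down; -EBUSY means work was already queued (and the
 * reference just taken is dropped again).  Callers that must not lose a
 * retry re-flag the connection instead, as con_backoff() below does:
 *
 *	ret = queue_con_delay(con, round_jiffies_relative(con->delay));
 *	if (ret) {
 *		BUG_ON(ret == -ENOENT);
 *		con_flag_set(con, CON_FLAG_BACKOFF);
 *	}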
2815 */ 2816 static int queue_con_delay(struct ceph_connection *con, unsigned long delay) 2817 { 2818 if (!con->ops->get(con)) { 2819 dout("%s %p ref count 0\n", __func__, con); 2820 return -ENOENT; 2821 } 2822 2823 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) { 2824 dout("%s %p - already queued\n", __func__, con); 2825 con->ops->put(con); 2826 return -EBUSY; 2827 } 2828 2829 dout("%s %p %lu\n", __func__, con, delay); 2830 return 0; 2831 } 2832 2833 static void queue_con(struct ceph_connection *con) 2834 { 2835 (void) queue_con_delay(con, 0); 2836 } 2837 2838 static void cancel_con(struct ceph_connection *con) 2839 { 2840 if (cancel_delayed_work(&con->work)) { 2841 dout("%s %p\n", __func__, con); 2842 con->ops->put(con); 2843 } 2844 } 2845 2846 static bool con_sock_closed(struct ceph_connection *con) 2847 { 2848 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED)) 2849 return false; 2850 2851 #define CASE(x) \ 2852 case CON_STATE_ ## x: \ 2853 con->error_msg = "socket closed (con state " #x ")"; \ 2854 break; 2855 2856 switch (con->state) { 2857 CASE(CLOSED); 2858 CASE(PREOPEN); 2859 CASE(CONNECTING); 2860 CASE(NEGOTIATING); 2861 CASE(OPEN); 2862 CASE(STANDBY); 2863 default: 2864 pr_warn("%s con %p unrecognized state %lu\n", 2865 __func__, con, con->state); 2866 con->error_msg = "unrecognized con state"; 2867 BUG(); 2868 break; 2869 } 2870 #undef CASE 2871 2872 return true; 2873 } 2874 2875 static bool con_backoff(struct ceph_connection *con) 2876 { 2877 int ret; 2878 2879 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF)) 2880 return false; 2881 2882 ret = queue_con_delay(con, round_jiffies_relative(con->delay)); 2883 if (ret) { 2884 dout("%s: con %p FAILED to back off %lu\n", __func__, 2885 con, con->delay); 2886 BUG_ON(ret == -ENOENT); 2887 con_flag_set(con, CON_FLAG_BACKOFF); 2888 } 2889 2890 return true; 2891 } 2892 2893 /* Finish fault handling; con->mutex must *not* be held here */ 2894 2895 static void con_fault_finish(struct ceph_connection *con) 2896 { 2897 dout("%s %p\n", __func__, con); 2898 2899 /* 2900 * in case we faulted due to authentication, invalidate our 2901 * current tickets so that we can get new ones. 2902 */ 2903 if (con->auth_retry) { 2904 dout("auth_retry %d, invalidating\n", con->auth_retry); 2905 if (con->ops->invalidate_authorizer) 2906 con->ops->invalidate_authorizer(con); 2907 con->auth_retry = 0; 2908 } 2909 2910 if (con->ops->fault) 2911 con->ops->fault(con); 2912 } 2913 2914 /* 2915 * Do some work on a connection. Drop a connection ref when we're done. 
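 *
 * Each pass through the loop below checks, in order: did the socket close
 * under us, is a backoff retry pending, and are we in a state (STANDBY or
 * CLOSED) with nothing to do; only then does it try to read and then
 * write.  -EAGAIN from try_read()/try_write() restarts the loop (e.g.
 * when the connect handshake had to be restarted); any other error marks
 * the connection faulted, and con_fault()/con_fault_finish() take it from
 * there.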
2916 */ 2917 static void ceph_con_workfn(struct work_struct *work) 2918 { 2919 struct ceph_connection *con = container_of(work, struct ceph_connection, 2920 work.work); 2921 bool fault; 2922 2923 mutex_lock(&con->mutex); 2924 while (true) { 2925 int ret; 2926 2927 if ((fault = con_sock_closed(con))) { 2928 dout("%s: con %p SOCK_CLOSED\n", __func__, con); 2929 break; 2930 } 2931 if (con_backoff(con)) { 2932 dout("%s: con %p BACKOFF\n", __func__, con); 2933 break; 2934 } 2935 if (con->state == CON_STATE_STANDBY) { 2936 dout("%s: con %p STANDBY\n", __func__, con); 2937 break; 2938 } 2939 if (con->state == CON_STATE_CLOSED) { 2940 dout("%s: con %p CLOSED\n", __func__, con); 2941 BUG_ON(con->sock); 2942 break; 2943 } 2944 if (con->state == CON_STATE_PREOPEN) { 2945 dout("%s: con %p PREOPEN\n", __func__, con); 2946 BUG_ON(con->sock); 2947 } 2948 2949 ret = try_read(con); 2950 if (ret < 0) { 2951 if (ret == -EAGAIN) 2952 continue; 2953 if (!con->error_msg) 2954 con->error_msg = "socket error on read"; 2955 fault = true; 2956 break; 2957 } 2958 2959 ret = try_write(con); 2960 if (ret < 0) { 2961 if (ret == -EAGAIN) 2962 continue; 2963 if (!con->error_msg) 2964 con->error_msg = "socket error on write"; 2965 fault = true; 2966 } 2967 2968 break; /* If we make it to here, we're done */ 2969 } 2970 if (fault) 2971 con_fault(con); 2972 mutex_unlock(&con->mutex); 2973 2974 if (fault) 2975 con_fault_finish(con); 2976 2977 con->ops->put(con); 2978 } 2979 2980 /* 2981 * Generic error/fault handler. A retry mechanism is used with 2982 * exponential backoff 2983 */ 2984 static void con_fault(struct ceph_connection *con) 2985 { 2986 dout("fault %p state %lu to peer %s\n", 2987 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr)); 2988 2989 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), 2990 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); 2991 con->error_msg = NULL; 2992 2993 WARN_ON(con->state != CON_STATE_CONNECTING && 2994 con->state != CON_STATE_NEGOTIATING && 2995 con->state != CON_STATE_OPEN); 2996 2997 con_close_socket(con); 2998 2999 if (con_flag_test(con, CON_FLAG_LOSSYTX)) { 3000 dout("fault on LOSSYTX channel, marking CLOSED\n"); 3001 con->state = CON_STATE_CLOSED; 3002 return; 3003 } 3004 3005 if (con->in_msg) { 3006 BUG_ON(con->in_msg->con != con); 3007 ceph_msg_put(con->in_msg); 3008 con->in_msg = NULL; 3009 } 3010 3011 /* Requeue anything that hasn't been acked */ 3012 list_splice_init(&con->out_sent, &con->out_queue); 3013 3014 /* If there are no messages queued or keepalive pending, place 3015 * the connection in a STANDBY state */ 3016 if (list_empty(&con->out_queue) && 3017 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) { 3018 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); 3019 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 3020 con->state = CON_STATE_STANDBY; 3021 } else { 3022 /* retry after a delay. 
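 *
 * The delay starts at BASE_DELAY_INTERVAL and doubles on each successive
 * fault while it is still below MAX_DELAY_INTERVAL, so repeated failures
 * back off roughly as
 *
 *	base, 2*base, 4*base, ...
 *
 * The BACKOFF flag makes the worker requeue itself with this delay (see
 * con_backoff() above) instead of retrying immediately.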
*/ 3023 con->state = CON_STATE_PREOPEN; 3024 if (con->delay == 0) 3025 con->delay = BASE_DELAY_INTERVAL; 3026 else if (con->delay < MAX_DELAY_INTERVAL) 3027 con->delay *= 2; 3028 con_flag_set(con, CON_FLAG_BACKOFF); 3029 queue_con(con); 3030 } 3031 } 3032 3033 3034 3035 /* 3036 * initialize a new messenger instance 3037 */ 3038 void ceph_messenger_init(struct ceph_messenger *msgr, 3039 struct ceph_entity_addr *myaddr) 3040 { 3041 spin_lock_init(&msgr->global_seq_lock); 3042 3043 if (myaddr) 3044 msgr->inst.addr = *myaddr; 3045 3046 /* select a random nonce */ 3047 msgr->inst.addr.type = 0; 3048 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); 3049 encode_my_addr(msgr); 3050 3051 atomic_set(&msgr->stopping, 0); 3052 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns)); 3053 3054 dout("%s %p\n", __func__, msgr); 3055 } 3056 EXPORT_SYMBOL(ceph_messenger_init); 3057 3058 void ceph_messenger_fini(struct ceph_messenger *msgr) 3059 { 3060 put_net(read_pnet(&msgr->net)); 3061 } 3062 EXPORT_SYMBOL(ceph_messenger_fini); 3063 3064 static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con) 3065 { 3066 if (msg->con) 3067 msg->con->ops->put(msg->con); 3068 3069 msg->con = con ? con->ops->get(con) : NULL; 3070 BUG_ON(msg->con != con); 3071 } 3072 3073 static void clear_standby(struct ceph_connection *con) 3074 { 3075 /* come back from STANDBY? */ 3076 if (con->state == CON_STATE_STANDBY) { 3077 dout("clear_standby %p and ++connect_seq\n", con); 3078 con->state = CON_STATE_PREOPEN; 3079 con->connect_seq++; 3080 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING)); 3081 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)); 3082 } 3083 } 3084 3085 /* 3086 * Queue up an outgoing message on the given connection. 3087 */ 3088 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) 3089 { 3090 /* set src+dst */ 3091 msg->hdr.src = con->msgr->inst.name; 3092 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); 3093 msg->needs_out_seq = true; 3094 3095 mutex_lock(&con->mutex); 3096 3097 if (con->state == CON_STATE_CLOSED) { 3098 dout("con_send %p closed, dropping %p\n", con, msg); 3099 ceph_msg_put(msg); 3100 mutex_unlock(&con->mutex); 3101 return; 3102 } 3103 3104 msg_con_set(msg, con); 3105 3106 BUG_ON(!list_empty(&msg->list_head)); 3107 list_add_tail(&msg->list_head, &con->out_queue); 3108 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, 3109 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type), 3110 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 3111 le32_to_cpu(msg->hdr.front_len), 3112 le32_to_cpu(msg->hdr.middle_len), 3113 le32_to_cpu(msg->hdr.data_len)); 3114 3115 clear_standby(con); 3116 mutex_unlock(&con->mutex); 3117 3118 /* if there wasn't anything waiting to send before, queue 3119 * new work */ 3120 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3121 queue_con(con); 3122 } 3123 EXPORT_SYMBOL(ceph_con_send); 3124 3125 /* 3126 * Revoke a message that was previously queued for send 3127 */ 3128 void ceph_msg_revoke(struct ceph_msg *msg) 3129 { 3130 struct ceph_connection *con = msg->con; 3131 3132 if (!con) { 3133 dout("%s msg %p null con\n", __func__, msg); 3134 return; /* Message not in our possession */ 3135 } 3136 3137 mutex_lock(&con->mutex); 3138 if (!list_empty(&msg->list_head)) { 3139 dout("%s %p msg %p - was on queue\n", __func__, con, msg); 3140 list_del_init(&msg->list_head); 3141 msg->hdr.seq = 0; 3142 3143 ceph_msg_put(msg); 3144 } 3145 if (con->out_msg == msg) { 3146 BUG_ON(con->out_skip); 
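		/*
		 * The message is part-way out on the wire and can't be
		 * unsent.  Instead, count how many of its bytes are still
		 * queued (footer, then data, middle and front, depending on
		 * how far the write got) and add them to out_skip; the
		 * write path (write_partial_skip(), called from try_write())
		 * sends filler bytes in their place so the stream framing
		 * stays consistent.
		 */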
3147 /* footer */ 3148 if (con->out_msg_done) { 3149 con->out_skip += con_out_kvec_skip(con); 3150 } else { 3151 BUG_ON(!msg->data_length); 3152 con->out_skip += sizeof_footer(con); 3153 } 3154 /* data, middle, front */ 3155 if (msg->data_length) 3156 con->out_skip += msg->cursor.total_resid; 3157 if (msg->middle) 3158 con->out_skip += con_out_kvec_skip(con); 3159 con->out_skip += con_out_kvec_skip(con); 3160 3161 dout("%s %p msg %p - was sending, will write %d skip %d\n", 3162 __func__, con, msg, con->out_kvec_bytes, con->out_skip); 3163 msg->hdr.seq = 0; 3164 con->out_msg = NULL; 3165 ceph_msg_put(msg); 3166 } 3167 3168 mutex_unlock(&con->mutex); 3169 } 3170 3171 /* 3172 * Revoke a message that we may be reading data into 3173 */ 3174 void ceph_msg_revoke_incoming(struct ceph_msg *msg) 3175 { 3176 struct ceph_connection *con = msg->con; 3177 3178 if (!con) { 3179 dout("%s msg %p null con\n", __func__, msg); 3180 return; /* Message not in our possession */ 3181 } 3182 3183 mutex_lock(&con->mutex); 3184 if (con->in_msg == msg) { 3185 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len); 3186 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len); 3187 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len); 3188 3189 /* skip rest of message */ 3190 dout("%s %p msg %p revoked\n", __func__, con, msg); 3191 con->in_base_pos = con->in_base_pos - 3192 sizeof(struct ceph_msg_header) - 3193 front_len - 3194 middle_len - 3195 data_len - 3196 sizeof(struct ceph_msg_footer); 3197 ceph_msg_put(con->in_msg); 3198 con->in_msg = NULL; 3199 con->in_tag = CEPH_MSGR_TAG_READY; 3200 con->in_seq++; 3201 } else { 3202 dout("%s %p in_msg %p msg %p no-op\n", 3203 __func__, con, con->in_msg, msg); 3204 } 3205 mutex_unlock(&con->mutex); 3206 } 3207 3208 /* 3209 * Queue a keepalive byte to ensure the tcp connection is alive. 
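 *
 * With a peer that speaks KEEPALIVE2, the ack carries a timestamp that is
 * recorded in last_keepalive_ack; ceph_con_keepalive_expired() below
 * compares against it to decide whether the connection has gone quiet for
 * too long.  A typical caller pattern, sketched from a periodic tick
 * (interval is whatever the caller chooses):
 *
 *	ceph_con_keepalive(con);
 *	if (ceph_con_keepalive_expired(con, interval))
 *		... treat the session as dead and reconnect ...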
3210 */ 3211 void ceph_con_keepalive(struct ceph_connection *con) 3212 { 3213 dout("con_keepalive %p\n", con); 3214 mutex_lock(&con->mutex); 3215 clear_standby(con); 3216 con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING); 3217 mutex_unlock(&con->mutex); 3218 3219 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3220 queue_con(con); 3221 } 3222 EXPORT_SYMBOL(ceph_con_keepalive); 3223 3224 bool ceph_con_keepalive_expired(struct ceph_connection *con, 3225 unsigned long interval) 3226 { 3227 if (interval > 0 && 3228 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) { 3229 struct timespec64 now; 3230 struct timespec64 ts; 3231 ktime_get_real_ts64(&now); 3232 jiffies_to_timespec64(interval, &ts); 3233 ts = timespec64_add(con->last_keepalive_ack, ts); 3234 return timespec64_compare(&now, &ts) >= 0; 3235 } 3236 return false; 3237 } 3238 3239 static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg) 3240 { 3241 BUG_ON(msg->num_data_items >= msg->max_data_items); 3242 return &msg->data[msg->num_data_items++]; 3243 } 3244 3245 static void ceph_msg_data_destroy(struct ceph_msg_data *data) 3246 { 3247 if (data->type == CEPH_MSG_DATA_PAGELIST) 3248 ceph_pagelist_release(data->pagelist); 3249 } 3250 3251 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, 3252 size_t length, size_t alignment) 3253 { 3254 struct ceph_msg_data *data; 3255 3256 BUG_ON(!pages); 3257 BUG_ON(!length); 3258 3259 data = ceph_msg_data_add(msg); 3260 data->type = CEPH_MSG_DATA_PAGES; 3261 data->pages = pages; 3262 data->length = length; 3263 data->alignment = alignment & ~PAGE_MASK; 3264 3265 msg->data_length += length; 3266 } 3267 EXPORT_SYMBOL(ceph_msg_data_add_pages); 3268 3269 void ceph_msg_data_add_pagelist(struct ceph_msg *msg, 3270 struct ceph_pagelist *pagelist) 3271 { 3272 struct ceph_msg_data *data; 3273 3274 BUG_ON(!pagelist); 3275 BUG_ON(!pagelist->length); 3276 3277 data = ceph_msg_data_add(msg); 3278 data->type = CEPH_MSG_DATA_PAGELIST; 3279 refcount_inc(&pagelist->refcnt); 3280 data->pagelist = pagelist; 3281 3282 msg->data_length += pagelist->length; 3283 } 3284 EXPORT_SYMBOL(ceph_msg_data_add_pagelist); 3285 3286 #ifdef CONFIG_BLOCK 3287 void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos, 3288 u32 length) 3289 { 3290 struct ceph_msg_data *data; 3291 3292 data = ceph_msg_data_add(msg); 3293 data->type = CEPH_MSG_DATA_BIO; 3294 data->bio_pos = *bio_pos; 3295 data->bio_length = length; 3296 3297 msg->data_length += length; 3298 } 3299 EXPORT_SYMBOL(ceph_msg_data_add_bio); 3300 #endif /* CONFIG_BLOCK */ 3301 3302 void ceph_msg_data_add_bvecs(struct ceph_msg *msg, 3303 struct ceph_bvec_iter *bvec_pos) 3304 { 3305 struct ceph_msg_data *data; 3306 3307 data = ceph_msg_data_add(msg); 3308 data->type = CEPH_MSG_DATA_BVECS; 3309 data->bvec_pos = *bvec_pos; 3310 3311 msg->data_length += bvec_pos->iter.bi_size; 3312 } 3313 EXPORT_SYMBOL(ceph_msg_data_add_bvecs); 3314 3315 /* 3316 * construct a new message with given type, size 3317 * the new msg has a ref count of 1. 
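 *
 * A rough usage sketch (front_len, pages and length are the caller's;
 * CEPH_MSG_OSD_OP is just an example type):
 *
 *	struct ceph_msg *m;
 *
 *	m = ceph_msg_new2(CEPH_MSG_OSD_OP, front_len, 1, GFP_NOFS, false);
 *	if (!m)
 *		return -ENOMEM;
 *	ceph_msg_data_add_pages(m, pages, length, 0);
 *	ceph_con_send(con, m);		// messenger consumes this reference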
3318 */ 3319 struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items, 3320 gfp_t flags, bool can_fail) 3321 { 3322 struct ceph_msg *m; 3323 3324 m = kmem_cache_zalloc(ceph_msg_cache, flags); 3325 if (m == NULL) 3326 goto out; 3327 3328 m->hdr.type = cpu_to_le16(type); 3329 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); 3330 m->hdr.front_len = cpu_to_le32(front_len); 3331 3332 INIT_LIST_HEAD(&m->list_head); 3333 kref_init(&m->kref); 3334 3335 /* front */ 3336 if (front_len) { 3337 m->front.iov_base = ceph_kvmalloc(front_len, flags); 3338 if (m->front.iov_base == NULL) { 3339 dout("ceph_msg_new can't allocate %d bytes\n", 3340 front_len); 3341 goto out2; 3342 } 3343 } else { 3344 m->front.iov_base = NULL; 3345 } 3346 m->front_alloc_len = m->front.iov_len = front_len; 3347 3348 if (max_data_items) { 3349 m->data = kmalloc_array(max_data_items, sizeof(*m->data), 3350 flags); 3351 if (!m->data) 3352 goto out2; 3353 3354 m->max_data_items = max_data_items; 3355 } 3356 3357 dout("ceph_msg_new %p front %d\n", m, front_len); 3358 return m; 3359 3360 out2: 3361 ceph_msg_put(m); 3362 out: 3363 if (!can_fail) { 3364 pr_err("msg_new can't create type %d front %d\n", type, 3365 front_len); 3366 WARN_ON(1); 3367 } else { 3368 dout("msg_new can't create type %d front %d\n", type, 3369 front_len); 3370 } 3371 return NULL; 3372 } 3373 EXPORT_SYMBOL(ceph_msg_new2); 3374 3375 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, 3376 bool can_fail) 3377 { 3378 return ceph_msg_new2(type, front_len, 0, flags, can_fail); 3379 } 3380 EXPORT_SYMBOL(ceph_msg_new); 3381 3382 /* 3383 * Allocate "middle" portion of a message, if it is needed and wasn't 3384 * allocated by alloc_msg. This allows us to read a small fixed-size 3385 * per-type header in the front and then gracefully fail (i.e., 3386 * propagate the error to the caller based on info in the front) when 3387 * the middle is too large. 3388 */ 3389 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) 3390 { 3391 int type = le16_to_cpu(msg->hdr.type); 3392 int middle_len = le32_to_cpu(msg->hdr.middle_len); 3393 3394 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type, 3395 ceph_msg_type_name(type), middle_len); 3396 BUG_ON(!middle_len); 3397 BUG_ON(msg->middle); 3398 3399 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS); 3400 if (!msg->middle) 3401 return -ENOMEM; 3402 return 0; 3403 } 3404 3405 /* 3406 * Allocate a message for receiving an incoming message on a 3407 * connection, and save the result in con->in_msg. Uses the 3408 * connection's private alloc_msg op if available. 3409 * 3410 * Returns 0 on success, or a negative error code. 3411 * 3412 * On success, if we set *skip = 1: 3413 * - the next message should be skipped and ignored. 3414 * - con->in_msg == NULL 3415 * or if we set *skip = 0: 3416 * - con->in_msg is non-null. 
3417 * On error (ENOMEM, EAGAIN, ...), 3418 * - con->in_msg == NULL 3419 */ 3420 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip) 3421 { 3422 struct ceph_msg_header *hdr = &con->in_hdr; 3423 int middle_len = le32_to_cpu(hdr->middle_len); 3424 struct ceph_msg *msg; 3425 int ret = 0; 3426 3427 BUG_ON(con->in_msg != NULL); 3428 BUG_ON(!con->ops->alloc_msg); 3429 3430 mutex_unlock(&con->mutex); 3431 msg = con->ops->alloc_msg(con, hdr, skip); 3432 mutex_lock(&con->mutex); 3433 if (con->state != CON_STATE_OPEN) { 3434 if (msg) 3435 ceph_msg_put(msg); 3436 return -EAGAIN; 3437 } 3438 if (msg) { 3439 BUG_ON(*skip); 3440 msg_con_set(msg, con); 3441 con->in_msg = msg; 3442 } else { 3443 /* 3444 * Null message pointer means either we should skip 3445 * this message or we couldn't allocate memory. The 3446 * former is not an error. 3447 */ 3448 if (*skip) 3449 return 0; 3450 3451 con->error_msg = "error allocating memory for incoming message"; 3452 return -ENOMEM; 3453 } 3454 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); 3455 3456 if (middle_len && !con->in_msg->middle) { 3457 ret = ceph_alloc_middle(con, con->in_msg); 3458 if (ret < 0) { 3459 ceph_msg_put(con->in_msg); 3460 con->in_msg = NULL; 3461 } 3462 } 3463 3464 return ret; 3465 } 3466 3467 3468 /* 3469 * Free a generically kmalloc'd message. 3470 */ 3471 static void ceph_msg_free(struct ceph_msg *m) 3472 { 3473 dout("%s %p\n", __func__, m); 3474 kvfree(m->front.iov_base); 3475 kfree(m->data); 3476 kmem_cache_free(ceph_msg_cache, m); 3477 } 3478 3479 static void ceph_msg_release(struct kref *kref) 3480 { 3481 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref); 3482 int i; 3483 3484 dout("%s %p\n", __func__, m); 3485 WARN_ON(!list_empty(&m->list_head)); 3486 3487 msg_con_set(m, NULL); 3488 3489 /* drop middle, data, if any */ 3490 if (m->middle) { 3491 ceph_buffer_put(m->middle); 3492 m->middle = NULL; 3493 } 3494 3495 for (i = 0; i < m->num_data_items; i++) 3496 ceph_msg_data_destroy(&m->data[i]); 3497 3498 if (m->pool) 3499 ceph_msgpool_put(m->pool, m); 3500 else 3501 ceph_msg_free(m); 3502 } 3503 3504 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) 3505 { 3506 dout("%s %p (was %d)\n", __func__, msg, 3507 kref_read(&msg->kref)); 3508 kref_get(&msg->kref); 3509 return msg; 3510 } 3511 EXPORT_SYMBOL(ceph_msg_get); 3512 3513 void ceph_msg_put(struct ceph_msg *msg) 3514 { 3515 dout("%s %p (was %d)\n", __func__, msg, 3516 kref_read(&msg->kref)); 3517 kref_put(&msg->kref, ceph_msg_release); 3518 } 3519 EXPORT_SYMBOL(ceph_msg_put); 3520 3521 void ceph_msg_dump(struct ceph_msg *msg) 3522 { 3523 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg, 3524 msg->front_alloc_len, msg->data_length); 3525 print_hex_dump(KERN_DEBUG, "header: ", 3526 DUMP_PREFIX_OFFSET, 16, 1, 3527 &msg->hdr, sizeof(msg->hdr), true); 3528 print_hex_dump(KERN_DEBUG, " front: ", 3529 DUMP_PREFIX_OFFSET, 16, 1, 3530 msg->front.iov_base, msg->front.iov_len, true); 3531 if (msg->middle) 3532 print_hex_dump(KERN_DEBUG, "middle: ", 3533 DUMP_PREFIX_OFFSET, 16, 1, 3534 msg->middle->vec.iov_base, 3535 msg->middle->vec.iov_len, true); 3536 print_hex_dump(KERN_DEBUG, "footer: ", 3537 DUMP_PREFIX_OFFSET, 16, 1, 3538 &msg->footer, sizeof(msg->footer), true); 3539 } 3540 EXPORT_SYMBOL(ceph_msg_dump); 3541
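
/*
 * The receive path above leans on a handful of per-connection callbacks
 * (con->ops).  A minimal, hypothetical owner might wire them up roughly
 * as follows; the structure layout and helpers are made up, only the
 * callback signatures follow the usage in this file:
 *
 *	static struct ceph_connection *my_get(struct ceph_connection *con)
 *	{
 *		... take a reference on the owning object, or return
 *		    NULL if it is already going away ...
 *	}
 *
 *	static struct ceph_msg *my_alloc_msg(struct ceph_connection *con,
 *					     struct ceph_msg_header *hdr,
 *					     int *skip)
 *	{
 *		... return NULL and set *skip = 1 for message types we
 *		    do not want; otherwise allocate a buffer to read into:
 *
 *		*skip = 0;
 *		return ceph_msg_new(le16_to_cpu(hdr->type),
 *				    le32_to_cpu(hdr->front_len),
 *				    GFP_NOFS, false);
 *	}
 *
 *	static const struct ceph_connection_operations my_con_ops = {
 *		.get		= my_get,
 *		.put		= my_put,
 *		.alloc_msg	= my_alloc_msg,
 *		.dispatch	= my_dispatch,
 *		.peer_reset	= my_peer_reset,
 *		.fault		= my_fault,
 *	};
 */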