// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif	/* CONFIG_BLOCK */
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       ||                            \   \
 *       ||    -----------              \   \
 *       ||    | CLOSING |  socket event;    \   \
 *       ||    -----------  await close       \   \
 *       ||         ^                          \   |
 *       ||         |                           \  |
 *       ||         + con_sock_state_closing()   \ |
 *       ||        / \                            | |
 *       ||       /   ---------------             | |
 *       ||      /                   \            v v
 *       ||     /                  --------------
 *       ||    /    -----------------| CONNECTING |  socket created, TCP
 *       ||    |   /                 --------------  connect initiated
 *       ||    |   | con_sock_state_connected()
 *       ||    |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

/*
 * connection states
 */
#define CON_STATE_CLOSED	1  /* -> PREOPEN */
#define CON_STATE_PREOPEN	2  /* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3  /* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4  /* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5  /* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6  /* -> PREOPEN, CLOSED */

/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX	   0  /* we can close channel or drop
				       * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING 1  /* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING	   2  /* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED	   3  /* socket state changed to closed */
#define CON_FLAG_BACKOFF	   4  /* need to retry queuing delayed work */

static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CON_FLAG_LOSSYTX:
	case CON_FLAG_KEEPALIVE_PENDING:
	case CON_FLAG_WRITE_PENDING:
	case CON_FLAG_SOCK_CLOSED:
	case CON_FLAG_BACKOFF:
		return true;
	default:
		return false;
	}
}

static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
				    unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
				  unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));

	return test_and_set_bit(con_flag, &con->flags);
}
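
/*
 * Note: con->flags is manipulated both from the workqueue worker and
 * from socket callbacks that run in softirq context without con->mutex
 * held, so the helpers above rely on atomic bitops; the BUG_ON()s only
 * guard against passing an undefined flag value.
 */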

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*ceph_msg_cache;
static struct kmem_cache	*ceph_msg_data_cache;

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024

static void queue_con(struct ceph_connection *con);
static void cancel_con(struct ceph_connection *con);
static void ceph_con_workfn(struct work_struct *);
static void con_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;		/* used in certain error cases */

const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss->ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);

static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

static int ceph_msgr_slab_init(void)
{
	BUG_ON(ceph_msg_cache);
	ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
	if (!ceph_msg_cache)
		return -ENOMEM;

	BUG_ON(ceph_msg_data_cache);
	ceph_msg_data_cache = KMEM_CACHE(ceph_msg_data, 0);
	if (ceph_msg_data_cache)
		return 0;

	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;

	return -ENOMEM;
}

static void ceph_msgr_slab_exit(void)
{
	BUG_ON(!ceph_msg_data_cache);
	kmem_cache_destroy(ceph_msg_data_cache);
	ceph_msg_data_cache = NULL;

	BUG_ON(!ceph_msg_cache);
	kmem_cache_destroy(ceph_msg_cache);
	ceph_msg_cache = NULL;
}

static void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	BUG_ON(zero_page == NULL);
	put_page(zero_page);
	zero_page = NULL;

	ceph_msgr_slab_exit();
}
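
/*
 * The messenger can sit on the writeback path (e.g. under rbd or
 * CephFS), so the workqueue below is created with WQ_MEM_RECLAIM to
 * guarantee forward progress under memory pressure; for the same
 * reason socket allocations elsewhere in this file use GFP_NOFS or
 * memalloc_noio_save().
 */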
int ceph_msgr_init(void)
{
	if (ceph_msgr_slab_init())
		return -ENOMEM;

	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	get_page(zero_page);

	/*
	 * The number of active work items is limited by the number of
	 * connections, so leave @max_active at default.
	 */
	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

/*
 * socket callback functions
 */
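
/*
 * These callbacks run in bottom-half (softirq) context, so they do no
 * real work themselves: they record what happened in con->flags and
 * kick the connection's work item, which performs all actual reading
 * and writing from process context.
 */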

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	if (atomic_read(&con->msgr->stopping)) {
		return;
	}

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s on %p state = %lu, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
	if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
		if (sk_stream_is_writeable(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
		/* fall through */
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		con_sock_state_closing(con);
		con_flag_set(con, CON_FLAG_SOCK_CLOSED);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;

	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}


/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	unsigned int noio_flag;
	int ret;

	BUG_ON(con->sock);

	/* sock_create_kern() allocates with GFP_KERNEL */
	noio_flag = memalloc_noio_save();
	ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	memalloc_noio_restore(noio_flag);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

	con_sock_state_connecting(con);
	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		return ret;
	}

	if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY)) {
		int optval = 1;

		ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
					(char *)&optval, sizeof(optval));
		if (ret)
			pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d",
			       ret);
	}

	con->sock = sock;
	return 0;
}
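
/*
 * Note that a successful return from ceph_tcp_connect() does not mean
 * the TCP handshake has completed: the connect is issued O_NONBLOCK
 * and typically returns -EINPROGRESS, which is treated as success.
 * Completion is reported asynchronously by ceph_sock_state_change()
 * when sk_state reaches TCP_ESTABLISHED.
 */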

static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
	r = sock_recvmsg(sock, &msg, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
			     int page_offset, size_t length)
{
	struct bio_vec bvec = {
		.bv_page = page,
		.bv_offset = page_offset,
		.bv_len = length
	};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	BUG_ON(page_offset + length > PAGE_SIZE);
	iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, &bvec, 1, length);
	r = sock_recvmsg(sock, &msg, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
			       int offset, size_t size, bool more)
{
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
	int ret;

	ret = kernel_sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, bool more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	struct bio_vec bvec;
	int ret;

	/* sendpage cannot properly handle pages with page_count == 0,
	 * we need to fallback to sendmsg if that's the case */
	if (page_count(page) >= 1)
		return __ceph_tcp_sendpage(sock, page, offset, size, more);

	bvec.bv_page = page;
	bvec.bv_offset = offset;
	bvec.bv_len = size;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size);
	ret = sock_sendmsg(sock, &msg);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}

/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc = 0;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (con->sock) {
		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
		sock_release(con->sock);
		con->sock = NULL;
	}

	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
	con_flag_clear(con, CON_FLAG_SOCK_CLOSED);

	con_sock_state_closed(con);
	return rc;
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);

	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	dout("reset_connection %p\n", con);
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		BUG_ON(con->out_msg->con != con);
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;

	con->out_skip = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	con->state = CON_STATE_CLOSED;

	con_flag_clear(con, CON_FLAG_LOSSYTX);	/* so we retry next connect */
	con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
	con_flag_clear(con, CON_FLAG_WRITE_PENDING);
	con_flag_clear(con, CON_FLAG_BACKOFF);

	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_con(con);
	con_close_socket(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));

	WARN_ON(con->state != CON_STATE_CLOSED);
	con->state = CON_STATE_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
		   const struct ceph_connection_operations *ops,
		   struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, ceph_con_workfn);

	con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);


/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}

static void con_out_kvec_reset(struct ceph_connection *con)
{
	BUG_ON(con->out_skip);

	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}

static void con_out_kvec_add(struct ceph_connection *con,
			     size_t size, void *data)
{
	int index = con->out_kvec_left;

	BUG_ON(con->out_skip);
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}

/*
 * Chop off a kvec from the end.  Return residual number of bytes for
 * that kvec, i.e. how many bytes would have been written if the kvec
 * hadn't been nuked.
 */
static int con_out_kvec_skip(struct ceph_connection *con)
{
	int off = con->out_kvec_cur - con->out_kvec;
	int skip = 0;

	if (con->out_kvec_bytes > 0) {
		skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
		BUG_ON(con->out_kvec_bytes < skip);
		BUG_ON(!con->out_kvec_left);
		con->out_kvec_bytes -= skip;
		con->out_kvec_left--;
	}

	return skip;
}
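
/*
 * The con_out_kvec_*() helpers stage small fixed-size items (tag
 * bytes, the message header, front and middle sections, acks) in
 * con->out_kvec[]; write_partial_kvec() then drains the array with
 * ceph_tcp_sendmsg(), advancing out_kvec_cur/out_kvec_left across
 * partial writes.  Bulk message data is never staged here -- it is
 * sent page by page via ceph_tcp_sendpage().
 */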

#ifdef CONFIG_BLOCK

/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
					  size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio *bio;

	BUG_ON(data->type != CEPH_MSG_DATA_BIO);

	bio = data->bio;
	BUG_ON(!bio);

	cursor->resid = min(length, data->bio_length);
	cursor->bio = bio;
	cursor->bvec_iter = bio->bi_iter;
	cursor->last_piece =
		cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
}

static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
					   size_t *page_offset,
					   size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct bio *bio;
	struct bio_vec bio_vec;

	BUG_ON(data->type != CEPH_MSG_DATA_BIO);

	bio = cursor->bio;
	BUG_ON(!bio);

	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);

	*page_offset = (size_t) bio_vec.bv_offset;
	BUG_ON(*page_offset >= PAGE_SIZE);
	if (cursor->last_piece) /* pagelist offset is always 0 */
		*length = cursor->resid;
	else
		*length = (size_t) bio_vec.bv_len;
	BUG_ON(*length > cursor->resid);
	BUG_ON(*page_offset + *length > PAGE_SIZE);

	return bio_vec.bv_page;
}

static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
				      size_t bytes)
{
	struct bio *bio;
	struct bio_vec bio_vec;

	BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);

	bio = cursor->bio;
	BUG_ON(!bio);

	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);

	/* Advance the cursor offset */

	BUG_ON(cursor->resid < bytes);
	cursor->resid -= bytes;

	bio_advance_iter(bio, &cursor->bvec_iter, bytes);

	if (bytes < bio_vec.bv_len)
		return false;	/* more bytes to process in this segment */

	/* Move on to the next segment, and possibly the next bio */

	if (!cursor->bvec_iter.bi_size) {
		bio = bio->bi_next;
		cursor->bio = bio;
		if (bio)
			cursor->bvec_iter = bio->bi_iter;
		else
			memset(&cursor->bvec_iter, 0,
			       sizeof(cursor->bvec_iter));
	}

	if (!cursor->last_piece) {
		BUG_ON(!cursor->resid);
		BUG_ON(!bio);
		/* A short read is OK, so use <= rather than == */
		if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
			cursor->last_piece = true;
	}

	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * For a page array, a piece comes from the first page in the array
 * that has not already been fully consumed.
 */
static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
					    size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	int page_count;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(!data->pages);
	BUG_ON(!data->length);

	cursor->resid = min(length, data->length);
	page_count = calc_pages_for(data->alignment, (u64)data->length);
	cursor->page_offset = data->alignment & ~PAGE_MASK;
	cursor->page_index = 0;
	BUG_ON(page_count > (int)USHRT_MAX);
	cursor->page_count = (unsigned short)page_count;
	BUG_ON(length > SIZE_MAX - cursor->page_offset);
	cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
			 size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_index >= cursor->page_count);
	BUG_ON(cursor->page_offset >= PAGE_SIZE);

	*page_offset = cursor->page_offset;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return data->pages[cursor->page_index];
}

static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
{
	BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);

	BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);

	/* Advance the cursor page offset */

	cursor->resid -= bytes;
	cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
	if (!bytes || cursor->page_offset)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;   /* no more data */

	/* Move on to the next page; offset is already at 0 */

	BUG_ON(cursor->page_index >= cursor->page_count);
	cursor->page_index++;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * For a pagelist, a piece is whatever remains to be consumed in the
 * first page in the list, or the front of the next page.
 */
static void
ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
				   size_t length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;
	struct page *page;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	if (!length)
		return;		/* pagelist can be assigned but empty */

	BUG_ON(list_empty(&pagelist->head));
	page = list_first_entry(&pagelist->head, struct page, lru);

	cursor->resid = min(length, pagelist->length);
	cursor->page = page;
	cursor->offset = 0;
	cursor->last_piece = cursor->resid <= PAGE_SIZE;
}

static struct page *
ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
			    size_t *page_offset, size_t *length)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(!cursor->page);
	BUG_ON(cursor->offset + cursor->resid != pagelist->length);

	/* offset of first page in pagelist is always 0 */
	*page_offset = cursor->offset & ~PAGE_MASK;
	if (cursor->last_piece)
		*length = cursor->resid;
	else
		*length = PAGE_SIZE - *page_offset;

	return cursor->page;
}

static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
					   size_t bytes)
{
	struct ceph_msg_data *data = cursor->data;
	struct ceph_pagelist *pagelist;

	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);

	pagelist = data->pagelist;
	BUG_ON(!pagelist);

	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
	BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);

	/* Advance the cursor offset */

	cursor->resid -= bytes;
	cursor->offset += bytes;
	/* offset of first page in pagelist is always 0 */
	if (!bytes || cursor->offset & ~PAGE_MASK)
		return false;	/* more bytes to process in the current page */

	if (!cursor->resid)
		return false;   /* no more data */

	/* Move on to the next page */

	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
	cursor->page = list_next_entry(cursor->page, lru);
	cursor->last_piece = cursor->resid <= PAGE_SIZE;

	return true;
}

/*
 * Message data is handled (sent or received) in pieces, where each
 * piece resides on a single page.  The network layer might not
 * consume an entire piece at once.  A data item's cursor keeps
 * track of which piece is next to process and how much remains to
 * be processed in that piece.  It also tracks whether the current
 * piece is the last one in the data item.
 */
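
/*
 * Typical cursor usage, as in write_partial_message_data() below:
 *
 *	ceph_msg_data_cursor_init(msg, data_len);
 *	while (cursor->resid) {
 *		page = ceph_msg_data_next(cursor, &off, &len, &last);
 *		ret = ceph_tcp_sendpage(con->sock, page, off, len, !last);
 *		if (ret <= 0)
 *			break;		// resume here on the next pass
 *		ceph_msg_data_advance(cursor, ret);
 *	}
 */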
static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
{
	size_t length = cursor->total_resid;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		ceph_msg_data_pagelist_cursor_init(cursor, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		ceph_msg_data_pages_cursor_init(cursor, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		ceph_msg_data_bio_cursor_init(cursor, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		/* BUG(); */
		break;
	}
	cursor->need_crc = true;
}

static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
{
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	struct ceph_msg_data *data;

	BUG_ON(!length);
	BUG_ON(length > msg->data_length);
	BUG_ON(list_empty(&msg->data));

	cursor->data_head = &msg->data;
	cursor->total_resid = length;
	data = list_first_entry(&msg->data, struct ceph_msg_data, links);
	cursor->data = data;

	__ceph_msg_data_cursor_init(cursor);
}

/*
 * Return the page containing the next piece to process for a given
 * data item, and supply the page offset and length of that piece.
 * Indicate whether this is the last piece in this data item.
 */
static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
				       size_t *page_offset, size_t *length,
				       bool *last_piece)
{
	struct page *page;

	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
		break;
	case CEPH_MSG_DATA_PAGES:
		page = ceph_msg_data_pages_next(cursor, page_offset, length);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		page = ceph_msg_data_bio_next(cursor, page_offset, length);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		page = NULL;
		break;
	}
	BUG_ON(!page);
	BUG_ON(*page_offset + *length > PAGE_SIZE);
	BUG_ON(!*length);
	if (last_piece)
		*last_piece = cursor->last_piece;

	return page;
}

/*
 * Advance the cursor by the given number of bytes, moving on to the
 * next piece of the data item (or the next data item) when the
 * current one is consumed.  Sets cursor->need_crc when a new piece
 * begins.
 */
static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
				  size_t bytes)
{
	bool new_piece;

	BUG_ON(bytes > cursor->resid);
	switch (cursor->data->type) {
	case CEPH_MSG_DATA_PAGELIST:
		new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
		break;
	case CEPH_MSG_DATA_PAGES:
		new_piece = ceph_msg_data_pages_advance(cursor, bytes);
		break;
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
		new_piece = ceph_msg_data_bio_advance(cursor, bytes);
		break;
#endif /* CONFIG_BLOCK */
	case CEPH_MSG_DATA_NONE:
	default:
		BUG();
		break;
	}
	cursor->total_resid -= bytes;

	if (!cursor->resid && cursor->total_resid) {
		WARN_ON(!cursor->last_piece);
		BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
		cursor->data = list_next_entry(cursor->data, links);
		__ceph_msg_data_cursor_init(cursor);
		new_piece = true;
	}
	cursor->need_crc = new_piece;
}
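
/*
 * Peers that support CEPH_FEATURE_MSG_AUTH expect the full footer,
 * which ends in a 64-bit signature; older peers get the shorter
 * ceph_msg_footer_old without it.  Pick the size to put on the wire
 * based on the features negotiated for this connection.
 */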
static size_t sizeof_footer(struct ceph_connection *con)
{
	return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
	    sizeof(struct ceph_msg_footer) :
	    sizeof(struct ceph_msg_footer_old);
}

static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
	BUG_ON(!msg);
	BUG_ON(!data_len);

	/* Initialize data cursor */

	ceph_msg_data_cursor_init(msg, (size_t)data_len);
}

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;

	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

	dout("prepare_write_message_footer %p\n", con);
	con_out_kvec_add(con, sizeof_footer(con), &m->footer);
	if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
		if (con->ops->sign_message)
			con->ops->sign_message(m);
		else
			m->footer.sig = 0;
	} else {
		m->old_footer.flags = m->footer.flags;
	}
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}
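
/*
 * On the wire an outgoing message looks like:
 *
 *	tag (1 byte) | header | front | [middle] | [data] | footer
 *
 * prepare_write_message() stages the tag, header, front and middle in
 * out_kvec[]; any data payload is streamed afterwards page by page by
 * write_partial_message_data(), and the footer is queued last.  An
 * outstanding ack may be prepended so it rides in the same TCP
 * segment.
 */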

/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	con_out_kvec_reset(con);
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
			&con->out_temp_ack);
	}

	BUG_ON(list_empty(&con->out_queue));
	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;
	BUG_ON(m->con != con);

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;

		if (con->ops->reencode_message)
			con->ops->reencode_message(m);
	}

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     m->data_length);
	WARN_ON(m->front.iov_len != le32_to_cpu(m->hdr.front_len));
	WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));

	/* tag + hdr + front + middle */
	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		con_out_kvec_add(con, m->middle->vec.iov_len,
			m->middle->vec.iov_base);

	/* fill in hdr crc and finalize hdr */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));

	/* fill in front and middle crc, footer */
	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
				m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	dout("%s front_crc %u middle_crc %u\n", __func__,
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));
	con->out_msg->footer.flags = 0;

	/* is there a data payload? */
	con->out_msg->footer.data_crc = 0;
	if (m->data_length) {
		prepare_message_data(con->out_msg, m->data_length);
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);
	}

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Prepare to share the seq during handshake
 */
static void prepare_write_seq(struct ceph_connection *con)
{
	dout("prepare_write_seq %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}
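
/*
 * With CEPH_FEATURE_MSGR_KEEPALIVE2 the keepalive tag is followed by
 * a timestamp, which the peer echoes back in its KEEPALIVE2_ACK; the
 * legacy keepalive is a bare tag byte with no payload.
 */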

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con_out_kvec_reset(con);
	if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
		struct timespec now;

		ktime_get_real_ts(&now);
		con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
		ceph_encode_timespec(&con->out_temp_keepalive2, &now);
		con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
				 &con->out_temp_keepalive2);
	} else {
		con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
	}
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

/*
 * Connection negotiation.
 */

static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
						int *auth_proto)
{
	struct ceph_auth_handshake *auth;

	if (!con->ops->get_authorizer) {
		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
		con->out_connect.authorizer_len = 0;
		return NULL;
	}

	auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
	if (IS_ERR(auth))
		return auth;

	con->auth_reply_buf = auth->authorizer_reply_buf;
	con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
	return auth;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
	con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
	con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
			 &con->msgr->my_enc_addr);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);
}

static int prepare_write_connect(struct ceph_connection *con)
{
	unsigned int global_seq = get_global_seq(con->msgr, 0);
	int proto;
	int auth_proto;
	struct ceph_auth_handshake *auth;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features =
	    cpu_to_le64(from_msgr(con->msgr)->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	auth_proto = CEPH_AUTH_UNKNOWN;
	auth = get_connect_authorizer(con, &auth_proto);
	if (IS_ERR(auth))
		return PTR_ERR(auth);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
	con->out_connect.authorizer_len = auth ?
		cpu_to_le32(auth->authorizer_buf_len) : 0;

	con_out_kvec_add(con, sizeof (con->out_connect),
			 &con->out_connect);
	if (auth && auth->authorizer_buf_len)
		con_out_kvec_add(con, auth->authorizer_buf_len,
				 auth->authorizer_buf);

	con->out_more = 0;
	con_flag_set(con, CON_FLAG_WRITE_PENDING);

	return 0;
}

/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */

		/* account for full iov entries consumed */
		while (ret >= con->out_kvec_cur->iov_len) {
			BUG_ON(!con->out_kvec_left);
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		}
		/* and for a partially-consumed entry */
		if (ret) {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}

static u32 ceph_crc32c_page(u32 crc, struct page *page,
			    unsigned int page_offset,
			    unsigned int length)
{
	char *kaddr;

	kaddr = kmap(page);
	BUG_ON(kaddr == NULL);
	crc = crc32c(crc, kaddr + page_offset, length);
	kunmap(page);

	return crc;
}
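
/*
 * The running data CRC is parked in msg->footer.data_crc whenever the
 * socket backs up, so a resumed call below picks up the accumulated
 * value instead of rescanning pages that were already sent;
 * cursor->need_crc keeps a partially-sent piece from being summed
 * twice.
 */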
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_message_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	struct ceph_msg_data_cursor *cursor = &msg->cursor;
	bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
	u32 crc;

	dout("%s %p msg %p\n", __func__, con, msg);

	if (list_empty(&msg->data))
		return -EINVAL;

	/*
	 * Iterate through each page that contains data to be
	 * written, and send as much as possible for each.
	 *
	 * If we are calculating the data crc (the default), we will
	 * need to map the page.  If we have no pages, they have
	 * been revoked, so use the zero page.
	 */
	crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
	while (cursor->resid) {
		struct page *page;
		size_t page_offset;
		size_t length;
		bool last_piece;
		int ret;

		page = ceph_msg_data_next(cursor, &page_offset, &length,
					  &last_piece);
		ret = ceph_tcp_sendpage(con->sock, page, page_offset,
					length, !last_piece);
		if (ret <= 0) {
			if (do_datacrc)
				msg->footer.data_crc = cpu_to_le32(crc);

			return ret;
		}
		if (do_datacrc && cursor->need_crc)
			crc = ceph_crc32c_page(crc, page, page_offset, length);
		ceph_msg_data_advance(cursor, (size_t)ret);
	}

	dout("%s %p msg %p done\n", __func__, con, msg);

	/* prepare and queue up footer, too */
	if (do_datacrc)
		msg->footer.data_crc = cpu_to_le32(crc);
	else
		msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con_out_kvec_reset(con);
	prepare_write_message_footer(con);

	return 1;	/* must return > 0 to indicate success */
}

/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	dout("%s %p %d left\n", __func__, con, con->out_skip);
	while (con->out_skip > 0) {
		size_t size = min(con->out_skip, (int) PAGE_SIZE);

		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_seq(struct ceph_connection *con)
{
	dout("prepare_read_seq %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_SEQ;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

static void prepare_read_keepalive_ack(struct ceph_connection *con)
{
	dout("prepare_read_keepalive_ack %p\n", con);
	con->in_base_pos = 0;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}
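
/*
 * read_partial() pulls the next chunk of a multi-part read into
 * @object.  @end is the cumulative position (relative to
 * con->in_base_pos == 0) at which this chunk finishes and @size is
 * the chunk's length, so @object + (size - left) addresses the bytes
 * still missing.  Returns 1 when the chunk is complete, 0 if the
 * socket ran dry, <0 on error; because in_base_pos persists, a later
 * call resumes exactly where this one stopped.
 */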
static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
	while (con->in_base_pos < end) {
		int left = end - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}


/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	size = strlen(CEPH_BANNER);
	end = size;
	ret = read_partial(con, end, size, con->in_banner);
	if (ret <= 0)
		goto out;

	size = sizeof (con->actual_peer_addr);
	end += size;
	ret = read_partial(con, end, size, &con->actual_peer_addr);
	if (ret <= 0)
		goto out;

	size = sizeof (con->peer_addr_for_me);
	end += size;
	ret = read_partial(con, end, size, &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;

out:
	return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	size = sizeof (con->in_reply);
	end = size;
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		goto out;

	size = le32_to_cpu(con->in_reply.authorizer_len);
	end += size;
	ret = read_partial(con, end, size, con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}

/*
 * Verify the hello banner looks okay.
 */
1768 */ 1769 static int verify_hello(struct ceph_connection *con) 1770 { 1771 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) { 1772 pr_err("connect to %s got bad banner\n", 1773 ceph_pr_addr(&con->peer_addr.in_addr)); 1774 con->error_msg = "protocol error, bad banner"; 1775 return -1; 1776 } 1777 return 0; 1778 } 1779 1780 static bool addr_is_blank(struct sockaddr_storage *ss) 1781 { 1782 struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr; 1783 struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr; 1784 1785 switch (ss->ss_family) { 1786 case AF_INET: 1787 return addr->s_addr == htonl(INADDR_ANY); 1788 case AF_INET6: 1789 return ipv6_addr_any(addr6); 1790 default: 1791 return true; 1792 } 1793 } 1794 1795 static int addr_port(struct sockaddr_storage *ss) 1796 { 1797 switch (ss->ss_family) { 1798 case AF_INET: 1799 return ntohs(((struct sockaddr_in *)ss)->sin_port); 1800 case AF_INET6: 1801 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port); 1802 } 1803 return 0; 1804 } 1805 1806 static void addr_set_port(struct sockaddr_storage *ss, int p) 1807 { 1808 switch (ss->ss_family) { 1809 case AF_INET: 1810 ((struct sockaddr_in *)ss)->sin_port = htons(p); 1811 break; 1812 case AF_INET6: 1813 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p); 1814 break; 1815 } 1816 } 1817 1818 /* 1819 * Unlike other *_pton function semantics, zero indicates success. 1820 */ 1821 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss, 1822 char delim, const char **ipend) 1823 { 1824 struct sockaddr_in *in4 = (struct sockaddr_in *) ss; 1825 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss; 1826 1827 memset(ss, 0, sizeof(*ss)); 1828 1829 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) { 1830 ss->ss_family = AF_INET; 1831 return 0; 1832 } 1833 1834 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) { 1835 ss->ss_family = AF_INET6; 1836 return 0; 1837 } 1838 1839 return -EINVAL; 1840 } 1841 1842 /* 1843 * Extract hostname string and resolve using kernel DNS facility. 1844 */ 1845 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER 1846 static int ceph_dns_resolve_name(const char *name, size_t namelen, 1847 struct sockaddr_storage *ss, char delim, const char **ipend) 1848 { 1849 const char *end, *delim_p; 1850 char *colon_p, *ip_addr = NULL; 1851 int ip_len, ret; 1852 1853 /* 1854 * The end of the hostname occurs immediately preceding the delimiter or 1855 * the port marker (':') where the delimiter takes precedence. 1856 */ 1857 delim_p = memchr(name, delim, namelen); 1858 colon_p = memchr(name, ':', namelen); 1859 1860 if (delim_p && colon_p) 1861 end = delim_p < colon_p ? delim_p : colon_p; 1862 else if (!delim_p && colon_p) 1863 end = colon_p; 1864 else { 1865 end = delim_p; 1866 if (!end) /* case: hostname:/ */ 1867 end = name + namelen; 1868 } 1869 1870 if (end <= name) 1871 return -EINVAL; 1872 1873 /* do dns_resolve upcall */ 1874 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL); 1875 if (ip_len > 0) 1876 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL); 1877 else 1878 ret = -ESRCH; 1879 1880 kfree(ip_addr); 1881 1882 *ipend = end; 1883 1884 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name, 1885 ret, ret ? 
"failed" : ceph_pr_addr(ss)); 1886 1887 return ret; 1888 } 1889 #else 1890 static inline int ceph_dns_resolve_name(const char *name, size_t namelen, 1891 struct sockaddr_storage *ss, char delim, const char **ipend) 1892 { 1893 return -EINVAL; 1894 } 1895 #endif 1896 1897 /* 1898 * Parse a server name (IP or hostname). If a valid IP address is not found 1899 * then try to extract a hostname to resolve using userspace DNS upcall. 1900 */ 1901 static int ceph_parse_server_name(const char *name, size_t namelen, 1902 struct sockaddr_storage *ss, char delim, const char **ipend) 1903 { 1904 int ret; 1905 1906 ret = ceph_pton(name, namelen, ss, delim, ipend); 1907 if (ret) 1908 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend); 1909 1910 return ret; 1911 } 1912 1913 /* 1914 * Parse an ip[:port] list into an addr array. Use the default 1915 * monitor port if a port isn't specified. 1916 */ 1917 int ceph_parse_ips(const char *c, const char *end, 1918 struct ceph_entity_addr *addr, 1919 int max_count, int *count) 1920 { 1921 int i, ret = -EINVAL; 1922 const char *p = c; 1923 1924 dout("parse_ips on '%.*s'\n", (int)(end-c), c); 1925 for (i = 0; i < max_count; i++) { 1926 const char *ipend; 1927 struct sockaddr_storage *ss = &addr[i].in_addr; 1928 int port; 1929 char delim = ','; 1930 1931 if (*p == '[') { 1932 delim = ']'; 1933 p++; 1934 } 1935 1936 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend); 1937 if (ret) 1938 goto bad; 1939 ret = -EINVAL; 1940 1941 p = ipend; 1942 1943 if (delim == ']') { 1944 if (*p != ']') { 1945 dout("missing matching ']'\n"); 1946 goto bad; 1947 } 1948 p++; 1949 } 1950 1951 /* port? */ 1952 if (p < end && *p == ':') { 1953 port = 0; 1954 p++; 1955 while (p < end && *p >= '0' && *p <= '9') { 1956 port = (port * 10) + (*p - '0'); 1957 p++; 1958 } 1959 if (port == 0) 1960 port = CEPH_MON_PORT; 1961 else if (port > 65535) 1962 goto bad; 1963 } else { 1964 port = CEPH_MON_PORT; 1965 } 1966 1967 addr_set_port(ss, port); 1968 1969 dout("parse_ips got %s\n", ceph_pr_addr(ss)); 1970 1971 if (p == end) 1972 break; 1973 if (*p != ',') 1974 goto bad; 1975 p++; 1976 } 1977 1978 if (p != end) 1979 goto bad; 1980 1981 if (count) 1982 *count = i + 1; 1983 return 0; 1984 1985 bad: 1986 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); 1987 return ret; 1988 } 1989 EXPORT_SYMBOL(ceph_parse_ips); 1990 1991 static int process_banner(struct ceph_connection *con) 1992 { 1993 dout("process_banner on %p\n", con); 1994 1995 if (verify_hello(con) < 0) 1996 return -1; 1997 1998 ceph_decode_addr(&con->actual_peer_addr); 1999 ceph_decode_addr(&con->peer_addr_for_me); 2000 2001 /* 2002 * Make sure the other end is who we wanted. note that the other 2003 * end may not yet know their ip address, so if it's 0.0.0.0, give 2004 * them the benefit of the doubt. 2005 */ 2006 if (memcmp(&con->peer_addr, &con->actual_peer_addr, 2007 sizeof(con->peer_addr)) != 0 && 2008 !(addr_is_blank(&con->actual_peer_addr.in_addr) && 2009 con->actual_peer_addr.nonce == con->peer_addr.nonce)) { 2010 pr_warn("wrong peer, want %s/%d, got %s/%d\n", 2011 ceph_pr_addr(&con->peer_addr.in_addr), 2012 (int)le32_to_cpu(con->peer_addr.nonce), 2013 ceph_pr_addr(&con->actual_peer_addr.in_addr), 2014 (int)le32_to_cpu(con->actual_peer_addr.nonce)); 2015 con->error_msg = "wrong peer at address"; 2016 return -1; 2017 } 2018 2019 /* 2020 * did we learn our address? 
2021  */
2022 	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
2023 		int port = addr_port(&con->msgr->inst.addr.in_addr);
2024 
2025 		memcpy(&con->msgr->inst.addr.in_addr,
2026 		       &con->peer_addr_for_me.in_addr,
2027 		       sizeof(con->peer_addr_for_me.in_addr));
2028 		addr_set_port(&con->msgr->inst.addr.in_addr, port);
2029 		encode_my_addr(con->msgr);
2030 		dout("process_banner learned my addr is %s\n",
2031 		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
2032 	}
2033 
2034 	return 0;
2035 }
2036 
2037 static int process_connect(struct ceph_connection *con)
2038 {
2039 	u64 sup_feat = from_msgr(con->msgr)->supported_features;
2040 	u64 req_feat = from_msgr(con->msgr)->required_features;
2041 	u64 server_feat = le64_to_cpu(con->in_reply.features);
2042 	int ret;
2043 
2044 	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
2045 
2046 	if (con->auth_reply_buf) {
2047 		/*
2048 		 * Any connection that defines ->get_authorizer()
2049 		 * should also define ->verify_authorizer_reply().
2050 		 * See get_connect_authorizer().
2051 		 */
2052 		ret = con->ops->verify_authorizer_reply(con);
2053 		if (ret < 0) {
2054 			con->error_msg = "bad authorize reply";
2055 			return ret;
2056 		}
2057 	}
2058 
2059 	switch (con->in_reply.tag) {
2060 	case CEPH_MSGR_TAG_FEATURES:
2061 		pr_err("%s%lld %s feature set mismatch,"
2062 		       " my %llx < server's %llx, missing %llx\n",
2063 		       ENTITY_NAME(con->peer_name),
2064 		       ceph_pr_addr(&con->peer_addr.in_addr),
2065 		       sup_feat, server_feat, server_feat & ~sup_feat);
2066 		con->error_msg = "missing required protocol features";
2067 		reset_connection(con);
2068 		return -1;
2069 
2070 	case CEPH_MSGR_TAG_BADPROTOVER:
2071 		pr_err("%s%lld %s protocol version mismatch,"
2072 		       " my %d != server's %d\n",
2073 		       ENTITY_NAME(con->peer_name),
2074 		       ceph_pr_addr(&con->peer_addr.in_addr),
2075 		       le32_to_cpu(con->out_connect.protocol_version),
2076 		       le32_to_cpu(con->in_reply.protocol_version));
2077 		con->error_msg = "protocol version mismatch";
2078 		reset_connection(con);
2079 		return -1;
2080 
2081 	case CEPH_MSGR_TAG_BADAUTHORIZER:
2082 		con->auth_retry++;
2083 		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
2084 		     con->auth_retry);
2085 		if (con->auth_retry == 2) {
2086 			con->error_msg = "connect authorization failure";
2087 			return -1;
2088 		}
2089 		con_out_kvec_reset(con);
2090 		ret = prepare_write_connect(con);
2091 		if (ret < 0)
2092 			return ret;
2093 		prepare_read_connect(con);
2094 		break;
2095 
2096 	case CEPH_MSGR_TAG_RESETSESSION:
2097 		/*
2098 		 * If we connected with a large connect_seq but the peer
2099 		 * has no record of a session with us (no connection, or
2100 		 * connect_seq == 0), they will send RESETSESSION to indicate
2101 		 * that they must have reset their session, and may have
2102 		 * dropped messages.
2103 		 */
2104 		dout("process_connect got RESET peer seq %u\n",
2105 		     le32_to_cpu(con->in_reply.connect_seq));
2106 		pr_err("%s%lld %s connection reset\n",
2107 		       ENTITY_NAME(con->peer_name),
2108 		       ceph_pr_addr(&con->peer_addr.in_addr));
2109 		reset_connection(con);
2110 		con_out_kvec_reset(con);
2111 		ret = prepare_write_connect(con);
2112 		if (ret < 0)
2113 			return ret;
2114 		prepare_read_connect(con);
2115 
2116 		/* Tell ceph about it.
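		 * Drop con->mutex around the peer_reset callback so
		 * that it can safely call back into the messenger
		 * (e.g. to resubmit requests).  The connection state
		 * may change while the mutex is dropped, hence the
		 * re-check below; -EAGAIN makes the caller restart.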
*/ 2117 mutex_unlock(&con->mutex); 2118 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name)); 2119 if (con->ops->peer_reset) 2120 con->ops->peer_reset(con); 2121 mutex_lock(&con->mutex); 2122 if (con->state != CON_STATE_NEGOTIATING) 2123 return -EAGAIN; 2124 break; 2125 2126 case CEPH_MSGR_TAG_RETRY_SESSION: 2127 /* 2128 * If we sent a smaller connect_seq than the peer has, try 2129 * again with a larger value. 2130 */ 2131 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n", 2132 le32_to_cpu(con->out_connect.connect_seq), 2133 le32_to_cpu(con->in_reply.connect_seq)); 2134 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); 2135 con_out_kvec_reset(con); 2136 ret = prepare_write_connect(con); 2137 if (ret < 0) 2138 return ret; 2139 prepare_read_connect(con); 2140 break; 2141 2142 case CEPH_MSGR_TAG_RETRY_GLOBAL: 2143 /* 2144 * If we sent a smaller global_seq than the peer has, try 2145 * again with a larger value. 2146 */ 2147 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n", 2148 con->peer_global_seq, 2149 le32_to_cpu(con->in_reply.global_seq)); 2150 get_global_seq(con->msgr, 2151 le32_to_cpu(con->in_reply.global_seq)); 2152 con_out_kvec_reset(con); 2153 ret = prepare_write_connect(con); 2154 if (ret < 0) 2155 return ret; 2156 prepare_read_connect(con); 2157 break; 2158 2159 case CEPH_MSGR_TAG_SEQ: 2160 case CEPH_MSGR_TAG_READY: 2161 if (req_feat & ~server_feat) { 2162 pr_err("%s%lld %s protocol feature mismatch," 2163 " my required %llx > server's %llx, need %llx\n", 2164 ENTITY_NAME(con->peer_name), 2165 ceph_pr_addr(&con->peer_addr.in_addr), 2166 req_feat, server_feat, req_feat & ~server_feat); 2167 con->error_msg = "missing required protocol features"; 2168 reset_connection(con); 2169 return -1; 2170 } 2171 2172 WARN_ON(con->state != CON_STATE_NEGOTIATING); 2173 con->state = CON_STATE_OPEN; 2174 con->auth_retry = 0; /* we authenticated; clear flag */ 2175 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); 2176 con->connect_seq++; 2177 con->peer_features = server_feat; 2178 dout("process_connect got READY gseq %d cseq %d (%d)\n", 2179 con->peer_global_seq, 2180 le32_to_cpu(con->in_reply.connect_seq), 2181 con->connect_seq); 2182 WARN_ON(con->connect_seq != 2183 le32_to_cpu(con->in_reply.connect_seq)); 2184 2185 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) 2186 con_flag_set(con, CON_FLAG_LOSSYTX); 2187 2188 con->delay = 0; /* reset backoff memory */ 2189 2190 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) { 2191 prepare_write_seq(con); 2192 prepare_read_seq(con); 2193 } else { 2194 prepare_read_tag(con); 2195 } 2196 break; 2197 2198 case CEPH_MSGR_TAG_WAIT: 2199 /* 2200 * If there is a connection race (we are opening 2201 * connections to each other), one of us may just have 2202 * to WAIT. This shouldn't happen if we are the 2203 * client. 2204 */ 2205 con->error_msg = "protocol error, got WAIT as client"; 2206 return -1; 2207 2208 default: 2209 con->error_msg = "protocol error, garbage tag during connect"; 2210 return -1; 2211 } 2212 return 0; 2213 } 2214 2215 2216 /* 2217 * read (part of) an ack 2218 */ 2219 static int read_partial_ack(struct ceph_connection *con) 2220 { 2221 int size = sizeof (con->in_temp_ack); 2222 int end = size; 2223 2224 return read_partial(con, end, size, &con->in_temp_ack); 2225 } 2226 2227 /* 2228 * We can finally discard anything that's been acked. 
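 * The ack carries the highest message seq the peer has received;
 * everything queued on out_sent up to and including that seq can be
 * freed.  On a reconnect (TAG_SEQ) we instead walk out_queue, where
 * con_fault() parked the unacked messages, and trim the ones the
 * peer had already seen.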
2229  */
2230 static void process_ack(struct ceph_connection *con)
2231 {
2232 	struct ceph_msg *m;
2233 	u64 ack = le64_to_cpu(con->in_temp_ack);
2234 	u64 seq;
2235 	bool reconnect = (con->in_tag == CEPH_MSGR_TAG_SEQ);
2236 	struct list_head *list = reconnect ? &con->out_queue : &con->out_sent;
2237 
2238 	/*
2239 	 * In the reconnect case, con_fault() has requeued messages
2240 	 * in out_sent. We should clean up old messages according to
2241 	 * the reconnect seq.
2242 	 */
2243 	while (!list_empty(list)) {
2244 		m = list_first_entry(list, struct ceph_msg, list_head);
2245 		if (reconnect && m->needs_out_seq)
2246 			break;
2247 		seq = le64_to_cpu(m->hdr.seq);
2248 		if (seq > ack)
2249 			break;
2250 		dout("got ack for seq %llu type %d at %p\n", seq,
2251 		     le16_to_cpu(m->hdr.type), m);
2252 		m->ack_stamp = jiffies;
2253 		ceph_msg_remove(m);
2254 	}
2255 
2256 	prepare_read_tag(con);
2257 }
2258 
2259 
2260 static int read_partial_message_section(struct ceph_connection *con,
2261 					struct kvec *section,
2262 					unsigned int sec_len, u32 *crc)
2263 {
2264 	int ret, left;
2265 
2266 	BUG_ON(!section);
2267 
2268 	while (section->iov_len < sec_len) {
2269 		BUG_ON(section->iov_base == NULL);
2270 		left = sec_len - section->iov_len;
2271 		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
2272 				       section->iov_len, left);
2273 		if (ret <= 0)
2274 			return ret;
2275 		section->iov_len += ret;
2276 	}
2277 	if (section->iov_len == sec_len)
2278 		*crc = crc32c(0, section->iov_base, section->iov_len);
2279 
2280 	return 1;
2281 }
2282 
2283 static int read_partial_msg_data(struct ceph_connection *con)
2284 {
2285 	struct ceph_msg *msg = con->in_msg;
2286 	struct ceph_msg_data_cursor *cursor = &msg->cursor;
2287 	bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
2288 	struct page *page;
2289 	size_t page_offset;
2290 	size_t length;
2291 	u32 crc = 0;
2292 	int ret;
2293 
2294 	BUG_ON(!msg);
2295 	if (list_empty(&msg->data))
2296 		return -EIO;
2297 
2298 	if (do_datacrc)
2299 		crc = con->in_data_crc;
2300 	while (cursor->resid) {
2301 		page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
2302 		ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
2303 		if (ret <= 0) {
2304 			if (do_datacrc)
2305 				con->in_data_crc = crc;
2306 
2307 			return ret;
2308 		}
2309 
2310 		if (do_datacrc)
2311 			crc = ceph_crc32c_page(crc, page, page_offset, ret);
2312 		ceph_msg_data_advance(cursor, (size_t)ret);
2313 	}
2314 	if (do_datacrc)
2315 		con->in_data_crc = crc;
2316 
2317 	return 1;	/* must return > 0 to indicate success */
2318 }
2319 
2320 /*
2321  * read (part of) a message.
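 * A message arrives in stages: header, front, optional middle,
 * optional data payload, footer.  Each read_partial_* helper returns
 * <= 0 while more socket data is needed (or on error) and 1 once its
 * piece is complete, so read_partial_message() is simply re-entered
 * until the whole message is in.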
2322 */ 2323 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); 2324 2325 static int read_partial_message(struct ceph_connection *con) 2326 { 2327 struct ceph_msg *m = con->in_msg; 2328 int size; 2329 int end; 2330 int ret; 2331 unsigned int front_len, middle_len, data_len; 2332 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC); 2333 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH); 2334 u64 seq; 2335 u32 crc; 2336 2337 dout("read_partial_message con %p msg %p\n", con, m); 2338 2339 /* header */ 2340 size = sizeof (con->in_hdr); 2341 end = size; 2342 ret = read_partial(con, end, size, &con->in_hdr); 2343 if (ret <= 0) 2344 return ret; 2345 2346 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc)); 2347 if (cpu_to_le32(crc) != con->in_hdr.crc) { 2348 pr_err("read_partial_message bad hdr crc %u != expected %u\n", 2349 crc, con->in_hdr.crc); 2350 return -EBADMSG; 2351 } 2352 2353 front_len = le32_to_cpu(con->in_hdr.front_len); 2354 if (front_len > CEPH_MSG_MAX_FRONT_LEN) 2355 return -EIO; 2356 middle_len = le32_to_cpu(con->in_hdr.middle_len); 2357 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN) 2358 return -EIO; 2359 data_len = le32_to_cpu(con->in_hdr.data_len); 2360 if (data_len > CEPH_MSG_MAX_DATA_LEN) 2361 return -EIO; 2362 2363 /* verify seq# */ 2364 seq = le64_to_cpu(con->in_hdr.seq); 2365 if ((s64)seq - (s64)con->in_seq < 1) { 2366 pr_info("skipping %s%lld %s seq %lld expected %lld\n", 2367 ENTITY_NAME(con->peer_name), 2368 ceph_pr_addr(&con->peer_addr.in_addr), 2369 seq, con->in_seq + 1); 2370 con->in_base_pos = -front_len - middle_len - data_len - 2371 sizeof_footer(con); 2372 con->in_tag = CEPH_MSGR_TAG_READY; 2373 return 1; 2374 } else if ((s64)seq - (s64)con->in_seq > 1) { 2375 pr_err("read_partial_message bad seq %lld expected %lld\n", 2376 seq, con->in_seq + 1); 2377 con->error_msg = "bad message sequence # for incoming message"; 2378 return -EBADE; 2379 } 2380 2381 /* allocate message? 
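 * ceph_con_in_msg_alloc() asks the connection's alloc_msg op for a
 * buffer; the op may instead tell us to skip this message, in which
 * case in_base_pos goes negative and try_read() discards the body.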
*/ 2382 if (!con->in_msg) { 2383 int skip = 0; 2384 2385 dout("got hdr type %d front %d data %d\n", con->in_hdr.type, 2386 front_len, data_len); 2387 ret = ceph_con_in_msg_alloc(con, &skip); 2388 if (ret < 0) 2389 return ret; 2390 2391 BUG_ON(!con->in_msg ^ skip); 2392 if (skip) { 2393 /* skip this message */ 2394 dout("alloc_msg said skip message\n"); 2395 con->in_base_pos = -front_len - middle_len - data_len - 2396 sizeof_footer(con); 2397 con->in_tag = CEPH_MSGR_TAG_READY; 2398 con->in_seq++; 2399 return 1; 2400 } 2401 2402 BUG_ON(!con->in_msg); 2403 BUG_ON(con->in_msg->con != con); 2404 m = con->in_msg; 2405 m->front.iov_len = 0; /* haven't read it yet */ 2406 if (m->middle) 2407 m->middle->vec.iov_len = 0; 2408 2409 /* prepare for data payload, if any */ 2410 2411 if (data_len) 2412 prepare_message_data(con->in_msg, data_len); 2413 } 2414 2415 /* front */ 2416 ret = read_partial_message_section(con, &m->front, front_len, 2417 &con->in_front_crc); 2418 if (ret <= 0) 2419 return ret; 2420 2421 /* middle */ 2422 if (m->middle) { 2423 ret = read_partial_message_section(con, &m->middle->vec, 2424 middle_len, 2425 &con->in_middle_crc); 2426 if (ret <= 0) 2427 return ret; 2428 } 2429 2430 /* (page) data */ 2431 if (data_len) { 2432 ret = read_partial_msg_data(con); 2433 if (ret <= 0) 2434 return ret; 2435 } 2436 2437 /* footer */ 2438 size = sizeof_footer(con); 2439 end += size; 2440 ret = read_partial(con, end, size, &m->footer); 2441 if (ret <= 0) 2442 return ret; 2443 2444 if (!need_sign) { 2445 m->footer.flags = m->old_footer.flags; 2446 m->footer.sig = 0; 2447 } 2448 2449 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n", 2450 m, front_len, m->footer.front_crc, middle_len, 2451 m->footer.middle_crc, data_len, m->footer.data_crc); 2452 2453 /* crc ok? */ 2454 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) { 2455 pr_err("read_partial_message %p front crc %u != exp. %u\n", 2456 m, con->in_front_crc, m->footer.front_crc); 2457 return -EBADMSG; 2458 } 2459 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) { 2460 pr_err("read_partial_message %p middle crc %u != exp %u\n", 2461 m, con->in_middle_crc, m->footer.middle_crc); 2462 return -EBADMSG; 2463 } 2464 if (do_datacrc && 2465 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 && 2466 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) { 2467 pr_err("read_partial_message %p data crc %u != exp. %u\n", m, 2468 con->in_data_crc, le32_to_cpu(m->footer.data_crc)); 2469 return -EBADMSG; 2470 } 2471 2472 if (need_sign && con->ops->check_message_signature && 2473 con->ops->check_message_signature(m)) { 2474 pr_err("read_partial_message %p signature check failed\n", m); 2475 return -EBADMSG; 2476 } 2477 2478 return 1; /* done! */ 2479 } 2480 2481 /* 2482 * Process message. This happens in the worker thread. The callback should 2483 * be careful not to do anything that waits on other incoming messages or it 2484 * may deadlock. 
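 *
 * A dispatch op thus runs without con->mutex and owns the message
 * reference.  Illustrative sketch only (the real handlers live in
 * the mon/osd/mds clients):
 *
 *	static void my_dispatch(struct ceph_connection *con,
 *				struct ceph_msg *msg)
 *	{
 *		...handle msg here; must not wait on other
 *		   incoming messages from this connection...
 *		ceph_msg_put(msg);	(drop the ref we were handed)
 *	}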
2485 */ 2486 static void process_message(struct ceph_connection *con) 2487 { 2488 struct ceph_msg *msg = con->in_msg; 2489 2490 BUG_ON(con->in_msg->con != con); 2491 con->in_msg = NULL; 2492 2493 /* if first message, set peer_name */ 2494 if (con->peer_name.type == 0) 2495 con->peer_name = msg->hdr.src; 2496 2497 con->in_seq++; 2498 mutex_unlock(&con->mutex); 2499 2500 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n", 2501 msg, le64_to_cpu(msg->hdr.seq), 2502 ENTITY_NAME(msg->hdr.src), 2503 le16_to_cpu(msg->hdr.type), 2504 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2505 le32_to_cpu(msg->hdr.front_len), 2506 le32_to_cpu(msg->hdr.data_len), 2507 con->in_front_crc, con->in_middle_crc, con->in_data_crc); 2508 con->ops->dispatch(con, msg); 2509 2510 mutex_lock(&con->mutex); 2511 } 2512 2513 static int read_keepalive_ack(struct ceph_connection *con) 2514 { 2515 struct ceph_timespec ceph_ts; 2516 size_t size = sizeof(ceph_ts); 2517 int ret = read_partial(con, size, size, &ceph_ts); 2518 if (ret <= 0) 2519 return ret; 2520 ceph_decode_timespec(&con->last_keepalive_ack, &ceph_ts); 2521 prepare_read_tag(con); 2522 return 1; 2523 } 2524 2525 /* 2526 * Write something to the socket. Called in a worker thread when the 2527 * socket appears to be writeable and we have something ready to send. 2528 */ 2529 static int try_write(struct ceph_connection *con) 2530 { 2531 int ret = 1; 2532 2533 dout("try_write start %p state %lu\n", con, con->state); 2534 2535 more: 2536 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); 2537 2538 /* open the socket first? */ 2539 if (con->state == CON_STATE_PREOPEN) { 2540 BUG_ON(con->sock); 2541 con->state = CON_STATE_CONNECTING; 2542 2543 con_out_kvec_reset(con); 2544 prepare_write_banner(con); 2545 prepare_read_banner(con); 2546 2547 BUG_ON(con->in_msg); 2548 con->in_tag = CEPH_MSGR_TAG_READY; 2549 dout("try_write initiating connect on %p new state %lu\n", 2550 con, con->state); 2551 ret = ceph_tcp_connect(con); 2552 if (ret < 0) { 2553 con->error_msg = "connect error"; 2554 goto out; 2555 } 2556 } 2557 2558 more_kvec: 2559 /* kvec data queued? */ 2560 if (con->out_kvec_left) { 2561 ret = write_partial_kvec(con); 2562 if (ret <= 0) 2563 goto out; 2564 } 2565 if (con->out_skip) { 2566 ret = write_partial_skip(con); 2567 if (ret <= 0) 2568 goto out; 2569 } 2570 2571 /* msg pages? */ 2572 if (con->out_msg) { 2573 if (con->out_msg_done) { 2574 ceph_msg_put(con->out_msg); 2575 con->out_msg = NULL; /* we're done with this one */ 2576 goto do_next; 2577 } 2578 2579 ret = write_partial_message_data(con); 2580 if (ret == 1) 2581 goto more_kvec; /* we need to send the footer, too! */ 2582 if (ret == 0) 2583 goto out; 2584 if (ret < 0) { 2585 dout("try_write write_partial_message_data err %d\n", 2586 ret); 2587 goto out; 2588 } 2589 } 2590 2591 do_next: 2592 if (con->state == CON_STATE_OPEN) { 2593 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) { 2594 prepare_write_keepalive(con); 2595 goto more; 2596 } 2597 /* is anything else pending? */ 2598 if (!list_empty(&con->out_queue)) { 2599 prepare_write_message(con); 2600 goto more; 2601 } 2602 if (con->in_seq > con->in_seq_acked) { 2603 prepare_write_ack(con); 2604 goto more; 2605 } 2606 } 2607 2608 /* Nothing to do! */ 2609 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 2610 dout("try_write nothing else to write.\n"); 2611 ret = 0; 2612 out: 2613 dout("try_write done on %p ret %d\n", con, ret); 2614 return ret; 2615 } 2616 2617 2618 2619 /* 2620 * Read what we can from the socket. 
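 * try_read() is a small state machine: in CONNECTING it consumes the
 * peer's banner, in NEGOTIATING it processes the connect reply, and
 * once OPEN it is driven by the tag bytes the peer sends (message,
 * ack, keepalive ack, close).  It loops until the socket runs dry or
 * an error occurs.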
2621 */ 2622 static int try_read(struct ceph_connection *con) 2623 { 2624 int ret = -1; 2625 2626 more: 2627 dout("try_read start on %p state %lu\n", con, con->state); 2628 if (con->state != CON_STATE_CONNECTING && 2629 con->state != CON_STATE_NEGOTIATING && 2630 con->state != CON_STATE_OPEN) 2631 return 0; 2632 2633 BUG_ON(!con->sock); 2634 2635 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, 2636 con->in_base_pos); 2637 2638 if (con->state == CON_STATE_CONNECTING) { 2639 dout("try_read connecting\n"); 2640 ret = read_partial_banner(con); 2641 if (ret <= 0) 2642 goto out; 2643 ret = process_banner(con); 2644 if (ret < 0) 2645 goto out; 2646 2647 con->state = CON_STATE_NEGOTIATING; 2648 2649 /* 2650 * Received banner is good, exchange connection info. 2651 * Do not reset out_kvec, as sending our banner raced 2652 * with receiving peer banner after connect completed. 2653 */ 2654 ret = prepare_write_connect(con); 2655 if (ret < 0) 2656 goto out; 2657 prepare_read_connect(con); 2658 2659 /* Send connection info before awaiting response */ 2660 goto out; 2661 } 2662 2663 if (con->state == CON_STATE_NEGOTIATING) { 2664 dout("try_read negotiating\n"); 2665 ret = read_partial_connect(con); 2666 if (ret <= 0) 2667 goto out; 2668 ret = process_connect(con); 2669 if (ret < 0) 2670 goto out; 2671 goto more; 2672 } 2673 2674 WARN_ON(con->state != CON_STATE_OPEN); 2675 2676 if (con->in_base_pos < 0) { 2677 /* 2678 * skipping + discarding content. 2679 * 2680 * FIXME: there must be a better way to do this! 2681 */ 2682 static char buf[SKIP_BUF_SIZE]; 2683 int skip = min((int) sizeof (buf), -con->in_base_pos); 2684 2685 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); 2686 ret = ceph_tcp_recvmsg(con->sock, buf, skip); 2687 if (ret <= 0) 2688 goto out; 2689 con->in_base_pos += ret; 2690 if (con->in_base_pos) 2691 goto more; 2692 } 2693 if (con->in_tag == CEPH_MSGR_TAG_READY) { 2694 /* 2695 * what's next? 
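 * Read a single tag byte to decide: MSG starts a new incoming
 * message, ACK acknowledges our sent messages, KEEPALIVE2_ACK
 * carries the keepalive timestamp echo, and CLOSE shuts the
 * connection down.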
2696 */ 2697 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); 2698 if (ret <= 0) 2699 goto out; 2700 dout("try_read got tag %d\n", (int)con->in_tag); 2701 switch (con->in_tag) { 2702 case CEPH_MSGR_TAG_MSG: 2703 prepare_read_message(con); 2704 break; 2705 case CEPH_MSGR_TAG_ACK: 2706 prepare_read_ack(con); 2707 break; 2708 case CEPH_MSGR_TAG_KEEPALIVE2_ACK: 2709 prepare_read_keepalive_ack(con); 2710 break; 2711 case CEPH_MSGR_TAG_CLOSE: 2712 con_close_socket(con); 2713 con->state = CON_STATE_CLOSED; 2714 goto out; 2715 default: 2716 goto bad_tag; 2717 } 2718 } 2719 if (con->in_tag == CEPH_MSGR_TAG_MSG) { 2720 ret = read_partial_message(con); 2721 if (ret <= 0) { 2722 switch (ret) { 2723 case -EBADMSG: 2724 con->error_msg = "bad crc/signature"; 2725 /* fall through */ 2726 case -EBADE: 2727 ret = -EIO; 2728 break; 2729 case -EIO: 2730 con->error_msg = "io error"; 2731 break; 2732 } 2733 goto out; 2734 } 2735 if (con->in_tag == CEPH_MSGR_TAG_READY) 2736 goto more; 2737 process_message(con); 2738 if (con->state == CON_STATE_OPEN) 2739 prepare_read_tag(con); 2740 goto more; 2741 } 2742 if (con->in_tag == CEPH_MSGR_TAG_ACK || 2743 con->in_tag == CEPH_MSGR_TAG_SEQ) { 2744 /* 2745 * the final handshake seq exchange is semantically 2746 * equivalent to an ACK 2747 */ 2748 ret = read_partial_ack(con); 2749 if (ret <= 0) 2750 goto out; 2751 process_ack(con); 2752 goto more; 2753 } 2754 if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) { 2755 ret = read_keepalive_ack(con); 2756 if (ret <= 0) 2757 goto out; 2758 goto more; 2759 } 2760 2761 out: 2762 dout("try_read done on %p ret %d\n", con, ret); 2763 return ret; 2764 2765 bad_tag: 2766 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag); 2767 con->error_msg = "protocol error, garbage tag"; 2768 ret = -1; 2769 goto out; 2770 } 2771 2772 2773 /* 2774 * Atomically queue work on a connection after the specified delay. 2775 * Bump @con reference to avoid races with connection teardown. 2776 * Returns 0 if work was queued, or an error code otherwise. 
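 * -ENOENT means the connection is already being torn down (we could
 * not get a reference); -EBUSY means the work was already queued.
 * Neither case leaves an extra reference behind.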
2777 */ 2778 static int queue_con_delay(struct ceph_connection *con, unsigned long delay) 2779 { 2780 if (!con->ops->get(con)) { 2781 dout("%s %p ref count 0\n", __func__, con); 2782 return -ENOENT; 2783 } 2784 2785 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) { 2786 dout("%s %p - already queued\n", __func__, con); 2787 con->ops->put(con); 2788 return -EBUSY; 2789 } 2790 2791 dout("%s %p %lu\n", __func__, con, delay); 2792 return 0; 2793 } 2794 2795 static void queue_con(struct ceph_connection *con) 2796 { 2797 (void) queue_con_delay(con, 0); 2798 } 2799 2800 static void cancel_con(struct ceph_connection *con) 2801 { 2802 if (cancel_delayed_work(&con->work)) { 2803 dout("%s %p\n", __func__, con); 2804 con->ops->put(con); 2805 } 2806 } 2807 2808 static bool con_sock_closed(struct ceph_connection *con) 2809 { 2810 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED)) 2811 return false; 2812 2813 #define CASE(x) \ 2814 case CON_STATE_ ## x: \ 2815 con->error_msg = "socket closed (con state " #x ")"; \ 2816 break; 2817 2818 switch (con->state) { 2819 CASE(CLOSED); 2820 CASE(PREOPEN); 2821 CASE(CONNECTING); 2822 CASE(NEGOTIATING); 2823 CASE(OPEN); 2824 CASE(STANDBY); 2825 default: 2826 pr_warn("%s con %p unrecognized state %lu\n", 2827 __func__, con, con->state); 2828 con->error_msg = "unrecognized con state"; 2829 BUG(); 2830 break; 2831 } 2832 #undef CASE 2833 2834 return true; 2835 } 2836 2837 static bool con_backoff(struct ceph_connection *con) 2838 { 2839 int ret; 2840 2841 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF)) 2842 return false; 2843 2844 ret = queue_con_delay(con, round_jiffies_relative(con->delay)); 2845 if (ret) { 2846 dout("%s: con %p FAILED to back off %lu\n", __func__, 2847 con, con->delay); 2848 BUG_ON(ret == -ENOENT); 2849 con_flag_set(con, CON_FLAG_BACKOFF); 2850 } 2851 2852 return true; 2853 } 2854 2855 /* Finish fault handling; con->mutex must *not* be held here */ 2856 2857 static void con_fault_finish(struct ceph_connection *con) 2858 { 2859 dout("%s %p\n", __func__, con); 2860 2861 /* 2862 * in case we faulted due to authentication, invalidate our 2863 * current tickets so that we can get new ones. 2864 */ 2865 if (con->auth_retry) { 2866 dout("auth_retry %d, invalidating\n", con->auth_retry); 2867 if (con->ops->invalidate_authorizer) 2868 con->ops->invalidate_authorizer(con); 2869 con->auth_retry = 0; 2870 } 2871 2872 if (con->ops->fault) 2873 con->ops->fault(con); 2874 } 2875 2876 /* 2877 * Do some work on a connection. Drop a connection ref when we're done. 
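 * The loop below first handles deferred conditions (socket closed,
 * backoff, STANDBY/CLOSED states), then pumps try_read() and
 * try_write().  -EAGAIN from either restarts the loop; any other
 * error marks a fault, which is finished off outside con->mutex.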
2878 */ 2879 static void ceph_con_workfn(struct work_struct *work) 2880 { 2881 struct ceph_connection *con = container_of(work, struct ceph_connection, 2882 work.work); 2883 bool fault; 2884 2885 mutex_lock(&con->mutex); 2886 while (true) { 2887 int ret; 2888 2889 if ((fault = con_sock_closed(con))) { 2890 dout("%s: con %p SOCK_CLOSED\n", __func__, con); 2891 break; 2892 } 2893 if (con_backoff(con)) { 2894 dout("%s: con %p BACKOFF\n", __func__, con); 2895 break; 2896 } 2897 if (con->state == CON_STATE_STANDBY) { 2898 dout("%s: con %p STANDBY\n", __func__, con); 2899 break; 2900 } 2901 if (con->state == CON_STATE_CLOSED) { 2902 dout("%s: con %p CLOSED\n", __func__, con); 2903 BUG_ON(con->sock); 2904 break; 2905 } 2906 if (con->state == CON_STATE_PREOPEN) { 2907 dout("%s: con %p PREOPEN\n", __func__, con); 2908 BUG_ON(con->sock); 2909 } 2910 2911 ret = try_read(con); 2912 if (ret < 0) { 2913 if (ret == -EAGAIN) 2914 continue; 2915 if (!con->error_msg) 2916 con->error_msg = "socket error on read"; 2917 fault = true; 2918 break; 2919 } 2920 2921 ret = try_write(con); 2922 if (ret < 0) { 2923 if (ret == -EAGAIN) 2924 continue; 2925 if (!con->error_msg) 2926 con->error_msg = "socket error on write"; 2927 fault = true; 2928 } 2929 2930 break; /* If we make it to here, we're done */ 2931 } 2932 if (fault) 2933 con_fault(con); 2934 mutex_unlock(&con->mutex); 2935 2936 if (fault) 2937 con_fault_finish(con); 2938 2939 con->ops->put(con); 2940 } 2941 2942 /* 2943 * Generic error/fault handler. A retry mechanism is used with 2944 * exponential backoff 2945 */ 2946 static void con_fault(struct ceph_connection *con) 2947 { 2948 dout("fault %p state %lu to peer %s\n", 2949 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr)); 2950 2951 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), 2952 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); 2953 con->error_msg = NULL; 2954 2955 WARN_ON(con->state != CON_STATE_CONNECTING && 2956 con->state != CON_STATE_NEGOTIATING && 2957 con->state != CON_STATE_OPEN); 2958 2959 con_close_socket(con); 2960 2961 if (con_flag_test(con, CON_FLAG_LOSSYTX)) { 2962 dout("fault on LOSSYTX channel, marking CLOSED\n"); 2963 con->state = CON_STATE_CLOSED; 2964 return; 2965 } 2966 2967 if (con->in_msg) { 2968 BUG_ON(con->in_msg->con != con); 2969 ceph_msg_put(con->in_msg); 2970 con->in_msg = NULL; 2971 } 2972 2973 /* Requeue anything that hasn't been acked */ 2974 list_splice_init(&con->out_sent, &con->out_queue); 2975 2976 /* If there are no messages queued or keepalive pending, place 2977 * the connection in a STANDBY state */ 2978 if (list_empty(&con->out_queue) && 2979 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) { 2980 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); 2981 con_flag_clear(con, CON_FLAG_WRITE_PENDING); 2982 con->state = CON_STATE_STANDBY; 2983 } else { 2984 /* retry after a delay. 
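 * Back off exponentially: start at BASE_DELAY_INTERVAL and double up
 * to MAX_DELAY_INTERVAL.  CON_FLAG_BACKOFF makes the next worker run
 * requeue itself with this delay (see con_backoff()) rather than
 * reconnect immediately.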
*/ 2985 con->state = CON_STATE_PREOPEN; 2986 if (con->delay == 0) 2987 con->delay = BASE_DELAY_INTERVAL; 2988 else if (con->delay < MAX_DELAY_INTERVAL) 2989 con->delay *= 2; 2990 con_flag_set(con, CON_FLAG_BACKOFF); 2991 queue_con(con); 2992 } 2993 } 2994 2995 2996 2997 /* 2998 * initialize a new messenger instance 2999 */ 3000 void ceph_messenger_init(struct ceph_messenger *msgr, 3001 struct ceph_entity_addr *myaddr) 3002 { 3003 spin_lock_init(&msgr->global_seq_lock); 3004 3005 if (myaddr) 3006 msgr->inst.addr = *myaddr; 3007 3008 /* select a random nonce */ 3009 msgr->inst.addr.type = 0; 3010 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); 3011 encode_my_addr(msgr); 3012 3013 atomic_set(&msgr->stopping, 0); 3014 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns)); 3015 3016 dout("%s %p\n", __func__, msgr); 3017 } 3018 EXPORT_SYMBOL(ceph_messenger_init); 3019 3020 void ceph_messenger_fini(struct ceph_messenger *msgr) 3021 { 3022 put_net(read_pnet(&msgr->net)); 3023 } 3024 EXPORT_SYMBOL(ceph_messenger_fini); 3025 3026 static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con) 3027 { 3028 if (msg->con) 3029 msg->con->ops->put(msg->con); 3030 3031 msg->con = con ? con->ops->get(con) : NULL; 3032 BUG_ON(msg->con != con); 3033 } 3034 3035 static void clear_standby(struct ceph_connection *con) 3036 { 3037 /* come back from STANDBY? */ 3038 if (con->state == CON_STATE_STANDBY) { 3039 dout("clear_standby %p and ++connect_seq\n", con); 3040 con->state = CON_STATE_PREOPEN; 3041 con->connect_seq++; 3042 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING)); 3043 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)); 3044 } 3045 } 3046 3047 /* 3048 * Queue up an outgoing message on the given connection. 3049 */ 3050 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) 3051 { 3052 /* set src+dst */ 3053 msg->hdr.src = con->msgr->inst.name; 3054 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); 3055 msg->needs_out_seq = true; 3056 3057 mutex_lock(&con->mutex); 3058 3059 if (con->state == CON_STATE_CLOSED) { 3060 dout("con_send %p closed, dropping %p\n", con, msg); 3061 ceph_msg_put(msg); 3062 mutex_unlock(&con->mutex); 3063 return; 3064 } 3065 3066 msg_con_set(msg, con); 3067 3068 BUG_ON(!list_empty(&msg->list_head)); 3069 list_add_tail(&msg->list_head, &con->out_queue); 3070 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, 3071 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type), 3072 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 3073 le32_to_cpu(msg->hdr.front_len), 3074 le32_to_cpu(msg->hdr.middle_len), 3075 le32_to_cpu(msg->hdr.data_len)); 3076 3077 clear_standby(con); 3078 mutex_unlock(&con->mutex); 3079 3080 /* if there wasn't anything waiting to send before, queue 3081 * new work */ 3082 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3083 queue_con(con); 3084 } 3085 EXPORT_SYMBOL(ceph_con_send); 3086 3087 /* 3088 * Revoke a message that was previously queued for send 3089 */ 3090 void ceph_msg_revoke(struct ceph_msg *msg) 3091 { 3092 struct ceph_connection *con = msg->con; 3093 3094 if (!con) { 3095 dout("%s msg %p null con\n", __func__, msg); 3096 return; /* Message not in our possession */ 3097 } 3098 3099 mutex_lock(&con->mutex); 3100 if (!list_empty(&msg->list_head)) { 3101 dout("%s %p msg %p - was on queue\n", __func__, con, msg); 3102 list_del_init(&msg->list_head); 3103 msg->hdr.seq = 0; 3104 3105 ceph_msg_put(msg); 3106 } 3107 if (con->out_msg == msg) { 3108 BUG_ON(con->out_skip); 
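		/*
		 * The message is partially queued for sending: figure
		 * out how many of its bytes are still owed to the wire
		 * and add them to out_skip so that the write path
		 * drops them.  The pieces are revoked in reverse order
		 * of queueing: footer, then data, middle and front.
		 */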
3109 /* footer */ 3110 if (con->out_msg_done) { 3111 con->out_skip += con_out_kvec_skip(con); 3112 } else { 3113 BUG_ON(!msg->data_length); 3114 con->out_skip += sizeof_footer(con); 3115 } 3116 /* data, middle, front */ 3117 if (msg->data_length) 3118 con->out_skip += msg->cursor.total_resid; 3119 if (msg->middle) 3120 con->out_skip += con_out_kvec_skip(con); 3121 con->out_skip += con_out_kvec_skip(con); 3122 3123 dout("%s %p msg %p - was sending, will write %d skip %d\n", 3124 __func__, con, msg, con->out_kvec_bytes, con->out_skip); 3125 msg->hdr.seq = 0; 3126 con->out_msg = NULL; 3127 ceph_msg_put(msg); 3128 } 3129 3130 mutex_unlock(&con->mutex); 3131 } 3132 3133 /* 3134 * Revoke a message that we may be reading data into 3135 */ 3136 void ceph_msg_revoke_incoming(struct ceph_msg *msg) 3137 { 3138 struct ceph_connection *con = msg->con; 3139 3140 if (!con) { 3141 dout("%s msg %p null con\n", __func__, msg); 3142 return; /* Message not in our possession */ 3143 } 3144 3145 mutex_lock(&con->mutex); 3146 if (con->in_msg == msg) { 3147 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len); 3148 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len); 3149 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len); 3150 3151 /* skip rest of message */ 3152 dout("%s %p msg %p revoked\n", __func__, con, msg); 3153 con->in_base_pos = con->in_base_pos - 3154 sizeof(struct ceph_msg_header) - 3155 front_len - 3156 middle_len - 3157 data_len - 3158 sizeof(struct ceph_msg_footer); 3159 ceph_msg_put(con->in_msg); 3160 con->in_msg = NULL; 3161 con->in_tag = CEPH_MSGR_TAG_READY; 3162 con->in_seq++; 3163 } else { 3164 dout("%s %p in_msg %p msg %p no-op\n", 3165 __func__, con, con->in_msg, msg); 3166 } 3167 mutex_unlock(&con->mutex); 3168 } 3169 3170 /* 3171 * Queue a keepalive byte to ensure the tcp connection is alive. 
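 * With a KEEPALIVE2-capable peer the keepalive carries a timestamp
 * that the peer echoes back (see read_keepalive_ack() above), and
 * ceph_con_keepalive_expired() below lets callers spot a dead peer,
 * e.g. (sketch):
 *
 *	if (ceph_con_keepalive_expired(con, timeout))
 *		...treat the peer as unresponsive...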
3172 */ 3173 void ceph_con_keepalive(struct ceph_connection *con) 3174 { 3175 dout("con_keepalive %p\n", con); 3176 mutex_lock(&con->mutex); 3177 clear_standby(con); 3178 mutex_unlock(&con->mutex); 3179 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 && 3180 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0) 3181 queue_con(con); 3182 } 3183 EXPORT_SYMBOL(ceph_con_keepalive); 3184 3185 bool ceph_con_keepalive_expired(struct ceph_connection *con, 3186 unsigned long interval) 3187 { 3188 if (interval > 0 && 3189 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) { 3190 struct timespec now; 3191 struct timespec ts; 3192 ktime_get_real_ts(&now); 3193 jiffies_to_timespec(interval, &ts); 3194 ts = timespec_add(con->last_keepalive_ack, ts); 3195 return timespec_compare(&now, &ts) >= 0; 3196 } 3197 return false; 3198 } 3199 3200 static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type) 3201 { 3202 struct ceph_msg_data *data; 3203 3204 if (WARN_ON(!ceph_msg_data_type_valid(type))) 3205 return NULL; 3206 3207 data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS); 3208 if (!data) 3209 return NULL; 3210 3211 data->type = type; 3212 INIT_LIST_HEAD(&data->links); 3213 3214 return data; 3215 } 3216 3217 static void ceph_msg_data_destroy(struct ceph_msg_data *data) 3218 { 3219 if (!data) 3220 return; 3221 3222 WARN_ON(!list_empty(&data->links)); 3223 if (data->type == CEPH_MSG_DATA_PAGELIST) 3224 ceph_pagelist_release(data->pagelist); 3225 kmem_cache_free(ceph_msg_data_cache, data); 3226 } 3227 3228 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, 3229 size_t length, size_t alignment) 3230 { 3231 struct ceph_msg_data *data; 3232 3233 BUG_ON(!pages); 3234 BUG_ON(!length); 3235 3236 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); 3237 BUG_ON(!data); 3238 data->pages = pages; 3239 data->length = length; 3240 data->alignment = alignment & ~PAGE_MASK; 3241 3242 list_add_tail(&data->links, &msg->data); 3243 msg->data_length += length; 3244 } 3245 EXPORT_SYMBOL(ceph_msg_data_add_pages); 3246 3247 void ceph_msg_data_add_pagelist(struct ceph_msg *msg, 3248 struct ceph_pagelist *pagelist) 3249 { 3250 struct ceph_msg_data *data; 3251 3252 BUG_ON(!pagelist); 3253 BUG_ON(!pagelist->length); 3254 3255 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); 3256 BUG_ON(!data); 3257 data->pagelist = pagelist; 3258 3259 list_add_tail(&data->links, &msg->data); 3260 msg->data_length += pagelist->length; 3261 } 3262 EXPORT_SYMBOL(ceph_msg_data_add_pagelist); 3263 3264 #ifdef CONFIG_BLOCK 3265 void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio, 3266 size_t length) 3267 { 3268 struct ceph_msg_data *data; 3269 3270 BUG_ON(!bio); 3271 3272 data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); 3273 BUG_ON(!data); 3274 data->bio = bio; 3275 data->bio_length = length; 3276 3277 list_add_tail(&data->links, &msg->data); 3278 msg->data_length += length; 3279 } 3280 EXPORT_SYMBOL(ceph_msg_data_add_bio); 3281 #endif /* CONFIG_BLOCK */ 3282 3283 /* 3284 * construct a new message with given type, size 3285 * the new msg has a ref count of 1. 
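 *
 * Illustrative use (sketch; type, size and gfp flags are
 * caller-specific):
 *
 *	msg = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, true);
 *	if (msg)
 *		ceph_con_send(con, msg);
 *
 * ceph_con_send() consumes the reference ceph_msg_new() returned.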
3286 */ 3287 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, 3288 bool can_fail) 3289 { 3290 struct ceph_msg *m; 3291 3292 m = kmem_cache_zalloc(ceph_msg_cache, flags); 3293 if (m == NULL) 3294 goto out; 3295 3296 m->hdr.type = cpu_to_le16(type); 3297 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); 3298 m->hdr.front_len = cpu_to_le32(front_len); 3299 3300 INIT_LIST_HEAD(&m->list_head); 3301 kref_init(&m->kref); 3302 INIT_LIST_HEAD(&m->data); 3303 3304 /* front */ 3305 if (front_len) { 3306 m->front.iov_base = ceph_kvmalloc(front_len, flags); 3307 if (m->front.iov_base == NULL) { 3308 dout("ceph_msg_new can't allocate %d bytes\n", 3309 front_len); 3310 goto out2; 3311 } 3312 } else { 3313 m->front.iov_base = NULL; 3314 } 3315 m->front_alloc_len = m->front.iov_len = front_len; 3316 3317 dout("ceph_msg_new %p front %d\n", m, front_len); 3318 return m; 3319 3320 out2: 3321 ceph_msg_put(m); 3322 out: 3323 if (!can_fail) { 3324 pr_err("msg_new can't create type %d front %d\n", type, 3325 front_len); 3326 WARN_ON(1); 3327 } else { 3328 dout("msg_new can't create type %d front %d\n", type, 3329 front_len); 3330 } 3331 return NULL; 3332 } 3333 EXPORT_SYMBOL(ceph_msg_new); 3334 3335 /* 3336 * Allocate "middle" portion of a message, if it is needed and wasn't 3337 * allocated by alloc_msg. This allows us to read a small fixed-size 3338 * per-type header in the front and then gracefully fail (i.e., 3339 * propagate the error to the caller based on info in the front) when 3340 * the middle is too large. 3341 */ 3342 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) 3343 { 3344 int type = le16_to_cpu(msg->hdr.type); 3345 int middle_len = le32_to_cpu(msg->hdr.middle_len); 3346 3347 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type, 3348 ceph_msg_type_name(type), middle_len); 3349 BUG_ON(!middle_len); 3350 BUG_ON(msg->middle); 3351 3352 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS); 3353 if (!msg->middle) 3354 return -ENOMEM; 3355 return 0; 3356 } 3357 3358 /* 3359 * Allocate a message for receiving an incoming message on a 3360 * connection, and save the result in con->in_msg. Uses the 3361 * connection's private alloc_msg op if available. 3362 * 3363 * Returns 0 on success, or a negative error code. 3364 * 3365 * On success, if we set *skip = 1: 3366 * - the next message should be skipped and ignored. 3367 * - con->in_msg == NULL 3368 * or if we set *skip = 0: 3369 * - con->in_msg is non-null. 3370 * On error (ENOMEM, EAGAIN, ...), 3371 * - con->in_msg == NULL 3372 */ 3373 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip) 3374 { 3375 struct ceph_msg_header *hdr = &con->in_hdr; 3376 int middle_len = le32_to_cpu(hdr->middle_len); 3377 struct ceph_msg *msg; 3378 int ret = 0; 3379 3380 BUG_ON(con->in_msg != NULL); 3381 BUG_ON(!con->ops->alloc_msg); 3382 3383 mutex_unlock(&con->mutex); 3384 msg = con->ops->alloc_msg(con, hdr, skip); 3385 mutex_lock(&con->mutex); 3386 if (con->state != CON_STATE_OPEN) { 3387 if (msg) 3388 ceph_msg_put(msg); 3389 return -EAGAIN; 3390 } 3391 if (msg) { 3392 BUG_ON(*skip); 3393 msg_con_set(msg, con); 3394 con->in_msg = msg; 3395 } else { 3396 /* 3397 * Null message pointer means either we should skip 3398 * this message or we couldn't allocate memory. The 3399 * former is not an error. 
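 * In the skip case we return 0 with *skip set; the caller then
 * consumes and discards the message body.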
3400 */ 3401 if (*skip) 3402 return 0; 3403 3404 con->error_msg = "error allocating memory for incoming message"; 3405 return -ENOMEM; 3406 } 3407 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); 3408 3409 if (middle_len && !con->in_msg->middle) { 3410 ret = ceph_alloc_middle(con, con->in_msg); 3411 if (ret < 0) { 3412 ceph_msg_put(con->in_msg); 3413 con->in_msg = NULL; 3414 } 3415 } 3416 3417 return ret; 3418 } 3419 3420 3421 /* 3422 * Free a generically kmalloc'd message. 3423 */ 3424 static void ceph_msg_free(struct ceph_msg *m) 3425 { 3426 dout("%s %p\n", __func__, m); 3427 kvfree(m->front.iov_base); 3428 kmem_cache_free(ceph_msg_cache, m); 3429 } 3430 3431 static void ceph_msg_release(struct kref *kref) 3432 { 3433 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref); 3434 struct ceph_msg_data *data, *next; 3435 3436 dout("%s %p\n", __func__, m); 3437 WARN_ON(!list_empty(&m->list_head)); 3438 3439 msg_con_set(m, NULL); 3440 3441 /* drop middle, data, if any */ 3442 if (m->middle) { 3443 ceph_buffer_put(m->middle); 3444 m->middle = NULL; 3445 } 3446 3447 list_for_each_entry_safe(data, next, &m->data, links) { 3448 list_del_init(&data->links); 3449 ceph_msg_data_destroy(data); 3450 } 3451 m->data_length = 0; 3452 3453 if (m->pool) 3454 ceph_msgpool_put(m->pool, m); 3455 else 3456 ceph_msg_free(m); 3457 } 3458 3459 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) 3460 { 3461 dout("%s %p (was %d)\n", __func__, msg, 3462 kref_read(&msg->kref)); 3463 kref_get(&msg->kref); 3464 return msg; 3465 } 3466 EXPORT_SYMBOL(ceph_msg_get); 3467 3468 void ceph_msg_put(struct ceph_msg *msg) 3469 { 3470 dout("%s %p (was %d)\n", __func__, msg, 3471 kref_read(&msg->kref)); 3472 kref_put(&msg->kref, ceph_msg_release); 3473 } 3474 EXPORT_SYMBOL(ceph_msg_put); 3475 3476 void ceph_msg_dump(struct ceph_msg *msg) 3477 { 3478 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg, 3479 msg->front_alloc_len, msg->data_length); 3480 print_hex_dump(KERN_DEBUG, "header: ", 3481 DUMP_PREFIX_OFFSET, 16, 1, 3482 &msg->hdr, sizeof(msg->hdr), true); 3483 print_hex_dump(KERN_DEBUG, " front: ", 3484 DUMP_PREFIX_OFFSET, 16, 1, 3485 msg->front.iov_base, msg->front.iov_len, true); 3486 if (msg->middle) 3487 print_hex_dump(KERN_DEBUG, "middle: ", 3488 DUMP_PREFIX_OFFSET, 16, 1, 3489 msg->middle->vec.iov_base, 3490 msg->middle->vec.iov_len, true); 3491 print_hex_dump(KERN_DEBUG, "footer: ", 3492 DUMP_PREFIX_OFFSET, 16, 1, 3493 &msg->footer, sizeof(msg->footer), true); 3494 } 3495 EXPORT_SYMBOL(ceph_msg_dump); 3496