1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/crc32c.h> 4 #include <linux/ctype.h> 5 #include <linux/highmem.h> 6 #include <linux/inet.h> 7 #include <linux/kthread.h> 8 #include <linux/net.h> 9 #include <linux/slab.h> 10 #include <linux/socket.h> 11 #include <linux/string.h> 12 #include <linux/bio.h> 13 #include <linux/blkdev.h> 14 #include <linux/dns_resolver.h> 15 #include <net/tcp.h> 16 17 #include <linux/ceph/libceph.h> 18 #include <linux/ceph/messenger.h> 19 #include <linux/ceph/decode.h> 20 #include <linux/ceph/pagelist.h> 21 #include <linux/export.h> 22 23 /* 24 * Ceph uses the messenger to exchange ceph_msg messages with other 25 * hosts in the system. The messenger provides ordered and reliable 26 * delivery. We tolerate TCP disconnects by reconnecting (with 27 * exponential backoff) in the case of a fault (disconnection, bad 28 * crc, protocol error). Acks allow sent messages to be discarded by 29 * the sender. 30 */ 31 32 /* static tag bytes (protocol control messages) */ 33 static char tag_msg = CEPH_MSGR_TAG_MSG; 34 static char tag_ack = CEPH_MSGR_TAG_ACK; 35 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE; 36 37 #ifdef CONFIG_LOCKDEP 38 static struct lock_class_key socket_class; 39 #endif 40 41 /* 42 * When skipping (ignoring) a block of input we read it into a "skip 43 * buffer," which is this many bytes in size. 44 */ 45 #define SKIP_BUF_SIZE 1024 46 47 static void queue_con(struct ceph_connection *con); 48 static void con_work(struct work_struct *); 49 static void ceph_fault(struct ceph_connection *con); 50 51 /* 52 * Nicely render a sockaddr as a string. An array of formatted 53 * strings is used, to approximate reentrancy. 54 */ 55 #define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */ 56 #define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG) 57 #define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1) 58 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */ 59 60 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN]; 61 static atomic_t addr_str_seq = ATOMIC_INIT(0); 62 63 static struct page *zero_page; /* used in certain error cases */ 64 65 const char *ceph_pr_addr(const struct sockaddr_storage *ss) 66 { 67 int i; 68 char *s; 69 struct sockaddr_in *in4 = (struct sockaddr_in *) ss; 70 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss; 71 72 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK; 73 s = addr_str[i]; 74 75 switch (ss->ss_family) { 76 case AF_INET: 77 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr, 78 ntohs(in4->sin_port)); 79 break; 80 81 case AF_INET6: 82 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr, 83 ntohs(in6->sin6_port)); 84 break; 85 86 default: 87 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)", 88 ss->ss_family); 89 } 90 91 return s; 92 } 93 EXPORT_SYMBOL(ceph_pr_addr); 94 95 static void encode_my_addr(struct ceph_messenger *msgr) 96 { 97 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr)); 98 ceph_encode_addr(&msgr->my_enc_addr); 99 } 100 101 /* 102 * work queue for all reading and writing to/from the socket. 103 */ 104 static struct workqueue_struct *ceph_msgr_wq; 105 106 void _ceph_msgr_exit(void) 107 { 108 if (ceph_msgr_wq) { 109 destroy_workqueue(ceph_msgr_wq); 110 ceph_msgr_wq = NULL; 111 } 112 113 BUG_ON(zero_page == NULL); 114 kunmap(zero_page); 115 page_cache_release(zero_page); 116 zero_page = NULL; 117 } 118 119 int ceph_msgr_init(void) 120 { 121 BUG_ON(zero_page != NULL); 122 zero_page = ZERO_PAGE(0); 123 page_cache_get(zero_page); 124 125 ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0); 126 if (ceph_msgr_wq) 127 return 0; 128 129 pr_err("msgr_init failed to create workqueue\n"); 130 _ceph_msgr_exit(); 131 132 return -ENOMEM; 133 } 134 EXPORT_SYMBOL(ceph_msgr_init); 135 136 void ceph_msgr_exit(void) 137 { 138 BUG_ON(ceph_msgr_wq == NULL); 139 140 _ceph_msgr_exit(); 141 } 142 EXPORT_SYMBOL(ceph_msgr_exit); 143 144 void ceph_msgr_flush(void) 145 { 146 flush_workqueue(ceph_msgr_wq); 147 } 148 EXPORT_SYMBOL(ceph_msgr_flush); 149 150 151 /* 152 * socket callback functions 153 */ 154 155 /* data available on socket, or listen socket received a connect */ 156 static void ceph_data_ready(struct sock *sk, int count_unused) 157 { 158 struct ceph_connection *con = sk->sk_user_data; 159 160 if (sk->sk_state != TCP_CLOSE_WAIT) { 161 dout("ceph_data_ready on %p state = %lu, queueing work\n", 162 con, con->state); 163 queue_con(con); 164 } 165 } 166 167 /* socket has buffer space for writing */ 168 static void ceph_write_space(struct sock *sk) 169 { 170 struct ceph_connection *con = sk->sk_user_data; 171 172 /* only queue to workqueue if there is data we want to write, 173 * and there is sufficient space in the socket buffer to accept 174 * more data. clear SOCK_NOSPACE so that ceph_write_space() 175 * doesn't get called again until try_write() fills the socket 176 * buffer. See net/ipv4/tcp_input.c:tcp_check_space() 177 * and net/core/stream.c:sk_stream_write_space(). 178 */ 179 if (test_bit(WRITE_PENDING, &con->state)) { 180 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { 181 dout("ceph_write_space %p queueing write work\n", con); 182 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 183 queue_con(con); 184 } 185 } else { 186 dout("ceph_write_space %p nothing to write\n", con); 187 } 188 } 189 190 /* socket's state has changed */ 191 static void ceph_state_change(struct sock *sk) 192 { 193 struct ceph_connection *con = sk->sk_user_data; 194 195 dout("ceph_state_change %p state = %lu sk_state = %u\n", 196 con, con->state, sk->sk_state); 197 198 if (test_bit(CLOSED, &con->state)) 199 return; 200 201 switch (sk->sk_state) { 202 case TCP_CLOSE: 203 dout("ceph_state_change TCP_CLOSE\n"); 204 case TCP_CLOSE_WAIT: 205 dout("ceph_state_change TCP_CLOSE_WAIT\n"); 206 if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) { 207 if (test_bit(CONNECTING, &con->state)) 208 con->error_msg = "connection failed"; 209 else 210 con->error_msg = "socket closed"; 211 queue_con(con); 212 } 213 break; 214 case TCP_ESTABLISHED: 215 dout("ceph_state_change TCP_ESTABLISHED\n"); 216 queue_con(con); 217 break; 218 default: /* Everything else is uninteresting */ 219 break; 220 } 221 } 222 223 /* 224 * set up socket callbacks 225 */ 226 static void set_sock_callbacks(struct socket *sock, 227 struct ceph_connection *con) 228 { 229 struct sock *sk = sock->sk; 230 sk->sk_user_data = con; 231 sk->sk_data_ready = ceph_data_ready; 232 sk->sk_write_space = ceph_write_space; 233 sk->sk_state_change = ceph_state_change; 234 } 235 236 237 /* 238 * socket helpers 239 */ 240 241 /* 242 * initiate connection to a remote socket. 243 */ 244 static int ceph_tcp_connect(struct ceph_connection *con) 245 { 246 struct sockaddr_storage *paddr = &con->peer_addr.in_addr; 247 struct socket *sock; 248 int ret; 249 250 BUG_ON(con->sock); 251 ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM, 252 IPPROTO_TCP, &sock); 253 if (ret) 254 return ret; 255 sock->sk->sk_allocation = GFP_NOFS; 256 257 #ifdef CONFIG_LOCKDEP 258 lockdep_set_class(&sock->sk->sk_lock, &socket_class); 259 #endif 260 261 set_sock_callbacks(sock, con); 262 263 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr)); 264 265 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr), 266 O_NONBLOCK); 267 if (ret == -EINPROGRESS) { 268 dout("connect %s EINPROGRESS sk_state = %u\n", 269 ceph_pr_addr(&con->peer_addr.in_addr), 270 sock->sk->sk_state); 271 } else if (ret < 0) { 272 pr_err("connect %s error %d\n", 273 ceph_pr_addr(&con->peer_addr.in_addr), ret); 274 sock_release(sock); 275 con->error_msg = "connect error"; 276 277 return ret; 278 } 279 con->sock = sock; 280 281 return 0; 282 } 283 284 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) 285 { 286 struct kvec iov = {buf, len}; 287 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 288 int r; 289 290 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); 291 if (r == -EAGAIN) 292 r = 0; 293 return r; 294 } 295 296 /* 297 * write something. @more is true if caller will be sending more data 298 * shortly. 299 */ 300 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, 301 size_t kvlen, size_t len, int more) 302 { 303 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 304 int r; 305 306 if (more) 307 msg.msg_flags |= MSG_MORE; 308 else 309 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ 310 311 r = kernel_sendmsg(sock, &msg, iov, kvlen, len); 312 if (r == -EAGAIN) 313 r = 0; 314 return r; 315 } 316 317 static int ceph_tcp_sendpage(struct socket *sock, struct page *page, 318 int offset, size_t size, int more) 319 { 320 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR); 321 int ret; 322 323 ret = kernel_sendpage(sock, page, offset, size, flags); 324 if (ret == -EAGAIN) 325 ret = 0; 326 327 return ret; 328 } 329 330 331 /* 332 * Shutdown/close the socket for the given connection. 333 */ 334 static int con_close_socket(struct ceph_connection *con) 335 { 336 int rc; 337 338 dout("con_close_socket on %p sock %p\n", con, con->sock); 339 if (!con->sock) 340 return 0; 341 set_bit(SOCK_CLOSED, &con->state); 342 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR); 343 sock_release(con->sock); 344 con->sock = NULL; 345 clear_bit(SOCK_CLOSED, &con->state); 346 return rc; 347 } 348 349 /* 350 * Reset a connection. Discard all incoming and outgoing messages 351 * and clear *_seq state. 352 */ 353 static void ceph_msg_remove(struct ceph_msg *msg) 354 { 355 list_del_init(&msg->list_head); 356 ceph_msg_put(msg); 357 } 358 static void ceph_msg_remove_list(struct list_head *head) 359 { 360 while (!list_empty(head)) { 361 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg, 362 list_head); 363 ceph_msg_remove(msg); 364 } 365 } 366 367 static void reset_connection(struct ceph_connection *con) 368 { 369 /* reset connection, out_queue, msg_ and connect_seq */ 370 /* discard existing out_queue and msg_seq */ 371 ceph_msg_remove_list(&con->out_queue); 372 ceph_msg_remove_list(&con->out_sent); 373 374 if (con->in_msg) { 375 ceph_msg_put(con->in_msg); 376 con->in_msg = NULL; 377 } 378 379 con->connect_seq = 0; 380 con->out_seq = 0; 381 if (con->out_msg) { 382 ceph_msg_put(con->out_msg); 383 con->out_msg = NULL; 384 } 385 con->in_seq = 0; 386 con->in_seq_acked = 0; 387 } 388 389 /* 390 * mark a peer down. drop any open connections. 391 */ 392 void ceph_con_close(struct ceph_connection *con) 393 { 394 dout("con_close %p peer %s\n", con, 395 ceph_pr_addr(&con->peer_addr.in_addr)); 396 set_bit(CLOSED, &con->state); /* in case there's queued work */ 397 clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */ 398 clear_bit(LOSSYTX, &con->state); /* so we retry next connect */ 399 clear_bit(KEEPALIVE_PENDING, &con->state); 400 clear_bit(WRITE_PENDING, &con->state); 401 mutex_lock(&con->mutex); 402 reset_connection(con); 403 con->peer_global_seq = 0; 404 cancel_delayed_work(&con->work); 405 mutex_unlock(&con->mutex); 406 queue_con(con); 407 } 408 EXPORT_SYMBOL(ceph_con_close); 409 410 /* 411 * Reopen a closed connection, with a new peer address. 412 */ 413 void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr) 414 { 415 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr)); 416 set_bit(OPENING, &con->state); 417 clear_bit(CLOSED, &con->state); 418 memcpy(&con->peer_addr, addr, sizeof(*addr)); 419 con->delay = 0; /* reset backoff memory */ 420 queue_con(con); 421 } 422 EXPORT_SYMBOL(ceph_con_open); 423 424 /* 425 * return true if this connection ever successfully opened 426 */ 427 bool ceph_con_opened(struct ceph_connection *con) 428 { 429 return con->connect_seq > 0; 430 } 431 432 /* 433 * generic get/put 434 */ 435 struct ceph_connection *ceph_con_get(struct ceph_connection *con) 436 { 437 int nref = __atomic_add_unless(&con->nref, 1, 0); 438 439 dout("con_get %p nref = %d -> %d\n", con, nref, nref + 1); 440 441 return nref ? con : NULL; 442 } 443 444 void ceph_con_put(struct ceph_connection *con) 445 { 446 int nref = atomic_dec_return(&con->nref); 447 448 BUG_ON(nref < 0); 449 if (nref == 0) { 450 BUG_ON(con->sock); 451 kfree(con); 452 } 453 dout("con_put %p nref = %d -> %d\n", con, nref + 1, nref); 454 } 455 456 /* 457 * initialize a new connection. 458 */ 459 void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con) 460 { 461 dout("con_init %p\n", con); 462 memset(con, 0, sizeof(*con)); 463 atomic_set(&con->nref, 1); 464 con->msgr = msgr; 465 mutex_init(&con->mutex); 466 INIT_LIST_HEAD(&con->out_queue); 467 INIT_LIST_HEAD(&con->out_sent); 468 INIT_DELAYED_WORK(&con->work, con_work); 469 } 470 EXPORT_SYMBOL(ceph_con_init); 471 472 473 /* 474 * We maintain a global counter to order connection attempts. Get 475 * a unique seq greater than @gt. 476 */ 477 static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt) 478 { 479 u32 ret; 480 481 spin_lock(&msgr->global_seq_lock); 482 if (msgr->global_seq < gt) 483 msgr->global_seq = gt; 484 ret = ++msgr->global_seq; 485 spin_unlock(&msgr->global_seq_lock); 486 return ret; 487 } 488 489 static void ceph_con_out_kvec_reset(struct ceph_connection *con) 490 { 491 con->out_kvec_left = 0; 492 con->out_kvec_bytes = 0; 493 con->out_kvec_cur = &con->out_kvec[0]; 494 } 495 496 static void ceph_con_out_kvec_add(struct ceph_connection *con, 497 size_t size, void *data) 498 { 499 int index; 500 501 index = con->out_kvec_left; 502 BUG_ON(index >= ARRAY_SIZE(con->out_kvec)); 503 504 con->out_kvec[index].iov_len = size; 505 con->out_kvec[index].iov_base = data; 506 con->out_kvec_left++; 507 con->out_kvec_bytes += size; 508 } 509 510 /* 511 * Prepare footer for currently outgoing message, and finish things 512 * off. Assumes out_kvec* are already valid.. we just add on to the end. 513 */ 514 static void prepare_write_message_footer(struct ceph_connection *con) 515 { 516 struct ceph_msg *m = con->out_msg; 517 int v = con->out_kvec_left; 518 519 dout("prepare_write_message_footer %p\n", con); 520 con->out_kvec_is_msg = true; 521 con->out_kvec[v].iov_base = &m->footer; 522 con->out_kvec[v].iov_len = sizeof(m->footer); 523 con->out_kvec_bytes += sizeof(m->footer); 524 con->out_kvec_left++; 525 con->out_more = m->more_to_follow; 526 con->out_msg_done = true; 527 } 528 529 /* 530 * Prepare headers for the next outgoing message. 531 */ 532 static void prepare_write_message(struct ceph_connection *con) 533 { 534 struct ceph_msg *m; 535 u32 crc; 536 537 ceph_con_out_kvec_reset(con); 538 con->out_kvec_is_msg = true; 539 con->out_msg_done = false; 540 541 /* Sneak an ack in there first? If we can get it into the same 542 * TCP packet that's a good thing. */ 543 if (con->in_seq > con->in_seq_acked) { 544 con->in_seq_acked = con->in_seq; 545 ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack); 546 con->out_temp_ack = cpu_to_le64(con->in_seq_acked); 547 ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack), 548 &con->out_temp_ack); 549 } 550 551 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head); 552 con->out_msg = m; 553 554 /* put message on sent list */ 555 ceph_msg_get(m); 556 list_move_tail(&m->list_head, &con->out_sent); 557 558 /* 559 * only assign outgoing seq # if we haven't sent this message 560 * yet. if it is requeued, resend with it's original seq. 561 */ 562 if (m->needs_out_seq) { 563 m->hdr.seq = cpu_to_le64(++con->out_seq); 564 m->needs_out_seq = false; 565 } 566 567 dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n", 568 m, con->out_seq, le16_to_cpu(m->hdr.type), 569 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len), 570 le32_to_cpu(m->hdr.data_len), 571 m->nr_pages); 572 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len); 573 574 /* tag + hdr + front + middle */ 575 ceph_con_out_kvec_add(con, sizeof (tag_msg), &tag_msg); 576 ceph_con_out_kvec_add(con, sizeof (m->hdr), &m->hdr); 577 ceph_con_out_kvec_add(con, m->front.iov_len, m->front.iov_base); 578 579 if (m->middle) 580 ceph_con_out_kvec_add(con, m->middle->vec.iov_len, 581 m->middle->vec.iov_base); 582 583 /* fill in crc (except data pages), footer */ 584 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc)); 585 con->out_msg->hdr.crc = cpu_to_le32(crc); 586 con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE; 587 588 crc = crc32c(0, m->front.iov_base, m->front.iov_len); 589 con->out_msg->footer.front_crc = cpu_to_le32(crc); 590 if (m->middle) { 591 crc = crc32c(0, m->middle->vec.iov_base, 592 m->middle->vec.iov_len); 593 con->out_msg->footer.middle_crc = cpu_to_le32(crc); 594 } else 595 con->out_msg->footer.middle_crc = 0; 596 con->out_msg->footer.data_crc = 0; 597 dout("prepare_write_message front_crc %u data_crc %u\n", 598 le32_to_cpu(con->out_msg->footer.front_crc), 599 le32_to_cpu(con->out_msg->footer.middle_crc)); 600 601 /* is there a data payload? */ 602 if (le32_to_cpu(m->hdr.data_len) > 0) { 603 /* initialize page iterator */ 604 con->out_msg_pos.page = 0; 605 if (m->pages) 606 con->out_msg_pos.page_pos = m->page_alignment; 607 else 608 con->out_msg_pos.page_pos = 0; 609 con->out_msg_pos.data_pos = 0; 610 con->out_msg_pos.did_page_crc = false; 611 con->out_more = 1; /* data + footer will follow */ 612 } else { 613 /* no, queue up footer too and be done */ 614 prepare_write_message_footer(con); 615 } 616 617 set_bit(WRITE_PENDING, &con->state); 618 } 619 620 /* 621 * Prepare an ack. 622 */ 623 static void prepare_write_ack(struct ceph_connection *con) 624 { 625 dout("prepare_write_ack %p %llu -> %llu\n", con, 626 con->in_seq_acked, con->in_seq); 627 con->in_seq_acked = con->in_seq; 628 629 ceph_con_out_kvec_reset(con); 630 631 ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack); 632 633 con->out_temp_ack = cpu_to_le64(con->in_seq_acked); 634 ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack), 635 &con->out_temp_ack); 636 637 con->out_more = 1; /* more will follow.. eventually.. */ 638 set_bit(WRITE_PENDING, &con->state); 639 } 640 641 /* 642 * Prepare to write keepalive byte. 643 */ 644 static void prepare_write_keepalive(struct ceph_connection *con) 645 { 646 dout("prepare_write_keepalive %p\n", con); 647 ceph_con_out_kvec_reset(con); 648 ceph_con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive); 649 set_bit(WRITE_PENDING, &con->state); 650 } 651 652 /* 653 * Connection negotiation. 654 */ 655 656 static int prepare_connect_authorizer(struct ceph_connection *con) 657 { 658 void *auth_buf; 659 int auth_len = 0; 660 int auth_protocol = 0; 661 662 mutex_unlock(&con->mutex); 663 if (con->ops->get_authorizer) 664 con->ops->get_authorizer(con, &auth_buf, &auth_len, 665 &auth_protocol, &con->auth_reply_buf, 666 &con->auth_reply_buf_len, 667 con->auth_retry); 668 mutex_lock(&con->mutex); 669 670 if (test_bit(CLOSED, &con->state) || 671 test_bit(OPENING, &con->state)) 672 return -EAGAIN; 673 674 con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol); 675 con->out_connect.authorizer_len = cpu_to_le32(auth_len); 676 677 if (auth_len) 678 ceph_con_out_kvec_add(con, auth_len, auth_buf); 679 680 return 0; 681 } 682 683 /* 684 * We connected to a peer and are saying hello. 685 */ 686 static void prepare_write_banner(struct ceph_messenger *msgr, 687 struct ceph_connection *con) 688 { 689 ceph_con_out_kvec_reset(con); 690 ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER); 691 ceph_con_out_kvec_add(con, sizeof (msgr->my_enc_addr), 692 &msgr->my_enc_addr); 693 694 con->out_more = 0; 695 set_bit(WRITE_PENDING, &con->state); 696 } 697 698 static int prepare_write_connect(struct ceph_messenger *msgr, 699 struct ceph_connection *con, 700 int include_banner) 701 { 702 unsigned global_seq = get_global_seq(con->msgr, 0); 703 int proto; 704 705 switch (con->peer_name.type) { 706 case CEPH_ENTITY_TYPE_MON: 707 proto = CEPH_MONC_PROTOCOL; 708 break; 709 case CEPH_ENTITY_TYPE_OSD: 710 proto = CEPH_OSDC_PROTOCOL; 711 break; 712 case CEPH_ENTITY_TYPE_MDS: 713 proto = CEPH_MDSC_PROTOCOL; 714 break; 715 default: 716 BUG(); 717 } 718 719 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, 720 con->connect_seq, global_seq, proto); 721 722 con->out_connect.features = cpu_to_le64(msgr->supported_features); 723 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); 724 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); 725 con->out_connect.global_seq = cpu_to_le32(global_seq); 726 con->out_connect.protocol_version = cpu_to_le32(proto); 727 con->out_connect.flags = 0; 728 729 if (include_banner) 730 prepare_write_banner(msgr, con); 731 else 732 ceph_con_out_kvec_reset(con); 733 ceph_con_out_kvec_add(con, sizeof (con->out_connect), &con->out_connect); 734 735 con->out_more = 0; 736 set_bit(WRITE_PENDING, &con->state); 737 738 return prepare_connect_authorizer(con); 739 } 740 741 /* 742 * write as much of pending kvecs to the socket as we can. 743 * 1 -> done 744 * 0 -> socket full, but more to do 745 * <0 -> error 746 */ 747 static int write_partial_kvec(struct ceph_connection *con) 748 { 749 int ret; 750 751 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes); 752 while (con->out_kvec_bytes > 0) { 753 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur, 754 con->out_kvec_left, con->out_kvec_bytes, 755 con->out_more); 756 if (ret <= 0) 757 goto out; 758 con->out_kvec_bytes -= ret; 759 if (con->out_kvec_bytes == 0) 760 break; /* done */ 761 762 /* account for full iov entries consumed */ 763 while (ret >= con->out_kvec_cur->iov_len) { 764 BUG_ON(!con->out_kvec_left); 765 ret -= con->out_kvec_cur->iov_len; 766 con->out_kvec_cur++; 767 con->out_kvec_left--; 768 } 769 /* and for a partially-consumed entry */ 770 if (ret) { 771 con->out_kvec_cur->iov_len -= ret; 772 con->out_kvec_cur->iov_base += ret; 773 } 774 } 775 con->out_kvec_left = 0; 776 con->out_kvec_is_msg = false; 777 ret = 1; 778 out: 779 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con, 780 con->out_kvec_bytes, con->out_kvec_left, ret); 781 return ret; /* done! */ 782 } 783 784 #ifdef CONFIG_BLOCK 785 static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg) 786 { 787 if (!bio) { 788 *iter = NULL; 789 *seg = 0; 790 return; 791 } 792 *iter = bio; 793 *seg = bio->bi_idx; 794 } 795 796 static void iter_bio_next(struct bio **bio_iter, int *seg) 797 { 798 if (*bio_iter == NULL) 799 return; 800 801 BUG_ON(*seg >= (*bio_iter)->bi_vcnt); 802 803 (*seg)++; 804 if (*seg == (*bio_iter)->bi_vcnt) 805 init_bio_iter((*bio_iter)->bi_next, bio_iter, seg); 806 } 807 #endif 808 809 /* 810 * Write as much message data payload as we can. If we finish, queue 811 * up the footer. 812 * 1 -> done, footer is now queued in out_kvec[]. 813 * 0 -> socket full, but more to do 814 * <0 -> error 815 */ 816 static int write_partial_msg_pages(struct ceph_connection *con) 817 { 818 struct ceph_msg *msg = con->out_msg; 819 unsigned data_len = le32_to_cpu(msg->hdr.data_len); 820 size_t len; 821 bool do_datacrc = !con->msgr->nocrc; 822 int ret; 823 int total_max_write; 824 int in_trail = 0; 825 size_t trail_len = (msg->trail ? msg->trail->length : 0); 826 827 dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n", 828 con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages, 829 con->out_msg_pos.page_pos); 830 831 #ifdef CONFIG_BLOCK 832 if (msg->bio && !msg->bio_iter) 833 init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg); 834 #endif 835 836 while (data_len > con->out_msg_pos.data_pos) { 837 struct page *page = NULL; 838 int max_write = PAGE_SIZE; 839 int bio_offset = 0; 840 841 total_max_write = data_len - trail_len - 842 con->out_msg_pos.data_pos; 843 844 /* 845 * if we are calculating the data crc (the default), we need 846 * to map the page. if our pages[] has been revoked, use the 847 * zero page. 848 */ 849 850 /* have we reached the trail part of the data? */ 851 if (con->out_msg_pos.data_pos >= data_len - trail_len) { 852 in_trail = 1; 853 854 total_max_write = data_len - con->out_msg_pos.data_pos; 855 856 page = list_first_entry(&msg->trail->head, 857 struct page, lru); 858 max_write = PAGE_SIZE; 859 } else if (msg->pages) { 860 page = msg->pages[con->out_msg_pos.page]; 861 } else if (msg->pagelist) { 862 page = list_first_entry(&msg->pagelist->head, 863 struct page, lru); 864 #ifdef CONFIG_BLOCK 865 } else if (msg->bio) { 866 struct bio_vec *bv; 867 868 bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg); 869 page = bv->bv_page; 870 bio_offset = bv->bv_offset; 871 max_write = bv->bv_len; 872 #endif 873 } else { 874 page = zero_page; 875 } 876 len = min_t(int, max_write - con->out_msg_pos.page_pos, 877 total_max_write); 878 879 if (do_datacrc && !con->out_msg_pos.did_page_crc) { 880 void *base; 881 u32 crc; 882 u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc); 883 char *kaddr; 884 885 kaddr = kmap(page); 886 BUG_ON(kaddr == NULL); 887 base = kaddr + con->out_msg_pos.page_pos + bio_offset; 888 crc = crc32c(tmpcrc, base, len); 889 con->out_msg->footer.data_crc = cpu_to_le32(crc); 890 con->out_msg_pos.did_page_crc = true; 891 } 892 ret = ceph_tcp_sendpage(con->sock, page, 893 con->out_msg_pos.page_pos + bio_offset, 894 len, 1); 895 896 if (do_datacrc) 897 kunmap(page); 898 899 if (ret <= 0) 900 goto out; 901 902 con->out_msg_pos.data_pos += ret; 903 con->out_msg_pos.page_pos += ret; 904 if (ret == len) { 905 con->out_msg_pos.page_pos = 0; 906 con->out_msg_pos.page++; 907 con->out_msg_pos.did_page_crc = false; 908 if (in_trail) 909 list_move_tail(&page->lru, 910 &msg->trail->head); 911 else if (msg->pagelist) 912 list_move_tail(&page->lru, 913 &msg->pagelist->head); 914 #ifdef CONFIG_BLOCK 915 else if (msg->bio) 916 iter_bio_next(&msg->bio_iter, &msg->bio_seg); 917 #endif 918 } 919 } 920 921 dout("write_partial_msg_pages %p msg %p done\n", con, msg); 922 923 /* prepare and queue up footer, too */ 924 if (!do_datacrc) 925 con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC; 926 ceph_con_out_kvec_reset(con); 927 prepare_write_message_footer(con); 928 ret = 1; 929 out: 930 return ret; 931 } 932 933 /* 934 * write some zeros 935 */ 936 static int write_partial_skip(struct ceph_connection *con) 937 { 938 int ret; 939 940 while (con->out_skip > 0) { 941 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE); 942 943 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, 1); 944 if (ret <= 0) 945 goto out; 946 con->out_skip -= ret; 947 } 948 ret = 1; 949 out: 950 return ret; 951 } 952 953 /* 954 * Prepare to read connection handshake, or an ack. 955 */ 956 static void prepare_read_banner(struct ceph_connection *con) 957 { 958 dout("prepare_read_banner %p\n", con); 959 con->in_base_pos = 0; 960 } 961 962 static void prepare_read_connect(struct ceph_connection *con) 963 { 964 dout("prepare_read_connect %p\n", con); 965 con->in_base_pos = 0; 966 } 967 968 static void prepare_read_ack(struct ceph_connection *con) 969 { 970 dout("prepare_read_ack %p\n", con); 971 con->in_base_pos = 0; 972 } 973 974 static void prepare_read_tag(struct ceph_connection *con) 975 { 976 dout("prepare_read_tag %p\n", con); 977 con->in_base_pos = 0; 978 con->in_tag = CEPH_MSGR_TAG_READY; 979 } 980 981 /* 982 * Prepare to read a message. 983 */ 984 static int prepare_read_message(struct ceph_connection *con) 985 { 986 dout("prepare_read_message %p\n", con); 987 BUG_ON(con->in_msg != NULL); 988 con->in_base_pos = 0; 989 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0; 990 return 0; 991 } 992 993 994 static int read_partial(struct ceph_connection *con, 995 int *to, int size, void *object) 996 { 997 *to += size; 998 while (con->in_base_pos < *to) { 999 int left = *to - con->in_base_pos; 1000 int have = size - left; 1001 int ret = ceph_tcp_recvmsg(con->sock, object + have, left); 1002 if (ret <= 0) 1003 return ret; 1004 con->in_base_pos += ret; 1005 } 1006 return 1; 1007 } 1008 1009 1010 /* 1011 * Read all or part of the connect-side handshake on a new connection 1012 */ 1013 static int read_partial_banner(struct ceph_connection *con) 1014 { 1015 int ret, to = 0; 1016 1017 dout("read_partial_banner %p at %d\n", con, con->in_base_pos); 1018 1019 /* peer's banner */ 1020 ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner); 1021 if (ret <= 0) 1022 goto out; 1023 ret = read_partial(con, &to, sizeof(con->actual_peer_addr), 1024 &con->actual_peer_addr); 1025 if (ret <= 0) 1026 goto out; 1027 ret = read_partial(con, &to, sizeof(con->peer_addr_for_me), 1028 &con->peer_addr_for_me); 1029 if (ret <= 0) 1030 goto out; 1031 out: 1032 return ret; 1033 } 1034 1035 static int read_partial_connect(struct ceph_connection *con) 1036 { 1037 int ret, to = 0; 1038 1039 dout("read_partial_connect %p at %d\n", con, con->in_base_pos); 1040 1041 ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply); 1042 if (ret <= 0) 1043 goto out; 1044 ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len), 1045 con->auth_reply_buf); 1046 if (ret <= 0) 1047 goto out; 1048 1049 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n", 1050 con, (int)con->in_reply.tag, 1051 le32_to_cpu(con->in_reply.connect_seq), 1052 le32_to_cpu(con->in_reply.global_seq)); 1053 out: 1054 return ret; 1055 1056 } 1057 1058 /* 1059 * Verify the hello banner looks okay. 1060 */ 1061 static int verify_hello(struct ceph_connection *con) 1062 { 1063 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) { 1064 pr_err("connect to %s got bad banner\n", 1065 ceph_pr_addr(&con->peer_addr.in_addr)); 1066 con->error_msg = "protocol error, bad banner"; 1067 return -1; 1068 } 1069 return 0; 1070 } 1071 1072 static bool addr_is_blank(struct sockaddr_storage *ss) 1073 { 1074 switch (ss->ss_family) { 1075 case AF_INET: 1076 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0; 1077 case AF_INET6: 1078 return 1079 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 && 1080 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 && 1081 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 && 1082 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0; 1083 } 1084 return false; 1085 } 1086 1087 static int addr_port(struct sockaddr_storage *ss) 1088 { 1089 switch (ss->ss_family) { 1090 case AF_INET: 1091 return ntohs(((struct sockaddr_in *)ss)->sin_port); 1092 case AF_INET6: 1093 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port); 1094 } 1095 return 0; 1096 } 1097 1098 static void addr_set_port(struct sockaddr_storage *ss, int p) 1099 { 1100 switch (ss->ss_family) { 1101 case AF_INET: 1102 ((struct sockaddr_in *)ss)->sin_port = htons(p); 1103 break; 1104 case AF_INET6: 1105 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p); 1106 break; 1107 } 1108 } 1109 1110 /* 1111 * Unlike other *_pton function semantics, zero indicates success. 1112 */ 1113 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss, 1114 char delim, const char **ipend) 1115 { 1116 struct sockaddr_in *in4 = (struct sockaddr_in *) ss; 1117 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss; 1118 1119 memset(ss, 0, sizeof(*ss)); 1120 1121 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) { 1122 ss->ss_family = AF_INET; 1123 return 0; 1124 } 1125 1126 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) { 1127 ss->ss_family = AF_INET6; 1128 return 0; 1129 } 1130 1131 return -EINVAL; 1132 } 1133 1134 /* 1135 * Extract hostname string and resolve using kernel DNS facility. 1136 */ 1137 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER 1138 static int ceph_dns_resolve_name(const char *name, size_t namelen, 1139 struct sockaddr_storage *ss, char delim, const char **ipend) 1140 { 1141 const char *end, *delim_p; 1142 char *colon_p, *ip_addr = NULL; 1143 int ip_len, ret; 1144 1145 /* 1146 * The end of the hostname occurs immediately preceding the delimiter or 1147 * the port marker (':') where the delimiter takes precedence. 1148 */ 1149 delim_p = memchr(name, delim, namelen); 1150 colon_p = memchr(name, ':', namelen); 1151 1152 if (delim_p && colon_p) 1153 end = delim_p < colon_p ? delim_p : colon_p; 1154 else if (!delim_p && colon_p) 1155 end = colon_p; 1156 else { 1157 end = delim_p; 1158 if (!end) /* case: hostname:/ */ 1159 end = name + namelen; 1160 } 1161 1162 if (end <= name) 1163 return -EINVAL; 1164 1165 /* do dns_resolve upcall */ 1166 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL); 1167 if (ip_len > 0) 1168 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL); 1169 else 1170 ret = -ESRCH; 1171 1172 kfree(ip_addr); 1173 1174 *ipend = end; 1175 1176 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name, 1177 ret, ret ? "failed" : ceph_pr_addr(ss)); 1178 1179 return ret; 1180 } 1181 #else 1182 static inline int ceph_dns_resolve_name(const char *name, size_t namelen, 1183 struct sockaddr_storage *ss, char delim, const char **ipend) 1184 { 1185 return -EINVAL; 1186 } 1187 #endif 1188 1189 /* 1190 * Parse a server name (IP or hostname). If a valid IP address is not found 1191 * then try to extract a hostname to resolve using userspace DNS upcall. 1192 */ 1193 static int ceph_parse_server_name(const char *name, size_t namelen, 1194 struct sockaddr_storage *ss, char delim, const char **ipend) 1195 { 1196 int ret; 1197 1198 ret = ceph_pton(name, namelen, ss, delim, ipend); 1199 if (ret) 1200 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend); 1201 1202 return ret; 1203 } 1204 1205 /* 1206 * Parse an ip[:port] list into an addr array. Use the default 1207 * monitor port if a port isn't specified. 1208 */ 1209 int ceph_parse_ips(const char *c, const char *end, 1210 struct ceph_entity_addr *addr, 1211 int max_count, int *count) 1212 { 1213 int i, ret = -EINVAL; 1214 const char *p = c; 1215 1216 dout("parse_ips on '%.*s'\n", (int)(end-c), c); 1217 for (i = 0; i < max_count; i++) { 1218 const char *ipend; 1219 struct sockaddr_storage *ss = &addr[i].in_addr; 1220 int port; 1221 char delim = ','; 1222 1223 if (*p == '[') { 1224 delim = ']'; 1225 p++; 1226 } 1227 1228 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend); 1229 if (ret) 1230 goto bad; 1231 ret = -EINVAL; 1232 1233 p = ipend; 1234 1235 if (delim == ']') { 1236 if (*p != ']') { 1237 dout("missing matching ']'\n"); 1238 goto bad; 1239 } 1240 p++; 1241 } 1242 1243 /* port? */ 1244 if (p < end && *p == ':') { 1245 port = 0; 1246 p++; 1247 while (p < end && *p >= '0' && *p <= '9') { 1248 port = (port * 10) + (*p - '0'); 1249 p++; 1250 } 1251 if (port > 65535 || port == 0) 1252 goto bad; 1253 } else { 1254 port = CEPH_MON_PORT; 1255 } 1256 1257 addr_set_port(ss, port); 1258 1259 dout("parse_ips got %s\n", ceph_pr_addr(ss)); 1260 1261 if (p == end) 1262 break; 1263 if (*p != ',') 1264 goto bad; 1265 p++; 1266 } 1267 1268 if (p != end) 1269 goto bad; 1270 1271 if (count) 1272 *count = i + 1; 1273 return 0; 1274 1275 bad: 1276 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); 1277 return ret; 1278 } 1279 EXPORT_SYMBOL(ceph_parse_ips); 1280 1281 static int process_banner(struct ceph_connection *con) 1282 { 1283 dout("process_banner on %p\n", con); 1284 1285 if (verify_hello(con) < 0) 1286 return -1; 1287 1288 ceph_decode_addr(&con->actual_peer_addr); 1289 ceph_decode_addr(&con->peer_addr_for_me); 1290 1291 /* 1292 * Make sure the other end is who we wanted. note that the other 1293 * end may not yet know their ip address, so if it's 0.0.0.0, give 1294 * them the benefit of the doubt. 1295 */ 1296 if (memcmp(&con->peer_addr, &con->actual_peer_addr, 1297 sizeof(con->peer_addr)) != 0 && 1298 !(addr_is_blank(&con->actual_peer_addr.in_addr) && 1299 con->actual_peer_addr.nonce == con->peer_addr.nonce)) { 1300 pr_warning("wrong peer, want %s/%d, got %s/%d\n", 1301 ceph_pr_addr(&con->peer_addr.in_addr), 1302 (int)le32_to_cpu(con->peer_addr.nonce), 1303 ceph_pr_addr(&con->actual_peer_addr.in_addr), 1304 (int)le32_to_cpu(con->actual_peer_addr.nonce)); 1305 con->error_msg = "wrong peer at address"; 1306 return -1; 1307 } 1308 1309 /* 1310 * did we learn our address? 1311 */ 1312 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) { 1313 int port = addr_port(&con->msgr->inst.addr.in_addr); 1314 1315 memcpy(&con->msgr->inst.addr.in_addr, 1316 &con->peer_addr_for_me.in_addr, 1317 sizeof(con->peer_addr_for_me.in_addr)); 1318 addr_set_port(&con->msgr->inst.addr.in_addr, port); 1319 encode_my_addr(con->msgr); 1320 dout("process_banner learned my addr is %s\n", 1321 ceph_pr_addr(&con->msgr->inst.addr.in_addr)); 1322 } 1323 1324 set_bit(NEGOTIATING, &con->state); 1325 prepare_read_connect(con); 1326 return 0; 1327 } 1328 1329 static void fail_protocol(struct ceph_connection *con) 1330 { 1331 reset_connection(con); 1332 set_bit(CLOSED, &con->state); /* in case there's queued work */ 1333 1334 mutex_unlock(&con->mutex); 1335 if (con->ops->bad_proto) 1336 con->ops->bad_proto(con); 1337 mutex_lock(&con->mutex); 1338 } 1339 1340 static int process_connect(struct ceph_connection *con) 1341 { 1342 u64 sup_feat = con->msgr->supported_features; 1343 u64 req_feat = con->msgr->required_features; 1344 u64 server_feat = le64_to_cpu(con->in_reply.features); 1345 int ret; 1346 1347 dout("process_connect on %p tag %d\n", con, (int)con->in_tag); 1348 1349 switch (con->in_reply.tag) { 1350 case CEPH_MSGR_TAG_FEATURES: 1351 pr_err("%s%lld %s feature set mismatch," 1352 " my %llx < server's %llx, missing %llx\n", 1353 ENTITY_NAME(con->peer_name), 1354 ceph_pr_addr(&con->peer_addr.in_addr), 1355 sup_feat, server_feat, server_feat & ~sup_feat); 1356 con->error_msg = "missing required protocol features"; 1357 fail_protocol(con); 1358 return -1; 1359 1360 case CEPH_MSGR_TAG_BADPROTOVER: 1361 pr_err("%s%lld %s protocol version mismatch," 1362 " my %d != server's %d\n", 1363 ENTITY_NAME(con->peer_name), 1364 ceph_pr_addr(&con->peer_addr.in_addr), 1365 le32_to_cpu(con->out_connect.protocol_version), 1366 le32_to_cpu(con->in_reply.protocol_version)); 1367 con->error_msg = "protocol version mismatch"; 1368 fail_protocol(con); 1369 return -1; 1370 1371 case CEPH_MSGR_TAG_BADAUTHORIZER: 1372 con->auth_retry++; 1373 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con, 1374 con->auth_retry); 1375 if (con->auth_retry == 2) { 1376 con->error_msg = "connect authorization failure"; 1377 return -1; 1378 } 1379 con->auth_retry = 1; 1380 ret = prepare_write_connect(con->msgr, con, 0); 1381 if (ret < 0) 1382 return ret; 1383 prepare_read_connect(con); 1384 break; 1385 1386 case CEPH_MSGR_TAG_RESETSESSION: 1387 /* 1388 * If we connected with a large connect_seq but the peer 1389 * has no record of a session with us (no connection, or 1390 * connect_seq == 0), they will send RESETSESION to indicate 1391 * that they must have reset their session, and may have 1392 * dropped messages. 1393 */ 1394 dout("process_connect got RESET peer seq %u\n", 1395 le32_to_cpu(con->in_connect.connect_seq)); 1396 pr_err("%s%lld %s connection reset\n", 1397 ENTITY_NAME(con->peer_name), 1398 ceph_pr_addr(&con->peer_addr.in_addr)); 1399 reset_connection(con); 1400 prepare_write_connect(con->msgr, con, 0); 1401 prepare_read_connect(con); 1402 1403 /* Tell ceph about it. */ 1404 mutex_unlock(&con->mutex); 1405 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name)); 1406 if (con->ops->peer_reset) 1407 con->ops->peer_reset(con); 1408 mutex_lock(&con->mutex); 1409 if (test_bit(CLOSED, &con->state) || 1410 test_bit(OPENING, &con->state)) 1411 return -EAGAIN; 1412 break; 1413 1414 case CEPH_MSGR_TAG_RETRY_SESSION: 1415 /* 1416 * If we sent a smaller connect_seq than the peer has, try 1417 * again with a larger value. 1418 */ 1419 dout("process_connect got RETRY my seq = %u, peer_seq = %u\n", 1420 le32_to_cpu(con->out_connect.connect_seq), 1421 le32_to_cpu(con->in_connect.connect_seq)); 1422 con->connect_seq = le32_to_cpu(con->in_connect.connect_seq); 1423 prepare_write_connect(con->msgr, con, 0); 1424 prepare_read_connect(con); 1425 break; 1426 1427 case CEPH_MSGR_TAG_RETRY_GLOBAL: 1428 /* 1429 * If we sent a smaller global_seq than the peer has, try 1430 * again with a larger value. 1431 */ 1432 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n", 1433 con->peer_global_seq, 1434 le32_to_cpu(con->in_connect.global_seq)); 1435 get_global_seq(con->msgr, 1436 le32_to_cpu(con->in_connect.global_seq)); 1437 prepare_write_connect(con->msgr, con, 0); 1438 prepare_read_connect(con); 1439 break; 1440 1441 case CEPH_MSGR_TAG_READY: 1442 if (req_feat & ~server_feat) { 1443 pr_err("%s%lld %s protocol feature mismatch," 1444 " my required %llx > server's %llx, need %llx\n", 1445 ENTITY_NAME(con->peer_name), 1446 ceph_pr_addr(&con->peer_addr.in_addr), 1447 req_feat, server_feat, req_feat & ~server_feat); 1448 con->error_msg = "missing required protocol features"; 1449 fail_protocol(con); 1450 return -1; 1451 } 1452 clear_bit(CONNECTING, &con->state); 1453 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); 1454 con->connect_seq++; 1455 con->peer_features = server_feat; 1456 dout("process_connect got READY gseq %d cseq %d (%d)\n", 1457 con->peer_global_seq, 1458 le32_to_cpu(con->in_reply.connect_seq), 1459 con->connect_seq); 1460 WARN_ON(con->connect_seq != 1461 le32_to_cpu(con->in_reply.connect_seq)); 1462 1463 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) 1464 set_bit(LOSSYTX, &con->state); 1465 1466 prepare_read_tag(con); 1467 break; 1468 1469 case CEPH_MSGR_TAG_WAIT: 1470 /* 1471 * If there is a connection race (we are opening 1472 * connections to each other), one of us may just have 1473 * to WAIT. This shouldn't happen if we are the 1474 * client. 1475 */ 1476 pr_err("process_connect got WAIT as client\n"); 1477 con->error_msg = "protocol error, got WAIT as client"; 1478 return -1; 1479 1480 default: 1481 pr_err("connect protocol error, will retry\n"); 1482 con->error_msg = "protocol error, garbage tag during connect"; 1483 return -1; 1484 } 1485 return 0; 1486 } 1487 1488 1489 /* 1490 * read (part of) an ack 1491 */ 1492 static int read_partial_ack(struct ceph_connection *con) 1493 { 1494 int to = 0; 1495 1496 return read_partial(con, &to, sizeof(con->in_temp_ack), 1497 &con->in_temp_ack); 1498 } 1499 1500 1501 /* 1502 * We can finally discard anything that's been acked. 1503 */ 1504 static void process_ack(struct ceph_connection *con) 1505 { 1506 struct ceph_msg *m; 1507 u64 ack = le64_to_cpu(con->in_temp_ack); 1508 u64 seq; 1509 1510 while (!list_empty(&con->out_sent)) { 1511 m = list_first_entry(&con->out_sent, struct ceph_msg, 1512 list_head); 1513 seq = le64_to_cpu(m->hdr.seq); 1514 if (seq > ack) 1515 break; 1516 dout("got ack for seq %llu type %d at %p\n", seq, 1517 le16_to_cpu(m->hdr.type), m); 1518 m->ack_stamp = jiffies; 1519 ceph_msg_remove(m); 1520 } 1521 prepare_read_tag(con); 1522 } 1523 1524 1525 1526 1527 static int read_partial_message_section(struct ceph_connection *con, 1528 struct kvec *section, 1529 unsigned int sec_len, u32 *crc) 1530 { 1531 int ret, left; 1532 1533 BUG_ON(!section); 1534 1535 while (section->iov_len < sec_len) { 1536 BUG_ON(section->iov_base == NULL); 1537 left = sec_len - section->iov_len; 1538 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base + 1539 section->iov_len, left); 1540 if (ret <= 0) 1541 return ret; 1542 section->iov_len += ret; 1543 } 1544 if (section->iov_len == sec_len) 1545 *crc = crc32c(0, section->iov_base, section->iov_len); 1546 1547 return 1; 1548 } 1549 1550 static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, 1551 struct ceph_msg_header *hdr, 1552 int *skip); 1553 1554 1555 static int read_partial_message_pages(struct ceph_connection *con, 1556 struct page **pages, 1557 unsigned data_len, bool do_datacrc) 1558 { 1559 void *p; 1560 int ret; 1561 int left; 1562 1563 left = min((int)(data_len - con->in_msg_pos.data_pos), 1564 (int)(PAGE_SIZE - con->in_msg_pos.page_pos)); 1565 /* (page) data */ 1566 BUG_ON(pages == NULL); 1567 p = kmap(pages[con->in_msg_pos.page]); 1568 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, 1569 left); 1570 if (ret > 0 && do_datacrc) 1571 con->in_data_crc = 1572 crc32c(con->in_data_crc, 1573 p + con->in_msg_pos.page_pos, ret); 1574 kunmap(pages[con->in_msg_pos.page]); 1575 if (ret <= 0) 1576 return ret; 1577 con->in_msg_pos.data_pos += ret; 1578 con->in_msg_pos.page_pos += ret; 1579 if (con->in_msg_pos.page_pos == PAGE_SIZE) { 1580 con->in_msg_pos.page_pos = 0; 1581 con->in_msg_pos.page++; 1582 } 1583 1584 return ret; 1585 } 1586 1587 #ifdef CONFIG_BLOCK 1588 static int read_partial_message_bio(struct ceph_connection *con, 1589 struct bio **bio_iter, int *bio_seg, 1590 unsigned data_len, bool do_datacrc) 1591 { 1592 struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg); 1593 void *p; 1594 int ret, left; 1595 1596 if (IS_ERR(bv)) 1597 return PTR_ERR(bv); 1598 1599 left = min((int)(data_len - con->in_msg_pos.data_pos), 1600 (int)(bv->bv_len - con->in_msg_pos.page_pos)); 1601 1602 p = kmap(bv->bv_page) + bv->bv_offset; 1603 1604 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, 1605 left); 1606 if (ret > 0 && do_datacrc) 1607 con->in_data_crc = 1608 crc32c(con->in_data_crc, 1609 p + con->in_msg_pos.page_pos, ret); 1610 kunmap(bv->bv_page); 1611 if (ret <= 0) 1612 return ret; 1613 con->in_msg_pos.data_pos += ret; 1614 con->in_msg_pos.page_pos += ret; 1615 if (con->in_msg_pos.page_pos == bv->bv_len) { 1616 con->in_msg_pos.page_pos = 0; 1617 iter_bio_next(bio_iter, bio_seg); 1618 } 1619 1620 return ret; 1621 } 1622 #endif 1623 1624 /* 1625 * read (part of) a message. 1626 */ 1627 static int read_partial_message(struct ceph_connection *con) 1628 { 1629 struct ceph_msg *m = con->in_msg; 1630 int ret; 1631 int to, left; 1632 unsigned front_len, middle_len, data_len; 1633 bool do_datacrc = !con->msgr->nocrc; 1634 int skip; 1635 u64 seq; 1636 u32 crc; 1637 1638 dout("read_partial_message con %p msg %p\n", con, m); 1639 1640 /* header */ 1641 while (con->in_base_pos < sizeof(con->in_hdr)) { 1642 left = sizeof(con->in_hdr) - con->in_base_pos; 1643 ret = ceph_tcp_recvmsg(con->sock, 1644 (char *)&con->in_hdr + con->in_base_pos, 1645 left); 1646 if (ret <= 0) 1647 return ret; 1648 con->in_base_pos += ret; 1649 } 1650 1651 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc)); 1652 if (cpu_to_le32(crc) != con->in_hdr.crc) { 1653 pr_err("read_partial_message bad hdr " 1654 " crc %u != expected %u\n", 1655 crc, con->in_hdr.crc); 1656 return -EBADMSG; 1657 } 1658 1659 front_len = le32_to_cpu(con->in_hdr.front_len); 1660 if (front_len > CEPH_MSG_MAX_FRONT_LEN) 1661 return -EIO; 1662 middle_len = le32_to_cpu(con->in_hdr.middle_len); 1663 if (middle_len > CEPH_MSG_MAX_DATA_LEN) 1664 return -EIO; 1665 data_len = le32_to_cpu(con->in_hdr.data_len); 1666 if (data_len > CEPH_MSG_MAX_DATA_LEN) 1667 return -EIO; 1668 1669 /* verify seq# */ 1670 seq = le64_to_cpu(con->in_hdr.seq); 1671 if ((s64)seq - (s64)con->in_seq < 1) { 1672 pr_info("skipping %s%lld %s seq %lld expected %lld\n", 1673 ENTITY_NAME(con->peer_name), 1674 ceph_pr_addr(&con->peer_addr.in_addr), 1675 seq, con->in_seq + 1); 1676 con->in_base_pos = -front_len - middle_len - data_len - 1677 sizeof(m->footer); 1678 con->in_tag = CEPH_MSGR_TAG_READY; 1679 return 0; 1680 } else if ((s64)seq - (s64)con->in_seq > 1) { 1681 pr_err("read_partial_message bad seq %lld expected %lld\n", 1682 seq, con->in_seq + 1); 1683 con->error_msg = "bad message sequence # for incoming message"; 1684 return -EBADMSG; 1685 } 1686 1687 /* allocate message? */ 1688 if (!con->in_msg) { 1689 dout("got hdr type %d front %d data %d\n", con->in_hdr.type, 1690 con->in_hdr.front_len, con->in_hdr.data_len); 1691 skip = 0; 1692 con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip); 1693 if (skip) { 1694 /* skip this message */ 1695 dout("alloc_msg said skip message\n"); 1696 BUG_ON(con->in_msg); 1697 con->in_base_pos = -front_len - middle_len - data_len - 1698 sizeof(m->footer); 1699 con->in_tag = CEPH_MSGR_TAG_READY; 1700 con->in_seq++; 1701 return 0; 1702 } 1703 if (!con->in_msg) { 1704 con->error_msg = 1705 "error allocating memory for incoming message"; 1706 return -ENOMEM; 1707 } 1708 m = con->in_msg; 1709 m->front.iov_len = 0; /* haven't read it yet */ 1710 if (m->middle) 1711 m->middle->vec.iov_len = 0; 1712 1713 con->in_msg_pos.page = 0; 1714 if (m->pages) 1715 con->in_msg_pos.page_pos = m->page_alignment; 1716 else 1717 con->in_msg_pos.page_pos = 0; 1718 con->in_msg_pos.data_pos = 0; 1719 } 1720 1721 /* front */ 1722 ret = read_partial_message_section(con, &m->front, front_len, 1723 &con->in_front_crc); 1724 if (ret <= 0) 1725 return ret; 1726 1727 /* middle */ 1728 if (m->middle) { 1729 ret = read_partial_message_section(con, &m->middle->vec, 1730 middle_len, 1731 &con->in_middle_crc); 1732 if (ret <= 0) 1733 return ret; 1734 } 1735 #ifdef CONFIG_BLOCK 1736 if (m->bio && !m->bio_iter) 1737 init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg); 1738 #endif 1739 1740 /* (page) data */ 1741 while (con->in_msg_pos.data_pos < data_len) { 1742 if (m->pages) { 1743 ret = read_partial_message_pages(con, m->pages, 1744 data_len, do_datacrc); 1745 if (ret <= 0) 1746 return ret; 1747 #ifdef CONFIG_BLOCK 1748 } else if (m->bio) { 1749 1750 ret = read_partial_message_bio(con, 1751 &m->bio_iter, &m->bio_seg, 1752 data_len, do_datacrc); 1753 if (ret <= 0) 1754 return ret; 1755 #endif 1756 } else { 1757 BUG_ON(1); 1758 } 1759 } 1760 1761 /* footer */ 1762 to = sizeof(m->hdr) + sizeof(m->footer); 1763 while (con->in_base_pos < to) { 1764 left = to - con->in_base_pos; 1765 ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer + 1766 (con->in_base_pos - sizeof(m->hdr)), 1767 left); 1768 if (ret <= 0) 1769 return ret; 1770 con->in_base_pos += ret; 1771 } 1772 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n", 1773 m, front_len, m->footer.front_crc, middle_len, 1774 m->footer.middle_crc, data_len, m->footer.data_crc); 1775 1776 /* crc ok? */ 1777 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) { 1778 pr_err("read_partial_message %p front crc %u != exp. %u\n", 1779 m, con->in_front_crc, m->footer.front_crc); 1780 return -EBADMSG; 1781 } 1782 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) { 1783 pr_err("read_partial_message %p middle crc %u != exp %u\n", 1784 m, con->in_middle_crc, m->footer.middle_crc); 1785 return -EBADMSG; 1786 } 1787 if (do_datacrc && 1788 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 && 1789 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) { 1790 pr_err("read_partial_message %p data crc %u != exp. %u\n", m, 1791 con->in_data_crc, le32_to_cpu(m->footer.data_crc)); 1792 return -EBADMSG; 1793 } 1794 1795 return 1; /* done! */ 1796 } 1797 1798 /* 1799 * Process message. This happens in the worker thread. The callback should 1800 * be careful not to do anything that waits on other incoming messages or it 1801 * may deadlock. 1802 */ 1803 static void process_message(struct ceph_connection *con) 1804 { 1805 struct ceph_msg *msg; 1806 1807 msg = con->in_msg; 1808 con->in_msg = NULL; 1809 1810 /* if first message, set peer_name */ 1811 if (con->peer_name.type == 0) 1812 con->peer_name = msg->hdr.src; 1813 1814 con->in_seq++; 1815 mutex_unlock(&con->mutex); 1816 1817 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n", 1818 msg, le64_to_cpu(msg->hdr.seq), 1819 ENTITY_NAME(msg->hdr.src), 1820 le16_to_cpu(msg->hdr.type), 1821 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 1822 le32_to_cpu(msg->hdr.front_len), 1823 le32_to_cpu(msg->hdr.data_len), 1824 con->in_front_crc, con->in_middle_crc, con->in_data_crc); 1825 con->ops->dispatch(con, msg); 1826 1827 mutex_lock(&con->mutex); 1828 prepare_read_tag(con); 1829 } 1830 1831 1832 /* 1833 * Write something to the socket. Called in a worker thread when the 1834 * socket appears to be writeable and we have something ready to send. 1835 */ 1836 static int try_write(struct ceph_connection *con) 1837 { 1838 struct ceph_messenger *msgr = con->msgr; 1839 int ret = 1; 1840 1841 dout("try_write start %p state %lu nref %d\n", con, con->state, 1842 atomic_read(&con->nref)); 1843 1844 more: 1845 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); 1846 1847 /* open the socket first? */ 1848 if (con->sock == NULL) { 1849 prepare_write_connect(msgr, con, 1); 1850 prepare_read_banner(con); 1851 set_bit(CONNECTING, &con->state); 1852 clear_bit(NEGOTIATING, &con->state); 1853 1854 BUG_ON(con->in_msg); 1855 con->in_tag = CEPH_MSGR_TAG_READY; 1856 dout("try_write initiating connect on %p new state %lu\n", 1857 con, con->state); 1858 ret = ceph_tcp_connect(con); 1859 if (ret < 0) { 1860 con->error_msg = "connect error"; 1861 goto out; 1862 } 1863 } 1864 1865 more_kvec: 1866 /* kvec data queued? */ 1867 if (con->out_skip) { 1868 ret = write_partial_skip(con); 1869 if (ret <= 0) 1870 goto out; 1871 } 1872 if (con->out_kvec_left) { 1873 ret = write_partial_kvec(con); 1874 if (ret <= 0) 1875 goto out; 1876 } 1877 1878 /* msg pages? */ 1879 if (con->out_msg) { 1880 if (con->out_msg_done) { 1881 ceph_msg_put(con->out_msg); 1882 con->out_msg = NULL; /* we're done with this one */ 1883 goto do_next; 1884 } 1885 1886 ret = write_partial_msg_pages(con); 1887 if (ret == 1) 1888 goto more_kvec; /* we need to send the footer, too! */ 1889 if (ret == 0) 1890 goto out; 1891 if (ret < 0) { 1892 dout("try_write write_partial_msg_pages err %d\n", 1893 ret); 1894 goto out; 1895 } 1896 } 1897 1898 do_next: 1899 if (!test_bit(CONNECTING, &con->state)) { 1900 /* is anything else pending? */ 1901 if (!list_empty(&con->out_queue)) { 1902 prepare_write_message(con); 1903 goto more; 1904 } 1905 if (con->in_seq > con->in_seq_acked) { 1906 prepare_write_ack(con); 1907 goto more; 1908 } 1909 if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) { 1910 prepare_write_keepalive(con); 1911 goto more; 1912 } 1913 } 1914 1915 /* Nothing to do! */ 1916 clear_bit(WRITE_PENDING, &con->state); 1917 dout("try_write nothing else to write.\n"); 1918 ret = 0; 1919 out: 1920 dout("try_write done on %p ret %d\n", con, ret); 1921 return ret; 1922 } 1923 1924 1925 1926 /* 1927 * Read what we can from the socket. 1928 */ 1929 static int try_read(struct ceph_connection *con) 1930 { 1931 int ret = -1; 1932 1933 if (!con->sock) 1934 return 0; 1935 1936 if (test_bit(STANDBY, &con->state)) 1937 return 0; 1938 1939 dout("try_read start on %p\n", con); 1940 1941 more: 1942 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag, 1943 con->in_base_pos); 1944 1945 /* 1946 * process_connect and process_message drop and re-take 1947 * con->mutex. make sure we handle a racing close or reopen. 1948 */ 1949 if (test_bit(CLOSED, &con->state) || 1950 test_bit(OPENING, &con->state)) { 1951 ret = -EAGAIN; 1952 goto out; 1953 } 1954 1955 if (test_bit(CONNECTING, &con->state)) { 1956 if (!test_bit(NEGOTIATING, &con->state)) { 1957 dout("try_read connecting\n"); 1958 ret = read_partial_banner(con); 1959 if (ret <= 0) 1960 goto out; 1961 ret = process_banner(con); 1962 if (ret < 0) 1963 goto out; 1964 } 1965 ret = read_partial_connect(con); 1966 if (ret <= 0) 1967 goto out; 1968 ret = process_connect(con); 1969 if (ret < 0) 1970 goto out; 1971 goto more; 1972 } 1973 1974 if (con->in_base_pos < 0) { 1975 /* 1976 * skipping + discarding content. 1977 * 1978 * FIXME: there must be a better way to do this! 1979 */ 1980 static char buf[SKIP_BUF_SIZE]; 1981 int skip = min((int) sizeof (buf), -con->in_base_pos); 1982 1983 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); 1984 ret = ceph_tcp_recvmsg(con->sock, buf, skip); 1985 if (ret <= 0) 1986 goto out; 1987 con->in_base_pos += ret; 1988 if (con->in_base_pos) 1989 goto more; 1990 } 1991 if (con->in_tag == CEPH_MSGR_TAG_READY) { 1992 /* 1993 * what's next? 1994 */ 1995 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); 1996 if (ret <= 0) 1997 goto out; 1998 dout("try_read got tag %d\n", (int)con->in_tag); 1999 switch (con->in_tag) { 2000 case CEPH_MSGR_TAG_MSG: 2001 prepare_read_message(con); 2002 break; 2003 case CEPH_MSGR_TAG_ACK: 2004 prepare_read_ack(con); 2005 break; 2006 case CEPH_MSGR_TAG_CLOSE: 2007 set_bit(CLOSED, &con->state); /* fixme */ 2008 goto out; 2009 default: 2010 goto bad_tag; 2011 } 2012 } 2013 if (con->in_tag == CEPH_MSGR_TAG_MSG) { 2014 ret = read_partial_message(con); 2015 if (ret <= 0) { 2016 switch (ret) { 2017 case -EBADMSG: 2018 con->error_msg = "bad crc"; 2019 ret = -EIO; 2020 break; 2021 case -EIO: 2022 con->error_msg = "io error"; 2023 break; 2024 } 2025 goto out; 2026 } 2027 if (con->in_tag == CEPH_MSGR_TAG_READY) 2028 goto more; 2029 process_message(con); 2030 goto more; 2031 } 2032 if (con->in_tag == CEPH_MSGR_TAG_ACK) { 2033 ret = read_partial_ack(con); 2034 if (ret <= 0) 2035 goto out; 2036 process_ack(con); 2037 goto more; 2038 } 2039 2040 out: 2041 dout("try_read done on %p ret %d\n", con, ret); 2042 return ret; 2043 2044 bad_tag: 2045 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag); 2046 con->error_msg = "protocol error, garbage tag"; 2047 ret = -1; 2048 goto out; 2049 } 2050 2051 2052 /* 2053 * Atomically queue work on a connection. Bump @con reference to 2054 * avoid races with connection teardown. 2055 */ 2056 static void queue_con(struct ceph_connection *con) 2057 { 2058 if (test_bit(DEAD, &con->state)) { 2059 dout("queue_con %p ignoring: DEAD\n", 2060 con); 2061 return; 2062 } 2063 2064 if (!con->ops->get(con)) { 2065 dout("queue_con %p ref count 0\n", con); 2066 return; 2067 } 2068 2069 if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) { 2070 dout("queue_con %p - already queued\n", con); 2071 con->ops->put(con); 2072 } else { 2073 dout("queue_con %p\n", con); 2074 } 2075 } 2076 2077 /* 2078 * Do some work on a connection. Drop a connection ref when we're done. 2079 */ 2080 static void con_work(struct work_struct *work) 2081 { 2082 struct ceph_connection *con = container_of(work, struct ceph_connection, 2083 work.work); 2084 int ret; 2085 2086 mutex_lock(&con->mutex); 2087 restart: 2088 if (test_and_clear_bit(BACKOFF, &con->state)) { 2089 dout("con_work %p backing off\n", con); 2090 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2091 round_jiffies_relative(con->delay))) { 2092 dout("con_work %p backoff %lu\n", con, con->delay); 2093 mutex_unlock(&con->mutex); 2094 return; 2095 } else { 2096 con->ops->put(con); 2097 dout("con_work %p FAILED to back off %lu\n", con, 2098 con->delay); 2099 } 2100 } 2101 2102 if (test_bit(STANDBY, &con->state)) { 2103 dout("con_work %p STANDBY\n", con); 2104 goto done; 2105 } 2106 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */ 2107 dout("con_work CLOSED\n"); 2108 con_close_socket(con); 2109 goto done; 2110 } 2111 if (test_and_clear_bit(OPENING, &con->state)) { 2112 /* reopen w/ new peer */ 2113 dout("con_work OPENING\n"); 2114 con_close_socket(con); 2115 } 2116 2117 if (test_and_clear_bit(SOCK_CLOSED, &con->state)) 2118 goto fault; 2119 2120 ret = try_read(con); 2121 if (ret == -EAGAIN) 2122 goto restart; 2123 if (ret < 0) 2124 goto fault; 2125 2126 ret = try_write(con); 2127 if (ret == -EAGAIN) 2128 goto restart; 2129 if (ret < 0) 2130 goto fault; 2131 2132 done: 2133 mutex_unlock(&con->mutex); 2134 done_unlocked: 2135 con->ops->put(con); 2136 return; 2137 2138 fault: 2139 mutex_unlock(&con->mutex); 2140 ceph_fault(con); /* error/fault path */ 2141 goto done_unlocked; 2142 } 2143 2144 2145 /* 2146 * Generic error/fault handler. A retry mechanism is used with 2147 * exponential backoff 2148 */ 2149 static void ceph_fault(struct ceph_connection *con) 2150 { 2151 pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name), 2152 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg); 2153 dout("fault %p state %lu to peer %s\n", 2154 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr)); 2155 2156 if (test_bit(LOSSYTX, &con->state)) { 2157 dout("fault on LOSSYTX channel\n"); 2158 goto out; 2159 } 2160 2161 mutex_lock(&con->mutex); 2162 if (test_bit(CLOSED, &con->state)) 2163 goto out_unlock; 2164 2165 con_close_socket(con); 2166 2167 if (con->in_msg) { 2168 ceph_msg_put(con->in_msg); 2169 con->in_msg = NULL; 2170 } 2171 2172 /* Requeue anything that hasn't been acked */ 2173 list_splice_init(&con->out_sent, &con->out_queue); 2174 2175 /* If there are no messages queued or keepalive pending, place 2176 * the connection in a STANDBY state */ 2177 if (list_empty(&con->out_queue) && 2178 !test_bit(KEEPALIVE_PENDING, &con->state)) { 2179 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); 2180 clear_bit(WRITE_PENDING, &con->state); 2181 set_bit(STANDBY, &con->state); 2182 } else { 2183 /* retry after a delay. */ 2184 if (con->delay == 0) 2185 con->delay = BASE_DELAY_INTERVAL; 2186 else if (con->delay < MAX_DELAY_INTERVAL) 2187 con->delay *= 2; 2188 con->ops->get(con); 2189 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2190 round_jiffies_relative(con->delay))) { 2191 dout("fault queued %p delay %lu\n", con, con->delay); 2192 } else { 2193 con->ops->put(con); 2194 dout("fault failed to queue %p delay %lu, backoff\n", 2195 con, con->delay); 2196 /* 2197 * In many cases we see a socket state change 2198 * while con_work is running and end up 2199 * queuing (non-delayed) work, such that we 2200 * can't backoff with a delay. Set a flag so 2201 * that when con_work restarts we schedule the 2202 * delay then. 2203 */ 2204 set_bit(BACKOFF, &con->state); 2205 } 2206 } 2207 2208 out_unlock: 2209 mutex_unlock(&con->mutex); 2210 out: 2211 /* 2212 * in case we faulted due to authentication, invalidate our 2213 * current tickets so that we can get new ones. 2214 */ 2215 if (con->auth_retry && con->ops->invalidate_authorizer) { 2216 dout("calling invalidate_authorizer()\n"); 2217 con->ops->invalidate_authorizer(con); 2218 } 2219 2220 if (con->ops->fault) 2221 con->ops->fault(con); 2222 } 2223 2224 2225 2226 /* 2227 * create a new messenger instance 2228 */ 2229 struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr, 2230 u32 supported_features, 2231 u32 required_features) 2232 { 2233 struct ceph_messenger *msgr; 2234 2235 msgr = kzalloc(sizeof(*msgr), GFP_KERNEL); 2236 if (msgr == NULL) 2237 return ERR_PTR(-ENOMEM); 2238 2239 msgr->supported_features = supported_features; 2240 msgr->required_features = required_features; 2241 2242 spin_lock_init(&msgr->global_seq_lock); 2243 2244 if (myaddr) 2245 msgr->inst.addr = *myaddr; 2246 2247 /* select a random nonce */ 2248 msgr->inst.addr.type = 0; 2249 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); 2250 encode_my_addr(msgr); 2251 2252 dout("messenger_create %p\n", msgr); 2253 return msgr; 2254 } 2255 EXPORT_SYMBOL(ceph_messenger_create); 2256 2257 void ceph_messenger_destroy(struct ceph_messenger *msgr) 2258 { 2259 dout("destroy %p\n", msgr); 2260 kfree(msgr); 2261 dout("destroyed messenger %p\n", msgr); 2262 } 2263 EXPORT_SYMBOL(ceph_messenger_destroy); 2264 2265 static void clear_standby(struct ceph_connection *con) 2266 { 2267 /* come back from STANDBY? */ 2268 if (test_and_clear_bit(STANDBY, &con->state)) { 2269 mutex_lock(&con->mutex); 2270 dout("clear_standby %p and ++connect_seq\n", con); 2271 con->connect_seq++; 2272 WARN_ON(test_bit(WRITE_PENDING, &con->state)); 2273 WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state)); 2274 mutex_unlock(&con->mutex); 2275 } 2276 } 2277 2278 /* 2279 * Queue up an outgoing message on the given connection. 2280 */ 2281 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) 2282 { 2283 if (test_bit(CLOSED, &con->state)) { 2284 dout("con_send %p closed, dropping %p\n", con, msg); 2285 ceph_msg_put(msg); 2286 return; 2287 } 2288 2289 /* set src+dst */ 2290 msg->hdr.src = con->msgr->inst.name; 2291 2292 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); 2293 2294 msg->needs_out_seq = true; 2295 2296 /* queue */ 2297 mutex_lock(&con->mutex); 2298 BUG_ON(!list_empty(&msg->list_head)); 2299 list_add_tail(&msg->list_head, &con->out_queue); 2300 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, 2301 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type), 2302 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2303 le32_to_cpu(msg->hdr.front_len), 2304 le32_to_cpu(msg->hdr.middle_len), 2305 le32_to_cpu(msg->hdr.data_len)); 2306 mutex_unlock(&con->mutex); 2307 2308 /* if there wasn't anything waiting to send before, queue 2309 * new work */ 2310 clear_standby(con); 2311 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2312 queue_con(con); 2313 } 2314 EXPORT_SYMBOL(ceph_con_send); 2315 2316 /* 2317 * Revoke a message that was previously queued for send 2318 */ 2319 void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg) 2320 { 2321 mutex_lock(&con->mutex); 2322 if (!list_empty(&msg->list_head)) { 2323 dout("con_revoke %p msg %p - was on queue\n", con, msg); 2324 list_del_init(&msg->list_head); 2325 ceph_msg_put(msg); 2326 msg->hdr.seq = 0; 2327 } 2328 if (con->out_msg == msg) { 2329 dout("con_revoke %p msg %p - was sending\n", con, msg); 2330 con->out_msg = NULL; 2331 if (con->out_kvec_is_msg) { 2332 con->out_skip = con->out_kvec_bytes; 2333 con->out_kvec_is_msg = false; 2334 } 2335 ceph_msg_put(msg); 2336 msg->hdr.seq = 0; 2337 } 2338 mutex_unlock(&con->mutex); 2339 } 2340 2341 /* 2342 * Revoke a message that we may be reading data into 2343 */ 2344 void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg) 2345 { 2346 mutex_lock(&con->mutex); 2347 if (con->in_msg && con->in_msg == msg) { 2348 unsigned front_len = le32_to_cpu(con->in_hdr.front_len); 2349 unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len); 2350 unsigned data_len = le32_to_cpu(con->in_hdr.data_len); 2351 2352 /* skip rest of message */ 2353 dout("con_revoke_pages %p msg %p revoked\n", con, msg); 2354 con->in_base_pos = con->in_base_pos - 2355 sizeof(struct ceph_msg_header) - 2356 front_len - 2357 middle_len - 2358 data_len - 2359 sizeof(struct ceph_msg_footer); 2360 ceph_msg_put(con->in_msg); 2361 con->in_msg = NULL; 2362 con->in_tag = CEPH_MSGR_TAG_READY; 2363 con->in_seq++; 2364 } else { 2365 dout("con_revoke_pages %p msg %p pages %p no-op\n", 2366 con, con->in_msg, msg); 2367 } 2368 mutex_unlock(&con->mutex); 2369 } 2370 2371 /* 2372 * Queue a keepalive byte to ensure the tcp connection is alive. 2373 */ 2374 void ceph_con_keepalive(struct ceph_connection *con) 2375 { 2376 dout("con_keepalive %p\n", con); 2377 clear_standby(con); 2378 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && 2379 test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2380 queue_con(con); 2381 } 2382 EXPORT_SYMBOL(ceph_con_keepalive); 2383 2384 2385 /* 2386 * construct a new message with given type, size 2387 * the new msg has a ref count of 1. 2388 */ 2389 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, 2390 bool can_fail) 2391 { 2392 struct ceph_msg *m; 2393 2394 m = kmalloc(sizeof(*m), flags); 2395 if (m == NULL) 2396 goto out; 2397 kref_init(&m->kref); 2398 INIT_LIST_HEAD(&m->list_head); 2399 2400 m->hdr.tid = 0; 2401 m->hdr.type = cpu_to_le16(type); 2402 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); 2403 m->hdr.version = 0; 2404 m->hdr.front_len = cpu_to_le32(front_len); 2405 m->hdr.middle_len = 0; 2406 m->hdr.data_len = 0; 2407 m->hdr.data_off = 0; 2408 m->hdr.reserved = 0; 2409 m->footer.front_crc = 0; 2410 m->footer.middle_crc = 0; 2411 m->footer.data_crc = 0; 2412 m->footer.flags = 0; 2413 m->front_max = front_len; 2414 m->front_is_vmalloc = false; 2415 m->more_to_follow = false; 2416 m->ack_stamp = 0; 2417 m->pool = NULL; 2418 2419 /* middle */ 2420 m->middle = NULL; 2421 2422 /* data */ 2423 m->nr_pages = 0; 2424 m->page_alignment = 0; 2425 m->pages = NULL; 2426 m->pagelist = NULL; 2427 m->bio = NULL; 2428 m->bio_iter = NULL; 2429 m->bio_seg = 0; 2430 m->trail = NULL; 2431 2432 /* front */ 2433 if (front_len) { 2434 if (front_len > PAGE_CACHE_SIZE) { 2435 m->front.iov_base = __vmalloc(front_len, flags, 2436 PAGE_KERNEL); 2437 m->front_is_vmalloc = true; 2438 } else { 2439 m->front.iov_base = kmalloc(front_len, flags); 2440 } 2441 if (m->front.iov_base == NULL) { 2442 dout("ceph_msg_new can't allocate %d bytes\n", 2443 front_len); 2444 goto out2; 2445 } 2446 } else { 2447 m->front.iov_base = NULL; 2448 } 2449 m->front.iov_len = front_len; 2450 2451 dout("ceph_msg_new %p front %d\n", m, front_len); 2452 return m; 2453 2454 out2: 2455 ceph_msg_put(m); 2456 out: 2457 if (!can_fail) { 2458 pr_err("msg_new can't create type %d front %d\n", type, 2459 front_len); 2460 WARN_ON(1); 2461 } else { 2462 dout("msg_new can't create type %d front %d\n", type, 2463 front_len); 2464 } 2465 return NULL; 2466 } 2467 EXPORT_SYMBOL(ceph_msg_new); 2468 2469 /* 2470 * Allocate "middle" portion of a message, if it is needed and wasn't 2471 * allocated by alloc_msg. This allows us to read a small fixed-size 2472 * per-type header in the front and then gracefully fail (i.e., 2473 * propagate the error to the caller based on info in the front) when 2474 * the middle is too large. 2475 */ 2476 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) 2477 { 2478 int type = le16_to_cpu(msg->hdr.type); 2479 int middle_len = le32_to_cpu(msg->hdr.middle_len); 2480 2481 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type, 2482 ceph_msg_type_name(type), middle_len); 2483 BUG_ON(!middle_len); 2484 BUG_ON(msg->middle); 2485 2486 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS); 2487 if (!msg->middle) 2488 return -ENOMEM; 2489 return 0; 2490 } 2491 2492 /* 2493 * Generic message allocator, for incoming messages. 2494 */ 2495 static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, 2496 struct ceph_msg_header *hdr, 2497 int *skip) 2498 { 2499 int type = le16_to_cpu(hdr->type); 2500 int front_len = le32_to_cpu(hdr->front_len); 2501 int middle_len = le32_to_cpu(hdr->middle_len); 2502 struct ceph_msg *msg = NULL; 2503 int ret; 2504 2505 if (con->ops->alloc_msg) { 2506 mutex_unlock(&con->mutex); 2507 msg = con->ops->alloc_msg(con, hdr, skip); 2508 mutex_lock(&con->mutex); 2509 if (!msg || *skip) 2510 return NULL; 2511 } 2512 if (!msg) { 2513 *skip = 0; 2514 msg = ceph_msg_new(type, front_len, GFP_NOFS, false); 2515 if (!msg) { 2516 pr_err("unable to allocate msg type %d len %d\n", 2517 type, front_len); 2518 return NULL; 2519 } 2520 msg->page_alignment = le16_to_cpu(hdr->data_off); 2521 } 2522 memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); 2523 2524 if (middle_len && !msg->middle) { 2525 ret = ceph_alloc_middle(con, msg); 2526 if (ret < 0) { 2527 ceph_msg_put(msg); 2528 return NULL; 2529 } 2530 } 2531 2532 return msg; 2533 } 2534 2535 2536 /* 2537 * Free a generically kmalloc'd message. 2538 */ 2539 void ceph_msg_kfree(struct ceph_msg *m) 2540 { 2541 dout("msg_kfree %p\n", m); 2542 if (m->front_is_vmalloc) 2543 vfree(m->front.iov_base); 2544 else 2545 kfree(m->front.iov_base); 2546 kfree(m); 2547 } 2548 2549 /* 2550 * Drop a msg ref. Destroy as needed. 2551 */ 2552 void ceph_msg_last_put(struct kref *kref) 2553 { 2554 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref); 2555 2556 dout("ceph_msg_put last one on %p\n", m); 2557 WARN_ON(!list_empty(&m->list_head)); 2558 2559 /* drop middle, data, if any */ 2560 if (m->middle) { 2561 ceph_buffer_put(m->middle); 2562 m->middle = NULL; 2563 } 2564 m->nr_pages = 0; 2565 m->pages = NULL; 2566 2567 if (m->pagelist) { 2568 ceph_pagelist_release(m->pagelist); 2569 kfree(m->pagelist); 2570 m->pagelist = NULL; 2571 } 2572 2573 m->trail = NULL; 2574 2575 if (m->pool) 2576 ceph_msgpool_put(m->pool, m); 2577 else 2578 ceph_msg_kfree(m); 2579 } 2580 EXPORT_SYMBOL(ceph_msg_last_put); 2581 2582 void ceph_msg_dump(struct ceph_msg *msg) 2583 { 2584 pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg, 2585 msg->front_max, msg->nr_pages); 2586 print_hex_dump(KERN_DEBUG, "header: ", 2587 DUMP_PREFIX_OFFSET, 16, 1, 2588 &msg->hdr, sizeof(msg->hdr), true); 2589 print_hex_dump(KERN_DEBUG, " front: ", 2590 DUMP_PREFIX_OFFSET, 16, 1, 2591 msg->front.iov_base, msg->front.iov_len, true); 2592 if (msg->middle) 2593 print_hex_dump(KERN_DEBUG, "middle: ", 2594 DUMP_PREFIX_OFFSET, 16, 1, 2595 msg->middle->vec.iov_base, 2596 msg->middle->vec.iov_len, true); 2597 print_hex_dump(KERN_DEBUG, "footer: ", 2598 DUMP_PREFIX_OFFSET, 16, 1, 2599 &msg->footer, sizeof(msg->footer), true); 2600 } 2601 EXPORT_SYMBOL(ceph_msg_dump); 2602