#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <net/tcp.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif


static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);

/*
 * nicely render a sockaddr as a string.
 */
#define MAX_ADDR_STR 20
#define MAX_ADDR_STR_LEN 60
static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
static DEFINE_SPINLOCK(addr_str_lock);
static int last_addr_str;

const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
        int i;
        char *s;
        struct sockaddr_in *in4 = (void *)ss;
        struct sockaddr_in6 *in6 = (void *)ss;

        spin_lock(&addr_str_lock);
        i = last_addr_str++;
        if (last_addr_str == MAX_ADDR_STR)
                last_addr_str = 0;
        spin_unlock(&addr_str_lock);
        s = addr_str[i];

        switch (ss->ss_family) {
        case AF_INET:
                snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%u", &in4->sin_addr,
                         (unsigned int)ntohs(in4->sin_port));
                break;

        case AF_INET6:
                snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%u", &in6->sin6_addr,
                         (unsigned int)ntohs(in6->sin6_port));
                break;

        default:
                sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
        }

        return s;
}
EXPORT_SYMBOL(ceph_pr_addr);

static void encode_my_addr(struct ceph_messenger *msgr)
{
        memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
        ceph_encode_addr(&msgr->my_enc_addr);
}
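
/*
 * Illustrative note (not part of the original code): because
 * ceph_pr_addr() returns a pointer into a small round-robin pool of
 * static buffers, the result is only valid until MAX_ADDR_STR further
 * calls, e.g.
 *
 *      dout("peer is %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 *
 * Callers must not cache the returned pointer.
 */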

/*
 * work queue for all reading and writing to/from the socket.
 */
struct workqueue_struct *ceph_msgr_wq;

int ceph_msgr_init(void)
{
        ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
        if (!ceph_msgr_wq) {
                pr_err("msgr_init failed to create workqueue\n");
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
        destroy_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
        flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);


/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_data_ready(struct sock *sk, int count_unused)
{
        struct ceph_connection *con =
                (struct ceph_connection *)sk->sk_user_data;

        if (sk->sk_state != TCP_CLOSE_WAIT) {
                dout("ceph_data_ready on %p state = %lu, queueing work\n",
                     con, con->state);
                queue_con(con);
        }
}

/* socket has buffer space for writing */
static void ceph_write_space(struct sock *sk)
{
        struct ceph_connection *con =
                (struct ceph_connection *)sk->sk_user_data;

        /* only queue to workqueue if there is data we want to write. */
        if (test_bit(WRITE_PENDING, &con->state)) {
                dout("ceph_write_space %p queueing write work\n", con);
                queue_con(con);
        } else {
                dout("ceph_write_space %p nothing to write\n", con);
        }

        /* since we have our own write_space, clear the SOCK_NOSPACE flag */
        clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}

/* socket's state has changed */
static void ceph_state_change(struct sock *sk)
{
        struct ceph_connection *con =
                (struct ceph_connection *)sk->sk_user_data;

        dout("ceph_state_change %p state = %lu sk_state = %u\n",
             con, con->state, sk->sk_state);

        if (test_bit(CLOSED, &con->state))
                return;

        switch (sk->sk_state) {
        case TCP_CLOSE:
                dout("ceph_state_change TCP_CLOSE\n");
                /* fall through */
        case TCP_CLOSE_WAIT:
                dout("ceph_state_change TCP_CLOSE_WAIT\n");
                if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
                        if (test_bit(CONNECTING, &con->state))
                                con->error_msg = "connection failed";
                        else
                                con->error_msg = "socket closed";
                        queue_con(con);
                }
                break;
        case TCP_ESTABLISHED:
                dout("ceph_state_change TCP_ESTABLISHED\n");
                queue_con(con);
                break;
        }
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
                               struct ceph_connection *con)
{
        struct sock *sk = sock->sk;
        sk->sk_user_data = (void *)con;
        sk->sk_data_ready = ceph_data_ready;
        sk->sk_write_space = ceph_write_space;
        sk->sk_state_change = ceph_state_change;
}
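
/*
 * For illustration (a summary of the flow above, not new behavior):
 * all three callbacks funnel into queue_con(), so no real work is
 * done in softirq context; the heavy lifting happens in con_work()
 * on ceph_msgr_wq:
 *
 *      data ready / writable / state change
 *              -> queue_con(con)
 *              -> ceph_msgr_wq runs con_work(con)
 *              -> try_read(con), try_write(con)
 */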


/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
static struct socket *ceph_tcp_connect(struct ceph_connection *con)
{
        struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
        struct socket *sock;
        int ret;

        BUG_ON(con->sock);
        ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
                               IPPROTO_TCP, &sock);
        if (ret)
                return ERR_PTR(ret);
        con->sock = sock;
        sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
        lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

        set_sock_callbacks(sock, con);

        dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

        ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
                                 O_NONBLOCK);
        if (ret == -EINPROGRESS) {
                dout("connect %s EINPROGRESS sk_state = %u\n",
                     ceph_pr_addr(&con->peer_addr.in_addr),
                     sock->sk->sk_state);
                ret = 0;
        }
        if (ret < 0) {
                pr_err("connect %s error %d\n",
                       ceph_pr_addr(&con->peer_addr.in_addr), ret);
                sock_release(sock);
                con->sock = NULL;
                con->error_msg = "connect error";
        }

        if (ret < 0)
                return ERR_PTR(ret);
        return sock;
}

static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
        struct kvec iov = {buf, len};
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
        int r;

        r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
        if (r == -EAGAIN)
                r = 0;
        return r;
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
                            size_t kvlen, size_t len, int more)
{
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
        int r;

        if (more)
                msg.msg_flags |= MSG_MORE;
        else
                msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

        r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
        if (r == -EAGAIN)
                r = 0;
        return r;
}


/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
        int rc;

        dout("con_close_socket on %p sock %p\n", con, con->sock);
        if (!con->sock)
                return 0;
        set_bit(SOCK_CLOSED, &con->state);
        rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
        sock_release(con->sock);
        con->sock = NULL;
        clear_bit(SOCK_CLOSED, &con->state);
        return rc;
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
        list_del_init(&msg->list_head);
        ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
                                                        list_head);
                ceph_msg_remove(msg);
        }
}

static void reset_connection(struct ceph_connection *con)
{
        /* reset connection, out_queue, msg_ and connect_seq */
        /* discard existing out_queue and msg_seq */
        ceph_msg_remove_list(&con->out_queue);
        ceph_msg_remove_list(&con->out_sent);

        if (con->in_msg) {
                ceph_msg_put(con->in_msg);
                con->in_msg = NULL;
        }

        con->connect_seq = 0;
        con->out_seq = 0;
        if (con->out_msg) {
                ceph_msg_put(con->out_msg);
                con->out_msg = NULL;
        }
        con->in_seq = 0;
        con->in_seq_acked = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
        dout("con_close %p peer %s\n", con,
             ceph_pr_addr(&con->peer_addr.in_addr));
        set_bit(CLOSED, &con->state);       /* in case there's queued work */
        clear_bit(STANDBY, &con->state);    /* avoid connect_seq bump */
        clear_bit(LOSSYTX, &con->state);    /* so we retry next connect */
        clear_bit(KEEPALIVE_PENDING, &con->state);
        clear_bit(WRITE_PENDING, &con->state);
        mutex_lock(&con->mutex);
        reset_connection(con);
        con->peer_global_seq = 0;
        cancel_delayed_work(&con->work);
        mutex_unlock(&con->mutex);
        queue_con(con);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
{
        dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
        set_bit(OPENING, &con->state);
        clear_bit(CLOSED, &con->state);
        memcpy(&con->peer_addr, addr, sizeof(*addr));
        con->delay = 0;      /* reset backoff memory */
        queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
        return con->connect_seq > 0;
}

/*
 * generic get/put
 */
struct ceph_connection *ceph_con_get(struct ceph_connection *con)
{
        dout("con_get %p nref = %d -> %d\n", con,
             atomic_read(&con->nref), atomic_read(&con->nref) + 1);
        if (atomic_inc_not_zero(&con->nref))
                return con;
        return NULL;
}

void ceph_con_put(struct ceph_connection *con)
{
        dout("con_put %p nref = %d -> %d\n", con,
             atomic_read(&con->nref), atomic_read(&con->nref) - 1);
        BUG_ON(atomic_read(&con->nref) == 0);
        if (atomic_dec_and_test(&con->nref)) {
                BUG_ON(con->sock);
                kfree(con);
        }
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
{
        dout("con_init %p\n", con);
        memset(con, 0, sizeof(*con));
        atomic_set(&con->nref, 1);
        con->msgr = msgr;
        mutex_init(&con->mutex);
        INIT_LIST_HEAD(&con->out_queue);
        INIT_LIST_HEAD(&con->out_sent);
        INIT_DELAYED_WORK(&con->work, con_work);
}
EXPORT_SYMBOL(ceph_con_init);


/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
        u32 ret;

        spin_lock(&msgr->global_seq_lock);
        if (msgr->global_seq < gt)
                msgr->global_seq = gt;
        ret = ++msgr->global_seq;
        spin_unlock(&msgr->global_seq_lock);
        return ret;
}
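
/*
 * Illustrative example (derived from the code above): with global_seq
 * at 5, get_global_seq(msgr, 0) returns 6; a later
 * get_global_seq(msgr, 10) jumps ahead and returns 11.  Peers use
 * this to tell a newer connection attempt from a stale one (see the
 * RETRY_GLOBAL handling in process_connect()).
 */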


/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid... we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con, int v)
{
        struct ceph_msg *m = con->out_msg;

        dout("prepare_write_message_footer %p\n", con);
        con->out_kvec_is_msg = true;
        con->out_kvec[v].iov_base = &m->footer;
        con->out_kvec[v].iov_len = sizeof(m->footer);
        con->out_kvec_bytes += sizeof(m->footer);
        con->out_kvec_left++;
        con->out_more = m->more_to_follow;
        con->out_msg_done = true;
}

/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
        struct ceph_msg *m;
        int v = 0;

        con->out_kvec_bytes = 0;
        con->out_kvec_is_msg = true;
        con->out_msg_done = false;

        /* Sneak an ack in there first?  If we can get it into the same
         * TCP packet that's a good thing. */
        if (con->in_seq > con->in_seq_acked) {
                con->in_seq_acked = con->in_seq;
                con->out_kvec[v].iov_base = &tag_ack;
                con->out_kvec[v++].iov_len = 1;
                con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
                con->out_kvec[v].iov_base = &con->out_temp_ack;
                con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
                con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
        }

        m = list_first_entry(&con->out_queue,
                             struct ceph_msg, list_head);
        con->out_msg = m;
        if (test_bit(LOSSYTX, &con->state)) {
                list_del_init(&m->list_head);
        } else {
                /* put message on sent list */
                ceph_msg_get(m);
                list_move_tail(&m->list_head, &con->out_sent);
        }

        /*
         * only assign outgoing seq # if we haven't sent this message
         * yet.  if it is requeued, resend with its original seq.
         */
        if (m->needs_out_seq) {
                m->hdr.seq = cpu_to_le64(++con->out_seq);
                m->needs_out_seq = false;
        }

        dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
             m, con->out_seq, le16_to_cpu(m->hdr.type),
             le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
             le32_to_cpu(m->hdr.data_len),
             m->nr_pages);
        BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

        /* tag + hdr + front + middle */
        con->out_kvec[v].iov_base = &tag_msg;
        con->out_kvec[v++].iov_len = 1;
        con->out_kvec[v].iov_base = &m->hdr;
        con->out_kvec[v++].iov_len = sizeof(m->hdr);
        con->out_kvec[v++] = m->front;
        if (m->middle)
                con->out_kvec[v++] = m->middle->vec;
        con->out_kvec_left = v;
        con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
                (m->middle ? m->middle->vec.iov_len : 0);
        con->out_kvec_cur = con->out_kvec;

        /* fill in crc (except data pages), footer */
        con->out_msg->hdr.crc =
                cpu_to_le32(crc32c(0, (void *)&m->hdr,
                                   sizeof(m->hdr) - sizeof(m->hdr.crc)));
        con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
        con->out_msg->footer.front_crc =
                cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
        if (m->middle)
                con->out_msg->footer.middle_crc =
                        cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
                                           m->middle->vec.iov_len));
        else
                con->out_msg->footer.middle_crc = 0;
        con->out_msg->footer.data_crc = 0;
        dout("prepare_write_message front_crc %u middle_crc %u\n",
             le32_to_cpu(con->out_msg->footer.front_crc),
             le32_to_cpu(con->out_msg->footer.middle_crc));

        /* is there a data payload? */
        if (le32_to_cpu(m->hdr.data_len) > 0) {
                /* initialize page iterator */
                con->out_msg_pos.page = 0;
                if (m->pages)
                        con->out_msg_pos.page_pos = m->page_alignment;
                else
                        con->out_msg_pos.page_pos = 0;
                con->out_msg_pos.data_pos = 0;
                con->out_msg_pos.did_page_crc = 0;
                con->out_more = 1;  /* data + footer will follow */
        } else {
                /* no, queue up footer too and be done */
                prepare_write_message_footer(con, v);
        }

        set_bit(WRITE_PENDING, &con->state);
}
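
/*
 * For reference, a sketch of the resulting wire layout (summarized
 * from the code above, not separate protocol documentation):
 *
 *      [tag_ack (1 byte) + le64 ack seq]    (optional, piggybacked)
 *      tag_msg (1 byte)
 *      struct ceph_msg_header hdr           (hdr.crc covers all but itself)
 *      front   (front_len bytes; front_crc in footer)
 *      middle  (optional; middle_crc in footer)
 *      data    (pages/pagelist/bio, data_len bytes; data_crc in footer)
 *      struct ceph_msg_footer
 */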

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
        dout("prepare_write_ack %p %llu -> %llu\n", con,
             con->in_seq_acked, con->in_seq);
        con->in_seq_acked = con->in_seq;

        con->out_kvec[0].iov_base = &tag_ack;
        con->out_kvec[0].iov_len = 1;
        con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
        con->out_kvec[1].iov_base = &con->out_temp_ack;
        con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
        con->out_kvec_left = 2;
        con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
        con->out_kvec_cur = con->out_kvec;
        con->out_more = 1;  /* more will follow.. eventually.. */
        set_bit(WRITE_PENDING, &con->state);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
        dout("prepare_write_keepalive %p\n", con);
        con->out_kvec[0].iov_base = &tag_keepalive;
        con->out_kvec[0].iov_len = 1;
        con->out_kvec_left = 1;
        con->out_kvec_bytes = 1;
        con->out_kvec_cur = con->out_kvec;
        set_bit(WRITE_PENDING, &con->state);
}

/*
 * Connection negotiation.
 */

static void prepare_connect_authorizer(struct ceph_connection *con)
{
        void *auth_buf;
        int auth_len = 0;
        int auth_protocol = 0;

        mutex_unlock(&con->mutex);
        if (con->ops->get_authorizer)
                con->ops->get_authorizer(con, &auth_buf, &auth_len,
                                         &auth_protocol, &con->auth_reply_buf,
                                         &con->auth_reply_buf_len,
                                         con->auth_retry);
        mutex_lock(&con->mutex);

        con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
        con->out_connect.authorizer_len = cpu_to_le32(auth_len);

        con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
        con->out_kvec[con->out_kvec_left].iov_len = auth_len;
        con->out_kvec_left++;
        con->out_kvec_bytes += auth_len;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_messenger *msgr,
                                 struct ceph_connection *con)
{
        int len = strlen(CEPH_BANNER);

        con->out_kvec[0].iov_base = CEPH_BANNER;
        con->out_kvec[0].iov_len = len;
        con->out_kvec[1].iov_base = &msgr->my_enc_addr;
        con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
        con->out_kvec_left = 2;
        con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
        con->out_kvec_cur = con->out_kvec;
        con->out_more = 0;
        set_bit(WRITE_PENDING, &con->state);
}

static void prepare_write_connect(struct ceph_messenger *msgr,
                                  struct ceph_connection *con,
                                  int after_banner)
{
        unsigned global_seq = get_global_seq(con->msgr, 0);
        int proto;

        switch (con->peer_name.type) {
        case CEPH_ENTITY_TYPE_MON:
                proto = CEPH_MONC_PROTOCOL;
                break;
        case CEPH_ENTITY_TYPE_OSD:
                proto = CEPH_OSDC_PROTOCOL;
                break;
        case CEPH_ENTITY_TYPE_MDS:
                proto = CEPH_MDSC_PROTOCOL;
                break;
        default:
                BUG();
        }

        dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
             con->connect_seq, global_seq, proto);

        con->out_connect.features = cpu_to_le64(msgr->supported_features);
        con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
        con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
        con->out_connect.global_seq = cpu_to_le32(global_seq);
        con->out_connect.protocol_version = cpu_to_le32(proto);
        con->out_connect.flags = 0;

        if (!after_banner) {
                con->out_kvec_left = 0;
                con->out_kvec_bytes = 0;
        }
        con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
        con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
        con->out_kvec_left++;
        con->out_kvec_bytes += sizeof(con->out_connect);
        con->out_kvec_cur = con->out_kvec;
        con->out_more = 0;
        set_bit(WRITE_PENDING, &con->state);

        prepare_connect_authorizer(con);
}


/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
        int ret;

        dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
        while (con->out_kvec_bytes > 0) {
                ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
                                       con->out_kvec_left, con->out_kvec_bytes,
                                       con->out_more);
                if (ret <= 0)
                        goto out;
                con->out_kvec_bytes -= ret;
                if (con->out_kvec_bytes == 0)
                        break;            /* done */
                while (ret > 0) {
                        if (ret >= con->out_kvec_cur->iov_len) {
                                ret -= con->out_kvec_cur->iov_len;
                                con->out_kvec_cur++;
                                con->out_kvec_left--;
                        } else {
                                con->out_kvec_cur->iov_len -= ret;
                                con->out_kvec_cur->iov_base += ret;
                                ret = 0;
                                break;
                        }
                }
        }
        con->out_kvec_left = 0;
        con->out_kvec_is_msg = false;
        ret = 1;
out:
        dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
             con->out_kvec_bytes, con->out_kvec_left, ret);
        return ret;  /* done! */
}
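
/*
 * Illustrative caller pattern (a sketch mirroring try_write() below,
 * not new logic): the 1/0/<0 convention lets the worker yield on a
 * full socket without blocking:
 *
 *      ret = write_partial_kvec(con);
 *      if (ret == 0)
 *              return;            (socket full; ceph_write_space requeues)
 *      if (ret < 0)
 *              ceph_fault(con);   (error; reconnect with backoff)
 *      ...otherwise move on to the message data pages
 */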

#ifdef CONFIG_BLOCK
static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
{
        if (!bio) {
                *iter = NULL;
                *seg = 0;
                return;
        }
        *iter = bio;
        *seg = bio->bi_idx;
}

static void iter_bio_next(struct bio **bio_iter, int *seg)
{
        if (*bio_iter == NULL)
                return;

        BUG_ON(*seg >= (*bio_iter)->bi_vcnt);

        (*seg)++;
        if (*seg == (*bio_iter)->bi_vcnt)
                init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
}
#endif

/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
        struct ceph_msg *msg = con->out_msg;
        unsigned data_len = le32_to_cpu(msg->hdr.data_len);
        size_t len;
        int crc = !con->msgr->nocrc;    /* compute crc unless disabled */
        int ret;
        int total_max_write;
        int in_trail = 0;
        size_t trail_len = (msg->trail ? msg->trail->length : 0);

        dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
             con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
             con->out_msg_pos.page_pos);

#ifdef CONFIG_BLOCK
        if (msg->bio && !msg->bio_iter)
                init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
#endif

        while (data_len > con->out_msg_pos.data_pos) {
                struct page *page = NULL;
                void *kaddr = NULL;
                int max_write = PAGE_SIZE;
                int page_shift = 0;

                total_max_write = data_len - trail_len -
                        con->out_msg_pos.data_pos;

                /*
                 * if we are calculating the data crc (the default), we need
                 * to map the page.  if our pages[] has been revoked, use the
                 * zero page.
                 */

                /* have we reached the trail part of the data? */
                if (con->out_msg_pos.data_pos >= data_len - trail_len) {
                        in_trail = 1;

                        total_max_write = data_len - con->out_msg_pos.data_pos;

                        page = list_first_entry(&msg->trail->head,
                                                struct page, lru);
                        if (crc)
                                kaddr = kmap(page);
                        max_write = PAGE_SIZE;
                } else if (msg->pages) {
                        page = msg->pages[con->out_msg_pos.page];
                        if (crc)
                                kaddr = kmap(page);
                } else if (msg->pagelist) {
                        page = list_first_entry(&msg->pagelist->head,
                                                struct page, lru);
                        if (crc)
                                kaddr = kmap(page);
#ifdef CONFIG_BLOCK
                } else if (msg->bio) {
                        struct bio_vec *bv;

                        bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
                        page = bv->bv_page;
                        page_shift = bv->bv_offset;
                        if (crc)
                                kaddr = kmap(page) + page_shift;
                        max_write = bv->bv_len;
#endif
                } else {
                        page = con->msgr->zero_page;
                        if (crc)
                                kaddr = page_address(con->msgr->zero_page);
                }
                len = min_t(int, max_write - con->out_msg_pos.page_pos,
                            total_max_write);

                if (crc && !con->out_msg_pos.did_page_crc) {
                        void *base = kaddr + con->out_msg_pos.page_pos;
                        u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);

                        BUG_ON(kaddr == NULL);
                        con->out_msg->footer.data_crc =
                                cpu_to_le32(crc32c(tmpcrc, base, len));
                        con->out_msg_pos.did_page_crc = 1;
                }
                ret = kernel_sendpage(con->sock, page,
                                      con->out_msg_pos.page_pos + page_shift,
                                      len,
                                      MSG_DONTWAIT | MSG_NOSIGNAL |
                                      MSG_MORE);

                if (crc &&
                    (msg->pages || msg->pagelist || msg->bio || in_trail))
                        kunmap(page);

                if (ret == -EAGAIN)
                        ret = 0;
                if (ret <= 0)
                        goto out;

                con->out_msg_pos.data_pos += ret;
                con->out_msg_pos.page_pos += ret;
                if (ret == len) {
                        con->out_msg_pos.page_pos = 0;
                        con->out_msg_pos.page++;
                        con->out_msg_pos.did_page_crc = 0;
                        if (in_trail)
                                list_move_tail(&page->lru,
                                               &msg->trail->head);
                        else if (msg->pagelist)
                                list_move_tail(&page->lru,
                                               &msg->pagelist->head);
#ifdef CONFIG_BLOCK
                        else if (msg->bio)
                                iter_bio_next(&msg->bio_iter, &msg->bio_seg);
#endif
                }
        }

        dout("write_partial_msg_pages %p msg %p done\n", con, msg);

        /* prepare and queue up footer, too */
        if (!crc)
                con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
        con->out_kvec_bytes = 0;
        con->out_kvec_left = 0;
        con->out_kvec_cur = con->out_kvec;
        prepare_write_message_footer(con, 0);
        ret = 1;
out:
        return ret;
}

/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
        int ret;

        while (con->out_skip > 0) {
                struct kvec iov = {
                        .iov_base = page_address(con->msgr->zero_page),
                        .iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
                };

                ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
                if (ret <= 0)
                        goto out;
                con->out_skip -= ret;
        }
        ret = 1;
out:
        return ret;
}
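
/*
 * A note on the zero page (an explanatory sketch of existing
 * behavior, not new code): if a message is revoked while it is being
 * written, the bytes the stream still expects are padded with zeros
 * so framing stays intact.  ceph_con_revoke() sets con->out_skip,
 * and try_write() drains it here:
 *
 *      con->out_skip = con->out_kvec_bytes;    (in ceph_con_revoke)
 *      ...
 *      ret = write_partial_skip(con);          (in try_write)
 */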

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
        dout("prepare_read_banner %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
        dout("prepare_read_connect %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
        dout("prepare_read_ack %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
        dout("prepare_read_tag %p\n", con);
        con->in_base_pos = 0;
        con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
        dout("prepare_read_message %p\n", con);
        BUG_ON(con->in_msg != NULL);
        con->in_base_pos = 0;
        con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
        return 0;
}


static int read_partial(struct ceph_connection *con,
                        int *to, int size, void *object)
{
        *to += size;
        while (con->in_base_pos < *to) {
                int left = *to - con->in_base_pos;
                int have = size - left;
                int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
                if (ret <= 0)
                        return ret;
                con->in_base_pos += ret;
        }
        return 1;
}


/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
        int ret, to = 0;

        dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

        /* peer's banner */
        ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
        if (ret <= 0)
                goto out;
        ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
                           &con->actual_peer_addr);
        if (ret <= 0)
                goto out;
        ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
                           &con->peer_addr_for_me);
        if (ret <= 0)
                goto out;
out:
        return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
        int ret, to = 0;

        dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

        ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
        if (ret <= 0)
                goto out;
        ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
                           con->auth_reply_buf);
        if (ret <= 0)
                goto out;

        dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
             con, (int)con->in_reply.tag,
             le32_to_cpu(con->in_reply.connect_seq),
             le32_to_cpu(con->in_reply.global_seq));
out:
        return ret;
}
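
/*
 * How read_partial() composes (an illustrative walk-through of the
 * helper above, not new code): @to is a running end-offset cursor and
 * con->in_base_pos is global progress, so sequential calls resume
 * exactly where a short read left off.  In read_partial_banner():
 *
 *      to = 0;
 *      read_partial(con, &to, strlen(CEPH_BANNER), ...);
 *              to == strlen(CEPH_BANNER)
 *      read_partial(con, &to, sizeof(addr), ...);
 *              to == strlen(CEPH_BANNER) + sizeof(addr)
 *
 * A short recvmsg returns 0 and the next worker pass re-enters with
 * in_base_pos unchanged, even mid-field.
 */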

/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
        if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
                pr_err("connect to %s got bad banner\n",
                       ceph_pr_addr(&con->peer_addr.in_addr));
                con->error_msg = "protocol error, bad banner";
                return -1;
        }
        return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
        switch (ss->ss_family) {
        case AF_INET:
                return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
        case AF_INET6:
                return
                 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
                 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
                 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
                 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
        }
        return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
        switch (ss->ss_family) {
        case AF_INET:
                return ntohs(((struct sockaddr_in *)ss)->sin_port);
        case AF_INET6:
                return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
        }
        return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
        switch (ss->ss_family) {
        case AF_INET:
                ((struct sockaddr_in *)ss)->sin_port = htons(p);
                break;
        case AF_INET6:
                ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
                break;
        }
}

/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
                   struct ceph_entity_addr *addr,
                   int max_count, int *count)
{
        int i;
        const char *p = c;

        dout("parse_ips on '%.*s'\n", (int)(end-c), c);
        for (i = 0; i < max_count; i++) {
                const char *ipend;
                struct sockaddr_storage *ss = &addr[i].in_addr;
                struct sockaddr_in *in4 = (void *)ss;
                struct sockaddr_in6 *in6 = (void *)ss;
                int port;
                char delim = ',';

                if (*p == '[') {
                        delim = ']';
                        p++;
                }

                memset(ss, 0, sizeof(*ss));
                if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
                             delim, &ipend))
                        ss->ss_family = AF_INET;
                else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
                                  delim, &ipend))
                        ss->ss_family = AF_INET6;
                else
                        goto bad;
                p = ipend;

                if (delim == ']') {
                        if (*p != ']') {
                                dout("missing matching ']'\n");
                                goto bad;
                        }
                        p++;
                }

                /* port? */
                if (p < end && *p == ':') {
                        port = 0;
                        p++;
                        while (p < end && *p >= '0' && *p <= '9') {
                                port = (port * 10) + (*p - '0');
                                p++;
                        }
                        if (port > 65535 || port == 0)
                                goto bad;
                } else {
                        port = CEPH_MON_PORT;
                }

                addr_set_port(ss, port);

                dout("parse_ips got %s\n", ceph_pr_addr(ss));

                if (p == end)
                        break;
                if (*p != ',')
                        goto bad;
                p++;
        }

        if (p != end)
                goto bad;

        if (count)
                *count = i + 1;
        return 0;

bad:
        pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
        return -EINVAL;
}
EXPORT_SYMBOL(ceph_parse_ips);
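
/*
 * Example use (a hypothetical caller; the address string is made up
 * for illustration):
 *
 *      struct ceph_entity_addr addrs[2];
 *      int count;
 *      const char *s = "1.2.3.4:6789,[::1]";
 *
 *      if (ceph_parse_ips(s, s + strlen(s), addrs, 2, &count) == 0)
 *              dout("parsed %d addrs\n", count);
 *
 * The bare "[::1]" entry gets the default CEPH_MON_PORT.
 */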

static int process_banner(struct ceph_connection *con)
{
        dout("process_banner on %p\n", con);

        if (verify_hello(con) < 0)
                return -1;

        ceph_decode_addr(&con->actual_peer_addr);
        ceph_decode_addr(&con->peer_addr_for_me);

        /*
         * Make sure the other end is who we wanted.  note that the other
         * end may not yet know their ip address, so if it's 0.0.0.0, give
         * them the benefit of the doubt.
         */
        if (memcmp(&con->peer_addr, &con->actual_peer_addr,
                   sizeof(con->peer_addr)) != 0 &&
            !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
              con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
                pr_warning("wrong peer, want %s/%d, got %s/%d\n",
                           ceph_pr_addr(&con->peer_addr.in_addr),
                           (int)le32_to_cpu(con->peer_addr.nonce),
                           ceph_pr_addr(&con->actual_peer_addr.in_addr),
                           (int)le32_to_cpu(con->actual_peer_addr.nonce));
                con->error_msg = "wrong peer at address";
                return -1;
        }

        /*
         * did we learn our address?
         */
        if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
                int port = addr_port(&con->msgr->inst.addr.in_addr);

                memcpy(&con->msgr->inst.addr.in_addr,
                       &con->peer_addr_for_me.in_addr,
                       sizeof(con->peer_addr_for_me.in_addr));
                addr_set_port(&con->msgr->inst.addr.in_addr, port);
                encode_my_addr(con->msgr);
                dout("process_banner learned my addr is %s\n",
                     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
        }

        set_bit(NEGOTIATING, &con->state);
        prepare_read_connect(con);
        return 0;
}

static void fail_protocol(struct ceph_connection *con)
{
        reset_connection(con);
        set_bit(CLOSED, &con->state);  /* in case there's queued work */

        mutex_unlock(&con->mutex);
        if (con->ops->bad_proto)
                con->ops->bad_proto(con);
        mutex_lock(&con->mutex);
}

static int process_connect(struct ceph_connection *con)
{
        u64 sup_feat = con->msgr->supported_features;
        u64 req_feat = con->msgr->required_features;
        u64 server_feat = le64_to_cpu(con->in_reply.features);

        dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

        switch (con->in_reply.tag) {
        case CEPH_MSGR_TAG_FEATURES:
                pr_err("%s%lld %s feature set mismatch,"
                       " my %llx < server's %llx, missing %llx\n",
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr),
                       sup_feat, server_feat, server_feat & ~sup_feat);
                con->error_msg = "missing required protocol features";
                fail_protocol(con);
                return -1;

        case CEPH_MSGR_TAG_BADPROTOVER:
                pr_err("%s%lld %s protocol version mismatch,"
                       " my %d != server's %d\n",
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr),
                       le32_to_cpu(con->out_connect.protocol_version),
                       le32_to_cpu(con->in_reply.protocol_version));
                con->error_msg = "protocol version mismatch";
                fail_protocol(con);
                return -1;

        case CEPH_MSGR_TAG_BADAUTHORIZER:
                con->auth_retry++;
                dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
                     con->auth_retry);
                if (con->auth_retry == 2) {
                        con->error_msg = "connect authorization failure";
                        return -1;
                }
                con->auth_retry = 1;
                prepare_write_connect(con->msgr, con, 0);
                prepare_read_connect(con);
                break;

        case CEPH_MSGR_TAG_RESETSESSION:
                /*
                 * If we connected with a large connect_seq but the peer
                 * has no record of a session with us (no connection, or
                 * connect_seq == 0), they will send RESETSESSION to indicate
                 * that they must have reset their session, and may have
                 * dropped messages.
                 */
                dout("process_connect got RESET peer seq %u\n",
                     le32_to_cpu(con->in_connect.connect_seq));
                pr_err("%s%lld %s connection reset\n",
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr));
                reset_connection(con);
                prepare_write_connect(con->msgr, con, 0);
                prepare_read_connect(con);

                /* Tell ceph about it. */
                mutex_unlock(&con->mutex);
                pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
                if (con->ops->peer_reset)
                        con->ops->peer_reset(con);
                mutex_lock(&con->mutex);
                break;

        case CEPH_MSGR_TAG_RETRY_SESSION:
                /*
                 * If we sent a smaller connect_seq than the peer has, try
                 * again with a larger value.
                 */
                dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
                     le32_to_cpu(con->out_connect.connect_seq),
                     le32_to_cpu(con->in_connect.connect_seq));
                con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
                prepare_write_connect(con->msgr, con, 0);
                prepare_read_connect(con);
                break;

        case CEPH_MSGR_TAG_RETRY_GLOBAL:
                /*
                 * If we sent a smaller global_seq than the peer has, try
                 * again with a larger value.
                 */
                dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
                     con->peer_global_seq,
                     le32_to_cpu(con->in_connect.global_seq));
                get_global_seq(con->msgr,
                               le32_to_cpu(con->in_connect.global_seq));
                prepare_write_connect(con->msgr, con, 0);
                prepare_read_connect(con);
                break;

        case CEPH_MSGR_TAG_READY:
                if (req_feat & ~server_feat) {
                        pr_err("%s%lld %s protocol feature mismatch,"
                               " my required %llx > server's %llx, need %llx\n",
                               ENTITY_NAME(con->peer_name),
                               ceph_pr_addr(&con->peer_addr.in_addr),
                               req_feat, server_feat, req_feat & ~server_feat);
                        con->error_msg = "missing required protocol features";
                        fail_protocol(con);
                        return -1;
                }
                clear_bit(CONNECTING, &con->state);
                con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
                con->connect_seq++;
                con->peer_features = server_feat;
                dout("process_connect got READY gseq %d cseq %d (%d)\n",
                     con->peer_global_seq,
                     le32_to_cpu(con->in_reply.connect_seq),
                     con->connect_seq);
                WARN_ON(con->connect_seq !=
                        le32_to_cpu(con->in_reply.connect_seq));

                if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
                        set_bit(LOSSYTX, &con->state);

                prepare_read_tag(con);
                break;

        case CEPH_MSGR_TAG_WAIT:
                /*
                 * If there is a connection race (we are opening
                 * connections to each other), one of us may just have
                 * to WAIT.  This shouldn't happen if we are the
                 * client.
                 */
                pr_err("process_connect peer connecting WAIT\n");
                /* fall through */

        default:
                pr_err("connect protocol error, will retry\n");
                con->error_msg = "protocol error, garbage tag during connect";
                return -1;
        }
        return 0;
}


/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
        int to = 0;

        return read_partial(con, &to, sizeof(con->in_temp_ack),
                            &con->in_temp_ack);
}
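
/*
 * The ack exchange, schematically (a summary drawn from this file,
 * not a separate spec): each side numbers its messages with hdr.seq
 * and periodically sends
 *
 *      CEPH_MSGR_TAG_ACK (1 byte) + le64 seq
 *
 * meaning "I have received everything through seq".  process_ack()
 * below then drops entries from out_sent, so a reconnect only
 * retransmits what the peer never acknowledged.
 */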

/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
        struct ceph_msg *m;
        u64 ack = le64_to_cpu(con->in_temp_ack);
        u64 seq;

        while (!list_empty(&con->out_sent)) {
                m = list_first_entry(&con->out_sent, struct ceph_msg,
                                     list_head);
                seq = le64_to_cpu(m->hdr.seq);
                if (seq > ack)
                        break;
                dout("got ack for seq %llu type %d at %p\n", seq,
                     le16_to_cpu(m->hdr.type), m);
                ceph_msg_remove(m);
        }
        prepare_read_tag(con);
}


static int read_partial_message_section(struct ceph_connection *con,
                                        struct kvec *section,
                                        unsigned int sec_len, u32 *crc)
{
        int ret, left;

        BUG_ON(!section);

        while (section->iov_len < sec_len) {
                BUG_ON(section->iov_base == NULL);
                left = sec_len - section->iov_len;
                ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
                                       section->iov_len, left);
                if (ret <= 0)
                        return ret;
                section->iov_len += ret;
                if (section->iov_len == sec_len)
                        *crc = crc32c(0, section->iov_base,
                                      section->iov_len);
        }

        return 1;
}

static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
                                       struct ceph_msg_header *hdr,
                                       int *skip);


static int read_partial_message_pages(struct ceph_connection *con,
                                      struct page **pages,
                                      unsigned data_len, int datacrc)
{
        void *p;
        int ret;
        int left;

        left = min((int)(data_len - con->in_msg_pos.data_pos),
                   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
        /* (page) data */
        BUG_ON(pages == NULL);
        p = kmap(pages[con->in_msg_pos.page]);
        ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
                               left);
        if (ret > 0 && datacrc)
                con->in_data_crc =
                        crc32c(con->in_data_crc,
                               p + con->in_msg_pos.page_pos, ret);
        kunmap(pages[con->in_msg_pos.page]);
        if (ret <= 0)
                return ret;
        con->in_msg_pos.data_pos += ret;
        con->in_msg_pos.page_pos += ret;
        if (con->in_msg_pos.page_pos == PAGE_SIZE) {
                con->in_msg_pos.page_pos = 0;
                con->in_msg_pos.page++;
        }

        return ret;
}

#ifdef CONFIG_BLOCK
static int read_partial_message_bio(struct ceph_connection *con,
                                    struct bio **bio_iter, int *bio_seg,
                                    unsigned data_len, int datacrc)
{
        struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
        void *p;
        int ret, left;

        if (IS_ERR(bv))
                return PTR_ERR(bv);

        left = min((int)(data_len - con->in_msg_pos.data_pos),
                   (int)(bv->bv_len - con->in_msg_pos.page_pos));

        p = kmap(bv->bv_page) + bv->bv_offset;

        ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
                               left);
        if (ret > 0 && datacrc)
                con->in_data_crc =
                        crc32c(con->in_data_crc,
                               p + con->in_msg_pos.page_pos, ret);
        kunmap(bv->bv_page);
        if (ret <= 0)
                return ret;
        con->in_msg_pos.data_pos += ret;
        con->in_msg_pos.page_pos += ret;
        if (con->in_msg_pos.page_pos == bv->bv_len) {
                con->in_msg_pos.page_pos = 0;
                iter_bio_next(bio_iter, bio_seg);
        }

        return ret;
}
#endif

/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
        struct ceph_msg *m = con->in_msg;
        int ret;
        int to, left;
        unsigned front_len, middle_len, data_len;
        int datacrc = !con->msgr->nocrc;    /* verify crcs unless disabled */
        int skip;
        u64 seq;

        dout("read_partial_message con %p msg %p\n", con, m);

        /* header */
        while (con->in_base_pos < sizeof(con->in_hdr)) {
                left = sizeof(con->in_hdr) - con->in_base_pos;
                ret = ceph_tcp_recvmsg(con->sock,
                                       (char *)&con->in_hdr + con->in_base_pos,
                                       left);
                if (ret <= 0)
                        return ret;
                con->in_base_pos += ret;
                if (con->in_base_pos == sizeof(con->in_hdr)) {
                        u32 crc = crc32c(0, (void *)&con->in_hdr,
                                 sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
                        if (crc != le32_to_cpu(con->in_hdr.crc)) {
                                pr_err("read_partial_message bad hdr "
                                       " crc %u != expected %u\n",
                                       crc, le32_to_cpu(con->in_hdr.crc));
                                return -EBADMSG;
                        }
                }
        }
        front_len = le32_to_cpu(con->in_hdr.front_len);
        if (front_len > CEPH_MSG_MAX_FRONT_LEN)
                return -EIO;
        middle_len = le32_to_cpu(con->in_hdr.middle_len);
        if (middle_len > CEPH_MSG_MAX_DATA_LEN)
                return -EIO;
        data_len = le32_to_cpu(con->in_hdr.data_len);
        if (data_len > CEPH_MSG_MAX_DATA_LEN)
                return -EIO;

        /* verify seq# */
        seq = le64_to_cpu(con->in_hdr.seq);
        if ((s64)seq - (s64)con->in_seq < 1) {
                pr_info("skipping %s%lld %s seq %lld expected %lld\n",
                        ENTITY_NAME(con->peer_name),
                        ceph_pr_addr(&con->peer_addr.in_addr),
                        seq, con->in_seq + 1);
                con->in_base_pos = -front_len - middle_len - data_len -
                        sizeof(m->footer);
                con->in_tag = CEPH_MSGR_TAG_READY;
                return 0;
        } else if ((s64)seq - (s64)con->in_seq > 1) {
                pr_err("read_partial_message bad seq %lld expected %lld\n",
                       seq, con->in_seq + 1);
                con->error_msg = "bad message sequence # for incoming message";
                return -EBADMSG;
        }

        /* allocate message? */
        if (!con->in_msg) {
                dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
                     con->in_hdr.front_len, con->in_hdr.data_len);
                skip = 0;
                con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
                if (skip) {
                        /* skip this message */
                        dout("alloc_msg said skip message\n");
                        BUG_ON(con->in_msg);
                        con->in_base_pos = -front_len - middle_len - data_len -
                                sizeof(m->footer);
                        con->in_tag = CEPH_MSGR_TAG_READY;
                        con->in_seq++;
                        return 0;
                }
                if (!con->in_msg) {
                        con->error_msg =
                                "error allocating memory for incoming message";
                        return -ENOMEM;
                }
                m = con->in_msg;
                m->front.iov_len = 0;    /* haven't read it yet */
                if (m->middle)
                        m->middle->vec.iov_len = 0;

                con->in_msg_pos.page = 0;
                if (m->pages)
                        con->in_msg_pos.page_pos = m->page_alignment;
                else
                        con->in_msg_pos.page_pos = 0;
                con->in_msg_pos.data_pos = 0;
        }

        /* front */
        ret = read_partial_message_section(con, &m->front, front_len,
                                           &con->in_front_crc);
        if (ret <= 0)
                return ret;

        /* middle */
        if (m->middle) {
                ret = read_partial_message_section(con, &m->middle->vec,
                                                   middle_len,
                                                   &con->in_middle_crc);
                if (ret <= 0)
                        return ret;
        }
#ifdef CONFIG_BLOCK
        if (m->bio && !m->bio_iter)
                init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
#endif

        /* (page) data */
        while (con->in_msg_pos.data_pos < data_len) {
                if (m->pages) {
                        ret = read_partial_message_pages(con, m->pages,
                                                         data_len, datacrc);
                        if (ret <= 0)
                                return ret;
#ifdef CONFIG_BLOCK
                } else if (m->bio) {
                        ret = read_partial_message_bio(con,
                                                 &m->bio_iter, &m->bio_seg,
                                                 data_len, datacrc);
                        if (ret <= 0)
                                return ret;
#endif
                } else {
                        BUG_ON(1);
                }
        }

        /* footer */
        to = sizeof(m->hdr) + sizeof(m->footer);
        while (con->in_base_pos < to) {
                left = to - con->in_base_pos;
                ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
                                       (con->in_base_pos - sizeof(m->hdr)),
                                       left);
                if (ret <= 0)
                        return ret;
                con->in_base_pos += ret;
        }
        dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
             m, front_len, m->footer.front_crc, middle_len,
             m->footer.middle_crc, data_len, m->footer.data_crc);

        /* crc ok? */
        if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
                pr_err("read_partial_message %p front crc %u != exp. %u\n",
                       m, con->in_front_crc, le32_to_cpu(m->footer.front_crc));
                return -EBADMSG;
        }
        if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
                pr_err("read_partial_message %p middle crc %u != exp %u\n",
                       m, con->in_middle_crc,
                       le32_to_cpu(m->footer.middle_crc));
                return -EBADMSG;
        }
        if (datacrc &&
            (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
            con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
                pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
                       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
                return -EBADMSG;
        }

        return 1; /* done! */
}

/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
        struct ceph_msg *msg;

        msg = con->in_msg;
        con->in_msg = NULL;

        /* if first message, set peer_name */
        if (con->peer_name.type == 0)
                con->peer_name = msg->hdr.src;

        con->in_seq++;
        mutex_unlock(&con->mutex);

        dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
             msg, le64_to_cpu(msg->hdr.seq),
             ENTITY_NAME(msg->hdr.src),
             le16_to_cpu(msg->hdr.type),
             ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
             le32_to_cpu(msg->hdr.front_len),
             le32_to_cpu(msg->hdr.data_len),
             con->in_front_crc, con->in_middle_crc, con->in_data_crc);
        con->ops->dispatch(con, msg);

        mutex_lock(&con->mutex);
        prepare_read_tag(con);
}


/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
        struct ceph_messenger *msgr = con->msgr;
        int ret = 1;

        dout("try_write start %p state %lu nref %d\n", con, con->state,
             atomic_read(&con->nref));

more:
        dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

        /* open the socket first? */
        if (con->sock == NULL) {
                prepare_write_banner(msgr, con);
                prepare_write_connect(msgr, con, 1);
                prepare_read_banner(con);
                set_bit(CONNECTING, &con->state);
                clear_bit(NEGOTIATING, &con->state);

                BUG_ON(con->in_msg);
                con->in_tag = CEPH_MSGR_TAG_READY;
                dout("try_write initiating connect on %p new state %lu\n",
                     con, con->state);
                con->sock = ceph_tcp_connect(con);
                if (IS_ERR(con->sock)) {
                        con->sock = NULL;
                        con->error_msg = "connect error";
                        ret = -1;
                        goto out;
                }
        }

more_kvec:
        /* kvec data queued? */
        if (con->out_skip) {
                ret = write_partial_skip(con);
                if (ret <= 0)
                        goto out;
        }
        if (con->out_kvec_left) {
                ret = write_partial_kvec(con);
                if (ret <= 0)
                        goto out;
        }

        /* msg pages? */
        if (con->out_msg) {
                if (con->out_msg_done) {
                        ceph_msg_put(con->out_msg);
                        con->out_msg = NULL;   /* we're done with this one */
                        goto do_next;
                }

                ret = write_partial_msg_pages(con);
                if (ret == 1)
                        goto more_kvec;  /* we need to send the footer, too! */
                if (ret == 0)
                        goto out;
                if (ret < 0) {
                        dout("try_write write_partial_msg_pages err %d\n",
                             ret);
                        goto out;
                }
        }

do_next:
        if (!test_bit(CONNECTING, &con->state)) {
                /* is anything else pending? */
                if (!list_empty(&con->out_queue)) {
                        prepare_write_message(con);
                        goto more;
                }
                if (con->in_seq > con->in_seq_acked) {
                        prepare_write_ack(con);
                        goto more;
                }
                if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
                        prepare_write_keepalive(con);
                        goto more;
                }
        }

        /* Nothing to do! */
        clear_bit(WRITE_PENDING, &con->state);
        dout("try_write nothing else to write.\n");
        ret = 0;
out:
        dout("try_write done on %p ret %d\n", con, ret);
        return ret;
}


/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
        int ret = -1;

        if (!con->sock)
                return 0;

        if (test_bit(STANDBY, &con->state))
                return 0;

        dout("try_read start on %p\n", con);

more:
        dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
             con->in_base_pos);
        if (test_bit(CONNECTING, &con->state)) {
                if (!test_bit(NEGOTIATING, &con->state)) {
                        dout("try_read connecting\n");
                        ret = read_partial_banner(con);
                        if (ret <= 0)
                                goto out;
                        ret = process_banner(con);
                        if (ret < 0)
                                goto out;
                }
                ret = read_partial_connect(con);
                if (ret <= 0)
                        goto out;
                ret = process_connect(con);
                if (ret < 0)
                        goto out;
                goto more;
        }

        if (con->in_base_pos < 0) {
                /*
                 * skipping + discarding content.
                 *
                 * FIXME: there must be a better way to do this!
                 */
                static char buf[1024];
                int skip = min(1024, -con->in_base_pos);
                dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
                ret = ceph_tcp_recvmsg(con->sock, buf, skip);
                if (ret <= 0)
                        goto out;
                con->in_base_pos += ret;
                if (con->in_base_pos)
                        goto more;
        }
        if (con->in_tag == CEPH_MSGR_TAG_READY) {
                /*
                 * what's next?
                 */
                ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
                if (ret <= 0)
                        goto out;
                dout("try_read got tag %d\n", (int)con->in_tag);
                switch (con->in_tag) {
                case CEPH_MSGR_TAG_MSG:
                        prepare_read_message(con);
                        break;
                case CEPH_MSGR_TAG_ACK:
                        prepare_read_ack(con);
                        break;
                case CEPH_MSGR_TAG_CLOSE:
                        set_bit(CLOSED, &con->state);   /* fixme */
                        goto out;
                default:
                        goto bad_tag;
                }
        }
        if (con->in_tag == CEPH_MSGR_TAG_MSG) {
                ret = read_partial_message(con);
                if (ret <= 0) {
                        switch (ret) {
                        case -EBADMSG:
                                con->error_msg = "bad crc";
                                ret = -EIO;
                                break;
                        case -EIO:
                                con->error_msg = "io error";
                                break;
                        }
                        goto out;
                }
                if (con->in_tag == CEPH_MSGR_TAG_READY)
                        goto more;
                process_message(con);
                goto more;
        }
        if (con->in_tag == CEPH_MSGR_TAG_ACK) {
                ret = read_partial_ack(con);
                if (ret <= 0)
                        goto out;
                process_ack(con);
                goto more;
        }

out:
        dout("try_read done on %p ret %d\n", con, ret);
        return ret;

bad_tag:
        pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
        con->error_msg = "protocol error, garbage tag";
        ret = -1;
        goto out;
}


/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 */
static void queue_con(struct ceph_connection *con)
{
        if (test_bit(DEAD, &con->state)) {
                dout("queue_con %p ignoring: DEAD\n",
                     con);
                return;
        }

        if (!con->ops->get(con)) {
                dout("queue_con %p ref count 0\n", con);
                return;
        }

        if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
                dout("queue_con %p - already queued\n", con);
                con->ops->put(con);
        } else {
                dout("queue_con %p\n", con);
        }
}

/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
        struct ceph_connection *con = container_of(work, struct ceph_connection,
                                                   work.work);

        mutex_lock(&con->mutex);
        if (test_and_clear_bit(BACKOFF, &con->state)) {
                dout("con_work %p backing off\n", con);
                if (queue_delayed_work(ceph_msgr_wq, &con->work,
                                       round_jiffies_relative(con->delay))) {
                        dout("con_work %p backoff %lu\n", con, con->delay);
                        mutex_unlock(&con->mutex);
                        return;
                } else {
                        con->ops->put(con);
                        dout("con_work %p FAILED to back off %lu\n", con,
                             con->delay);
                }
        }

        if (test_bit(STANDBY, &con->state)) {
                dout("con_work %p STANDBY\n", con);
                goto done;
        }
        if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
                dout("con_work CLOSED\n");
                con_close_socket(con);
                goto done;
        }
        if (test_and_clear_bit(OPENING, &con->state)) {
                /* reopen w/ new peer */
                dout("con_work OPENING\n");
                con_close_socket(con);
        }

        if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
            try_read(con) < 0 ||
            try_write(con) < 0) {
                mutex_unlock(&con->mutex);
                ceph_fault(con);     /* error/fault path */
                goto done_unlocked;
        }

done:
        mutex_unlock(&con->mutex);
done_unlocked:
        con->ops->put(con);
}
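
/*
 * Backoff, by example (a sketch of the mechanism split between
 * con_work() above and ceph_fault() below; the exact bounds come
 * from BASE_DELAY_INTERVAL and MAX_DELAY_INTERVAL): each successive
 * fault doubles con->delay, so reconnects are scheduled roughly at
 *
 *      delay = BASE, 2*BASE, 4*BASE, ... capped at MAX_DELAY_INTERVAL
 *
 * If the delayed work cannot be queued because (non-delayed) work is
 * already pending, ceph_fault() sets BACKOFF and con_work() re-queues
 * itself with the delay on its next pass.
 */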

/*
 * create a new messenger instance
 */
struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
					     u32 supported_features,
					     u32 required_features)
{
	struct ceph_messenger *msgr;

	msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
	if (msgr == NULL)
		return ERR_PTR(-ENOMEM);

	msgr->supported_features = supported_features;
	msgr->required_features = required_features;

	spin_lock_init(&msgr->global_seq_lock);

	/* the zero page is needed if a request is "canceled" while the message
	 * is being written over the socket */
	msgr->zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
	if (!msgr->zero_page) {
		kfree(msgr);
		return ERR_PTR(-ENOMEM);
	}
	kmap(msgr->zero_page);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);

	dout("messenger_create %p\n", msgr);
	return msgr;
}
EXPORT_SYMBOL(ceph_messenger_create);

void ceph_messenger_destroy(struct ceph_messenger *msgr)
{
	dout("destroy %p\n", msgr);
	kunmap(msgr->zero_page);
	__free_page(msgr->zero_page);
	kfree(msgr);
	dout("destroyed messenger %p\n", msgr);
}
EXPORT_SYMBOL(ceph_messenger_destroy);

static void clear_standby(struct ceph_connection *con)
{
	/* come back from STANDBY? */
	if (test_and_clear_bit(STANDBY, &con->state)) {
		mutex_lock(&con->mutex);
		dout("clear_standby %p and ++connect_seq\n", con);
		con->connect_seq++;
		WARN_ON(test_bit(WRITE_PENDING, &con->state));
		WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
		mutex_unlock(&con->mutex);
	}
}
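
/*
 * Usage sketch (hypothetical caller, loosely modeled on a client init
 * path; supported_features/required_features are placeholders supplied
 * by the caller, and myaddr may be NULL to use any local address).
 * Note the ERR_PTR()-style return:
 *
 *	struct ceph_messenger *msgr;
 *
 *	msgr = ceph_messenger_create(NULL, supported_features,
 *				     required_features);
 *	if (IS_ERR(msgr))
 *		return PTR_ERR(msgr);
 *	...
 *	ceph_messenger_destroy(msgr);
 */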
/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	if (test_bit(CLOSED, &con->state)) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		return;
	}

	/* set src+dst */
	msg->hdr.src = con->msgr->inst.name;

	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));

	msg->needs_out_seq = true;

	/* queue */
	mutex_lock(&con->mutex);
	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	clear_standby(con);
	if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);

/*
 * Revoke a message that was previously queued for send
 */
void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("con_revoke %p msg %p - was on queue\n", con, msg);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
		msg->hdr.seq = 0;
	}
	if (con->out_msg == msg) {
		dout("con_revoke %p msg %p - was sending\n", con, msg);
		con->out_msg = NULL;
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
		ceph_msg_put(msg);
		msg->hdr.seq = 0;
	}
	mutex_unlock(&con->mutex);
}

/*
 * Revoke a message that we may be reading data into
 */
void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (con->in_msg && con->in_msg == msg) {
		unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("con_revoke_pages %p msg %p revoked\n", con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
	} else {
		dout("con_revoke_pages %p msg %p pages %p no-op\n",
		     con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	dout("con_keepalive %p\n", con);
	clear_standby(con);
	if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
	    test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
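
/*
 * Usage sketch (hypothetical caller, loosely modeled on the request
 * paths in the mon/osd clients; "type", "front_len" and "tid" are
 * placeholders).  The caller keeps its own reference and hands a
 * second one to the connection, which lets it revoke the message
 * later, e.g. when the request times out:
 *
 *	struct ceph_msg *req = ceph_msg_new(type, front_len, GFP_NOFS);
 *	if (!req)
 *		return -ENOMEM;
 *	req->hdr.tid = cpu_to_le64(tid);
 *	ceph_con_send(con, ceph_msg_get(req));	// con now owns one ref
 *	...
 *	ceph_con_revoke(con, req);		// e.g. request timed out
 *	ceph_msg_put(req);			// drop the caller's ref
 */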
/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
{
	struct ceph_msg *m;

	m = kmalloc(sizeof(*m), flags);
	if (m == NULL)
		goto out;
	kref_init(&m->kref);
	INIT_LIST_HEAD(&m->list_head);

	m->hdr.tid = 0;
	m->hdr.type = cpu_to_le16(type);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->hdr.version = 0;
	m->hdr.front_len = cpu_to_le32(front_len);
	m->hdr.middle_len = 0;
	m->hdr.data_len = 0;
	m->hdr.data_off = 0;
	m->hdr.reserved = 0;
	m->footer.front_crc = 0;
	m->footer.middle_crc = 0;
	m->footer.data_crc = 0;
	m->footer.flags = 0;
	m->front_max = front_len;
	m->front_is_vmalloc = false;
	m->more_to_follow = false;
	m->pool = NULL;

	/* middle */
	m->middle = NULL;

	/* data */
	m->nr_pages = 0;
	m->page_alignment = 0;
	m->pages = NULL;
	m->pagelist = NULL;
	m->bio = NULL;
	m->bio_iter = NULL;
	m->bio_seg = 0;
	m->trail = NULL;

	/* front */
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, flags,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, flags);
		}
		if (m->front.iov_base == NULL) {
			pr_err("msg_new can't allocate %d bytes\n",
			       front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	dout("ceph_msg_new %p front %d\n", m, front_len);
	return m;

out2:
	ceph_msg_put(m);
out:
	pr_err("msg_new can't create type %d front %d\n", type, front_len);
	return NULL;
}
EXPORT_SYMBOL(ceph_msg_new);

/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}
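
/*
 * Sketch of the front-buffer allocation policy above (illustrative
 * only; "type" is a placeholder message type): fronts up to
 * PAGE_CACHE_SIZE come from kmalloc, anything larger falls back to
 * vmalloc, and ceph_msg_kfree() below frees whichever was used based
 * on m->front_is_vmalloc:
 *
 *	struct ceph_msg *small = ceph_msg_new(type, 128, GFP_NOFS);
 *	struct ceph_msg *large = ceph_msg_new(type, 2 * PAGE_CACHE_SIZE,
 *					      GFP_NOFS);
 *	// small->front_is_vmalloc == false, large->front_is_vmalloc == true
 *	ceph_msg_put(small);
 *	ceph_msg_put(large);
 */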
/*
 * Generic message allocator, for incoming messages.
 */
static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				       struct ceph_msg_header *hdr,
				       int *skip)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	int middle_len = le32_to_cpu(hdr->middle_len);
	struct ceph_msg *msg = NULL;
	int ret;

	if (con->ops->alloc_msg) {
		mutex_unlock(&con->mutex);
		msg = con->ops->alloc_msg(con, hdr, skip);
		mutex_lock(&con->mutex);
		if (!msg || *skip)
			return NULL;
	}
	if (!msg) {
		*skip = 0;
		msg = ceph_msg_new(type, front_len, GFP_NOFS);
		if (!msg) {
			pr_err("unable to allocate msg type %d len %d\n",
			       type, front_len);
			return NULL;
		}
		msg->page_alignment = le16_to_cpu(hdr->data_off);
	}
	memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !msg->middle) {
		ret = ceph_alloc_middle(con, msg);
		if (ret < 0) {
			ceph_msg_put(msg);
			return NULL;
		}
	}

	return msg;
}


/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	if (m->front_is_vmalloc)
		vfree(m->front.iov_base);
	else
		kfree(m->front.iov_base);
	kfree(m);
}

/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}
	m->nr_pages = 0;
	m->pages = NULL;

	if (m->pagelist) {
		ceph_pagelist_release(m->pagelist);
		kfree(m->pagelist);
		m->pagelist = NULL;
	}

	m->trail = NULL;

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
EXPORT_SYMBOL(ceph_msg_last_put);

void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
		 msg->front_max, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);
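
/*
 * Sketch of the ops->alloc_msg contract assumed by ceph_alloc_msg()
 * above (hypothetical callback; example_alloc_msg() and
 * request_exists() are placeholders, loosely modeled on the mon/osd
 * clients).  Note the callback runs with con->mutex dropped; setting
 * *skip tells the messenger to read and discard the message body:
 *
 *	static struct ceph_msg *example_alloc_msg(struct ceph_connection *con,
 *						  struct ceph_msg_header *hdr,
 *						  int *skip)
 *	{
 *		int type = le16_to_cpu(hdr->type);
 *		int front_len = le32_to_cpu(hdr->front_len);
 *
 *		*skip = 0;
 *		if (!request_exists(le64_to_cpu(hdr->tid))) {	// placeholder
 *			*skip = 1;	// messenger discards the message
 *			return NULL;
 *		}
 *		return ceph_msg_new(type, front_len, GFP_NOFS);
 *	}
 */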