// SPDX-License-Identifier: GPL-2.0
/*
 * Ceph msgr2 protocol implementation
 *
 * Copyright (C) 2020 Ilya Dryomov <idryomov@gmail.com>
 */

#include <linux/ceph/ceph_debug.h>

#include <crypto/aead.h>
#include <crypto/algapi.h>  /* for crypto_memneq() */
#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/bvec.h>
#include <linux/crc32c.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/sched/mm.h>
#include <net/sock.h>
#include <net/tcp.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>

#include "crypto.h"  /* for CEPH_KEY_LEN and CEPH_MAX_CON_SECRET_LEN */

#define FRAME_TAG_HELLO			1
#define FRAME_TAG_AUTH_REQUEST		2
#define FRAME_TAG_AUTH_BAD_METHOD	3
#define FRAME_TAG_AUTH_REPLY_MORE	4
#define FRAME_TAG_AUTH_REQUEST_MORE	5
#define FRAME_TAG_AUTH_DONE		6
#define FRAME_TAG_AUTH_SIGNATURE	7
#define FRAME_TAG_CLIENT_IDENT		8
#define FRAME_TAG_SERVER_IDENT		9
#define FRAME_TAG_IDENT_MISSING_FEATURES	10
#define FRAME_TAG_SESSION_RECONNECT	11
#define FRAME_TAG_SESSION_RESET		12
#define FRAME_TAG_SESSION_RETRY		13
#define FRAME_TAG_SESSION_RETRY_GLOBAL	14
#define FRAME_TAG_SESSION_RECONNECT_OK	15
#define FRAME_TAG_WAIT			16
#define FRAME_TAG_MESSAGE		17
#define FRAME_TAG_KEEPALIVE2		18
#define FRAME_TAG_KEEPALIVE2_ACK	19
#define FRAME_TAG_ACK			20

#define FRAME_LATE_STATUS_ABORTED	0x1
#define FRAME_LATE_STATUS_COMPLETE	0xe
#define FRAME_LATE_STATUS_ABORTED_MASK	0xf

#define IN_S_HANDLE_PREAMBLE		1
#define IN_S_HANDLE_CONTROL		2
#define IN_S_HANDLE_CONTROL_REMAINDER	3
#define IN_S_PREPARE_READ_DATA		4
#define IN_S_PREPARE_READ_DATA_CONT	5
#define IN_S_PREPARE_READ_ENC_PAGE	6
#define IN_S_HANDLE_EPILOGUE		7
#define IN_S_FINISH_SKIP		8

#define OUT_S_QUEUE_DATA		1
#define OUT_S_QUEUE_DATA_CONT		2
#define OUT_S_QUEUE_ENC_PAGE		3
#define OUT_S_QUEUE_ZEROS		4
#define OUT_S_FINISH_MESSAGE		5
#define OUT_S_GET_NEXT			6

#define CTRL_BODY(p)	((void *)(p) + CEPH_PREAMBLE_LEN)
#define FRONT_PAD(p)	((void *)(p) + CEPH_EPILOGUE_SECURE_LEN)
#define MIDDLE_PAD(p)	(FRONT_PAD(p) + CEPH_GCM_BLOCK_LEN)
#define DATA_PAD(p)	(MIDDLE_PAD(p) + CEPH_GCM_BLOCK_LEN)

#define CEPH_MSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)

static int do_recvmsg(struct socket *sock, struct iov_iter *it)
{
	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
	int ret;

	msg.msg_iter = *it;
	while (iov_iter_count(it)) {
		ret = sock_recvmsg(sock, &msg, msg.msg_flags);
		if (ret <= 0) {
			if (ret == -EAGAIN)
				ret = 0;
			return ret;
		}

		iov_iter_advance(it, ret);
	}

	WARN_ON(msg_data_left(&msg));
	return 1;
}

/*
 * Read as much as possible.
 *
 * Return:
 *   1 - done, nothing (else) to read
 *   0 - socket is empty, need to wait
 *  <0 - error
 */
static int ceph_tcp_recv(struct ceph_connection *con)
{
	int ret;

	dout("%s con %p %s %zu\n", __func__, con,
	     iov_iter_is_discard(&con->v2.in_iter) ? "discard" : "need",
"discard" : "need", 113 iov_iter_count(&con->v2.in_iter)); 114 ret = do_recvmsg(con->sock, &con->v2.in_iter); 115 dout("%s con %p ret %d left %zu\n", __func__, con, ret, 116 iov_iter_count(&con->v2.in_iter)); 117 return ret; 118 } 119 120 static int do_sendmsg(struct socket *sock, struct iov_iter *it) 121 { 122 struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS }; 123 int ret; 124 125 msg.msg_iter = *it; 126 while (iov_iter_count(it)) { 127 ret = sock_sendmsg(sock, &msg); 128 if (ret <= 0) { 129 if (ret == -EAGAIN) 130 ret = 0; 131 return ret; 132 } 133 134 iov_iter_advance(it, ret); 135 } 136 137 WARN_ON(msg_data_left(&msg)); 138 return 1; 139 } 140 141 static int do_try_sendpage(struct socket *sock, struct iov_iter *it) 142 { 143 struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS }; 144 struct bio_vec bv; 145 int ret; 146 147 if (WARN_ON(!iov_iter_is_bvec(it))) 148 return -EINVAL; 149 150 while (iov_iter_count(it)) { 151 /* iov_iter_iovec() for ITER_BVEC */ 152 bv.bv_page = it->bvec->bv_page; 153 bv.bv_offset = it->bvec->bv_offset + it->iov_offset; 154 bv.bv_len = min(iov_iter_count(it), 155 it->bvec->bv_len - it->iov_offset); 156 157 /* 158 * sendpage cannot properly handle pages with 159 * page_count == 0, we need to fall back to sendmsg if 160 * that's the case. 161 * 162 * Same goes for slab pages: skb_can_coalesce() allows 163 * coalescing neighboring slab objects into a single frag 164 * which triggers one of hardened usercopy checks. 165 */ 166 if (sendpage_ok(bv.bv_page)) { 167 ret = sock->ops->sendpage(sock, bv.bv_page, 168 bv.bv_offset, bv.bv_len, 169 CEPH_MSG_FLAGS); 170 } else { 171 iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, bv.bv_len); 172 ret = sock_sendmsg(sock, &msg); 173 } 174 if (ret <= 0) { 175 if (ret == -EAGAIN) 176 ret = 0; 177 return ret; 178 } 179 180 iov_iter_advance(it, ret); 181 } 182 183 return 1; 184 } 185 186 /* 187 * Write as much as possible. The socket is expected to be corked, 188 * so we don't bother with MSG_MORE/MSG_SENDPAGE_NOTLAST here. 
 *
 * Return:
 *   1 - done, nothing (else) to write
 *   0 - socket is full, need to wait
 *  <0 - error
 */
static int ceph_tcp_send(struct ceph_connection *con)
{
	int ret;

	dout("%s con %p have %zu try_sendpage %d\n", __func__, con,
	     iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage);
	if (con->v2.out_iter_sendpage)
		ret = do_try_sendpage(con->sock, &con->v2.out_iter);
	else
		ret = do_sendmsg(con->sock, &con->v2.out_iter);
	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
	     iov_iter_count(&con->v2.out_iter));
	return ret;
}

static void add_in_kvec(struct ceph_connection *con, void *buf, int len)
{
	BUG_ON(con->v2.in_kvec_cnt >= ARRAY_SIZE(con->v2.in_kvecs));
	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));

	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_base = buf;
	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_len = len;
	con->v2.in_kvec_cnt++;

	con->v2.in_iter.nr_segs++;
	con->v2.in_iter.count += len;
}

static void reset_in_kvecs(struct ceph_connection *con)
{
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	con->v2.in_kvec_cnt = 0;
	iov_iter_kvec(&con->v2.in_iter, READ, con->v2.in_kvecs, 0, 0);
}

static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
{
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	con->v2.in_bvec = *bv;
	iov_iter_bvec(&con->v2.in_iter, READ, &con->v2.in_bvec, 1, bv->bv_len);
}

static void set_in_skip(struct ceph_connection *con, int len)
{
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	dout("%s con %p len %d\n", __func__, con, len);
	iov_iter_discard(&con->v2.in_iter, READ, len);
}

static void add_out_kvec(struct ceph_connection *con, void *buf, int len)
{
	BUG_ON(con->v2.out_kvec_cnt >= ARRAY_SIZE(con->v2.out_kvecs));
	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
	WARN_ON(con->v2.out_zero);

	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_base = buf;
	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_len = len;
	con->v2.out_kvec_cnt++;

	con->v2.out_iter.nr_segs++;
	con->v2.out_iter.count += len;
}

static void reset_out_kvecs(struct ceph_connection *con)
{
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	WARN_ON(con->v2.out_zero);

	con->v2.out_kvec_cnt = 0;

	iov_iter_kvec(&con->v2.out_iter, WRITE, con->v2.out_kvecs, 0, 0);
	con->v2.out_iter_sendpage = false;
}

static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv,
			 bool zerocopy)
{
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	WARN_ON(con->v2.out_zero);

	con->v2.out_bvec = *bv;
	con->v2.out_iter_sendpage = zerocopy;
	iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1,
		      con->v2.out_bvec.bv_len);
}

static void set_out_bvec_zero(struct ceph_connection *con)
{
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	WARN_ON(!con->v2.out_zero);

	con->v2.out_bvec.bv_page = ceph_zero_page;
	con->v2.out_bvec.bv_offset = 0;
	con->v2.out_bvec.bv_len = min(con->v2.out_zero, (int)PAGE_SIZE);
	con->v2.out_iter_sendpage = true;
	iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1,
		      con->v2.out_bvec.bv_len);
}

static void out_zero_add(struct ceph_connection *con, int len)
{
	dout("%s con %p len %d\n", __func__, con, len);
	con->v2.out_zero += len;
}

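/*
 * Note on the helpers above: out_zero counts bytes that still have to go
 * out on the wire as zeros (backed by the shared ceph_zero_page), e.g.
 * when the remainder of a frame must be filled in without touching the
 * message data; set_out_bvec_zero() feeds it to the socket one page at
 * a time.
 */
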
static void *alloc_conn_buf(struct ceph_connection *con, int len)
{
	void *buf;

	dout("%s con %p len %d\n", __func__, con, len);

	if (WARN_ON(con->v2.conn_buf_cnt >= ARRAY_SIZE(con->v2.conn_bufs)))
		return NULL;

	buf = kvmalloc(len, GFP_NOIO);
	if (!buf)
		return NULL;

	con->v2.conn_bufs[con->v2.conn_buf_cnt++] = buf;
	return buf;
}

static void free_conn_bufs(struct ceph_connection *con)
{
	while (con->v2.conn_buf_cnt)
		kvfree(con->v2.conn_bufs[--con->v2.conn_buf_cnt]);
}

static void add_in_sign_kvec(struct ceph_connection *con, void *buf, int len)
{
	BUG_ON(con->v2.in_sign_kvec_cnt >= ARRAY_SIZE(con->v2.in_sign_kvecs));

	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_base = buf;
	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_len = len;
	con->v2.in_sign_kvec_cnt++;
}

static void clear_in_sign_kvecs(struct ceph_connection *con)
{
	con->v2.in_sign_kvec_cnt = 0;
}

static void add_out_sign_kvec(struct ceph_connection *con, void *buf, int len)
{
	BUG_ON(con->v2.out_sign_kvec_cnt >= ARRAY_SIZE(con->v2.out_sign_kvecs));

	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_base = buf;
	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_len = len;
	con->v2.out_sign_kvec_cnt++;
}

static void clear_out_sign_kvecs(struct ceph_connection *con)
{
	con->v2.out_sign_kvec_cnt = 0;
}

static bool con_secure(struct ceph_connection *con)
{
	return con->v2.con_mode == CEPH_CON_MODE_SECURE;
}

static int front_len(const struct ceph_msg *msg)
{
	return le32_to_cpu(msg->hdr.front_len);
}

static int middle_len(const struct ceph_msg *msg)
{
	return le32_to_cpu(msg->hdr.middle_len);
}

static int data_len(const struct ceph_msg *msg)
{
	return le32_to_cpu(msg->hdr.data_len);
}

static bool need_padding(int len)
{
	return !IS_ALIGNED(len, CEPH_GCM_BLOCK_LEN);
}

static int padded_len(int len)
{
	return ALIGN(len, CEPH_GCM_BLOCK_LEN);
}

static int padding_len(int len)
{
	return padded_len(len) - len;
}

/* preamble + control segment */
static int head_onwire_len(int ctrl_len, bool secure)
{
	int head_len;
	int rem_len;

	if (secure) {
		head_len = CEPH_PREAMBLE_SECURE_LEN;
		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
			rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
			head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN;
		}
	} else {
		head_len = CEPH_PREAMBLE_PLAIN_LEN;
		if (ctrl_len)
			head_len += ctrl_len + CEPH_CRC_LEN;
	}
	return head_len;
}

/* front, middle and data segments + epilogue */
static int __tail_onwire_len(int front_len, int middle_len, int data_len,
			     bool secure)
{
	if (!front_len && !middle_len && !data_len)
		return 0;

	if (!secure)
		return front_len + middle_len + data_len +
		       CEPH_EPILOGUE_PLAIN_LEN;

	return padded_len(front_len) + padded_len(middle_len) +
	       padded_len(data_len) + CEPH_EPILOGUE_SECURE_LEN;
}

static int tail_onwire_len(const struct ceph_msg *msg, bool secure)
{
	return __tail_onwire_len(front_len(msg), middle_len(msg),
				 data_len(msg), secure);
}

/* head_onwire_len(sizeof(struct ceph_msg_header2), false) */
#define MESSAGE_HEAD_PLAIN_LEN	(CEPH_PREAMBLE_PLAIN_LEN +		\
				 sizeof(struct ceph_msg_header2) +	\
				 CEPH_CRC_LEN)

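/*
 * Worked example for the length helpers above (lengths come from
 * linux/ceph/messenger.h; treat the concrete numbers as illustrative):
 * in plain/CRC mode a frame with a 100-byte control segment occupies
 * head_onwire_len(100, false) = 32 (preamble) + 100 + 4 (crc) bytes.
 * In secure mode the first CEPH_PREAMBLE_INLINE_LEN (48) control bytes
 * travel inside the encrypted preamble block; any remainder is padded
 * to a CEPH_GCM_BLOCK_LEN boundary and followed by its own auth tag.
 */
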
static const int frame_aligns[] = {
	sizeof(void *),
	sizeof(void *),
	sizeof(void *),
	PAGE_SIZE
};

/*
 * Discards trailing empty segments, unless there is just one segment.
 * A frame always has at least one (possibly empty) segment.
 */
static int calc_segment_count(const int *lens, int len_cnt)
{
	int i;

	for (i = len_cnt - 1; i >= 0; i--) {
		if (lens[i])
			return i + 1;
	}

	return 1;
}

static void init_frame_desc(struct ceph_frame_desc *desc, int tag,
			    const int *lens, int len_cnt)
{
	int i;

	memset(desc, 0, sizeof(*desc));

	desc->fd_tag = tag;
	desc->fd_seg_cnt = calc_segment_count(lens, len_cnt);
	BUG_ON(desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT);
	for (i = 0; i < desc->fd_seg_cnt; i++) {
		desc->fd_lens[i] = lens[i];
		desc->fd_aligns[i] = frame_aligns[i];
	}
}

/*
 * Preamble crc covers everything up to itself (28 bytes) and
 * is calculated and verified irrespective of the connection mode
 * (i.e. even if the frame is encrypted).
 */
static void encode_preamble(const struct ceph_frame_desc *desc, void *p)
{
	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
	void *start = p;
	int i;

	memset(p, 0, CEPH_PREAMBLE_LEN);

	ceph_encode_8(&p, desc->fd_tag);
	ceph_encode_8(&p, desc->fd_seg_cnt);
	for (i = 0; i < desc->fd_seg_cnt; i++) {
		ceph_encode_32(&p, desc->fd_lens[i]);
		ceph_encode_16(&p, desc->fd_aligns[i]);
	}

	put_unaligned_le32(crc32c(0, start, crcp - start), crcp);
}

static int decode_preamble(void *p, struct ceph_frame_desc *desc)
{
	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
	u32 crc, expected_crc;
	int i;

	crc = crc32c(0, p, crcp - p);
	expected_crc = get_unaligned_le32(crcp);
	if (crc != expected_crc) {
		pr_err("bad preamble crc, calculated %u, expected %u\n",
		       crc, expected_crc);
		return -EBADMSG;
	}

	memset(desc, 0, sizeof(*desc));

	desc->fd_tag = ceph_decode_8(&p);
	desc->fd_seg_cnt = ceph_decode_8(&p);
	if (desc->fd_seg_cnt < 1 ||
	    desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT) {
		pr_err("bad segment count %d\n", desc->fd_seg_cnt);
		return -EINVAL;
	}
	for (i = 0; i < desc->fd_seg_cnt; i++) {
		desc->fd_lens[i] = ceph_decode_32(&p);
		desc->fd_aligns[i] = ceph_decode_16(&p);
	}

	/*
	 * This would fire for FRAME_TAG_WAIT (it has one empty
	 * segment), but we should never get it as client.
	 */
	if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
		pr_err("last segment empty\n");
		return -EINVAL;
	}

	if (desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
		pr_err("control segment too big %d\n", desc->fd_lens[0]);
		return -EINVAL;
	}
	if (desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
		pr_err("front segment too big %d\n", desc->fd_lens[1]);
		return -EINVAL;
	}
	if (desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
		pr_err("middle segment too big %d\n", desc->fd_lens[2]);
		return -EINVAL;
	}
	if (desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
		pr_err("data segment too big %d\n", desc->fd_lens[3]);
		return -EINVAL;
	}

	return 0;
}

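/*
 * Epilogue layout produced by encode_epilogue_*() below: in plain/CRC
 * mode it is a late_status byte followed by the front, middle and data
 * crc32c values; in secure mode essentially only the late_status byte
 * is sent, rounded up to a GCM block and covered by the auth tag of
 * the encrypted tail.
 */
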
static void encode_epilogue_plain(struct ceph_connection *con, bool aborted)
{
	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
						 FRAME_LATE_STATUS_COMPLETE;
	cpu_to_le32s(&con->v2.out_epil.front_crc);
	cpu_to_le32s(&con->v2.out_epil.middle_crc);
	cpu_to_le32s(&con->v2.out_epil.data_crc);
}

static void encode_epilogue_secure(struct ceph_connection *con, bool aborted)
{
	memset(&con->v2.out_epil, 0, sizeof(con->v2.out_epil));
	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
						 FRAME_LATE_STATUS_COMPLETE;
}

static int decode_epilogue(void *p, u32 *front_crc, u32 *middle_crc,
			   u32 *data_crc)
{
	u8 late_status;

	late_status = ceph_decode_8(&p);
	if ((late_status & FRAME_LATE_STATUS_ABORTED_MASK) !=
			FRAME_LATE_STATUS_COMPLETE) {
		/* we should never get an aborted message as client */
		pr_err("bad late_status 0x%x\n", late_status);
		return -EINVAL;
	}

	if (front_crc && middle_crc && data_crc) {
		*front_crc = ceph_decode_32(&p);
		*middle_crc = ceph_decode_32(&p);
		*data_crc = ceph_decode_32(&p);
	}

	return 0;
}

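/*
 * The msgr2 on-wire header (ceph_msg_header2) no longer carries the
 * source entity or a header crc.  fill_header() reconstructs a
 * msgr1-style ceph_msg_header for the upper layers, taking src from
 * the connection's peer and zeroing the crc; fill_header2() is the
 * outgoing direction and additionally carries ack_seq.
 */
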
static void fill_header(struct ceph_msg_header *hdr,
			const struct ceph_msg_header2 *hdr2,
			int front_len, int middle_len, int data_len,
			const struct ceph_entity_name *peer_name)
{
	hdr->seq = hdr2->seq;
	hdr->tid = hdr2->tid;
	hdr->type = hdr2->type;
	hdr->priority = hdr2->priority;
	hdr->version = hdr2->version;
	hdr->front_len = cpu_to_le32(front_len);
	hdr->middle_len = cpu_to_le32(middle_len);
	hdr->data_len = cpu_to_le32(data_len);
	hdr->data_off = hdr2->data_off;
	hdr->src = *peer_name;
	hdr->compat_version = hdr2->compat_version;
	hdr->reserved = 0;
	hdr->crc = 0;
}

static void fill_header2(struct ceph_msg_header2 *hdr2,
			 const struct ceph_msg_header *hdr, u64 ack_seq)
{
	hdr2->seq = hdr->seq;
	hdr2->tid = hdr->tid;
	hdr2->type = hdr->type;
	hdr2->priority = hdr->priority;
	hdr2->version = hdr->version;
	hdr2->data_pre_padding_len = 0;
	hdr2->data_off = hdr->data_off;
	hdr2->ack_seq = cpu_to_le64(ack_seq);
	hdr2->flags = 0;
	hdr2->compat_version = hdr->compat_version;
	hdr2->reserved = 0;
}

static int verify_control_crc(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	u32 crc, expected_crc;

	WARN_ON(con->v2.in_kvecs[0].iov_len != ctrl_len);
	WARN_ON(con->v2.in_kvecs[1].iov_len != CEPH_CRC_LEN);

	crc = crc32c(-1, con->v2.in_kvecs[0].iov_base, ctrl_len);
	expected_crc = get_unaligned_le32(con->v2.in_kvecs[1].iov_base);
	if (crc != expected_crc) {
		pr_err("bad control crc, calculated %u, expected %u\n",
		       crc, expected_crc);
		return -EBADMSG;
	}

	return 0;
}

static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
				u32 middle_crc, u32 data_crc)
{
	if (front_len(con->in_msg)) {
		con->in_front_crc = crc32c(-1, con->in_msg->front.iov_base,
					   front_len(con->in_msg));
	} else {
		WARN_ON(!middle_len(con->in_msg) && !data_len(con->in_msg));
		con->in_front_crc = -1;
	}

	if (middle_len(con->in_msg))
		con->in_middle_crc = crc32c(-1,
					    con->in_msg->middle->vec.iov_base,
					    middle_len(con->in_msg));
	else if (data_len(con->in_msg))
		con->in_middle_crc = -1;
	else
		con->in_middle_crc = 0;

	if (!data_len(con->in_msg))
		con->in_data_crc = 0;

	dout("%s con %p msg %p crcs %u %u %u\n", __func__, con, con->in_msg,
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);

	if (con->in_front_crc != front_crc) {
		pr_err("bad front crc, calculated %u, expected %u\n",
		       con->in_front_crc, front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != middle_crc) {
		pr_err("bad middle crc, calculated %u, expected %u\n",
		       con->in_middle_crc, middle_crc);
		return -EBADMSG;
	}
	if (con->in_data_crc != data_crc) {
		pr_err("bad data crc, calculated %u, expected %u\n",
		       con->in_data_crc, data_crc);
		return -EBADMSG;
	}

	return 0;
}

static int setup_crypto(struct ceph_connection *con,
			const u8 *session_key, int session_key_len,
			const u8 *con_secret, int con_secret_len)
{
	unsigned int noio_flag;
	int ret;

	dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
	     __func__, con, con->v2.con_mode, session_key_len, con_secret_len);
	WARN_ON(con->v2.hmac_tfm || con->v2.gcm_tfm || con->v2.gcm_req);

	if (con->v2.con_mode != CEPH_CON_MODE_CRC &&
	    con->v2.con_mode != CEPH_CON_MODE_SECURE) {
		pr_err("bad con_mode %d\n", con->v2.con_mode);
		return -EINVAL;
	}

	if (!session_key_len) {
		WARN_ON(con->v2.con_mode != CEPH_CON_MODE_CRC);
		WARN_ON(con_secret_len);
		return 0;  /* auth_none */
	}

	noio_flag = memalloc_noio_save();
	con->v2.hmac_tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	memalloc_noio_restore(noio_flag);
	if (IS_ERR(con->v2.hmac_tfm)) {
		ret = PTR_ERR(con->v2.hmac_tfm);
		con->v2.hmac_tfm = NULL;
		pr_err("failed to allocate hmac tfm context: %d\n", ret);
		return ret;
	}

	WARN_ON((unsigned long)session_key &
		crypto_shash_alignmask(con->v2.hmac_tfm));
	ret = crypto_shash_setkey(con->v2.hmac_tfm, session_key,
				  session_key_len);
	if (ret) {
		pr_err("failed to set hmac key: %d\n", ret);
		return ret;
	}

	if (con->v2.con_mode == CEPH_CON_MODE_CRC) {
		WARN_ON(con_secret_len);
		return 0;  /* auth_x, plain mode */
	}

	if (con_secret_len < CEPH_GCM_KEY_LEN + 2 * CEPH_GCM_IV_LEN) {
		pr_err("con_secret too small %d\n", con_secret_len);
		return -EINVAL;
	}

	noio_flag = memalloc_noio_save();
	con->v2.gcm_tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	memalloc_noio_restore(noio_flag);
	if (IS_ERR(con->v2.gcm_tfm)) {
		ret = PTR_ERR(con->v2.gcm_tfm);
		con->v2.gcm_tfm = NULL;
		pr_err("failed to allocate gcm tfm context: %d\n", ret);
		return ret;
	}

	WARN_ON((unsigned long)con_secret &
		crypto_aead_alignmask(con->v2.gcm_tfm));
	ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
	if (ret) {
		pr_err("failed to set gcm key: %d\n", ret);
		return ret;
	}

	WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
	ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
	if (ret) {
		pr_err("failed to set gcm tag size: %d\n", ret);
		return ret;
	}

	con->v2.gcm_req = aead_request_alloc(con->v2.gcm_tfm, GFP_NOIO);
	if (!con->v2.gcm_req) {
		pr_err("failed to allocate gcm request\n");
		return -ENOMEM;
	}

	crypto_init_wait(&con->v2.gcm_wait);
	aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &con->v2.gcm_wait);

	memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
	       CEPH_GCM_IV_LEN);
	memcpy(&con->v2.out_gcm_nonce,
	       con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
	       CEPH_GCM_IV_LEN);
	return 0;  /* auth_x, secure mode */
}

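/*
 * Note the con_secret layout consumed by setup_crypto(): the AES-GCM
 * key (CEPH_GCM_KEY_LEN bytes) followed by the initial rx nonce and
 * then the initial tx nonce (CEPH_GCM_IV_LEN bytes each).
 */
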
static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
		       int kvec_cnt, u8 *hmac)
{
	SHASH_DESC_ON_STACK(desc, con->v2.hmac_tfm);  /* tfm arg is ignored */
	int ret;
	int i;

	dout("%s con %p hmac_tfm %p kvec_cnt %d\n", __func__, con,
	     con->v2.hmac_tfm, kvec_cnt);

	if (!con->v2.hmac_tfm) {
		memset(hmac, 0, SHA256_DIGEST_SIZE);
		return 0;  /* auth_none */
	}

	desc->tfm = con->v2.hmac_tfm;
	ret = crypto_shash_init(desc);
	if (ret)
		goto out;

	for (i = 0; i < kvec_cnt; i++) {
		WARN_ON((unsigned long)kvecs[i].iov_base &
			crypto_shash_alignmask(con->v2.hmac_tfm));
		ret = crypto_shash_update(desc, kvecs[i].iov_base,
					  kvecs[i].iov_len);
		if (ret)
			goto out;
	}

	ret = crypto_shash_final(desc, hmac);

out:
	shash_desc_zero(desc);
	return ret;  /* auth_x, both plain and secure modes */
}

static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
{
	u64 counter;

	counter = le64_to_cpu(nonce->counter);
	nonce->counter = cpu_to_le64(counter + 1);
}

static int gcm_crypt(struct ceph_connection *con, bool encrypt,
		     struct scatterlist *src, struct scatterlist *dst,
		     int src_len)
{
	struct ceph_gcm_nonce *nonce;
	int ret;

	nonce = encrypt ? &con->v2.out_gcm_nonce : &con->v2.in_gcm_nonce;

	aead_request_set_ad(con->v2.gcm_req, 0);  /* no AAD */
	aead_request_set_crypt(con->v2.gcm_req, src, dst, src_len, (u8 *)nonce);
	ret = crypto_wait_req(encrypt ? crypto_aead_encrypt(con->v2.gcm_req) :
					crypto_aead_decrypt(con->v2.gcm_req),
			      &con->v2.gcm_wait);
	if (ret)
		return ret;

	gcm_inc_nonce(nonce);
	return 0;
}

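/*
 * Each direction keeps its own nonce: the fixed part comes from
 * con_secret and the counter part is bumped by gcm_inc_nonce() after
 * every successful operation, so consecutive frames in a direction
 * never reuse a nonce with the same key.
 */
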
static void get_bvec_at(struct ceph_msg_data_cursor *cursor,
			struct bio_vec *bv)
{
	struct page *page;
	size_t off, len;

	WARN_ON(!cursor->total_resid);

	/* skip zero-length data items */
	while (!cursor->resid)
		ceph_msg_data_advance(cursor, 0);

	/* get a piece of data, cursor isn't advanced */
	page = ceph_msg_data_next(cursor, &off, &len, NULL);

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;
}

static int calc_sg_cnt(void *buf, int buf_len)
{
	int sg_cnt;

	if (!buf_len)
		return 0;

	sg_cnt = need_padding(buf_len) ? 1 : 0;
	if (is_vmalloc_addr(buf)) {
		WARN_ON(offset_in_page(buf));
		sg_cnt += PAGE_ALIGN(buf_len) >> PAGE_SHIFT;
	} else {
		sg_cnt++;
	}

	return sg_cnt;
}

static int calc_sg_cnt_cursor(struct ceph_msg_data_cursor *cursor)
{
	int data_len = cursor->total_resid;
	struct bio_vec bv;
	int sg_cnt;

	if (!data_len)
		return 0;

	sg_cnt = need_padding(data_len) ? 1 : 0;
	do {
		get_bvec_at(cursor, &bv);
		sg_cnt++;

		ceph_msg_data_advance(cursor, bv.bv_len);
	} while (cursor->total_resid);

	return sg_cnt;
}

static void init_sgs(struct scatterlist **sg, void *buf, int buf_len, u8 *pad)
{
	void *end = buf + buf_len;
	struct page *page;
	int len;
	void *p;

	if (!buf_len)
		return;

	if (is_vmalloc_addr(buf)) {
		p = buf;
		do {
			page = vmalloc_to_page(p);
			len = min_t(int, end - p, PAGE_SIZE);
			WARN_ON(!page || !len || offset_in_page(p));
			sg_set_page(*sg, page, len, 0);
			*sg = sg_next(*sg);
			p += len;
		} while (p != end);
	} else {
		sg_set_buf(*sg, buf, buf_len);
		*sg = sg_next(*sg);
	}

	if (need_padding(buf_len)) {
		sg_set_buf(*sg, pad, padding_len(buf_len));
		*sg = sg_next(*sg);
	}
}

static void init_sgs_cursor(struct scatterlist **sg,
			    struct ceph_msg_data_cursor *cursor, u8 *pad)
{
	int data_len = cursor->total_resid;
	struct bio_vec bv;

	if (!data_len)
		return;

	do {
		get_bvec_at(cursor, &bv);
		sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
		*sg = sg_next(*sg);

		ceph_msg_data_advance(cursor, bv.bv_len);
	} while (cursor->total_resid);

	if (need_padding(data_len)) {
		sg_set_buf(*sg, pad, padding_len(data_len));
		*sg = sg_next(*sg);
	}
}

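/*
 * setup_message_sgs() below maps the logical tail of a message onto a
 * single sg table in on-wire order: front + front padding, middle +
 * middle padding, data + data padding, then the epilogue (and, when
 * add_tag is set, room for the auth tag).  The pad buffers stand in
 * for the zero padding that rounds each part up to a GCM block.
 */
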
static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
			     u8 *front_pad, u8 *middle_pad, u8 *data_pad,
			     void *epilogue, bool add_tag)
{
	struct ceph_msg_data_cursor cursor;
	struct scatterlist *cur_sg;
	int sg_cnt;
	int ret;

	if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
		return 0;

	sg_cnt = 1;  /* epilogue + [auth tag] */
	if (front_len(msg))
		sg_cnt += calc_sg_cnt(msg->front.iov_base,
				      front_len(msg));
	if (middle_len(msg))
		sg_cnt += calc_sg_cnt(msg->middle->vec.iov_base,
				      middle_len(msg));
	if (data_len(msg)) {
		ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
		sg_cnt += calc_sg_cnt_cursor(&cursor);
	}

	ret = sg_alloc_table(sgt, sg_cnt, GFP_NOIO);
	if (ret)
		return ret;

	cur_sg = sgt->sgl;
	if (front_len(msg))
		init_sgs(&cur_sg, msg->front.iov_base, front_len(msg),
			 front_pad);
	if (middle_len(msg))
		init_sgs(&cur_sg, msg->middle->vec.iov_base, middle_len(msg),
			 middle_pad);
	if (data_len(msg)) {
		ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
		init_sgs_cursor(&cur_sg, &cursor, data_pad);
	}

	WARN_ON(!sg_is_last(cur_sg));
	sg_set_buf(cur_sg, epilogue,
		   CEPH_GCM_BLOCK_LEN + (add_tag ? CEPH_GCM_TAG_LEN : 0));
	return 0;
}

static int decrypt_preamble(struct ceph_connection *con)
{
	struct scatterlist sg;

	sg_init_one(&sg, con->v2.in_buf, CEPH_PREAMBLE_SECURE_LEN);
	return gcm_crypt(con, false, &sg, &sg, CEPH_PREAMBLE_SECURE_LEN);
}

static int decrypt_control_remainder(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
	int pt_len = padding_len(rem_len) + CEPH_GCM_TAG_LEN;
	struct scatterlist sgs[2];

	WARN_ON(con->v2.in_kvecs[0].iov_len != rem_len);
	WARN_ON(con->v2.in_kvecs[1].iov_len != pt_len);

	sg_init_table(sgs, 2);
	sg_set_buf(&sgs[0], con->v2.in_kvecs[0].iov_base, rem_len);
	sg_set_buf(&sgs[1], con->v2.in_buf, pt_len);

	return gcm_crypt(con, false, sgs, sgs,
			 padded_len(rem_len) + CEPH_GCM_TAG_LEN);
}

static int decrypt_tail(struct ceph_connection *con)
{
	struct sg_table enc_sgt = {};
	struct sg_table sgt = {};
	int tail_len;
	int ret;

	tail_len = tail_onwire_len(con->in_msg, true);
	ret = sg_alloc_table_from_pages(&enc_sgt, con->v2.in_enc_pages,
					con->v2.in_enc_page_cnt, 0, tail_len,
					GFP_NOIO);
	if (ret)
		goto out;

	ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
			MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
			con->v2.in_buf, true);
	if (ret)
		goto out;

	dout("%s con %p msg %p enc_page_cnt %d sg_cnt %d\n", __func__, con,
	     con->in_msg, con->v2.in_enc_page_cnt, sgt.orig_nents);
	ret = gcm_crypt(con, false, enc_sgt.sgl, sgt.sgl, tail_len);
	if (ret)
		goto out;

	WARN_ON(!con->v2.in_enc_page_cnt);
	ceph_release_page_vector(con->v2.in_enc_pages,
				 con->v2.in_enc_page_cnt);
	con->v2.in_enc_pages = NULL;
	con->v2.in_enc_page_cnt = 0;

out:
	sg_free_table(&sgt);
	sg_free_table(&enc_sgt);
	return ret;
}

static int prepare_banner(struct ceph_connection *con)
{
	int buf_len = CEPH_BANNER_V2_LEN + 2 + 8 + 8;
	void *buf, *p;

	buf = alloc_conn_buf(con, buf_len);
	if (!buf)
		return -ENOMEM;

	p = buf;
	ceph_encode_copy(&p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN);
	ceph_encode_16(&p, sizeof(u64) + sizeof(u64));
	ceph_encode_64(&p, CEPH_MSGR2_SUPPORTED_FEATURES);
	ceph_encode_64(&p, CEPH_MSGR2_REQUIRED_FEATURES);
	WARN_ON(p != buf + buf_len);

	add_out_kvec(con, buf, buf_len);
	add_out_sign_kvec(con, buf, buf_len);
	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
	return 0;
}

/*
 * base:
 *   preamble
 *   control body (ctrl_len bytes)
 *   space for control crc
 *
 * extdata (optional):
 *   control body (extdata_len bytes)
 *
 * Compute control crc and gather base and extdata into:
 *
 *   preamble
 *   control body (ctrl_len + extdata_len bytes)
 *   control crc
 *
 * Preamble should already be encoded at the start of base.
 */
static void prepare_head_plain(struct ceph_connection *con, void *base,
			       int ctrl_len, void *extdata, int extdata_len,
			       bool to_be_signed)
{
	int base_len = CEPH_PREAMBLE_LEN + ctrl_len + CEPH_CRC_LEN;
	void *crcp = base + base_len - CEPH_CRC_LEN;
	u32 crc;

	crc = crc32c(-1, CTRL_BODY(base), ctrl_len);
	if (extdata_len)
		crc = crc32c(crc, extdata, extdata_len);
	put_unaligned_le32(crc, crcp);

	if (!extdata_len) {
		add_out_kvec(con, base, base_len);
		if (to_be_signed)
			add_out_sign_kvec(con, base, base_len);
		return;
	}

	add_out_kvec(con, base, crcp - base);
	add_out_kvec(con, extdata, extdata_len);
	add_out_kvec(con, crcp, CEPH_CRC_LEN);
	if (to_be_signed) {
		add_out_sign_kvec(con, base, crcp - base);
		add_out_sign_kvec(con, extdata, extdata_len);
		add_out_sign_kvec(con, crcp, CEPH_CRC_LEN);
	}
}

static int prepare_head_secure_small(struct ceph_connection *con,
				     void *base, int ctrl_len)
{
	struct scatterlist sg;
	int ret;

	/* inline buffer padding? */
	if (ctrl_len < CEPH_PREAMBLE_INLINE_LEN)
		memset(CTRL_BODY(base) + ctrl_len, 0,
		       CEPH_PREAMBLE_INLINE_LEN - ctrl_len);

	sg_init_one(&sg, base, CEPH_PREAMBLE_SECURE_LEN);
	ret = gcm_crypt(con, true, &sg, &sg,
			CEPH_PREAMBLE_SECURE_LEN - CEPH_GCM_TAG_LEN);
	if (ret)
		return ret;

	add_out_kvec(con, base, CEPH_PREAMBLE_SECURE_LEN);
	return 0;
}

/*
 * base:
 *   preamble
 *   control body (ctrl_len bytes)
 *   space for padding, if needed
 *   space for control remainder auth tag
 *   space for preamble auth tag
 *
 * Encrypt preamble and the inline portion, then encrypt the remainder
 * and gather into:
 *
 *   preamble
 *   control body (48 bytes)
 *   preamble auth tag
 *   control body (ctrl_len - 48 bytes)
 *   zero padding, if needed
 *   control remainder auth tag
 *
 * Preamble should already be encoded at the start of base.
 */
static int prepare_head_secure_big(struct ceph_connection *con,
				   void *base, int ctrl_len)
{
	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
	void *rem = CTRL_BODY(base) + CEPH_PREAMBLE_INLINE_LEN;
	void *rem_tag = rem + padded_len(rem_len);
	void *pmbl_tag = rem_tag + CEPH_GCM_TAG_LEN;
	struct scatterlist sgs[2];
	int ret;

	sg_init_table(sgs, 2);
	sg_set_buf(&sgs[0], base, rem - base);
	sg_set_buf(&sgs[1], pmbl_tag, CEPH_GCM_TAG_LEN);
	ret = gcm_crypt(con, true, sgs, sgs, rem - base);
	if (ret)
		return ret;

	/* control remainder padding? */
	if (need_padding(rem_len))
		memset(rem + rem_len, 0, padding_len(rem_len));

	sg_init_one(&sgs[0], rem, pmbl_tag - rem);
	ret = gcm_crypt(con, true, sgs, sgs, rem_tag - rem);
	if (ret)
		return ret;

	add_out_kvec(con, base, rem - base);
	add_out_kvec(con, pmbl_tag, CEPH_GCM_TAG_LEN);
	add_out_kvec(con, rem, pmbl_tag - rem);
	return 0;
}

static int __prepare_control(struct ceph_connection *con, int tag,
			     void *base, int ctrl_len, void *extdata,
			     int extdata_len, bool to_be_signed)
{
	int total_len = ctrl_len + extdata_len;
	struct ceph_frame_desc desc;
	int ret;

	dout("%s con %p tag %d len %d (%d+%d)\n", __func__, con, tag,
	     total_len, ctrl_len, extdata_len);

	/* extdata may be vmalloc'ed but not base */
	if (WARN_ON(is_vmalloc_addr(base) || !ctrl_len))
		return -EINVAL;

	init_frame_desc(&desc, tag, &total_len, 1);
	encode_preamble(&desc, base);

	if (con_secure(con)) {
		if (WARN_ON(extdata_len || to_be_signed))
			return -EINVAL;

		if (ctrl_len <= CEPH_PREAMBLE_INLINE_LEN)
			/* fully inlined, inline buffer may need padding */
			ret = prepare_head_secure_small(con, base, ctrl_len);
		else
			/* partially inlined, inline buffer is full */
			ret = prepare_head_secure_big(con, base, ctrl_len);
		if (ret)
			return ret;
	} else {
		prepare_head_plain(con, base, ctrl_len, extdata, extdata_len,
				   to_be_signed);
	}

	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
	return 0;
}

static int prepare_control(struct ceph_connection *con, int tag,
			   void *base, int ctrl_len)
{
	return __prepare_control(con, tag, base, ctrl_len, NULL, 0, false);
}

static int prepare_hello(struct ceph_connection *con)
{
	void *buf, *p;
	int ctrl_len;

	ctrl_len = 1 + ceph_entity_addr_encoding_len(&con->peer_addr);
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
	if (!buf)
		return -ENOMEM;

	p = CTRL_BODY(buf);
	ceph_encode_8(&p, CEPH_ENTITY_TYPE_CLIENT);
	ceph_encode_entity_addr(&p, &con->peer_addr);
	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);

	return __prepare_control(con, FRAME_TAG_HELLO, buf, ctrl_len,
				 NULL, 0, true);
}

/* so that head_onwire_len(AUTH_BUF_LEN, false) is 512 */
#define AUTH_BUF_LEN	(512 - CEPH_CRC_LEN - CEPH_PREAMBLE_PLAIN_LEN)

static int prepare_auth_request(struct ceph_connection *con)
{
	void *authorizer, *authorizer_copy;
	int ctrl_len, authorizer_len;
	void *buf;
	int ret;

	ctrl_len = AUTH_BUF_LEN;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
	if (!buf)
		return -ENOMEM;

	mutex_unlock(&con->mutex);
	ret = con->ops->get_auth_request(con, CTRL_BODY(buf), &ctrl_len,
					 &authorizer, &authorizer_len);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_HELLO) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	dout("%s con %p get_auth_request ret %d\n", __func__, con, ret);
	if (ret)
		return ret;

	authorizer_copy = alloc_conn_buf(con, authorizer_len);
	if (!authorizer_copy)
		return -ENOMEM;

	memcpy(authorizer_copy, authorizer, authorizer_len);

	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST, buf, ctrl_len,
				 authorizer_copy, authorizer_len, true);
}

static int prepare_auth_request_more(struct ceph_connection *con,
				     void *reply, int reply_len)
{
	int ctrl_len, authorizer_len;
	void *authorizer;
	void *buf;
	int ret;

	ctrl_len = AUTH_BUF_LEN;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
	if (!buf)
		return -ENOMEM;

	mutex_unlock(&con->mutex);
	ret = con->ops->handle_auth_reply_more(con, reply, reply_len,
					       CTRL_BODY(buf), &ctrl_len,
					       &authorizer, &authorizer_len);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_AUTH) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
		return -EAGAIN;
	}

	dout("%s con %p handle_auth_reply_more ret %d\n", __func__, con, ret);
	if (ret)
		return ret;

	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST_MORE, buf,
				 ctrl_len, authorizer, authorizer_len, true);
}

static int prepare_auth_signature(struct ceph_connection *con)
{
	void *buf;
	int ret;

	buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
						  con_secure(con)));
	if (!buf)
		return -ENOMEM;

	ret = hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
			  CTRL_BODY(buf));
	if (ret)
		return ret;

	return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
			       SHA256_DIGEST_SIZE);
}

static int prepare_client_ident(struct ceph_connection *con)
{
	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
	struct ceph_client *client = from_msgr(con->msgr);
	u64 global_id = ceph_client_gid(client);
	void *buf, *p;
	int ctrl_len;

	WARN_ON(con->v2.server_cookie);
	WARN_ON(con->v2.connect_seq);
	WARN_ON(con->v2.peer_global_seq);

	if (!con->v2.client_cookie) {
		do {
			get_random_bytes(&con->v2.client_cookie,
					 sizeof(con->v2.client_cookie));
		} while (!con->v2.client_cookie);
		dout("%s con %p generated cookie 0x%llx\n", __func__, con,
		     con->v2.client_cookie);
	} else {
		dout("%s con %p cookie already set 0x%llx\n", __func__, con,
		     con->v2.client_cookie);
	}

	dout("%s con %p my_addr %s/%u peer_addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx cookie 0x%llx\n",
	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
	     ceph_pr_addr(&con->peer_addr), le32_to_cpu(con->peer_addr.nonce),
	     global_id, con->v2.global_seq, client->supported_features,
	     client->required_features, con->v2.client_cookie);

	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) +
		   ceph_entity_addr_encoding_len(&con->peer_addr) + 6 * 8;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
	if (!buf)
		return -ENOMEM;

	p = CTRL_BODY(buf);
	ceph_encode_8(&p, 2);  /* addrvec marker */
	ceph_encode_32(&p, 1);  /* addr_cnt */
	ceph_encode_entity_addr(&p, my_addr);
	ceph_encode_entity_addr(&p, &con->peer_addr);
	ceph_encode_64(&p, global_id);
	ceph_encode_64(&p, con->v2.global_seq);
	ceph_encode_64(&p, client->supported_features);
	ceph_encode_64(&p, client->required_features);
	ceph_encode_64(&p, 0);  /* flags */
	ceph_encode_64(&p, con->v2.client_cookie);
	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);

	return prepare_control(con, FRAME_TAG_CLIENT_IDENT, buf, ctrl_len);
}

static int prepare_session_reconnect(struct ceph_connection *con)
{
	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
	void *buf, *p;
	int ctrl_len;

	WARN_ON(!con->v2.client_cookie);
	WARN_ON(!con->v2.server_cookie);
	WARN_ON(!con->v2.connect_seq);
	WARN_ON(!con->v2.peer_global_seq);

	dout("%s con %p my_addr %s/%u client_cookie 0x%llx server_cookie 0x%llx global_seq %llu connect_seq %llu in_seq %llu\n",
	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
	     con->v2.client_cookie, con->v2.server_cookie, con->v2.global_seq,
	     con->v2.connect_seq, con->in_seq);

	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) + 5 * 8;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
	if (!buf)
		return -ENOMEM;

	p = CTRL_BODY(buf);
	ceph_encode_8(&p, 2);  /* entity_addrvec_t marker */
	ceph_encode_32(&p, 1);  /* my_addrs len */
	ceph_encode_entity_addr(&p, my_addr);
	ceph_encode_64(&p, con->v2.client_cookie);
	ceph_encode_64(&p, con->v2.server_cookie);
	ceph_encode_64(&p, con->v2.global_seq);
	ceph_encode_64(&p, con->v2.connect_seq);
	ceph_encode_64(&p, con->in_seq);
	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);

	return prepare_control(con, FRAME_TAG_SESSION_RECONNECT, buf, ctrl_len);
}

static int prepare_keepalive2(struct ceph_connection *con)
{
	struct ceph_timespec *ts = CTRL_BODY(con->v2.out_buf);
	struct timespec64 now;

	ktime_get_real_ts64(&now);
	dout("%s con %p timestamp %lld.%09ld\n", __func__, con, now.tv_sec,
	     now.tv_nsec);

	ceph_encode_timespec64(ts, &now);

	reset_out_kvecs(con);
	return prepare_control(con, FRAME_TAG_KEEPALIVE2, con->v2.out_buf,
			       sizeof(struct ceph_timespec));
}

static int prepare_ack(struct ceph_connection *con)
{
	void *p;

	dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	p = CTRL_BODY(con->v2.out_buf);
	ceph_encode_64(&p, con->in_seq_acked);

	reset_out_kvecs(con);
	return prepare_control(con, FRAME_TAG_ACK, con->v2.out_buf, 8);
}

static void prepare_epilogue_plain(struct ceph_connection *con, bool aborted)
{
	dout("%s con %p msg %p aborted %d crcs %u %u %u\n", __func__, con,
	     con->out_msg, aborted, con->v2.out_epil.front_crc,
	     con->v2.out_epil.middle_crc, con->v2.out_epil.data_crc);

	encode_epilogue_plain(con, aborted);
	add_out_kvec(con, &con->v2.out_epil, CEPH_EPILOGUE_PLAIN_LEN);
}

/*
 * For "used" empty segments, crc is -1. For unused (trailing)
 * segments, crc is 0.
 */
static void prepare_message_plain(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;

	prepare_head_plain(con, con->v2.out_buf,
			   sizeof(struct ceph_msg_header2), NULL, 0, false);

	if (!front_len(msg) && !middle_len(msg)) {
		if (!data_len(msg)) {
			/*
			 * Empty message: once the head is written,
			 * we are done -- there is no epilogue.
			 */
			con->v2.out_state = OUT_S_FINISH_MESSAGE;
			return;
		}

		con->v2.out_epil.front_crc = -1;
		con->v2.out_epil.middle_crc = -1;
		con->v2.out_state = OUT_S_QUEUE_DATA;
		return;
	}

	if (front_len(msg)) {
		con->v2.out_epil.front_crc = crc32c(-1, msg->front.iov_base,
						    front_len(msg));
		add_out_kvec(con, msg->front.iov_base, front_len(msg));
	} else {
		/* middle (at least) is there, checked above */
		con->v2.out_epil.front_crc = -1;
	}

	if (middle_len(msg)) {
		con->v2.out_epil.middle_crc =
			crc32c(-1, msg->middle->vec.iov_base, middle_len(msg));
		add_out_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
	} else {
		con->v2.out_epil.middle_crc = data_len(msg) ? -1 : 0;
	}

	if (data_len(msg)) {
		con->v2.out_state = OUT_S_QUEUE_DATA;
	} else {
		con->v2.out_epil.data_crc = 0;
		prepare_epilogue_plain(con, false);
		con->v2.out_state = OUT_S_FINISH_MESSAGE;
	}
}

/*
 * Unfortunately the kernel crypto API doesn't support streaming
 * (piecewise) operation for AEAD algorithms, so we can't get away
 * with a fixed size buffer and a couple sgs. Instead, we have to
 * allocate pages for the entire tail of the message (currently up
 * to ~32M) and two sgs arrays (up to ~256K each)...
 */
static int prepare_message_secure(struct ceph_connection *con)
{
	void *zerop = page_address(ceph_zero_page);
	struct sg_table enc_sgt = {};
	struct sg_table sgt = {};
	struct page **enc_pages;
	int enc_page_cnt;
	int tail_len;
	int ret;

	ret = prepare_head_secure_small(con, con->v2.out_buf,
					sizeof(struct ceph_msg_header2));
	if (ret)
		return ret;

	tail_len = tail_onwire_len(con->out_msg, true);
	if (!tail_len) {
		/*
		 * Empty message: once the head is written,
		 * we are done -- there is no epilogue.
		 */
		con->v2.out_state = OUT_S_FINISH_MESSAGE;
		return 0;
	}

	encode_epilogue_secure(con, false);
	ret = setup_message_sgs(&sgt, con->out_msg, zerop, zerop, zerop,
				&con->v2.out_epil, false);
	if (ret)
		goto out;

	enc_page_cnt = calc_pages_for(0, tail_len);
	enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
	if (IS_ERR(enc_pages)) {
		ret = PTR_ERR(enc_pages);
		goto out;
	}

	WARN_ON(con->v2.out_enc_pages || con->v2.out_enc_page_cnt);
	con->v2.out_enc_pages = enc_pages;
	con->v2.out_enc_page_cnt = enc_page_cnt;
	con->v2.out_enc_resid = tail_len;
	con->v2.out_enc_i = 0;

	ret = sg_alloc_table_from_pages(&enc_sgt, enc_pages, enc_page_cnt,
					0, tail_len, GFP_NOIO);
	if (ret)
		goto out;

	ret = gcm_crypt(con, true, sgt.sgl, enc_sgt.sgl,
			tail_len - CEPH_GCM_TAG_LEN);
	if (ret)
		goto out;

	dout("%s con %p msg %p sg_cnt %d enc_page_cnt %d\n", __func__, con,
	     con->out_msg, sgt.orig_nents, enc_page_cnt);
	con->v2.out_state = OUT_S_QUEUE_ENC_PAGE;

out:
	sg_free_table(&sgt);
	sg_free_table(&enc_sgt);
	return ret;
}

static int prepare_message(struct ceph_connection *con)
{
	int lens[] = {
		sizeof(struct ceph_msg_header2),
		front_len(con->out_msg),
		middle_len(con->out_msg),
		data_len(con->out_msg)
	};
	struct ceph_frame_desc desc;
	int ret;

	dout("%s con %p msg %p logical %d+%d+%d+%d\n", __func__, con,
	     con->out_msg, lens[0], lens[1], lens[2], lens[3]);

	if (con->in_seq > con->in_seq_acked) {
		dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
		     con->in_seq_acked, con->in_seq);
		con->in_seq_acked = con->in_seq;
	}

	reset_out_kvecs(con);
	init_frame_desc(&desc, FRAME_TAG_MESSAGE, lens, 4);
	encode_preamble(&desc, con->v2.out_buf);
	fill_header2(CTRL_BODY(con->v2.out_buf), &con->out_msg->hdr,
		     con->in_seq_acked);

	if (con_secure(con)) {
		ret = prepare_message_secure(con);
		if (ret)
			return ret;
	} else {
		prepare_message_plain(con);
	}

	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
	return 0;
}

static int prepare_read_banner_prefix(struct ceph_connection *con)
{
	void *buf;

	buf = alloc_conn_buf(con, CEPH_BANNER_V2_PREFIX_LEN);
	if (!buf)
		return -ENOMEM;

	reset_in_kvecs(con);
	add_in_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
	add_in_sign_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
	con->state = CEPH_CON_S_V2_BANNER_PREFIX;
	return 0;
}

static int prepare_read_banner_payload(struct ceph_connection *con,
				       int payload_len)
{
	void *buf;

	buf = alloc_conn_buf(con, payload_len);
	if (!buf)
		return -ENOMEM;

	reset_in_kvecs(con);
	add_in_kvec(con, buf, payload_len);
	add_in_sign_kvec(con, buf, payload_len);
	con->state = CEPH_CON_S_V2_BANNER_PAYLOAD;
	return 0;
}

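/*
 * In secure mode the whole CEPH_PREAMBLE_SECURE_LEN block (plain
 * preamble plus the inline control area and its auth tag) is read and
 * later decrypted in place by decrypt_preamble(); in crc mode only the
 * CEPH_PREAMBLE_PLAIN_LEN preamble is read here.
 */
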
static void prepare_read_preamble(struct ceph_connection *con)
{
	reset_in_kvecs(con);
	add_in_kvec(con, con->v2.in_buf,
		    con_secure(con) ? CEPH_PREAMBLE_SECURE_LEN :
				      CEPH_PREAMBLE_PLAIN_LEN);
	con->v2.in_state = IN_S_HANDLE_PREAMBLE;
}

static int prepare_read_control(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	int head_len;
	void *buf;

	reset_in_kvecs(con);
	if (con->state == CEPH_CON_S_V2_HELLO ||
	    con->state == CEPH_CON_S_V2_AUTH) {
		head_len = head_onwire_len(ctrl_len, false);
		buf = alloc_conn_buf(con, head_len);
		if (!buf)
			return -ENOMEM;

		/* preserve preamble */
		memcpy(buf, con->v2.in_buf, CEPH_PREAMBLE_LEN);

		add_in_kvec(con, CTRL_BODY(buf), ctrl_len);
		add_in_kvec(con, CTRL_BODY(buf) + ctrl_len, CEPH_CRC_LEN);
		add_in_sign_kvec(con, buf, head_len);
	} else {
		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
			buf = alloc_conn_buf(con, ctrl_len);
			if (!buf)
				return -ENOMEM;

			add_in_kvec(con, buf, ctrl_len);
		} else {
			add_in_kvec(con, CTRL_BODY(con->v2.in_buf), ctrl_len);
		}
		add_in_kvec(con, con->v2.in_buf, CEPH_CRC_LEN);
	}
	con->v2.in_state = IN_S_HANDLE_CONTROL;
	return 0;
}

static int prepare_read_control_remainder(struct ceph_connection *con)
{
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
	void *buf;

	buf = alloc_conn_buf(con, ctrl_len);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, CTRL_BODY(con->v2.in_buf), CEPH_PREAMBLE_INLINE_LEN);

	reset_in_kvecs(con);
	add_in_kvec(con, buf + CEPH_PREAMBLE_INLINE_LEN, rem_len);
	add_in_kvec(con, con->v2.in_buf,
		    padding_len(rem_len) + CEPH_GCM_TAG_LEN);
	con->v2.in_state = IN_S_HANDLE_CONTROL_REMAINDER;
	return 0;
}

static int prepare_read_data(struct ceph_connection *con)
{
	struct bio_vec bv;

	con->in_data_crc = -1;
	ceph_msg_data_cursor_init(&con->v2.in_cursor, con->in_msg,
				  data_len(con->in_msg));

	get_bvec_at(&con->v2.in_cursor, &bv);
	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
		if (unlikely(!con->bounce_page)) {
			con->bounce_page = alloc_page(GFP_NOIO);
			if (!con->bounce_page) {
				pr_err("failed to allocate bounce page\n");
				return -ENOMEM;
			}
		}

		bv.bv_page = con->bounce_page;
		bv.bv_offset = 0;
		set_in_bvec(con, &bv);
	} else {
		set_in_bvec(con, &bv);
	}
	con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT;
	return 0;
}

static void prepare_read_data_cont(struct ceph_connection *con)
{
	struct bio_vec bv;

	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
		con->in_data_crc = crc32c(con->in_data_crc,
					  page_address(con->bounce_page),
					  con->v2.in_bvec.bv_len);

		get_bvec_at(&con->v2.in_cursor, &bv);
		memcpy_to_page(bv.bv_page, bv.bv_offset,
			       page_address(con->bounce_page),
			       con->v2.in_bvec.bv_len);
	} else {
		con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
						    con->v2.in_bvec.bv_page,
						    con->v2.in_bvec.bv_offset,
						    con->v2.in_bvec.bv_len);
	}

	ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len);
	if (con->v2.in_cursor.total_resid) {
		get_bvec_at(&con->v2.in_cursor, &bv);
		if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
			bv.bv_page = con->bounce_page;
			bv.bv_offset = 0;
			set_in_bvec(con, &bv);
		} else {
			set_in_bvec(con, &bv);
		}
		WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT);
		return;
	}

	/*
	 * We've read all data. Prepare to read epilogue.
	 */
	reset_in_kvecs(con);
	add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
	con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}

static int prepare_read_tail_plain(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->in_msg;

	if (!front_len(msg) && !middle_len(msg)) {
		WARN_ON(!data_len(msg));
		return prepare_read_data(con);
	}

	reset_in_kvecs(con);
	if (front_len(msg)) {
		add_in_kvec(con, msg->front.iov_base, front_len(msg));
		WARN_ON(msg->front.iov_len != front_len(msg));
	}
	if (middle_len(msg)) {
		add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
		WARN_ON(msg->middle->vec.iov_len != middle_len(msg));
	}

	if (data_len(msg)) {
		con->v2.in_state = IN_S_PREPARE_READ_DATA;
	} else {
		add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
		con->v2.in_state = IN_S_HANDLE_EPILOGUE;
	}
	return 0;
}

static void prepare_read_enc_page(struct ceph_connection *con)
{
	struct bio_vec bv;

	dout("%s con %p i %d resid %d\n", __func__, con, con->v2.in_enc_i,
	     con->v2.in_enc_resid);
	WARN_ON(!con->v2.in_enc_resid);

	bv.bv_page = con->v2.in_enc_pages[con->v2.in_enc_i];
	bv.bv_offset = 0;
	bv.bv_len = min(con->v2.in_enc_resid, (int)PAGE_SIZE);

	set_in_bvec(con, &bv);
	con->v2.in_enc_i++;
	con->v2.in_enc_resid -= bv.bv_len;

	if (con->v2.in_enc_resid) {
		con->v2.in_state = IN_S_PREPARE_READ_ENC_PAGE;
		return;
	}

	/*
	 * We are set to read the last piece of ciphertext (ending
	 * with epilogue) + auth tag.
	 */
	WARN_ON(con->v2.in_enc_i != con->v2.in_enc_page_cnt);
	con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}

static int prepare_read_tail_secure(struct ceph_connection *con)
{
	struct page **enc_pages;
	int enc_page_cnt;
	int tail_len;

	tail_len = tail_onwire_len(con->in_msg, true);
	WARN_ON(!tail_len);

	enc_page_cnt = calc_pages_for(0, tail_len);
	enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
	if (IS_ERR(enc_pages))
		return PTR_ERR(enc_pages);

	WARN_ON(con->v2.in_enc_pages || con->v2.in_enc_page_cnt);
	con->v2.in_enc_pages = enc_pages;
	con->v2.in_enc_page_cnt = enc_page_cnt;
	con->v2.in_enc_resid = tail_len;
	con->v2.in_enc_i = 0;

	prepare_read_enc_page(con);
	return 0;
}

static void __finish_skip(struct ceph_connection *con)
{
	con->in_seq++;
	prepare_read_preamble(con);
}

static void prepare_skip_message(struct ceph_connection *con)
{
	struct ceph_frame_desc *desc = &con->v2.in_desc;
	int tail_len;

	dout("%s con %p %d+%d+%d\n", __func__, con, desc->fd_lens[1],
	     desc->fd_lens[2], desc->fd_lens[3]);

	tail_len = __tail_onwire_len(desc->fd_lens[1], desc->fd_lens[2],
				     desc->fd_lens[3], con_secure(con));
	if (!tail_len) {
		__finish_skip(con);
	} else {
		set_in_skip(con, tail_len);
		con->v2.in_state = IN_S_FINISH_SKIP;
	}
}

static int process_banner_prefix(struct ceph_connection *con)
{
	int payload_len;
	void *p;

	WARN_ON(con->v2.in_kvecs[0].iov_len != CEPH_BANNER_V2_PREFIX_LEN);

	p = con->v2.in_kvecs[0].iov_base;
	if (memcmp(p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN)) {
		if (!memcmp(p, CEPH_BANNER, CEPH_BANNER_LEN))
			con->error_msg = "server is speaking msgr1 protocol";
		else
			con->error_msg = "protocol error, bad banner";
		return -EINVAL;
	}

	p += CEPH_BANNER_V2_LEN;
	payload_len = ceph_decode_16(&p);
	dout("%s con %p payload_len %d\n", __func__, con, payload_len);

	return prepare_read_banner_payload(con, payload_len);
}

static int process_banner_payload(struct ceph_connection *con)
{
	void *end = con->v2.in_kvecs[0].iov_base + con->v2.in_kvecs[0].iov_len;
	u64 feat = CEPH_MSGR2_SUPPORTED_FEATURES;
	u64 req_feat = CEPH_MSGR2_REQUIRED_FEATURES;
	u64 server_feat, server_req_feat;
	void *p;
	int ret;

	p = con->v2.in_kvecs[0].iov_base;
	ceph_decode_64_safe(&p, end, server_feat, bad);
	ceph_decode_64_safe(&p, end, server_req_feat, bad);

	dout("%s con %p server_feat 0x%llx server_req_feat 0x%llx\n",
	     __func__, con, server_feat, server_req_feat);

	if (req_feat & ~server_feat) {
		pr_err("msgr2 feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
		       server_feat, req_feat & ~server_feat);
		con->error_msg = "missing required protocol features";
		return -EINVAL;
	}
	if (server_req_feat & ~feat) {
		pr_err("msgr2 feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
		       feat, server_req_feat & ~feat);
		con->error_msg = "missing required protocol features";
		return -EINVAL;
	}

	/* no reset_out_kvecs() as our banner may still be pending */
	ret = prepare_hello(con);
	if (ret) {
		pr_err("prepare_hello failed: %d\n", ret);
		return ret;
	}

	con->state = CEPH_CON_S_V2_HELLO;
prepare_read_preamble(con); 1992 return 0; 1993 1994 bad: 1995 pr_err("failed to decode banner payload\n"); 1996 return -EINVAL; 1997 } 1998 1999 static int process_hello(struct ceph_connection *con, void *p, void *end) 2000 { 2001 struct ceph_entity_addr *my_addr = &con->msgr->inst.addr; 2002 struct ceph_entity_addr addr_for_me; 2003 u8 entity_type; 2004 int ret; 2005 2006 if (con->state != CEPH_CON_S_V2_HELLO) { 2007 con->error_msg = "protocol error, unexpected hello"; 2008 return -EINVAL; 2009 } 2010 2011 ceph_decode_8_safe(&p, end, entity_type, bad); 2012 ret = ceph_decode_entity_addr(&p, end, &addr_for_me); 2013 if (ret) { 2014 pr_err("failed to decode addr_for_me: %d\n", ret); 2015 return ret; 2016 } 2017 2018 dout("%s con %p entity_type %d addr_for_me %s\n", __func__, con, 2019 entity_type, ceph_pr_addr(&addr_for_me)); 2020 2021 if (entity_type != con->peer_name.type) { 2022 pr_err("bad peer type, want %d, got %d\n", 2023 con->peer_name.type, entity_type); 2024 con->error_msg = "wrong peer at address"; 2025 return -EINVAL; 2026 } 2027 2028 /* 2029 * Set our address to the address our first peer (i.e. monitor) 2030 * sees that we are connecting from. If we are behind some sort 2031 * of NAT and want to be identified by some private (not NATed) 2032 * address, ip option should be used. 2033 */ 2034 if (ceph_addr_is_blank(my_addr)) { 2035 memcpy(&my_addr->in_addr, &addr_for_me.in_addr, 2036 sizeof(my_addr->in_addr)); 2037 ceph_addr_set_port(my_addr, 0); 2038 dout("%s con %p set my addr %s, as seen by peer %s\n", 2039 __func__, con, ceph_pr_addr(my_addr), 2040 ceph_pr_addr(&con->peer_addr)); 2041 } else { 2042 dout("%s con %p my addr already set %s\n", 2043 __func__, con, ceph_pr_addr(my_addr)); 2044 } 2045 2046 WARN_ON(ceph_addr_is_blank(my_addr) || ceph_addr_port(my_addr)); 2047 WARN_ON(my_addr->type != CEPH_ENTITY_ADDR_TYPE_ANY); 2048 WARN_ON(!my_addr->nonce); 2049 2050 /* no reset_out_kvecs() as our hello may still be pending */ 2051 ret = prepare_auth_request(con); 2052 if (ret) { 2053 if (ret != -EAGAIN) 2054 pr_err("prepare_auth_request failed: %d\n", ret); 2055 return ret; 2056 } 2057 2058 con->state = CEPH_CON_S_V2_AUTH; 2059 return 0; 2060 2061 bad: 2062 pr_err("failed to decode hello\n"); 2063 return -EINVAL; 2064 } 2065 2066 static int process_auth_bad_method(struct ceph_connection *con, 2067 void *p, void *end) 2068 { 2069 int allowed_protos[8], allowed_modes[8]; 2070 int allowed_proto_cnt, allowed_mode_cnt; 2071 int used_proto, result; 2072 int ret; 2073 int i; 2074 2075 if (con->state != CEPH_CON_S_V2_AUTH) { 2076 con->error_msg = "protocol error, unexpected auth_bad_method"; 2077 return -EINVAL; 2078 } 2079 2080 ceph_decode_32_safe(&p, end, used_proto, bad); 2081 ceph_decode_32_safe(&p, end, result, bad); 2082 dout("%s con %p used_proto %d result %d\n", __func__, con, used_proto, 2083 result); 2084 2085 ceph_decode_32_safe(&p, end, allowed_proto_cnt, bad); 2086 if (allowed_proto_cnt > ARRAY_SIZE(allowed_protos)) { 2087 pr_err("allowed_protos too big %d\n", allowed_proto_cnt); 2088 return -EINVAL; 2089 } 2090 for (i = 0; i < allowed_proto_cnt; i++) { 2091 ceph_decode_32_safe(&p, end, allowed_protos[i], bad); 2092 dout("%s con %p allowed_protos[%d] %d\n", __func__, con, 2093 i, allowed_protos[i]); 2094 } 2095 2096 ceph_decode_32_safe(&p, end, allowed_mode_cnt, bad); 2097 if (allowed_mode_cnt > ARRAY_SIZE(allowed_modes)) { 2098 pr_err("allowed_modes too big %d\n", allowed_mode_cnt); 2099 return -EINVAL; 2100 } 2101 for (i = 0; i < allowed_mode_cnt; i++) { 2102 
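		/*
		 * Each entry is a __le32 connection mode the server is
		 * willing to accept (e.g. CEPH_CON_MODE_CRC or
		 * CEPH_CON_MODE_SECURE).  Together with allowed_protos[],
		 * it is handed to ->handle_auth_bad_method() below so the
		 * auth layer can pick a different method/mode and retry.
		 */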
ceph_decode_32_safe(&p, end, allowed_modes[i], bad); 2103 dout("%s con %p allowed_modes[%d] %d\n", __func__, con, 2104 i, allowed_modes[i]); 2105 } 2106 2107 mutex_unlock(&con->mutex); 2108 ret = con->ops->handle_auth_bad_method(con, used_proto, result, 2109 allowed_protos, 2110 allowed_proto_cnt, 2111 allowed_modes, 2112 allowed_mode_cnt); 2113 mutex_lock(&con->mutex); 2114 if (con->state != CEPH_CON_S_V2_AUTH) { 2115 dout("%s con %p state changed to %d\n", __func__, con, 2116 con->state); 2117 return -EAGAIN; 2118 } 2119 2120 dout("%s con %p handle_auth_bad_method ret %d\n", __func__, con, ret); 2121 return ret; 2122 2123 bad: 2124 pr_err("failed to decode auth_bad_method\n"); 2125 return -EINVAL; 2126 } 2127 2128 static int process_auth_reply_more(struct ceph_connection *con, 2129 void *p, void *end) 2130 { 2131 int payload_len; 2132 int ret; 2133 2134 if (con->state != CEPH_CON_S_V2_AUTH) { 2135 con->error_msg = "protocol error, unexpected auth_reply_more"; 2136 return -EINVAL; 2137 } 2138 2139 ceph_decode_32_safe(&p, end, payload_len, bad); 2140 ceph_decode_need(&p, end, payload_len, bad); 2141 2142 dout("%s con %p payload_len %d\n", __func__, con, payload_len); 2143 2144 reset_out_kvecs(con); 2145 ret = prepare_auth_request_more(con, p, payload_len); 2146 if (ret) { 2147 if (ret != -EAGAIN) 2148 pr_err("prepare_auth_request_more failed: %d\n", ret); 2149 return ret; 2150 } 2151 2152 return 0; 2153 2154 bad: 2155 pr_err("failed to decode auth_reply_more\n"); 2156 return -EINVAL; 2157 } 2158 2159 /* 2160 * Align session_key and con_secret to avoid GFP_ATOMIC allocation 2161 * inside crypto_shash_setkey() and crypto_aead_setkey() called from 2162 * setup_crypto(). __aligned(16) isn't guaranteed to work for stack 2163 * objects, so do it by hand. 
2164 */ 2165 static int process_auth_done(struct ceph_connection *con, void *p, void *end) 2166 { 2167 u8 session_key_buf[CEPH_KEY_LEN + 16]; 2168 u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16]; 2169 u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16); 2170 u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16); 2171 int session_key_len, con_secret_len; 2172 int payload_len; 2173 u64 global_id; 2174 int ret; 2175 2176 if (con->state != CEPH_CON_S_V2_AUTH) { 2177 con->error_msg = "protocol error, unexpected auth_done"; 2178 return -EINVAL; 2179 } 2180 2181 ceph_decode_64_safe(&p, end, global_id, bad); 2182 ceph_decode_32_safe(&p, end, con->v2.con_mode, bad); 2183 ceph_decode_32_safe(&p, end, payload_len, bad); 2184 2185 dout("%s con %p global_id %llu con_mode %d payload_len %d\n", 2186 __func__, con, global_id, con->v2.con_mode, payload_len); 2187 2188 mutex_unlock(&con->mutex); 2189 session_key_len = 0; 2190 con_secret_len = 0; 2191 ret = con->ops->handle_auth_done(con, global_id, p, payload_len, 2192 session_key, &session_key_len, 2193 con_secret, &con_secret_len); 2194 mutex_lock(&con->mutex); 2195 if (con->state != CEPH_CON_S_V2_AUTH) { 2196 dout("%s con %p state changed to %d\n", __func__, con, 2197 con->state); 2198 ret = -EAGAIN; 2199 goto out; 2200 } 2201 2202 dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret); 2203 if (ret) 2204 goto out; 2205 2206 ret = setup_crypto(con, session_key, session_key_len, con_secret, 2207 con_secret_len); 2208 if (ret) 2209 goto out; 2210 2211 reset_out_kvecs(con); 2212 ret = prepare_auth_signature(con); 2213 if (ret) { 2214 pr_err("prepare_auth_signature failed: %d\n", ret); 2215 goto out; 2216 } 2217 2218 con->state = CEPH_CON_S_V2_AUTH_SIGNATURE; 2219 2220 out: 2221 memzero_explicit(session_key_buf, sizeof(session_key_buf)); 2222 memzero_explicit(con_secret_buf, sizeof(con_secret_buf)); 2223 return ret; 2224 2225 bad: 2226 pr_err("failed to decode auth_done\n"); 2227 return -EINVAL; 2228 } 2229 2230 static int process_auth_signature(struct ceph_connection *con, 2231 void *p, void *end) 2232 { 2233 u8 hmac[SHA256_DIGEST_SIZE]; 2234 int ret; 2235 2236 if (con->state != CEPH_CON_S_V2_AUTH_SIGNATURE) { 2237 con->error_msg = "protocol error, unexpected auth_signature"; 2238 return -EINVAL; 2239 } 2240 2241 ret = hmac_sha256(con, con->v2.out_sign_kvecs, 2242 con->v2.out_sign_kvec_cnt, hmac); 2243 if (ret) 2244 return ret; 2245 2246 ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad); 2247 if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) { 2248 con->error_msg = "integrity error, bad auth signature"; 2249 return -EBADMSG; 2250 } 2251 2252 dout("%s con %p auth signature ok\n", __func__, con); 2253 2254 /* no reset_out_kvecs() as our auth_signature may still be pending */ 2255 if (!con->v2.server_cookie) { 2256 ret = prepare_client_ident(con); 2257 if (ret) { 2258 pr_err("prepare_client_ident failed: %d\n", ret); 2259 return ret; 2260 } 2261 2262 con->state = CEPH_CON_S_V2_SESSION_CONNECT; 2263 } else { 2264 ret = prepare_session_reconnect(con); 2265 if (ret) { 2266 pr_err("prepare_session_reconnect failed: %d\n", ret); 2267 return ret; 2268 } 2269 2270 con->state = CEPH_CON_S_V2_SESSION_RECONNECT; 2271 } 2272 2273 return 0; 2274 2275 bad: 2276 pr_err("failed to decode auth_signature\n"); 2277 return -EINVAL; 2278 } 2279 2280 static int process_server_ident(struct ceph_connection *con, 2281 void *p, void *end) 2282 { 2283 struct ceph_client *client = from_msgr(con->msgr); 2284 u64 features, required_features; 2285 struct ceph_entity_addr addr; 2286 
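	/*
	 * Locals for the remaining server_ident fields; they are decoded
	 * below in wire order: entity addrvec, gid, global seq, supported
	 * and required feature bits, flags and the server cookie.
	 */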
u64 global_seq; 2287 u64 global_id; 2288 u64 cookie; 2289 u64 flags; 2290 int ret; 2291 2292 if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) { 2293 con->error_msg = "protocol error, unexpected server_ident"; 2294 return -EINVAL; 2295 } 2296 2297 ret = ceph_decode_entity_addrvec(&p, end, true, &addr); 2298 if (ret) { 2299 pr_err("failed to decode server addrs: %d\n", ret); 2300 return ret; 2301 } 2302 2303 ceph_decode_64_safe(&p, end, global_id, bad); 2304 ceph_decode_64_safe(&p, end, global_seq, bad); 2305 ceph_decode_64_safe(&p, end, features, bad); 2306 ceph_decode_64_safe(&p, end, required_features, bad); 2307 ceph_decode_64_safe(&p, end, flags, bad); 2308 ceph_decode_64_safe(&p, end, cookie, bad); 2309 2310 dout("%s con %p addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx flags 0x%llx cookie 0x%llx\n", 2311 __func__, con, ceph_pr_addr(&addr), le32_to_cpu(addr.nonce), 2312 global_id, global_seq, features, required_features, flags, cookie); 2313 2314 /* is this who we intended to talk to? */ 2315 if (memcmp(&addr, &con->peer_addr, sizeof(con->peer_addr))) { 2316 pr_err("bad peer addr/nonce, want %s/%u, got %s/%u\n", 2317 ceph_pr_addr(&con->peer_addr), 2318 le32_to_cpu(con->peer_addr.nonce), 2319 ceph_pr_addr(&addr), le32_to_cpu(addr.nonce)); 2320 con->error_msg = "wrong peer at address"; 2321 return -EINVAL; 2322 } 2323 2324 if (client->required_features & ~features) { 2325 pr_err("RADOS feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n", 2326 features, client->required_features & ~features); 2327 con->error_msg = "missing required protocol features"; 2328 return -EINVAL; 2329 } 2330 2331 /* 2332 * Both name->type and name->num are set in ceph_con_open() but 2333 * name->num may be bogus in the initial monmap. name->type is 2334 * verified in handle_hello(). 
2335 */ 2336 WARN_ON(!con->peer_name.type); 2337 con->peer_name.num = cpu_to_le64(global_id); 2338 con->v2.peer_global_seq = global_seq; 2339 con->peer_features = features; 2340 WARN_ON(required_features & ~client->supported_features); 2341 con->v2.server_cookie = cookie; 2342 2343 if (flags & CEPH_MSG_CONNECT_LOSSY) { 2344 ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX); 2345 WARN_ON(con->v2.server_cookie); 2346 } else { 2347 WARN_ON(!con->v2.server_cookie); 2348 } 2349 2350 clear_in_sign_kvecs(con); 2351 clear_out_sign_kvecs(con); 2352 free_conn_bufs(con); 2353 con->delay = 0; /* reset backoff memory */ 2354 2355 con->state = CEPH_CON_S_OPEN; 2356 con->v2.out_state = OUT_S_GET_NEXT; 2357 return 0; 2358 2359 bad: 2360 pr_err("failed to decode server_ident\n"); 2361 return -EINVAL; 2362 } 2363 2364 static int process_ident_missing_features(struct ceph_connection *con, 2365 void *p, void *end) 2366 { 2367 struct ceph_client *client = from_msgr(con->msgr); 2368 u64 missing_features; 2369 2370 if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) { 2371 con->error_msg = "protocol error, unexpected ident_missing_features"; 2372 return -EINVAL; 2373 } 2374 2375 ceph_decode_64_safe(&p, end, missing_features, bad); 2376 pr_err("RADOS feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n", 2377 client->supported_features, missing_features); 2378 con->error_msg = "missing required protocol features"; 2379 return -EINVAL; 2380 2381 bad: 2382 pr_err("failed to decode ident_missing_features\n"); 2383 return -EINVAL; 2384 } 2385 2386 static int process_session_reconnect_ok(struct ceph_connection *con, 2387 void *p, void *end) 2388 { 2389 u64 seq; 2390 2391 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) { 2392 con->error_msg = "protocol error, unexpected session_reconnect_ok"; 2393 return -EINVAL; 2394 } 2395 2396 ceph_decode_64_safe(&p, end, seq, bad); 2397 2398 dout("%s con %p seq %llu\n", __func__, con, seq); 2399 ceph_con_discard_requeued(con, seq); 2400 2401 clear_in_sign_kvecs(con); 2402 clear_out_sign_kvecs(con); 2403 free_conn_bufs(con); 2404 con->delay = 0; /* reset backoff memory */ 2405 2406 con->state = CEPH_CON_S_OPEN; 2407 con->v2.out_state = OUT_S_GET_NEXT; 2408 return 0; 2409 2410 bad: 2411 pr_err("failed to decode session_reconnect_ok\n"); 2412 return -EINVAL; 2413 } 2414 2415 static int process_session_retry(struct ceph_connection *con, 2416 void *p, void *end) 2417 { 2418 u64 connect_seq; 2419 int ret; 2420 2421 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) { 2422 con->error_msg = "protocol error, unexpected session_retry"; 2423 return -EINVAL; 2424 } 2425 2426 ceph_decode_64_safe(&p, end, connect_seq, bad); 2427 2428 dout("%s con %p connect_seq %llu\n", __func__, con, connect_seq); 2429 WARN_ON(connect_seq <= con->v2.connect_seq); 2430 con->v2.connect_seq = connect_seq + 1; 2431 2432 free_conn_bufs(con); 2433 2434 reset_out_kvecs(con); 2435 ret = prepare_session_reconnect(con); 2436 if (ret) { 2437 pr_err("prepare_session_reconnect (cseq) failed: %d\n", ret); 2438 return ret; 2439 } 2440 2441 return 0; 2442 2443 bad: 2444 pr_err("failed to decode session_retry\n"); 2445 return -EINVAL; 2446 } 2447 2448 static int process_session_retry_global(struct ceph_connection *con, 2449 void *p, void *end) 2450 { 2451 u64 global_seq; 2452 int ret; 2453 2454 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) { 2455 con->error_msg = "protocol error, unexpected session_retry_global"; 2456 return -EINVAL; 2457 } 2458 2459 ceph_decode_64_safe(&p, end, global_seq, bad); 2460 
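	/*
	 * The server has already seen a connection from us with a global
	 * seq at least this high (typically a racing connection used a
	 * higher one), so ours was rejected as stale.  Pick a strictly
	 * greater value and reconnect: ceph_get_global_seq() bumps the
	 * messenger-wide counter above the supplied hint, e.g. if the
	 * server reports 5 while our counter is at 3, the counter becomes
	 * 6 and that is what goes into the new session_reconnect frame.
	 */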
2461 dout("%s con %p global_seq %llu\n", __func__, con, global_seq); 2462 WARN_ON(global_seq <= con->v2.global_seq); 2463 con->v2.global_seq = ceph_get_global_seq(con->msgr, global_seq); 2464 2465 free_conn_bufs(con); 2466 2467 reset_out_kvecs(con); 2468 ret = prepare_session_reconnect(con); 2469 if (ret) { 2470 pr_err("prepare_session_reconnect (gseq) failed: %d\n", ret); 2471 return ret; 2472 } 2473 2474 return 0; 2475 2476 bad: 2477 pr_err("failed to decode session_retry_global\n"); 2478 return -EINVAL; 2479 } 2480 2481 static int process_session_reset(struct ceph_connection *con, 2482 void *p, void *end) 2483 { 2484 bool full; 2485 int ret; 2486 2487 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) { 2488 con->error_msg = "protocol error, unexpected session_reset"; 2489 return -EINVAL; 2490 } 2491 2492 ceph_decode_8_safe(&p, end, full, bad); 2493 if (!full) { 2494 con->error_msg = "protocol error, bad session_reset"; 2495 return -EINVAL; 2496 } 2497 2498 pr_info("%s%lld %s session reset\n", ENTITY_NAME(con->peer_name), 2499 ceph_pr_addr(&con->peer_addr)); 2500 ceph_con_reset_session(con); 2501 2502 mutex_unlock(&con->mutex); 2503 if (con->ops->peer_reset) 2504 con->ops->peer_reset(con); 2505 mutex_lock(&con->mutex); 2506 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) { 2507 dout("%s con %p state changed to %d\n", __func__, con, 2508 con->state); 2509 return -EAGAIN; 2510 } 2511 2512 free_conn_bufs(con); 2513 2514 reset_out_kvecs(con); 2515 ret = prepare_client_ident(con); 2516 if (ret) { 2517 pr_err("prepare_client_ident (rst) failed: %d\n", ret); 2518 return ret; 2519 } 2520 2521 con->state = CEPH_CON_S_V2_SESSION_CONNECT; 2522 return 0; 2523 2524 bad: 2525 pr_err("failed to decode session_reset\n"); 2526 return -EINVAL; 2527 } 2528 2529 static int process_keepalive2_ack(struct ceph_connection *con, 2530 void *p, void *end) 2531 { 2532 if (con->state != CEPH_CON_S_OPEN) { 2533 con->error_msg = "protocol error, unexpected keepalive2_ack"; 2534 return -EINVAL; 2535 } 2536 2537 ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad); 2538 ceph_decode_timespec64(&con->last_keepalive_ack, p); 2539 2540 dout("%s con %p timestamp %lld.%09ld\n", __func__, con, 2541 con->last_keepalive_ack.tv_sec, con->last_keepalive_ack.tv_nsec); 2542 2543 return 0; 2544 2545 bad: 2546 pr_err("failed to decode keepalive2_ack\n"); 2547 return -EINVAL; 2548 } 2549 2550 static int process_ack(struct ceph_connection *con, void *p, void *end) 2551 { 2552 u64 seq; 2553 2554 if (con->state != CEPH_CON_S_OPEN) { 2555 con->error_msg = "protocol error, unexpected ack"; 2556 return -EINVAL; 2557 } 2558 2559 ceph_decode_64_safe(&p, end, seq, bad); 2560 2561 dout("%s con %p seq %llu\n", __func__, con, seq); 2562 ceph_con_discard_sent(con, seq); 2563 return 0; 2564 2565 bad: 2566 pr_err("failed to decode ack\n"); 2567 return -EINVAL; 2568 } 2569 2570 static int process_control(struct ceph_connection *con, void *p, void *end) 2571 { 2572 int tag = con->v2.in_desc.fd_tag; 2573 int ret; 2574 2575 dout("%s con %p tag %d len %d\n", __func__, con, tag, (int)(end - p)); 2576 2577 switch (tag) { 2578 case FRAME_TAG_HELLO: 2579 ret = process_hello(con, p, end); 2580 break; 2581 case FRAME_TAG_AUTH_BAD_METHOD: 2582 ret = process_auth_bad_method(con, p, end); 2583 break; 2584 case FRAME_TAG_AUTH_REPLY_MORE: 2585 ret = process_auth_reply_more(con, p, end); 2586 break; 2587 case FRAME_TAG_AUTH_DONE: 2588 ret = process_auth_done(con, p, end); 2589 break; 2590 case FRAME_TAG_AUTH_SIGNATURE: 2591 ret = 
process_auth_signature(con, p, end); 2592 break; 2593 case FRAME_TAG_SERVER_IDENT: 2594 ret = process_server_ident(con, p, end); 2595 break; 2596 case FRAME_TAG_IDENT_MISSING_FEATURES: 2597 ret = process_ident_missing_features(con, p, end); 2598 break; 2599 case FRAME_TAG_SESSION_RECONNECT_OK: 2600 ret = process_session_reconnect_ok(con, p, end); 2601 break; 2602 case FRAME_TAG_SESSION_RETRY: 2603 ret = process_session_retry(con, p, end); 2604 break; 2605 case FRAME_TAG_SESSION_RETRY_GLOBAL: 2606 ret = process_session_retry_global(con, p, end); 2607 break; 2608 case FRAME_TAG_SESSION_RESET: 2609 ret = process_session_reset(con, p, end); 2610 break; 2611 case FRAME_TAG_KEEPALIVE2_ACK: 2612 ret = process_keepalive2_ack(con, p, end); 2613 break; 2614 case FRAME_TAG_ACK: 2615 ret = process_ack(con, p, end); 2616 break; 2617 default: 2618 pr_err("bad tag %d\n", tag); 2619 con->error_msg = "protocol error, bad tag"; 2620 return -EINVAL; 2621 } 2622 if (ret) { 2623 dout("%s con %p error %d\n", __func__, con, ret); 2624 return ret; 2625 } 2626 2627 prepare_read_preamble(con); 2628 return 0; 2629 } 2630 2631 /* 2632 * Return: 2633 * 1 - con->in_msg set, read message 2634 * 0 - skip message 2635 * <0 - error 2636 */ 2637 static int process_message_header(struct ceph_connection *con, 2638 void *p, void *end) 2639 { 2640 struct ceph_frame_desc *desc = &con->v2.in_desc; 2641 struct ceph_msg_header2 *hdr2 = p; 2642 struct ceph_msg_header hdr; 2643 int skip; 2644 int ret; 2645 u64 seq; 2646 2647 /* verify seq# */ 2648 seq = le64_to_cpu(hdr2->seq); 2649 if ((s64)seq - (s64)con->in_seq < 1) { 2650 pr_info("%s%lld %s skipping old message: seq %llu, expected %llu\n", 2651 ENTITY_NAME(con->peer_name), 2652 ceph_pr_addr(&con->peer_addr), 2653 seq, con->in_seq + 1); 2654 return 0; 2655 } 2656 if ((s64)seq - (s64)con->in_seq > 1) { 2657 pr_err("bad seq %llu, expected %llu\n", seq, con->in_seq + 1); 2658 con->error_msg = "bad message sequence # for incoming message"; 2659 return -EBADE; 2660 } 2661 2662 ceph_con_discard_sent(con, le64_to_cpu(hdr2->ack_seq)); 2663 2664 fill_header(&hdr, hdr2, desc->fd_lens[1], desc->fd_lens[2], 2665 desc->fd_lens[3], &con->peer_name); 2666 ret = ceph_con_in_msg_alloc(con, &hdr, &skip); 2667 if (ret) 2668 return ret; 2669 2670 WARN_ON(!con->in_msg ^ skip); 2671 if (skip) 2672 return 0; 2673 2674 WARN_ON(!con->in_msg); 2675 WARN_ON(con->in_msg->con != con); 2676 return 1; 2677 } 2678 2679 static int process_message(struct ceph_connection *con) 2680 { 2681 ceph_con_process_message(con); 2682 2683 /* 2684 * We could have been closed by ceph_con_close() because 2685 * ceph_con_process_message() temporarily drops con->mutex. 
2686 */ 2687 if (con->state != CEPH_CON_S_OPEN) { 2688 dout("%s con %p state changed to %d\n", __func__, con, 2689 con->state); 2690 return -EAGAIN; 2691 } 2692 2693 prepare_read_preamble(con); 2694 return 0; 2695 } 2696 2697 static int __handle_control(struct ceph_connection *con, void *p) 2698 { 2699 void *end = p + con->v2.in_desc.fd_lens[0]; 2700 struct ceph_msg *msg; 2701 int ret; 2702 2703 if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE) 2704 return process_control(con, p, end); 2705 2706 ret = process_message_header(con, p, end); 2707 if (ret < 0) 2708 return ret; 2709 if (ret == 0) { 2710 prepare_skip_message(con); 2711 return 0; 2712 } 2713 2714 msg = con->in_msg; /* set in process_message_header() */ 2715 if (front_len(msg)) { 2716 WARN_ON(front_len(msg) > msg->front_alloc_len); 2717 msg->front.iov_len = front_len(msg); 2718 } else { 2719 msg->front.iov_len = 0; 2720 } 2721 if (middle_len(msg)) { 2722 WARN_ON(middle_len(msg) > msg->middle->alloc_len); 2723 msg->middle->vec.iov_len = middle_len(msg); 2724 } else if (msg->middle) { 2725 msg->middle->vec.iov_len = 0; 2726 } 2727 2728 if (!front_len(msg) && !middle_len(msg) && !data_len(msg)) 2729 return process_message(con); 2730 2731 if (con_secure(con)) 2732 return prepare_read_tail_secure(con); 2733 2734 return prepare_read_tail_plain(con); 2735 } 2736 2737 static int handle_preamble(struct ceph_connection *con) 2738 { 2739 struct ceph_frame_desc *desc = &con->v2.in_desc; 2740 int ret; 2741 2742 if (con_secure(con)) { 2743 ret = decrypt_preamble(con); 2744 if (ret) { 2745 if (ret == -EBADMSG) 2746 con->error_msg = "integrity error, bad preamble auth tag"; 2747 return ret; 2748 } 2749 } 2750 2751 ret = decode_preamble(con->v2.in_buf, desc); 2752 if (ret) { 2753 if (ret == -EBADMSG) 2754 con->error_msg = "integrity error, bad crc"; 2755 else 2756 con->error_msg = "protocol error, bad preamble"; 2757 return ret; 2758 } 2759 2760 dout("%s con %p tag %d seg_cnt %d %d+%d+%d+%d\n", __func__, 2761 con, desc->fd_tag, desc->fd_seg_cnt, desc->fd_lens[0], 2762 desc->fd_lens[1], desc->fd_lens[2], desc->fd_lens[3]); 2763 2764 if (!con_secure(con)) 2765 return prepare_read_control(con); 2766 2767 if (desc->fd_lens[0] > CEPH_PREAMBLE_INLINE_LEN) 2768 return prepare_read_control_remainder(con); 2769 2770 return __handle_control(con, CTRL_BODY(con->v2.in_buf)); 2771 } 2772 2773 static int handle_control(struct ceph_connection *con) 2774 { 2775 int ctrl_len = con->v2.in_desc.fd_lens[0]; 2776 void *buf; 2777 int ret; 2778 2779 WARN_ON(con_secure(con)); 2780 2781 ret = verify_control_crc(con); 2782 if (ret) { 2783 con->error_msg = "integrity error, bad crc"; 2784 return ret; 2785 } 2786 2787 if (con->state == CEPH_CON_S_V2_AUTH) { 2788 buf = alloc_conn_buf(con, ctrl_len); 2789 if (!buf) 2790 return -ENOMEM; 2791 2792 memcpy(buf, con->v2.in_kvecs[0].iov_base, ctrl_len); 2793 return __handle_control(con, buf); 2794 } 2795 2796 return __handle_control(con, con->v2.in_kvecs[0].iov_base); 2797 } 2798 2799 static int handle_control_remainder(struct ceph_connection *con) 2800 { 2801 int ret; 2802 2803 WARN_ON(!con_secure(con)); 2804 2805 ret = decrypt_control_remainder(con); 2806 if (ret) { 2807 if (ret == -EBADMSG) 2808 con->error_msg = "integrity error, bad control remainder auth tag"; 2809 return ret; 2810 } 2811 2812 return __handle_control(con, con->v2.in_kvecs[0].iov_base - 2813 CEPH_PREAMBLE_INLINE_LEN); 2814 } 2815 2816 static int handle_epilogue(struct ceph_connection *con) 2817 { 2818 u32 front_crc, middle_crc, data_crc; 2819 int ret; 2820 2821 if 
(con_secure(con)) { 2822 ret = decrypt_tail(con); 2823 if (ret) { 2824 if (ret == -EBADMSG) 2825 con->error_msg = "integrity error, bad epilogue auth tag"; 2826 return ret; 2827 } 2828 2829 /* just late_status */ 2830 ret = decode_epilogue(con->v2.in_buf, NULL, NULL, NULL); 2831 if (ret) { 2832 con->error_msg = "protocol error, bad epilogue"; 2833 return ret; 2834 } 2835 } else { 2836 ret = decode_epilogue(con->v2.in_buf, &front_crc, 2837 &middle_crc, &data_crc); 2838 if (ret) { 2839 con->error_msg = "protocol error, bad epilogue"; 2840 return ret; 2841 } 2842 2843 ret = verify_epilogue_crcs(con, front_crc, middle_crc, 2844 data_crc); 2845 if (ret) { 2846 con->error_msg = "integrity error, bad crc"; 2847 return ret; 2848 } 2849 } 2850 2851 return process_message(con); 2852 } 2853 2854 static void finish_skip(struct ceph_connection *con) 2855 { 2856 dout("%s con %p\n", __func__, con); 2857 2858 if (con_secure(con)) 2859 gcm_inc_nonce(&con->v2.in_gcm_nonce); 2860 2861 __finish_skip(con); 2862 } 2863 2864 static int populate_in_iter(struct ceph_connection *con) 2865 { 2866 int ret; 2867 2868 dout("%s con %p state %d in_state %d\n", __func__, con, con->state, 2869 con->v2.in_state); 2870 WARN_ON(iov_iter_count(&con->v2.in_iter)); 2871 2872 if (con->state == CEPH_CON_S_V2_BANNER_PREFIX) { 2873 ret = process_banner_prefix(con); 2874 } else if (con->state == CEPH_CON_S_V2_BANNER_PAYLOAD) { 2875 ret = process_banner_payload(con); 2876 } else if ((con->state >= CEPH_CON_S_V2_HELLO && 2877 con->state <= CEPH_CON_S_V2_SESSION_RECONNECT) || 2878 con->state == CEPH_CON_S_OPEN) { 2879 switch (con->v2.in_state) { 2880 case IN_S_HANDLE_PREAMBLE: 2881 ret = handle_preamble(con); 2882 break; 2883 case IN_S_HANDLE_CONTROL: 2884 ret = handle_control(con); 2885 break; 2886 case IN_S_HANDLE_CONTROL_REMAINDER: 2887 ret = handle_control_remainder(con); 2888 break; 2889 case IN_S_PREPARE_READ_DATA: 2890 ret = prepare_read_data(con); 2891 break; 2892 case IN_S_PREPARE_READ_DATA_CONT: 2893 prepare_read_data_cont(con); 2894 ret = 0; 2895 break; 2896 case IN_S_PREPARE_READ_ENC_PAGE: 2897 prepare_read_enc_page(con); 2898 ret = 0; 2899 break; 2900 case IN_S_HANDLE_EPILOGUE: 2901 ret = handle_epilogue(con); 2902 break; 2903 case IN_S_FINISH_SKIP: 2904 finish_skip(con); 2905 ret = 0; 2906 break; 2907 default: 2908 WARN(1, "bad in_state %d", con->v2.in_state); 2909 return -EINVAL; 2910 } 2911 } else { 2912 WARN(1, "bad state %d", con->state); 2913 return -EINVAL; 2914 } 2915 if (ret) { 2916 dout("%s con %p error %d\n", __func__, con, ret); 2917 return ret; 2918 } 2919 2920 if (WARN_ON(!iov_iter_count(&con->v2.in_iter))) 2921 return -ENODATA; 2922 dout("%s con %p populated %zu\n", __func__, con, 2923 iov_iter_count(&con->v2.in_iter)); 2924 return 1; 2925 } 2926 2927 int ceph_con_v2_try_read(struct ceph_connection *con) 2928 { 2929 int ret; 2930 2931 dout("%s con %p state %d need %zu\n", __func__, con, con->state, 2932 iov_iter_count(&con->v2.in_iter)); 2933 2934 if (con->state == CEPH_CON_S_PREOPEN) 2935 return 0; 2936 2937 /* 2938 * We should always have something pending here. If not, 2939 * avoid calling populate_in_iter() as if we read something 2940 * (ceph_tcp_recv() would immediately return 1). 
2941 */ 2942 if (WARN_ON(!iov_iter_count(&con->v2.in_iter))) 2943 return -ENODATA; 2944 2945 for (;;) { 2946 ret = ceph_tcp_recv(con); 2947 if (ret <= 0) 2948 return ret; 2949 2950 ret = populate_in_iter(con); 2951 if (ret <= 0) { 2952 if (ret && ret != -EAGAIN && !con->error_msg) 2953 con->error_msg = "read processing error"; 2954 return ret; 2955 } 2956 } 2957 } 2958 2959 static void queue_data(struct ceph_connection *con) 2960 { 2961 struct bio_vec bv; 2962 2963 con->v2.out_epil.data_crc = -1; 2964 ceph_msg_data_cursor_init(&con->v2.out_cursor, con->out_msg, 2965 data_len(con->out_msg)); 2966 2967 get_bvec_at(&con->v2.out_cursor, &bv); 2968 set_out_bvec(con, &bv, true); 2969 con->v2.out_state = OUT_S_QUEUE_DATA_CONT; 2970 } 2971 2972 static void queue_data_cont(struct ceph_connection *con) 2973 { 2974 struct bio_vec bv; 2975 2976 con->v2.out_epil.data_crc = ceph_crc32c_page( 2977 con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page, 2978 con->v2.out_bvec.bv_offset, con->v2.out_bvec.bv_len); 2979 2980 ceph_msg_data_advance(&con->v2.out_cursor, con->v2.out_bvec.bv_len); 2981 if (con->v2.out_cursor.total_resid) { 2982 get_bvec_at(&con->v2.out_cursor, &bv); 2983 set_out_bvec(con, &bv, true); 2984 WARN_ON(con->v2.out_state != OUT_S_QUEUE_DATA_CONT); 2985 return; 2986 } 2987 2988 /* 2989 * We've written all data. Queue epilogue. Once it's written, 2990 * we are done. 2991 */ 2992 reset_out_kvecs(con); 2993 prepare_epilogue_plain(con, false); 2994 con->v2.out_state = OUT_S_FINISH_MESSAGE; 2995 } 2996 2997 static void queue_enc_page(struct ceph_connection *con) 2998 { 2999 struct bio_vec bv; 3000 3001 dout("%s con %p i %d resid %d\n", __func__, con, con->v2.out_enc_i, 3002 con->v2.out_enc_resid); 3003 WARN_ON(!con->v2.out_enc_resid); 3004 3005 bv.bv_page = con->v2.out_enc_pages[con->v2.out_enc_i]; 3006 bv.bv_offset = 0; 3007 bv.bv_len = min(con->v2.out_enc_resid, (int)PAGE_SIZE); 3008 3009 set_out_bvec(con, &bv, false); 3010 con->v2.out_enc_i++; 3011 con->v2.out_enc_resid -= bv.bv_len; 3012 3013 if (con->v2.out_enc_resid) { 3014 WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE); 3015 return; 3016 } 3017 3018 /* 3019 * We've queued the last piece of ciphertext (ending with 3020 * epilogue) + auth tag. Once it's written, we are done. 3021 */ 3022 WARN_ON(con->v2.out_enc_i != con->v2.out_enc_page_cnt); 3023 con->v2.out_state = OUT_S_FINISH_MESSAGE; 3024 } 3025 3026 static void queue_zeros(struct ceph_connection *con) 3027 { 3028 dout("%s con %p out_zero %d\n", __func__, con, con->v2.out_zero); 3029 3030 if (con->v2.out_zero) { 3031 set_out_bvec_zero(con); 3032 con->v2.out_zero -= con->v2.out_bvec.bv_len; 3033 con->v2.out_state = OUT_S_QUEUE_ZEROS; 3034 return; 3035 } 3036 3037 /* 3038 * We've zero-filled everything up to epilogue. Queue epilogue 3039 * with late_status set to ABORTED and crcs adjusted for zeros. 3040 * Once it's written, we are done patching up for the revoke. 
3041 */ 3042 reset_out_kvecs(con); 3043 prepare_epilogue_plain(con, true); 3044 con->v2.out_state = OUT_S_FINISH_MESSAGE; 3045 } 3046 3047 static void finish_message(struct ceph_connection *con) 3048 { 3049 dout("%s con %p msg %p\n", __func__, con, con->out_msg); 3050 3051 /* we end up here both plain and secure modes */ 3052 if (con->v2.out_enc_pages) { 3053 WARN_ON(!con->v2.out_enc_page_cnt); 3054 ceph_release_page_vector(con->v2.out_enc_pages, 3055 con->v2.out_enc_page_cnt); 3056 con->v2.out_enc_pages = NULL; 3057 con->v2.out_enc_page_cnt = 0; 3058 } 3059 /* message may have been revoked */ 3060 if (con->out_msg) { 3061 ceph_msg_put(con->out_msg); 3062 con->out_msg = NULL; 3063 } 3064 3065 con->v2.out_state = OUT_S_GET_NEXT; 3066 } 3067 3068 static int populate_out_iter(struct ceph_connection *con) 3069 { 3070 int ret; 3071 3072 dout("%s con %p state %d out_state %d\n", __func__, con, con->state, 3073 con->v2.out_state); 3074 WARN_ON(iov_iter_count(&con->v2.out_iter)); 3075 3076 if (con->state != CEPH_CON_S_OPEN) { 3077 WARN_ON(con->state < CEPH_CON_S_V2_BANNER_PREFIX || 3078 con->state > CEPH_CON_S_V2_SESSION_RECONNECT); 3079 goto nothing_pending; 3080 } 3081 3082 switch (con->v2.out_state) { 3083 case OUT_S_QUEUE_DATA: 3084 WARN_ON(!con->out_msg); 3085 queue_data(con); 3086 goto populated; 3087 case OUT_S_QUEUE_DATA_CONT: 3088 WARN_ON(!con->out_msg); 3089 queue_data_cont(con); 3090 goto populated; 3091 case OUT_S_QUEUE_ENC_PAGE: 3092 queue_enc_page(con); 3093 goto populated; 3094 case OUT_S_QUEUE_ZEROS: 3095 WARN_ON(con->out_msg); /* revoked */ 3096 queue_zeros(con); 3097 goto populated; 3098 case OUT_S_FINISH_MESSAGE: 3099 finish_message(con); 3100 break; 3101 case OUT_S_GET_NEXT: 3102 break; 3103 default: 3104 WARN(1, "bad out_state %d", con->v2.out_state); 3105 return -EINVAL; 3106 } 3107 3108 WARN_ON(con->v2.out_state != OUT_S_GET_NEXT); 3109 if (ceph_con_flag_test_and_clear(con, CEPH_CON_F_KEEPALIVE_PENDING)) { 3110 ret = prepare_keepalive2(con); 3111 if (ret) { 3112 pr_err("prepare_keepalive2 failed: %d\n", ret); 3113 return ret; 3114 } 3115 } else if (!list_empty(&con->out_queue)) { 3116 ceph_con_get_out_msg(con); 3117 ret = prepare_message(con); 3118 if (ret) { 3119 pr_err("prepare_message failed: %d\n", ret); 3120 return ret; 3121 } 3122 } else if (con->in_seq > con->in_seq_acked) { 3123 ret = prepare_ack(con); 3124 if (ret) { 3125 pr_err("prepare_ack failed: %d\n", ret); 3126 return ret; 3127 } 3128 } else { 3129 goto nothing_pending; 3130 } 3131 3132 populated: 3133 if (WARN_ON(!iov_iter_count(&con->v2.out_iter))) 3134 return -ENODATA; 3135 dout("%s con %p populated %zu\n", __func__, con, 3136 iov_iter_count(&con->v2.out_iter)); 3137 return 1; 3138 3139 nothing_pending: 3140 WARN_ON(iov_iter_count(&con->v2.out_iter)); 3141 dout("%s con %p nothing pending\n", __func__, con); 3142 ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING); 3143 return 0; 3144 } 3145 3146 int ceph_con_v2_try_write(struct ceph_connection *con) 3147 { 3148 int ret; 3149 3150 dout("%s con %p state %d have %zu\n", __func__, con, con->state, 3151 iov_iter_count(&con->v2.out_iter)); 3152 3153 /* open the socket first? */ 3154 if (con->state == CEPH_CON_S_PREOPEN) { 3155 WARN_ON(con->peer_addr.type != CEPH_ENTITY_ADDR_TYPE_MSGR2); 3156 3157 /* 3158 * Always bump global_seq. Bump connect_seq only if 3159 * there is a session (i.e. we are reconnecting and will 3160 * send session_reconnect instead of client_ident). 
3161 */ 3162 con->v2.global_seq = ceph_get_global_seq(con->msgr, 0); 3163 if (con->v2.server_cookie) 3164 con->v2.connect_seq++; 3165 3166 ret = prepare_read_banner_prefix(con); 3167 if (ret) { 3168 pr_err("prepare_read_banner_prefix failed: %d\n", ret); 3169 con->error_msg = "connect error"; 3170 return ret; 3171 } 3172 3173 reset_out_kvecs(con); 3174 ret = prepare_banner(con); 3175 if (ret) { 3176 pr_err("prepare_banner failed: %d\n", ret); 3177 con->error_msg = "connect error"; 3178 return ret; 3179 } 3180 3181 ret = ceph_tcp_connect(con); 3182 if (ret) { 3183 pr_err("ceph_tcp_connect failed: %d\n", ret); 3184 con->error_msg = "connect error"; 3185 return ret; 3186 } 3187 } 3188 3189 if (!iov_iter_count(&con->v2.out_iter)) { 3190 ret = populate_out_iter(con); 3191 if (ret <= 0) { 3192 if (ret && ret != -EAGAIN && !con->error_msg) 3193 con->error_msg = "write processing error"; 3194 return ret; 3195 } 3196 } 3197 3198 tcp_sock_set_cork(con->sock->sk, true); 3199 for (;;) { 3200 ret = ceph_tcp_send(con); 3201 if (ret <= 0) 3202 break; 3203 3204 ret = populate_out_iter(con); 3205 if (ret <= 0) { 3206 if (ret && ret != -EAGAIN && !con->error_msg) 3207 con->error_msg = "write processing error"; 3208 break; 3209 } 3210 } 3211 3212 tcp_sock_set_cork(con->sock->sk, false); 3213 return ret; 3214 } 3215 3216 static u32 crc32c_zeros(u32 crc, int zero_len) 3217 { 3218 int len; 3219 3220 while (zero_len) { 3221 len = min(zero_len, (int)PAGE_SIZE); 3222 crc = crc32c(crc, page_address(ceph_zero_page), len); 3223 zero_len -= len; 3224 } 3225 3226 return crc; 3227 } 3228 3229 static void prepare_zero_front(struct ceph_connection *con, int resid) 3230 { 3231 int sent; 3232 3233 WARN_ON(!resid || resid > front_len(con->out_msg)); 3234 sent = front_len(con->out_msg) - resid; 3235 dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid); 3236 3237 if (sent) { 3238 con->v2.out_epil.front_crc = 3239 crc32c(-1, con->out_msg->front.iov_base, sent); 3240 con->v2.out_epil.front_crc = 3241 crc32c_zeros(con->v2.out_epil.front_crc, resid); 3242 } else { 3243 con->v2.out_epil.front_crc = crc32c_zeros(-1, resid); 3244 } 3245 3246 con->v2.out_iter.count -= resid; 3247 out_zero_add(con, resid); 3248 } 3249 3250 static void prepare_zero_middle(struct ceph_connection *con, int resid) 3251 { 3252 int sent; 3253 3254 WARN_ON(!resid || resid > middle_len(con->out_msg)); 3255 sent = middle_len(con->out_msg) - resid; 3256 dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid); 3257 3258 if (sent) { 3259 con->v2.out_epil.middle_crc = 3260 crc32c(-1, con->out_msg->middle->vec.iov_base, sent); 3261 con->v2.out_epil.middle_crc = 3262 crc32c_zeros(con->v2.out_epil.middle_crc, resid); 3263 } else { 3264 con->v2.out_epil.middle_crc = crc32c_zeros(-1, resid); 3265 } 3266 3267 con->v2.out_iter.count -= resid; 3268 out_zero_add(con, resid); 3269 } 3270 3271 static void prepare_zero_data(struct ceph_connection *con) 3272 { 3273 dout("%s con %p\n", __func__, con); 3274 con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(con->out_msg)); 3275 out_zero_add(con, data_len(con->out_msg)); 3276 } 3277 3278 static void revoke_at_queue_data(struct ceph_connection *con) 3279 { 3280 int boundary; 3281 int resid; 3282 3283 WARN_ON(!data_len(con->out_msg)); 3284 WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter)); 3285 resid = iov_iter_count(&con->v2.out_iter); 3286 3287 boundary = front_len(con->out_msg) + middle_len(con->out_msg); 3288 if (resid > boundary) { 3289 resid -= boundary; 3290 WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN); 3291 
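	/*
	 * out_iter is still the kvec covering message head + front +
	 * middle here, so resid > front_len + middle_len means part of
	 * the head itself is unsent; what is left of resid after
	 * subtracting that boundary is exactly those head bytes.  E.g.
	 * with front_len 4096, middle_len 0 and 4100 bytes remaining,
	 * 4 head bytes are still owed: they go out as-is, while front,
	 * middle and data are dropped from the iter, accounted as zeros
	 * for the epilogue crcs and later streamed as zeros by
	 * queue_zeros().
	 */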
dout("%s con %p was sending head\n", __func__, con); 3292 if (front_len(con->out_msg)) 3293 prepare_zero_front(con, front_len(con->out_msg)); 3294 if (middle_len(con->out_msg)) 3295 prepare_zero_middle(con, middle_len(con->out_msg)); 3296 prepare_zero_data(con); 3297 WARN_ON(iov_iter_count(&con->v2.out_iter) != resid); 3298 con->v2.out_state = OUT_S_QUEUE_ZEROS; 3299 return; 3300 } 3301 3302 boundary = middle_len(con->out_msg); 3303 if (resid > boundary) { 3304 resid -= boundary; 3305 dout("%s con %p was sending front\n", __func__, con); 3306 prepare_zero_front(con, resid); 3307 if (middle_len(con->out_msg)) 3308 prepare_zero_middle(con, middle_len(con->out_msg)); 3309 prepare_zero_data(con); 3310 queue_zeros(con); 3311 return; 3312 } 3313 3314 WARN_ON(!resid); 3315 dout("%s con %p was sending middle\n", __func__, con); 3316 prepare_zero_middle(con, resid); 3317 prepare_zero_data(con); 3318 queue_zeros(con); 3319 } 3320 3321 static void revoke_at_queue_data_cont(struct ceph_connection *con) 3322 { 3323 int sent, resid; /* current piece of data */ 3324 3325 WARN_ON(!data_len(con->out_msg)); 3326 WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter)); 3327 resid = iov_iter_count(&con->v2.out_iter); 3328 WARN_ON(!resid || resid > con->v2.out_bvec.bv_len); 3329 sent = con->v2.out_bvec.bv_len - resid; 3330 dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid); 3331 3332 if (sent) { 3333 con->v2.out_epil.data_crc = ceph_crc32c_page( 3334 con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page, 3335 con->v2.out_bvec.bv_offset, sent); 3336 ceph_msg_data_advance(&con->v2.out_cursor, sent); 3337 } 3338 WARN_ON(resid > con->v2.out_cursor.total_resid); 3339 con->v2.out_epil.data_crc = crc32c_zeros(con->v2.out_epil.data_crc, 3340 con->v2.out_cursor.total_resid); 3341 3342 con->v2.out_iter.count -= resid; 3343 out_zero_add(con, con->v2.out_cursor.total_resid); 3344 queue_zeros(con); 3345 } 3346 3347 static void revoke_at_finish_message(struct ceph_connection *con) 3348 { 3349 int boundary; 3350 int resid; 3351 3352 WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter)); 3353 resid = iov_iter_count(&con->v2.out_iter); 3354 3355 if (!front_len(con->out_msg) && !middle_len(con->out_msg) && 3356 !data_len(con->out_msg)) { 3357 WARN_ON(!resid || resid > MESSAGE_HEAD_PLAIN_LEN); 3358 dout("%s con %p was sending head (empty message) - noop\n", 3359 __func__, con); 3360 return; 3361 } 3362 3363 boundary = front_len(con->out_msg) + middle_len(con->out_msg) + 3364 CEPH_EPILOGUE_PLAIN_LEN; 3365 if (resid > boundary) { 3366 resid -= boundary; 3367 WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN); 3368 dout("%s con %p was sending head\n", __func__, con); 3369 if (front_len(con->out_msg)) 3370 prepare_zero_front(con, front_len(con->out_msg)); 3371 if (middle_len(con->out_msg)) 3372 prepare_zero_middle(con, middle_len(con->out_msg)); 3373 con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN; 3374 WARN_ON(iov_iter_count(&con->v2.out_iter) != resid); 3375 con->v2.out_state = OUT_S_QUEUE_ZEROS; 3376 return; 3377 } 3378 3379 boundary = middle_len(con->out_msg) + CEPH_EPILOGUE_PLAIN_LEN; 3380 if (resid > boundary) { 3381 resid -= boundary; 3382 dout("%s con %p was sending front\n", __func__, con); 3383 prepare_zero_front(con, resid); 3384 if (middle_len(con->out_msg)) 3385 prepare_zero_middle(con, middle_len(con->out_msg)); 3386 con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN; 3387 queue_zeros(con); 3388 return; 3389 } 3390 3391 boundary = CEPH_EPILOGUE_PLAIN_LEN; 3392 if (resid > boundary) { 3393 resid -= boundary; 3394 dout("%s con %p was 
sending middle\n", __func__, con); 3395 prepare_zero_middle(con, resid); 3396 con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN; 3397 queue_zeros(con); 3398 return; 3399 } 3400 3401 WARN_ON(!resid); 3402 dout("%s con %p was sending epilogue - noop\n", __func__, con); 3403 } 3404 3405 void ceph_con_v2_revoke(struct ceph_connection *con) 3406 { 3407 WARN_ON(con->v2.out_zero); 3408 3409 if (con_secure(con)) { 3410 WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE && 3411 con->v2.out_state != OUT_S_FINISH_MESSAGE); 3412 dout("%s con %p secure - noop\n", __func__, con); 3413 return; 3414 } 3415 3416 switch (con->v2.out_state) { 3417 case OUT_S_QUEUE_DATA: 3418 revoke_at_queue_data(con); 3419 break; 3420 case OUT_S_QUEUE_DATA_CONT: 3421 revoke_at_queue_data_cont(con); 3422 break; 3423 case OUT_S_FINISH_MESSAGE: 3424 revoke_at_finish_message(con); 3425 break; 3426 default: 3427 WARN(1, "bad out_state %d", con->v2.out_state); 3428 break; 3429 } 3430 } 3431 3432 static void revoke_at_prepare_read_data(struct ceph_connection *con) 3433 { 3434 int remaining; 3435 int resid; 3436 3437 WARN_ON(con_secure(con)); 3438 WARN_ON(!data_len(con->in_msg)); 3439 WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter)); 3440 resid = iov_iter_count(&con->v2.in_iter); 3441 WARN_ON(!resid); 3442 3443 remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN; 3444 dout("%s con %p resid %d remaining %d\n", __func__, con, resid, 3445 remaining); 3446 con->v2.in_iter.count -= resid; 3447 set_in_skip(con, resid + remaining); 3448 con->v2.in_state = IN_S_FINISH_SKIP; 3449 } 3450 3451 static void revoke_at_prepare_read_data_cont(struct ceph_connection *con) 3452 { 3453 int recved, resid; /* current piece of data */ 3454 int remaining; 3455 3456 WARN_ON(con_secure(con)); 3457 WARN_ON(!data_len(con->in_msg)); 3458 WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter)); 3459 resid = iov_iter_count(&con->v2.in_iter); 3460 WARN_ON(!resid || resid > con->v2.in_bvec.bv_len); 3461 recved = con->v2.in_bvec.bv_len - resid; 3462 dout("%s con %p recved %d resid %d\n", __func__, con, recved, resid); 3463 3464 if (recved) 3465 ceph_msg_data_advance(&con->v2.in_cursor, recved); 3466 WARN_ON(resid > con->v2.in_cursor.total_resid); 3467 3468 remaining = CEPH_EPILOGUE_PLAIN_LEN; 3469 dout("%s con %p total_resid %zu remaining %d\n", __func__, con, 3470 con->v2.in_cursor.total_resid, remaining); 3471 con->v2.in_iter.count -= resid; 3472 set_in_skip(con, con->v2.in_cursor.total_resid + remaining); 3473 con->v2.in_state = IN_S_FINISH_SKIP; 3474 } 3475 3476 static void revoke_at_prepare_read_enc_page(struct ceph_connection *con) 3477 { 3478 int resid; /* current enc page (not necessarily data) */ 3479 3480 WARN_ON(!con_secure(con)); 3481 WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter)); 3482 resid = iov_iter_count(&con->v2.in_iter); 3483 WARN_ON(!resid || resid > con->v2.in_bvec.bv_len); 3484 3485 dout("%s con %p resid %d enc_resid %d\n", __func__, con, resid, 3486 con->v2.in_enc_resid); 3487 con->v2.in_iter.count -= resid; 3488 set_in_skip(con, resid + con->v2.in_enc_resid); 3489 con->v2.in_state = IN_S_FINISH_SKIP; 3490 } 3491 3492 static void revoke_at_handle_epilogue(struct ceph_connection *con) 3493 { 3494 int resid; 3495 3496 resid = iov_iter_count(&con->v2.in_iter); 3497 WARN_ON(!resid); 3498 3499 dout("%s con %p resid %d\n", __func__, con, resid); 3500 con->v2.in_iter.count -= resid; 3501 set_in_skip(con, resid); 3502 con->v2.in_state = IN_S_FINISH_SKIP; 3503 } 3504 3505 void ceph_con_v2_revoke_incoming(struct ceph_connection *con) 3506 { 3507 switch 
(con->v2.in_state) { 3508 case IN_S_PREPARE_READ_DATA: 3509 revoke_at_prepare_read_data(con); 3510 break; 3511 case IN_S_PREPARE_READ_DATA_CONT: 3512 revoke_at_prepare_read_data_cont(con); 3513 break; 3514 case IN_S_PREPARE_READ_ENC_PAGE: 3515 revoke_at_prepare_read_enc_page(con); 3516 break; 3517 case IN_S_HANDLE_EPILOGUE: 3518 revoke_at_handle_epilogue(con); 3519 break; 3520 default: 3521 WARN(1, "bad in_state %d", con->v2.in_state); 3522 break; 3523 } 3524 } 3525 3526 bool ceph_con_v2_opened(struct ceph_connection *con) 3527 { 3528 return con->v2.peer_global_seq; 3529 } 3530 3531 void ceph_con_v2_reset_session(struct ceph_connection *con) 3532 { 3533 con->v2.client_cookie = 0; 3534 con->v2.server_cookie = 0; 3535 con->v2.global_seq = 0; 3536 con->v2.connect_seq = 0; 3537 con->v2.peer_global_seq = 0; 3538 } 3539 3540 void ceph_con_v2_reset_protocol(struct ceph_connection *con) 3541 { 3542 iov_iter_truncate(&con->v2.in_iter, 0); 3543 iov_iter_truncate(&con->v2.out_iter, 0); 3544 con->v2.out_zero = 0; 3545 3546 clear_in_sign_kvecs(con); 3547 clear_out_sign_kvecs(con); 3548 free_conn_bufs(con); 3549 3550 if (con->v2.in_enc_pages) { 3551 WARN_ON(!con->v2.in_enc_page_cnt); 3552 ceph_release_page_vector(con->v2.in_enc_pages, 3553 con->v2.in_enc_page_cnt); 3554 con->v2.in_enc_pages = NULL; 3555 con->v2.in_enc_page_cnt = 0; 3556 } 3557 if (con->v2.out_enc_pages) { 3558 WARN_ON(!con->v2.out_enc_page_cnt); 3559 ceph_release_page_vector(con->v2.out_enc_pages, 3560 con->v2.out_enc_page_cnt); 3561 con->v2.out_enc_pages = NULL; 3562 con->v2.out_enc_page_cnt = 0; 3563 } 3564 3565 con->v2.con_mode = CEPH_CON_MODE_UNKNOWN; 3566 memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN); 3567 memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN); 3568 3569 if (con->v2.hmac_tfm) { 3570 crypto_free_shash(con->v2.hmac_tfm); 3571 con->v2.hmac_tfm = NULL; 3572 } 3573 if (con->v2.gcm_req) { 3574 aead_request_free(con->v2.gcm_req); 3575 con->v2.gcm_req = NULL; 3576 } 3577 if (con->v2.gcm_tfm) { 3578 crypto_free_aead(con->v2.gcm_tfm); 3579 con->v2.gcm_tfm = NULL; 3580 } 3581 } 3582
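/*
 * A hypothetical, untested sketch (kept out of the build, hence the
 * #if 0) of the invariant crc32c_zeros() relies on: checksumming the
 * shared ceph_zero_page in PAGE_SIZE chunks yields the same crc32c as
 * checksumming one contiguous zero-filled buffer of the same length.
 * That is what lets the revoke paths above patch up the epilogue crcs
 * without allocating or copying the revoked bytes.
 */
#if 0
static bool crc32c_zeros_matches(u32 crc, const void *zero_buf, int zero_len)
{
	/* zero_buf is assumed to contain zero_len zero bytes */
	return crc32c_zeros(crc, zero_len) == crc32c(crc, zero_buf, zero_len);
}
#endif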