xref: /openbmc/linux/net/ceph/messenger_v2.c (revision dfe94d40)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Ceph msgr2 protocol implementation
4  *
5  * Copyright (C) 2020 Ilya Dryomov <idryomov@gmail.com>
6  */
7 
8 #include <linux/ceph/ceph_debug.h>
9 
10 #include <crypto/aead.h>
11 #include <crypto/algapi.h>  /* for crypto_memneq() */
12 #include <crypto/hash.h>
13 #include <crypto/sha2.h>
14 #include <linux/bvec.h>
15 #include <linux/crc32c.h>
16 #include <linux/net.h>
17 #include <linux/scatterlist.h>
18 #include <linux/socket.h>
19 #include <linux/sched/mm.h>
20 #include <net/sock.h>
21 #include <net/tcp.h>
22 
23 #include <linux/ceph/ceph_features.h>
24 #include <linux/ceph/decode.h>
25 #include <linux/ceph/libceph.h>
26 #include <linux/ceph/messenger.h>
27 
28 #include "crypto.h"  /* for CEPH_KEY_LEN and CEPH_MAX_CON_SECRET_LEN */
29 
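/* msgr2 frame tags, encoded into the preamble of each on-wire frame */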
30 #define FRAME_TAG_HELLO			1
31 #define FRAME_TAG_AUTH_REQUEST		2
32 #define FRAME_TAG_AUTH_BAD_METHOD	3
33 #define FRAME_TAG_AUTH_REPLY_MORE	4
34 #define FRAME_TAG_AUTH_REQUEST_MORE	5
35 #define FRAME_TAG_AUTH_DONE		6
36 #define FRAME_TAG_AUTH_SIGNATURE	7
37 #define FRAME_TAG_CLIENT_IDENT		8
38 #define FRAME_TAG_SERVER_IDENT		9
39 #define FRAME_TAG_IDENT_MISSING_FEATURES 10
40 #define FRAME_TAG_SESSION_RECONNECT	11
41 #define FRAME_TAG_SESSION_RESET		12
42 #define FRAME_TAG_SESSION_RETRY		13
43 #define FRAME_TAG_SESSION_RETRY_GLOBAL	14
44 #define FRAME_TAG_SESSION_RECONNECT_OK	15
45 #define FRAME_TAG_WAIT			16
46 #define FRAME_TAG_MESSAGE		17
47 #define FRAME_TAG_KEEPALIVE2		18
48 #define FRAME_TAG_KEEPALIVE2_ACK	19
49 #define FRAME_TAG_ACK			20
50 
51 #define FRAME_LATE_STATUS_ABORTED	0x1
52 #define FRAME_LATE_STATUS_COMPLETE	0xe
53 #define FRAME_LATE_STATUS_ABORTED_MASK	0xf
54 
55 #define IN_S_HANDLE_PREAMBLE		1
56 #define IN_S_HANDLE_CONTROL		2
57 #define IN_S_HANDLE_CONTROL_REMAINDER	3
58 #define IN_S_PREPARE_READ_DATA		4
59 #define IN_S_PREPARE_READ_DATA_CONT	5
60 #define IN_S_HANDLE_EPILOGUE		6
61 #define IN_S_FINISH_SKIP		7
62 
63 #define OUT_S_QUEUE_DATA		1
64 #define OUT_S_QUEUE_DATA_CONT		2
65 #define OUT_S_QUEUE_ENC_PAGE		3
66 #define OUT_S_QUEUE_ZEROS		4
67 #define OUT_S_FINISH_MESSAGE		5
68 #define OUT_S_GET_NEXT			6
69 
70 #define CTRL_BODY(p)	((void *)(p) + CEPH_PREAMBLE_LEN)
71 #define FRONT_PAD(p)	((void *)(p) + CEPH_EPILOGUE_SECURE_LEN)
72 #define MIDDLE_PAD(p)	(FRONT_PAD(p) + CEPH_GCM_BLOCK_LEN)
73 #define DATA_PAD(p)	(MIDDLE_PAD(p) + CEPH_GCM_BLOCK_LEN)
74 
75 #define CEPH_MSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
76 
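/*
 * Receive into @it until it is drained.  A local copy of the iterator
 * is handed to sock_recvmsg(); the caller's iterator is advanced
 * explicitly so that partial progress is preserved across -EAGAIN.
 */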
77 static int do_recvmsg(struct socket *sock, struct iov_iter *it)
78 {
79 	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
80 	int ret;
81 
82 	msg.msg_iter = *it;
83 	while (iov_iter_count(it)) {
84 		ret = sock_recvmsg(sock, &msg, msg.msg_flags);
85 		if (ret <= 0) {
86 			if (ret == -EAGAIN)
87 				ret = 0;
88 			return ret;
89 		}
90 
91 		iov_iter_advance(it, ret);
92 	}
93 
94 	WARN_ON(msg_data_left(&msg));
95 	return 1;
96 }
97 
98 /*
99  * Read as much as possible.
100  *
101  * Return:
102  *   1 - done, nothing (else) to read
103  *   0 - socket is empty, need to wait
104  *  <0 - error
105  */
106 static int ceph_tcp_recv(struct ceph_connection *con)
107 {
108 	int ret;
109 
110 	dout("%s con %p %s %zu\n", __func__, con,
111 	     iov_iter_is_discard(&con->v2.in_iter) ? "discard" : "need",
112 	     iov_iter_count(&con->v2.in_iter));
113 	ret = do_recvmsg(con->sock, &con->v2.in_iter);
114 	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
115 	     iov_iter_count(&con->v2.in_iter));
116 	return ret;
117 }
118 
119 static int do_sendmsg(struct socket *sock, struct iov_iter *it)
120 {
121 	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
122 	int ret;
123 
124 	msg.msg_iter = *it;
125 	while (iov_iter_count(it)) {
126 		ret = sock_sendmsg(sock, &msg);
127 		if (ret <= 0) {
128 			if (ret == -EAGAIN)
129 				ret = 0;
130 			return ret;
131 		}
132 
133 		iov_iter_advance(it, ret);
134 	}
135 
136 	WARN_ON(msg_data_left(&msg));
137 	return 1;
138 }
139 
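/*
 * Zero-copy variant of do_sendmsg(): walk the bvec iterator and hand
 * each fragment to ->sendpage(), falling back to sock_sendmsg() for
 * pages that sendpage_ok() rejects.  Same return convention as
 * do_sendmsg().
 */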
140 static int do_try_sendpage(struct socket *sock, struct iov_iter *it)
141 {
142 	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
143 	struct bio_vec bv;
144 	int ret;
145 
146 	if (WARN_ON(!iov_iter_is_bvec(it)))
147 		return -EINVAL;
148 
149 	while (iov_iter_count(it)) {
150 		/* iov_iter_iovec() for ITER_BVEC */
151 		bv.bv_page = it->bvec->bv_page;
152 		bv.bv_offset = it->bvec->bv_offset + it->iov_offset;
153 		bv.bv_len = min(iov_iter_count(it),
154 				it->bvec->bv_len - it->iov_offset);
155 
156 		/*
157 		 * sendpage cannot properly handle pages with
158 		 * page_count == 0; we need to fall back to sendmsg in
159 		 * that case.
160 		 *
161 		 * The same goes for slab pages: skb_can_coalesce() allows
162 		 * coalescing neighboring slab objects into a single frag,
163 		 * which triggers one of the hardened usercopy checks.
164 		 */
165 		if (sendpage_ok(bv.bv_page)) {
166 			ret = sock->ops->sendpage(sock, bv.bv_page,
167 						  bv.bv_offset, bv.bv_len,
168 						  CEPH_MSG_FLAGS);
169 		} else {
170 			iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, bv.bv_len);
171 			ret = sock_sendmsg(sock, &msg);
172 		}
173 		if (ret <= 0) {
174 			if (ret == -EAGAIN)
175 				ret = 0;
176 			return ret;
177 		}
178 
179 		iov_iter_advance(it, ret);
180 	}
181 
182 	return 1;
183 }
184 
185 /*
186  * Write as much as possible.  The socket is expected to be corked,
187  * so we don't bother with MSG_MORE/MSG_SENDPAGE_NOTLAST here.
188  *
189  * Return:
190  *   1 - done, nothing (else) to write
191  *   0 - socket is full, need to wait
192  *  <0 - error
193  */
194 static int ceph_tcp_send(struct ceph_connection *con)
195 {
196 	int ret;
197 
198 	dout("%s con %p have %zu try_sendpage %d\n", __func__, con,
199 	     iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage);
200 	if (con->v2.out_iter_sendpage)
201 		ret = do_try_sendpage(con->sock, &con->v2.out_iter);
202 	else
203 		ret = do_sendmsg(con->sock, &con->v2.out_iter);
204 	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
205 	     iov_iter_count(&con->v2.out_iter));
206 	return ret;
207 }
208 
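/*
 * Helpers for building the input and output iterators.  add_*_kvec()
 * append to the kvec array and bump the live iov_iter in place,
 * reset_*_kvecs() rewind to an empty kvec iterator, while set_*_bvec()
 * and set_in_skip() switch the iterator to a single bvec or a discard
 * region respectively.
 */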
209 static void add_in_kvec(struct ceph_connection *con, void *buf, int len)
210 {
211 	BUG_ON(con->v2.in_kvec_cnt >= ARRAY_SIZE(con->v2.in_kvecs));
212 	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
213 
214 	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_base = buf;
215 	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_len = len;
216 	con->v2.in_kvec_cnt++;
217 
218 	con->v2.in_iter.nr_segs++;
219 	con->v2.in_iter.count += len;
220 }
221 
222 static void reset_in_kvecs(struct ceph_connection *con)
223 {
224 	WARN_ON(iov_iter_count(&con->v2.in_iter));
225 
226 	con->v2.in_kvec_cnt = 0;
227 	iov_iter_kvec(&con->v2.in_iter, READ, con->v2.in_kvecs, 0, 0);
228 }
229 
230 static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
231 {
232 	WARN_ON(iov_iter_count(&con->v2.in_iter));
233 
234 	con->v2.in_bvec = *bv;
235 	iov_iter_bvec(&con->v2.in_iter, READ, &con->v2.in_bvec, 1, bv->bv_len);
236 }
237 
238 static void set_in_skip(struct ceph_connection *con, int len)
239 {
240 	WARN_ON(iov_iter_count(&con->v2.in_iter));
241 
242 	dout("%s con %p len %d\n", __func__, con, len);
243 	iov_iter_discard(&con->v2.in_iter, READ, len);
244 }
245 
246 static void add_out_kvec(struct ceph_connection *con, void *buf, int len)
247 {
248 	BUG_ON(con->v2.out_kvec_cnt >= ARRAY_SIZE(con->v2.out_kvecs));
249 	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
250 	WARN_ON(con->v2.out_zero);
251 
252 	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_base = buf;
253 	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_len = len;
254 	con->v2.out_kvec_cnt++;
255 
256 	con->v2.out_iter.nr_segs++;
257 	con->v2.out_iter.count += len;
258 }
259 
260 static void reset_out_kvecs(struct ceph_connection *con)
261 {
262 	WARN_ON(iov_iter_count(&con->v2.out_iter));
263 	WARN_ON(con->v2.out_zero);
264 
265 	con->v2.out_kvec_cnt = 0;
266 
267 	iov_iter_kvec(&con->v2.out_iter, WRITE, con->v2.out_kvecs, 0, 0);
268 	con->v2.out_iter_sendpage = false;
269 }
270 
271 static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv,
272 			 bool zerocopy)
273 {
274 	WARN_ON(iov_iter_count(&con->v2.out_iter));
275 	WARN_ON(con->v2.out_zero);
276 
277 	con->v2.out_bvec = *bv;
278 	con->v2.out_iter_sendpage = zerocopy;
279 	iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1,
280 		      con->v2.out_bvec.bv_len);
281 }
282 
283 static void set_out_bvec_zero(struct ceph_connection *con)
284 {
285 	WARN_ON(iov_iter_count(&con->v2.out_iter));
286 	WARN_ON(!con->v2.out_zero);
287 
288 	con->v2.out_bvec.bv_page = ceph_zero_page;
289 	con->v2.out_bvec.bv_offset = 0;
290 	con->v2.out_bvec.bv_len = min(con->v2.out_zero, (int)PAGE_SIZE);
291 	con->v2.out_iter_sendpage = true;
292 	iov_iter_bvec(&con->v2.out_iter, WRITE, &con->v2.out_bvec, 1,
293 		      con->v2.out_bvec.bv_len);
294 }
295 
296 static void out_zero_add(struct ceph_connection *con, int len)
297 {
298 	dout("%s con %p len %d\n", __func__, con, len);
299 	con->v2.out_zero += len;
300 }
301 
302 static void *alloc_conn_buf(struct ceph_connection *con, int len)
303 {
304 	void *buf;
305 
306 	dout("%s con %p len %d\n", __func__, con, len);
307 
308 	if (WARN_ON(con->v2.conn_buf_cnt >= ARRAY_SIZE(con->v2.conn_bufs)))
309 		return NULL;
310 
311 	buf = ceph_kvmalloc(len, GFP_NOIO);
312 	if (!buf)
313 		return NULL;
314 
315 	con->v2.conn_bufs[con->v2.conn_buf_cnt++] = buf;
316 	return buf;
317 }
318 
319 static void free_conn_bufs(struct ceph_connection *con)
320 {
321 	while (con->v2.conn_buf_cnt)
322 		kvfree(con->v2.conn_bufs[--con->v2.conn_buf_cnt]);
323 }
324 
325 static void add_in_sign_kvec(struct ceph_connection *con, void *buf, int len)
326 {
327 	BUG_ON(con->v2.in_sign_kvec_cnt >= ARRAY_SIZE(con->v2.in_sign_kvecs));
328 
329 	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_base = buf;
330 	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_len = len;
331 	con->v2.in_sign_kvec_cnt++;
332 }
333 
334 static void clear_in_sign_kvecs(struct ceph_connection *con)
335 {
336 	con->v2.in_sign_kvec_cnt = 0;
337 }
338 
339 static void add_out_sign_kvec(struct ceph_connection *con, void *buf, int len)
340 {
341 	BUG_ON(con->v2.out_sign_kvec_cnt >= ARRAY_SIZE(con->v2.out_sign_kvecs));
342 
343 	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_base = buf;
344 	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_len = len;
345 	con->v2.out_sign_kvec_cnt++;
346 }
347 
348 static void clear_out_sign_kvecs(struct ceph_connection *con)
349 {
350 	con->v2.out_sign_kvec_cnt = 0;
351 }
352 
353 static bool con_secure(struct ceph_connection *con)
354 {
355 	return con->v2.con_mode == CEPH_CON_MODE_SECURE;
356 }
357 
358 static int front_len(const struct ceph_msg *msg)
359 {
360 	return le32_to_cpu(msg->hdr.front_len);
361 }
362 
363 static int middle_len(const struct ceph_msg *msg)
364 {
365 	return le32_to_cpu(msg->hdr.middle_len);
366 }
367 
368 static int data_len(const struct ceph_msg *msg)
369 {
370 	return le32_to_cpu(msg->hdr.data_len);
371 }
372 
373 static bool need_padding(int len)
374 {
375 	return !IS_ALIGNED(len, CEPH_GCM_BLOCK_LEN);
376 }
377 
378 static int padded_len(int len)
379 {
380 	return ALIGN(len, CEPH_GCM_BLOCK_LEN);
381 }
382 
383 static int padding_len(int len)
384 {
385 	return padded_len(len) - len;
386 }
387 
388 /* preamble + control segment */
389 static int head_onwire_len(int ctrl_len, bool secure)
390 {
391 	int head_len;
392 	int rem_len;
393 
394 	if (secure) {
395 		head_len = CEPH_PREAMBLE_SECURE_LEN;
396 		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
397 			rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
398 			head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN;
399 		}
400 	} else {
401 		head_len = CEPH_PREAMBLE_PLAIN_LEN;
402 		if (ctrl_len)
403 			head_len += ctrl_len + CEPH_CRC_LEN;
404 	}
405 	return head_len;
406 }
407 
408 /* front, middle and data segments + epilogue */
409 static int __tail_onwire_len(int front_len, int middle_len, int data_len,
410 			     bool secure)
411 {
412 	if (!front_len && !middle_len && !data_len)
413 		return 0;
414 
415 	if (!secure)
416 		return front_len + middle_len + data_len +
417 		       CEPH_EPILOGUE_PLAIN_LEN;
418 
419 	return padded_len(front_len) + padded_len(middle_len) +
420 	       padded_len(data_len) + CEPH_EPILOGUE_SECURE_LEN;
421 }
422 
423 static int tail_onwire_len(const struct ceph_msg *msg, bool secure)
424 {
425 	return __tail_onwire_len(front_len(msg), middle_len(msg),
426 				 data_len(msg), secure);
427 }
428 
429 /* head_onwire_len(sizeof(struct ceph_msg_header2), false) */
430 #define MESSAGE_HEAD_PLAIN_LEN	(CEPH_PREAMBLE_PLAIN_LEN +		\
431 				 sizeof(struct ceph_msg_header2) +	\
432 				 CEPH_CRC_LEN)
433 
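/*
 * Segment alignments advertised in the preamble: pointer-size for the
 * control, front and middle segments, page-size for the data segment.
 */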
434 static const int frame_aligns[] = {
435 	sizeof(void *),
436 	sizeof(void *),
437 	sizeof(void *),
438 	PAGE_SIZE
439 };
440 
441 /*
442  * Discards trailing empty segments, unless there is just one segment.
443  * A frame always has at least one (possibly empty) segment.
444  */
445 static int calc_segment_count(const int *lens, int len_cnt)
446 {
447 	int i;
448 
449 	for (i = len_cnt - 1; i >= 0; i--) {
450 		if (lens[i])
451 			return i + 1;
452 	}
453 
454 	return 1;
455 }
456 
457 static void init_frame_desc(struct ceph_frame_desc *desc, int tag,
458 			    const int *lens, int len_cnt)
459 {
460 	int i;
461 
462 	memset(desc, 0, sizeof(*desc));
463 
464 	desc->fd_tag = tag;
465 	desc->fd_seg_cnt = calc_segment_count(lens, len_cnt);
466 	BUG_ON(desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT);
467 	for (i = 0; i < desc->fd_seg_cnt; i++) {
468 		desc->fd_lens[i] = lens[i];
469 		desc->fd_aligns[i] = frame_aligns[i];
470 	}
471 }
472 
473 /*
474  * Preamble crc covers everything up to itself (28 bytes) and
475  * is calculated and verified irrespective of the connection mode
476  * (i.e. even if the frame is encrypted).
477  */
478 static void encode_preamble(const struct ceph_frame_desc *desc, void *p)
479 {
480 	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
481 	void *start = p;
482 	int i;
483 
484 	memset(p, 0, CEPH_PREAMBLE_LEN);
485 
486 	ceph_encode_8(&p, desc->fd_tag);
487 	ceph_encode_8(&p, desc->fd_seg_cnt);
488 	for (i = 0; i < desc->fd_seg_cnt; i++) {
489 		ceph_encode_32(&p, desc->fd_lens[i]);
490 		ceph_encode_16(&p, desc->fd_aligns[i]);
491 	}
492 
493 	put_unaligned_le32(crc32c(0, start, crcp - start), crcp);
494 }
495 
496 static int decode_preamble(void *p, struct ceph_frame_desc *desc)
497 {
498 	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
499 	u32 crc, expected_crc;
500 	int i;
501 
502 	crc = crc32c(0, p, crcp - p);
503 	expected_crc = get_unaligned_le32(crcp);
504 	if (crc != expected_crc) {
505 		pr_err("bad preamble crc, calculated %u, expected %u\n",
506 		       crc, expected_crc);
507 		return -EBADMSG;
508 	}
509 
510 	memset(desc, 0, sizeof(*desc));
511 
512 	desc->fd_tag = ceph_decode_8(&p);
513 	desc->fd_seg_cnt = ceph_decode_8(&p);
514 	if (desc->fd_seg_cnt < 1 ||
515 	    desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT) {
516 		pr_err("bad segment count %d\n", desc->fd_seg_cnt);
517 		return -EINVAL;
518 	}
519 	for (i = 0; i < desc->fd_seg_cnt; i++) {
520 		desc->fd_lens[i] = ceph_decode_32(&p);
521 		desc->fd_aligns[i] = ceph_decode_16(&p);
522 	}
523 
524 	/*
525 	 * This would fire for FRAME_TAG_WAIT (it has one empty
526 	 * segment), but we should never get it as a client.
527 	 */
528 	if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
529 		pr_err("last segment empty\n");
530 		return -EINVAL;
531 	}
532 
533 	if (desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
534 		pr_err("control segment too big %d\n", desc->fd_lens[0]);
535 		return -EINVAL;
536 	}
537 	if (desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
538 		pr_err("front segment too big %d\n", desc->fd_lens[1]);
539 		return -EINVAL;
540 	}
541 	if (desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
542 		pr_err("middle segment too big %d\n", desc->fd_lens[2]);
543 		return -EINVAL;
544 	}
545 	if (desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
546 		pr_err("data segment too big %d\n", desc->fd_lens[3]);
547 		return -EINVAL;
548 	}
549 
550 	return 0;
551 }
552 
553 static void encode_epilogue_plain(struct ceph_connection *con, bool aborted)
554 {
555 	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
556 						 FRAME_LATE_STATUS_COMPLETE;
557 	cpu_to_le32s(&con->v2.out_epil.front_crc);
558 	cpu_to_le32s(&con->v2.out_epil.middle_crc);
559 	cpu_to_le32s(&con->v2.out_epil.data_crc);
560 }
561 
562 static void encode_epilogue_secure(struct ceph_connection *con, bool aborted)
563 {
564 	memset(&con->v2.out_epil, 0, sizeof(con->v2.out_epil));
565 	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
566 						 FRAME_LATE_STATUS_COMPLETE;
567 }
568 
569 static int decode_epilogue(void *p, u32 *front_crc, u32 *middle_crc,
570 			   u32 *data_crc)
571 {
572 	u8 late_status;
573 
574 	late_status = ceph_decode_8(&p);
575 	if ((late_status & FRAME_LATE_STATUS_ABORTED_MASK) !=
576 			FRAME_LATE_STATUS_COMPLETE) {
577 		/* we should never get an aborted message as a client */
578 		pr_err("bad late_status 0x%x\n", late_status);
579 		return -EINVAL;
580 	}
581 
582 	if (front_crc && middle_crc && data_crc) {
583 		*front_crc = ceph_decode_32(&p);
584 		*middle_crc = ceph_decode_32(&p);
585 		*data_crc = ceph_decode_32(&p);
586 	}
587 
588 	return 0;
589 }
590 
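/*
 * Convert between the msgr2 on-wire header (struct ceph_msg_header2)
 * and the msgr1-style header (struct ceph_msg_header) that the rest
 * of the messenger works with.
 */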
591 static void fill_header(struct ceph_msg_header *hdr,
592 			const struct ceph_msg_header2 *hdr2,
593 			int front_len, int middle_len, int data_len,
594 			const struct ceph_entity_name *peer_name)
595 {
596 	hdr->seq = hdr2->seq;
597 	hdr->tid = hdr2->tid;
598 	hdr->type = hdr2->type;
599 	hdr->priority = hdr2->priority;
600 	hdr->version = hdr2->version;
601 	hdr->front_len = cpu_to_le32(front_len);
602 	hdr->middle_len = cpu_to_le32(middle_len);
603 	hdr->data_len = cpu_to_le32(data_len);
604 	hdr->data_off = hdr2->data_off;
605 	hdr->src = *peer_name;
606 	hdr->compat_version = hdr2->compat_version;
607 	hdr->reserved = 0;
608 	hdr->crc = 0;
609 }
610 
611 static void fill_header2(struct ceph_msg_header2 *hdr2,
612 			 const struct ceph_msg_header *hdr, u64 ack_seq)
613 {
614 	hdr2->seq = hdr->seq;
615 	hdr2->tid = hdr->tid;
616 	hdr2->type = hdr->type;
617 	hdr2->priority = hdr->priority;
618 	hdr2->version = hdr->version;
619 	hdr2->data_pre_padding_len = 0;
620 	hdr2->data_off = hdr->data_off;
621 	hdr2->ack_seq = cpu_to_le64(ack_seq);
622 	hdr2->flags = 0;
623 	hdr2->compat_version = hdr->compat_version;
624 	hdr2->reserved = 0;
625 }
626 
627 static int verify_control_crc(struct ceph_connection *con)
628 {
629 	int ctrl_len = con->v2.in_desc.fd_lens[0];
630 	u32 crc, expected_crc;
631 
632 	WARN_ON(con->v2.in_kvecs[0].iov_len != ctrl_len);
633 	WARN_ON(con->v2.in_kvecs[1].iov_len != CEPH_CRC_LEN);
634 
635 	crc = crc32c(-1, con->v2.in_kvecs[0].iov_base, ctrl_len);
636 	expected_crc = get_unaligned_le32(con->v2.in_kvecs[1].iov_base);
637 	if (crc != expected_crc) {
638 		pr_err("bad control crc, calculated %u, expected %u\n",
639 		       crc, expected_crc);
640 		return -EBADMSG;
641 	}
642 
643 	return 0;
644 }
645 
646 static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
647 				u32 middle_crc, u32 data_crc)
648 {
649 	if (front_len(con->in_msg)) {
650 		con->in_front_crc = crc32c(-1, con->in_msg->front.iov_base,
651 					   front_len(con->in_msg));
652 	} else {
653 		WARN_ON(!middle_len(con->in_msg) && !data_len(con->in_msg));
654 		con->in_front_crc = -1;
655 	}
656 
657 	if (middle_len(con->in_msg))
658 		con->in_middle_crc = crc32c(-1,
659 					    con->in_msg->middle->vec.iov_base,
660 					    middle_len(con->in_msg));
661 	else if (data_len(con->in_msg))
662 		con->in_middle_crc = -1;
663 	else
664 		con->in_middle_crc = 0;
665 
666 	if (!data_len(con->in_msg))
667 		con->in_data_crc = 0;
668 
669 	dout("%s con %p msg %p crcs %u %u %u\n", __func__, con, con->in_msg,
670 	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
671 
672 	if (con->in_front_crc != front_crc) {
673 		pr_err("bad front crc, calculated %u, expected %u\n",
674 		       con->in_front_crc, front_crc);
675 		return -EBADMSG;
676 	}
677 	if (con->in_middle_crc != middle_crc) {
678 		pr_err("bad middle crc, calculated %u, expected %u\n",
679 		       con->in_middle_crc, middle_crc);
680 		return -EBADMSG;
681 	}
682 	if (con->in_data_crc != data_crc) {
683 		pr_err("bad data crc, calculated %u, expected %u\n",
684 		       con->in_data_crc, data_crc);
685 		return -EBADMSG;
686 	}
687 
688 	return 0;
689 }
690 
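/*
 * Set up crypto state for the negotiated connection mode: an
 * HMAC-SHA256 tfm keyed with the session key (used for auth
 * signatures) and, for CEPH_CON_MODE_SECURE, an AES-GCM AEAD keyed
 * from con_secret, with the remaining bytes seeding the rx and tx
 * nonces.
 */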
691 static int setup_crypto(struct ceph_connection *con,
692 			u8 *session_key, int session_key_len,
693 			u8 *con_secret, int con_secret_len)
694 {
695 	unsigned int noio_flag;
696 	void *p;
697 	int ret;
698 
699 	dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
700 	     __func__, con, con->v2.con_mode, session_key_len, con_secret_len);
701 	WARN_ON(con->v2.hmac_tfm || con->v2.gcm_tfm || con->v2.gcm_req);
702 
703 	if (con->v2.con_mode != CEPH_CON_MODE_CRC &&
704 	    con->v2.con_mode != CEPH_CON_MODE_SECURE) {
705 		pr_err("bad con_mode %d\n", con->v2.con_mode);
706 		return -EINVAL;
707 	}
708 
709 	if (!session_key_len) {
710 		WARN_ON(con->v2.con_mode != CEPH_CON_MODE_CRC);
711 		WARN_ON(con_secret_len);
712 		return 0;  /* auth_none */
713 	}
714 
715 	noio_flag = memalloc_noio_save();
716 	con->v2.hmac_tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
717 	memalloc_noio_restore(noio_flag);
718 	if (IS_ERR(con->v2.hmac_tfm)) {
719 		ret = PTR_ERR(con->v2.hmac_tfm);
720 		con->v2.hmac_tfm = NULL;
721 		pr_err("failed to allocate hmac tfm context: %d\n", ret);
722 		return ret;
723 	}
724 
725 	WARN_ON((unsigned long)session_key &
726 		crypto_shash_alignmask(con->v2.hmac_tfm));
727 	ret = crypto_shash_setkey(con->v2.hmac_tfm, session_key,
728 				  session_key_len);
729 	if (ret) {
730 		pr_err("failed to set hmac key: %d\n", ret);
731 		return ret;
732 	}
733 
734 	if (con->v2.con_mode == CEPH_CON_MODE_CRC) {
735 		WARN_ON(con_secret_len);
736 		return 0;  /* auth_x, plain mode */
737 	}
738 
739 	if (con_secret_len < CEPH_GCM_KEY_LEN + 2 * CEPH_GCM_IV_LEN) {
740 		pr_err("con_secret too small %d\n", con_secret_len);
741 		return -EINVAL;
742 	}
743 
744 	noio_flag = memalloc_noio_save();
745 	con->v2.gcm_tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
746 	memalloc_noio_restore(noio_flag);
747 	if (IS_ERR(con->v2.gcm_tfm)) {
748 		ret = PTR_ERR(con->v2.gcm_tfm);
749 		con->v2.gcm_tfm = NULL;
750 		pr_err("failed to allocate gcm tfm context: %d\n", ret);
751 		return ret;
752 	}
753 
754 	p = con_secret;
755 	WARN_ON((unsigned long)p & crypto_aead_alignmask(con->v2.gcm_tfm));
756 	ret = crypto_aead_setkey(con->v2.gcm_tfm, p, CEPH_GCM_KEY_LEN);
757 	if (ret) {
758 		pr_err("failed to set gcm key: %d\n", ret);
759 		return ret;
760 	}
761 
762 	p += CEPH_GCM_KEY_LEN;
763 	WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
764 	ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
765 	if (ret) {
766 		pr_err("failed to set gcm tag size: %d\n", ret);
767 		return ret;
768 	}
769 
770 	con->v2.gcm_req = aead_request_alloc(con->v2.gcm_tfm, GFP_NOIO);
771 	if (!con->v2.gcm_req) {
772 		pr_err("failed to allocate gcm request\n");
773 		return -ENOMEM;
774 	}
775 
776 	crypto_init_wait(&con->v2.gcm_wait);
777 	aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
778 				  crypto_req_done, &con->v2.gcm_wait);
779 
780 	memcpy(&con->v2.in_gcm_nonce, p, CEPH_GCM_IV_LEN);
781 	memcpy(&con->v2.out_gcm_nonce, p + CEPH_GCM_IV_LEN, CEPH_GCM_IV_LEN);
782 	return 0;  /* auth_x, secure mode */
783 }
784 
785 static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
786 		       int kvec_cnt, u8 *hmac)
787 {
788 	SHASH_DESC_ON_STACK(desc, con->v2.hmac_tfm);  /* tfm arg is ignored */
789 	int ret;
790 	int i;
791 
792 	dout("%s con %p hmac_tfm %p kvec_cnt %d\n", __func__, con,
793 	     con->v2.hmac_tfm, kvec_cnt);
794 
795 	if (!con->v2.hmac_tfm) {
796 		memset(hmac, 0, SHA256_DIGEST_SIZE);
797 		return 0;  /* auth_none */
798 	}
799 
800 	desc->tfm = con->v2.hmac_tfm;
801 	ret = crypto_shash_init(desc);
802 	if (ret)
803 		return ret;
804 
805 	for (i = 0; i < kvec_cnt; i++) {
806 		WARN_ON((unsigned long)kvecs[i].iov_base &
807 			crypto_shash_alignmask(con->v2.hmac_tfm));
808 		ret = crypto_shash_update(desc, kvecs[i].iov_base,
809 					  kvecs[i].iov_len);
810 		if (ret)
811 			return ret;
812 	}
813 
814 	ret = crypto_shash_final(desc, hmac);
815 	if (ret)
816 		return ret;
817 
818 	shash_desc_zero(desc);
819 	return 0;  /* auth_x, both plain and secure modes */
820 }
821 
822 static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
823 {
824 	u64 counter;
825 
826 	counter = le64_to_cpu(nonce->counter);
827 	nonce->counter = cpu_to_le64(counter + 1);
828 }
829 
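/*
 * One-shot AES-GCM encrypt or decrypt over the given scatterlists,
 * using the tx nonce for encryption and the rx nonce for decryption.
 * The nonce counter is bumped only after a successful operation.
 */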
830 static int gcm_crypt(struct ceph_connection *con, bool encrypt,
831 		     struct scatterlist *src, struct scatterlist *dst,
832 		     int src_len)
833 {
834 	struct ceph_gcm_nonce *nonce;
835 	int ret;
836 
837 	nonce = encrypt ? &con->v2.out_gcm_nonce : &con->v2.in_gcm_nonce;
838 
839 	aead_request_set_ad(con->v2.gcm_req, 0);  /* no AAD */
840 	aead_request_set_crypt(con->v2.gcm_req, src, dst, src_len, (u8 *)nonce);
841 	ret = crypto_wait_req(encrypt ? crypto_aead_encrypt(con->v2.gcm_req) :
842 					crypto_aead_decrypt(con->v2.gcm_req),
843 			      &con->v2.gcm_wait);
844 	if (ret)
845 		return ret;
846 
847 	gcm_inc_nonce(nonce);
848 	return 0;
849 }
850 
851 static void get_bvec_at(struct ceph_msg_data_cursor *cursor,
852 			struct bio_vec *bv)
853 {
854 	struct page *page;
855 	size_t off, len;
856 
857 	WARN_ON(!cursor->total_resid);
858 
859 	/* skip zero-length data items */
860 	while (!cursor->resid)
861 		ceph_msg_data_advance(cursor, 0);
862 
863 	/* get a piece of data, cursor isn't advanced */
864 	page = ceph_msg_data_next(cursor, &off, &len, NULL);
865 
866 	bv->bv_page = page;
867 	bv->bv_offset = off;
868 	bv->bv_len = len;
869 }
870 
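/*
 * Scatterlist helpers for secure mode: count and populate sg entries
 * for a contiguous (possibly vmalloc'ed) buffer or a data cursor,
 * appending an extra entry for GCM block padding when the length
 * isn't block-aligned.
 */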
871 static int calc_sg_cnt(void *buf, int buf_len)
872 {
873 	int sg_cnt;
874 
875 	if (!buf_len)
876 		return 0;
877 
878 	sg_cnt = need_padding(buf_len) ? 1 : 0;
879 	if (is_vmalloc_addr(buf)) {
880 		WARN_ON(offset_in_page(buf));
881 		sg_cnt += PAGE_ALIGN(buf_len) >> PAGE_SHIFT;
882 	} else {
883 		sg_cnt++;
884 	}
885 
886 	return sg_cnt;
887 }
888 
889 static int calc_sg_cnt_cursor(struct ceph_msg_data_cursor *cursor)
890 {
891 	int data_len = cursor->total_resid;
892 	struct bio_vec bv;
893 	int sg_cnt;
894 
895 	if (!data_len)
896 		return 0;
897 
898 	sg_cnt = need_padding(data_len) ? 1 : 0;
899 	do {
900 		get_bvec_at(cursor, &bv);
901 		sg_cnt++;
902 
903 		ceph_msg_data_advance(cursor, bv.bv_len);
904 	} while (cursor->total_resid);
905 
906 	return sg_cnt;
907 }
908 
909 static void init_sgs(struct scatterlist **sg, void *buf, int buf_len, u8 *pad)
910 {
911 	void *end = buf + buf_len;
912 	struct page *page;
913 	int len;
914 	void *p;
915 
916 	if (!buf_len)
917 		return;
918 
919 	if (is_vmalloc_addr(buf)) {
920 		p = buf;
921 		do {
922 			page = vmalloc_to_page(p);
923 			len = min_t(int, end - p, PAGE_SIZE);
924 			WARN_ON(!page || !len || offset_in_page(p));
925 			sg_set_page(*sg, page, len, 0);
926 			*sg = sg_next(*sg);
927 			p += len;
928 		} while (p != end);
929 	} else {
930 		sg_set_buf(*sg, buf, buf_len);
931 		*sg = sg_next(*sg);
932 	}
933 
934 	if (need_padding(buf_len)) {
935 		sg_set_buf(*sg, pad, padding_len(buf_len));
936 		*sg = sg_next(*sg);
937 	}
938 }
939 
940 static void init_sgs_cursor(struct scatterlist **sg,
941 			    struct ceph_msg_data_cursor *cursor, u8 *pad)
942 {
943 	int data_len = cursor->total_resid;
944 	struct bio_vec bv;
945 
946 	if (!data_len)
947 		return;
948 
949 	do {
950 		get_bvec_at(cursor, &bv);
951 		sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
952 		*sg = sg_next(*sg);
953 
954 		ceph_msg_data_advance(cursor, bv.bv_len);
955 	} while (cursor->total_resid);
956 
957 	if (need_padding(data_len)) {
958 		sg_set_buf(*sg, pad, padding_len(data_len));
959 		*sg = sg_next(*sg);
960 	}
961 }
962 
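/*
 * Build a single sg table covering the message tail: front, middle
 * and data segments (each followed by its padding, if needed) and the
 * epilogue, optionally with room for the auth tag.  Used both for
 * encrypting outgoing and decrypting incoming secure messages.
 */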
963 static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
964 			     u8 *front_pad, u8 *middle_pad, u8 *data_pad,
965 			     void *epilogue, bool add_tag)
966 {
967 	struct ceph_msg_data_cursor cursor;
968 	struct scatterlist *cur_sg;
969 	int sg_cnt;
970 	int ret;
971 
972 	if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
973 		return 0;
974 
975 	sg_cnt = 1;  /* epilogue + [auth tag] */
976 	if (front_len(msg))
977 		sg_cnt += calc_sg_cnt(msg->front.iov_base,
978 				      front_len(msg));
979 	if (middle_len(msg))
980 		sg_cnt += calc_sg_cnt(msg->middle->vec.iov_base,
981 				      middle_len(msg));
982 	if (data_len(msg)) {
983 		ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
984 		sg_cnt += calc_sg_cnt_cursor(&cursor);
985 	}
986 
987 	ret = sg_alloc_table(sgt, sg_cnt, GFP_NOIO);
988 	if (ret)
989 		return ret;
990 
991 	cur_sg = sgt->sgl;
992 	if (front_len(msg))
993 		init_sgs(&cur_sg, msg->front.iov_base, front_len(msg),
994 			 front_pad);
995 	if (middle_len(msg))
996 		init_sgs(&cur_sg, msg->middle->vec.iov_base, middle_len(msg),
997 			 middle_pad);
998 	if (data_len(msg)) {
999 		ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
1000 		init_sgs_cursor(&cur_sg, &cursor, data_pad);
1001 	}
1002 
1003 	WARN_ON(!sg_is_last(cur_sg));
1004 	sg_set_buf(cur_sg, epilogue,
1005 		   CEPH_GCM_BLOCK_LEN + (add_tag ? CEPH_GCM_TAG_LEN : 0));
1006 	return 0;
1007 }
1008 
1009 static int decrypt_preamble(struct ceph_connection *con)
1010 {
1011 	struct scatterlist sg;
1012 
1013 	sg_init_one(&sg, con->v2.in_buf, CEPH_PREAMBLE_SECURE_LEN);
1014 	return gcm_crypt(con, false, &sg, &sg, CEPH_PREAMBLE_SECURE_LEN);
1015 }
1016 
1017 static int decrypt_control_remainder(struct ceph_connection *con)
1018 {
1019 	int ctrl_len = con->v2.in_desc.fd_lens[0];
1020 	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1021 	int pt_len = padding_len(rem_len) + CEPH_GCM_TAG_LEN;
1022 	struct scatterlist sgs[2];
1023 
1024 	WARN_ON(con->v2.in_kvecs[0].iov_len != rem_len);
1025 	WARN_ON(con->v2.in_kvecs[1].iov_len != pt_len);
1026 
1027 	sg_init_table(sgs, 2);
1028 	sg_set_buf(&sgs[0], con->v2.in_kvecs[0].iov_base, rem_len);
1029 	sg_set_buf(&sgs[1], con->v2.in_buf, pt_len);
1030 
1031 	return gcm_crypt(con, false, sgs, sgs,
1032 			 padded_len(rem_len) + CEPH_GCM_TAG_LEN);
1033 }
1034 
1035 static int decrypt_message(struct ceph_connection *con)
1036 {
1037 	struct sg_table sgt = {};
1038 	int ret;
1039 
1040 	ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
1041 			MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
1042 			con->v2.in_buf, true);
1043 	if (ret)
1044 		goto out;
1045 
1046 	ret = gcm_crypt(con, false, sgt.sgl, sgt.sgl,
1047 			tail_onwire_len(con->in_msg, true));
1048 
1049 out:
1050 	sg_free_table(&sgt);
1051 	return ret;
1052 }
1053 
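/*
 * Queue the outgoing banner: the CEPH_BANNER_V2 string, a 16-bit
 * payload length and the supported/required msgr2 feature bits.
 */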
1054 static int prepare_banner(struct ceph_connection *con)
1055 {
1056 	int buf_len = CEPH_BANNER_V2_LEN + 2 + 8 + 8;
1057 	void *buf, *p;
1058 
1059 	buf = alloc_conn_buf(con, buf_len);
1060 	if (!buf)
1061 		return -ENOMEM;
1062 
1063 	p = buf;
1064 	ceph_encode_copy(&p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN);
1065 	ceph_encode_16(&p, sizeof(u64) + sizeof(u64));
1066 	ceph_encode_64(&p, CEPH_MSGR2_SUPPORTED_FEATURES);
1067 	ceph_encode_64(&p, CEPH_MSGR2_REQUIRED_FEATURES);
1068 	WARN_ON(p != buf + buf_len);
1069 
1070 	add_out_kvec(con, buf, buf_len);
1071 	add_out_sign_kvec(con, buf, buf_len);
1072 	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1073 	return 0;
1074 }
1075 
1076 /*
1077  * base:
1078  *   preamble
1079  *   control body (ctrl_len bytes)
1080  *   space for control crc
1081  *
1082  * extdata (optional):
1083  *   control body (extdata_len bytes)
1084  *
1085  * Compute control crc and gather base and extdata into:
1086  *
1087  *   preamble
1088  *   control body (ctrl_len + extdata_len bytes)
1089  *   control crc
1090  *
1091  * Preamble should already be encoded at the start of base.
1092  */
1093 static void prepare_head_plain(struct ceph_connection *con, void *base,
1094 			       int ctrl_len, void *extdata, int extdata_len,
1095 			       bool to_be_signed)
1096 {
1097 	int base_len = CEPH_PREAMBLE_LEN + ctrl_len + CEPH_CRC_LEN;
1098 	void *crcp = base + base_len - CEPH_CRC_LEN;
1099 	u32 crc;
1100 
1101 	crc = crc32c(-1, CTRL_BODY(base), ctrl_len);
1102 	if (extdata_len)
1103 		crc = crc32c(crc, extdata, extdata_len);
1104 	put_unaligned_le32(crc, crcp);
1105 
1106 	if (!extdata_len) {
1107 		add_out_kvec(con, base, base_len);
1108 		if (to_be_signed)
1109 			add_out_sign_kvec(con, base, base_len);
1110 		return;
1111 	}
1112 
1113 	add_out_kvec(con, base, crcp - base);
1114 	add_out_kvec(con, extdata, extdata_len);
1115 	add_out_kvec(con, crcp, CEPH_CRC_LEN);
1116 	if (to_be_signed) {
1117 		add_out_sign_kvec(con, base, crcp - base);
1118 		add_out_sign_kvec(con, extdata, extdata_len);
1119 		add_out_sign_kvec(con, crcp, CEPH_CRC_LEN);
1120 	}
1121 }
1122 
1123 static int prepare_head_secure_small(struct ceph_connection *con,
1124 				     void *base, int ctrl_len)
1125 {
1126 	struct scatterlist sg;
1127 	int ret;
1128 
1129 	/* inline buffer padding? */
1130 	if (ctrl_len < CEPH_PREAMBLE_INLINE_LEN)
1131 		memset(CTRL_BODY(base) + ctrl_len, 0,
1132 		       CEPH_PREAMBLE_INLINE_LEN - ctrl_len);
1133 
1134 	sg_init_one(&sg, base, CEPH_PREAMBLE_SECURE_LEN);
1135 	ret = gcm_crypt(con, true, &sg, &sg,
1136 			CEPH_PREAMBLE_SECURE_LEN - CEPH_GCM_TAG_LEN);
1137 	if (ret)
1138 		return ret;
1139 
1140 	add_out_kvec(con, base, CEPH_PREAMBLE_SECURE_LEN);
1141 	return 0;
1142 }
1143 
1144 /*
1145  * base:
1146  *   preamble
1147  *   control body (ctrl_len bytes)
1148  *   space for padding, if needed
1149  *   space for control remainder auth tag
1150  *   space for preamble auth tag
1151  *
1152  * Encrypt preamble and the inline portion, then encrypt the remainder
1153  * and gather into:
1154  *
1155  *   preamble
1156  *   control body (48 bytes)
1157  *   preamble auth tag
1158  *   control body (ctrl_len - 48 bytes)
1159  *   zero padding, if needed
1160  *   control remainder auth tag
1161  *
1162  * Preamble should already be encoded at the start of base.
1163  */
1164 static int prepare_head_secure_big(struct ceph_connection *con,
1165 				   void *base, int ctrl_len)
1166 {
1167 	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1168 	void *rem = CTRL_BODY(base) + CEPH_PREAMBLE_INLINE_LEN;
1169 	void *rem_tag = rem + padded_len(rem_len);
1170 	void *pmbl_tag = rem_tag + CEPH_GCM_TAG_LEN;
1171 	struct scatterlist sgs[2];
1172 	int ret;
1173 
1174 	sg_init_table(sgs, 2);
1175 	sg_set_buf(&sgs[0], base, rem - base);
1176 	sg_set_buf(&sgs[1], pmbl_tag, CEPH_GCM_TAG_LEN);
1177 	ret = gcm_crypt(con, true, sgs, sgs, rem - base);
1178 	if (ret)
1179 		return ret;
1180 
1181 	/* control remainder padding? */
1182 	if (need_padding(rem_len))
1183 		memset(rem + rem_len, 0, padding_len(rem_len));
1184 
1185 	sg_init_one(&sgs[0], rem, pmbl_tag - rem);
1186 	ret = gcm_crypt(con, true, sgs, sgs, rem_tag - rem);
1187 	if (ret)
1188 		return ret;
1189 
1190 	add_out_kvec(con, base, rem - base);
1191 	add_out_kvec(con, pmbl_tag, CEPH_GCM_TAG_LEN);
1192 	add_out_kvec(con, rem, pmbl_tag - rem);
1193 	return 0;
1194 }
1195 
1196 static int __prepare_control(struct ceph_connection *con, int tag,
1197 			     void *base, int ctrl_len, void *extdata,
1198 			     int extdata_len, bool to_be_signed)
1199 {
1200 	int total_len = ctrl_len + extdata_len;
1201 	struct ceph_frame_desc desc;
1202 	int ret;
1203 
1204 	dout("%s con %p tag %d len %d (%d+%d)\n", __func__, con, tag,
1205 	     total_len, ctrl_len, extdata_len);
1206 
1207 	/* extdata may be vmalloc'ed but not base */
1208 	if (WARN_ON(is_vmalloc_addr(base) || !ctrl_len))
1209 		return -EINVAL;
1210 
1211 	init_frame_desc(&desc, tag, &total_len, 1);
1212 	encode_preamble(&desc, base);
1213 
1214 	if (con_secure(con)) {
1215 		if (WARN_ON(extdata_len || to_be_signed))
1216 			return -EINVAL;
1217 
1218 		if (ctrl_len <= CEPH_PREAMBLE_INLINE_LEN)
1219 			/* fully inlined, inline buffer may need padding */
1220 			ret = prepare_head_secure_small(con, base, ctrl_len);
1221 		else
1222 			/* partially inlined, inline buffer is full */
1223 			ret = prepare_head_secure_big(con, base, ctrl_len);
1224 		if (ret)
1225 			return ret;
1226 	} else {
1227 		prepare_head_plain(con, base, ctrl_len, extdata, extdata_len,
1228 				   to_be_signed);
1229 	}
1230 
1231 	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1232 	return 0;
1233 }
1234 
1235 static int prepare_control(struct ceph_connection *con, int tag,
1236 			   void *base, int ctrl_len)
1237 {
1238 	return __prepare_control(con, tag, base, ctrl_len, NULL, 0, false);
1239 }
1240 
1241 static int prepare_hello(struct ceph_connection *con)
1242 {
1243 	void *buf, *p;
1244 	int ctrl_len;
1245 
1246 	ctrl_len = 1 + ceph_entity_addr_encoding_len(&con->peer_addr);
1247 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1248 	if (!buf)
1249 		return -ENOMEM;
1250 
1251 	p = CTRL_BODY(buf);
1252 	ceph_encode_8(&p, CEPH_ENTITY_TYPE_CLIENT);
1253 	ceph_encode_entity_addr(&p, &con->peer_addr);
1254 	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1255 
1256 	return __prepare_control(con, FRAME_TAG_HELLO, buf, ctrl_len,
1257 				 NULL, 0, true);
1258 }
1259 
1260 /* so that head_onwire_len(AUTH_BUF_LEN, false) is 512 */
1261 #define AUTH_BUF_LEN	(512 - CEPH_CRC_LEN - CEPH_PREAMBLE_PLAIN_LEN)
1262 
1263 static int prepare_auth_request(struct ceph_connection *con)
1264 {
1265 	void *authorizer, *authorizer_copy;
1266 	int ctrl_len, authorizer_len;
1267 	void *buf;
1268 	int ret;
1269 
1270 	ctrl_len = AUTH_BUF_LEN;
1271 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1272 	if (!buf)
1273 		return -ENOMEM;
1274 
1275 	mutex_unlock(&con->mutex);
1276 	ret = con->ops->get_auth_request(con, CTRL_BODY(buf), &ctrl_len,
1277 					 &authorizer, &authorizer_len);
1278 	mutex_lock(&con->mutex);
1279 	if (con->state != CEPH_CON_S_V2_HELLO) {
1280 		dout("%s con %p state changed to %d\n", __func__, con,
1281 		     con->state);
1282 		return -EAGAIN;
1283 	}
1284 
1285 	dout("%s con %p get_auth_request ret %d\n", __func__, con, ret);
1286 	if (ret)
1287 		return ret;
1288 
1289 	authorizer_copy = alloc_conn_buf(con, authorizer_len);
1290 	if (!authorizer_copy)
1291 		return -ENOMEM;
1292 
1293 	memcpy(authorizer_copy, authorizer, authorizer_len);
1294 
1295 	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST, buf, ctrl_len,
1296 				 authorizer_copy, authorizer_len, true);
1297 }
1298 
1299 static int prepare_auth_request_more(struct ceph_connection *con,
1300 				     void *reply, int reply_len)
1301 {
1302 	int ctrl_len, authorizer_len;
1303 	void *authorizer;
1304 	void *buf;
1305 	int ret;
1306 
1307 	ctrl_len = AUTH_BUF_LEN;
1308 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1309 	if (!buf)
1310 		return -ENOMEM;
1311 
1312 	mutex_unlock(&con->mutex);
1313 	ret = con->ops->handle_auth_reply_more(con, reply, reply_len,
1314 					       CTRL_BODY(buf), &ctrl_len,
1315 					       &authorizer, &authorizer_len);
1316 	mutex_lock(&con->mutex);
1317 	if (con->state != CEPH_CON_S_V2_AUTH) {
1318 		dout("%s con %p state changed to %d\n", __func__, con,
1319 		     con->state);
1320 		return -EAGAIN;
1321 	}
1322 
1323 	dout("%s con %p handle_auth_reply_more ret %d\n", __func__, con, ret);
1324 	if (ret)
1325 		return ret;
1326 
1327 	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST_MORE, buf,
1328 				 ctrl_len, authorizer, authorizer_len, true);
1329 }
1330 
1331 static int prepare_auth_signature(struct ceph_connection *con)
1332 {
1333 	void *buf;
1334 	int ret;
1335 
1336 	buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
1337 						  con_secure(con)));
1338 	if (!buf)
1339 		return -ENOMEM;
1340 
1341 	ret = hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
1342 			  CTRL_BODY(buf));
1343 	if (ret)
1344 		return ret;
1345 
1346 	return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
1347 			       SHA256_DIGEST_SIZE);
1348 }
1349 
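/*
 * Queue a CLIENT_IDENT frame: our address, the peer's address, global
 * id, global seq, feature bits and the (possibly freshly generated)
 * client cookie.
 */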
1350 static int prepare_client_ident(struct ceph_connection *con)
1351 {
1352 	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
1353 	struct ceph_client *client = from_msgr(con->msgr);
1354 	u64 global_id = ceph_client_gid(client);
1355 	void *buf, *p;
1356 	int ctrl_len;
1357 
1358 	WARN_ON(con->v2.server_cookie);
1359 	WARN_ON(con->v2.connect_seq);
1360 	WARN_ON(con->v2.peer_global_seq);
1361 
1362 	if (!con->v2.client_cookie) {
1363 		do {
1364 			get_random_bytes(&con->v2.client_cookie,
1365 					 sizeof(con->v2.client_cookie));
1366 		} while (!con->v2.client_cookie);
1367 		dout("%s con %p generated cookie 0x%llx\n", __func__, con,
1368 		     con->v2.client_cookie);
1369 	} else {
1370 		dout("%s con %p cookie already set 0x%llx\n", __func__, con,
1371 		     con->v2.client_cookie);
1372 	}
1373 
1374 	dout("%s con %p my_addr %s/%u peer_addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx cookie 0x%llx\n",
1375 	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
1376 	     ceph_pr_addr(&con->peer_addr), le32_to_cpu(con->peer_addr.nonce),
1377 	     global_id, con->v2.global_seq, client->supported_features,
1378 	     client->required_features, con->v2.client_cookie);
1379 
1380 	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) +
1381 		   ceph_entity_addr_encoding_len(&con->peer_addr) + 6 * 8;
1382 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
1383 	if (!buf)
1384 		return -ENOMEM;
1385 
1386 	p = CTRL_BODY(buf);
1387 	ceph_encode_8(&p, 2);  /* addrvec marker */
1388 	ceph_encode_32(&p, 1);  /* addr_cnt */
1389 	ceph_encode_entity_addr(&p, my_addr);
1390 	ceph_encode_entity_addr(&p, &con->peer_addr);
1391 	ceph_encode_64(&p, global_id);
1392 	ceph_encode_64(&p, con->v2.global_seq);
1393 	ceph_encode_64(&p, client->supported_features);
1394 	ceph_encode_64(&p, client->required_features);
1395 	ceph_encode_64(&p, 0);  /* flags */
1396 	ceph_encode_64(&p, con->v2.client_cookie);
1397 	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1398 
1399 	return prepare_control(con, FRAME_TAG_CLIENT_IDENT, buf, ctrl_len);
1400 }
1401 
1402 static int prepare_session_reconnect(struct ceph_connection *con)
1403 {
1404 	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
1405 	void *buf, *p;
1406 	int ctrl_len;
1407 
1408 	WARN_ON(!con->v2.client_cookie);
1409 	WARN_ON(!con->v2.server_cookie);
1410 	WARN_ON(!con->v2.connect_seq);
1411 	WARN_ON(!con->v2.peer_global_seq);
1412 
1413 	dout("%s con %p my_addr %s/%u client_cookie 0x%llx server_cookie 0x%llx global_seq %llu connect_seq %llu in_seq %llu\n",
1414 	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
1415 	     con->v2.client_cookie, con->v2.server_cookie, con->v2.global_seq,
1416 	     con->v2.connect_seq, con->in_seq);
1417 
1418 	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) + 5 * 8;
1419 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
1420 	if (!buf)
1421 		return -ENOMEM;
1422 
1423 	p = CTRL_BODY(buf);
1424 	ceph_encode_8(&p, 2);  /* entity_addrvec_t marker */
1425 	ceph_encode_32(&p, 1);  /* my_addrs len */
1426 	ceph_encode_entity_addr(&p, my_addr);
1427 	ceph_encode_64(&p, con->v2.client_cookie);
1428 	ceph_encode_64(&p, con->v2.server_cookie);
1429 	ceph_encode_64(&p, con->v2.global_seq);
1430 	ceph_encode_64(&p, con->v2.connect_seq);
1431 	ceph_encode_64(&p, con->in_seq);
1432 	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1433 
1434 	return prepare_control(con, FRAME_TAG_SESSION_RECONNECT, buf, ctrl_len);
1435 }
1436 
1437 static int prepare_keepalive2(struct ceph_connection *con)
1438 {
1439 	struct ceph_timespec *ts = CTRL_BODY(con->v2.out_buf);
1440 	struct timespec64 now;
1441 
1442 	ktime_get_real_ts64(&now);
1443 	dout("%s con %p timestamp %lld.%09ld\n", __func__, con, now.tv_sec,
1444 	     now.tv_nsec);
1445 
1446 	ceph_encode_timespec64(ts, &now);
1447 
1448 	reset_out_kvecs(con);
1449 	return prepare_control(con, FRAME_TAG_KEEPALIVE2, con->v2.out_buf,
1450 			       sizeof(struct ceph_timespec));
1451 }
1452 
1453 static int prepare_ack(struct ceph_connection *con)
1454 {
1455 	void *p;
1456 
1457 	dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
1458 	     con->in_seq_acked, con->in_seq);
1459 	con->in_seq_acked = con->in_seq;
1460 
1461 	p = CTRL_BODY(con->v2.out_buf);
1462 	ceph_encode_64(&p, con->in_seq_acked);
1463 
1464 	reset_out_kvecs(con);
1465 	return prepare_control(con, FRAME_TAG_ACK, con->v2.out_buf, 8);
1466 }
1467 
1468 static void prepare_epilogue_plain(struct ceph_connection *con, bool aborted)
1469 {
1470 	dout("%s con %p msg %p aborted %d crcs %u %u %u\n", __func__, con,
1471 	     con->out_msg, aborted, con->v2.out_epil.front_crc,
1472 	     con->v2.out_epil.middle_crc, con->v2.out_epil.data_crc);
1473 
1474 	encode_epilogue_plain(con, aborted);
1475 	add_out_kvec(con, &con->v2.out_epil, CEPH_EPILOGUE_PLAIN_LEN);
1476 }
1477 
1478 /*
1479  * For "used" empty segments, crc is -1.  For unused (trailing)
1480  * segments, crc is 0.
1481  */
1482 static void prepare_message_plain(struct ceph_connection *con)
1483 {
1484 	struct ceph_msg *msg = con->out_msg;
1485 
1486 	prepare_head_plain(con, con->v2.out_buf,
1487 			   sizeof(struct ceph_msg_header2), NULL, 0, false);
1488 
1489 	if (!front_len(msg) && !middle_len(msg)) {
1490 		if (!data_len(msg)) {
1491 			/*
1492 			 * Empty message: once the head is written,
1493 			 * we are done -- there is no epilogue.
1494 			 */
1495 			con->v2.out_state = OUT_S_FINISH_MESSAGE;
1496 			return;
1497 		}
1498 
1499 		con->v2.out_epil.front_crc = -1;
1500 		con->v2.out_epil.middle_crc = -1;
1501 		con->v2.out_state = OUT_S_QUEUE_DATA;
1502 		return;
1503 	}
1504 
1505 	if (front_len(msg)) {
1506 		con->v2.out_epil.front_crc = crc32c(-1, msg->front.iov_base,
1507 						    front_len(msg));
1508 		add_out_kvec(con, msg->front.iov_base, front_len(msg));
1509 	} else {
1510 		/* middle (at least) is there, checked above */
1511 		con->v2.out_epil.front_crc = -1;
1512 	}
1513 
1514 	if (middle_len(msg)) {
1515 		con->v2.out_epil.middle_crc =
1516 			crc32c(-1, msg->middle->vec.iov_base, middle_len(msg));
1517 		add_out_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
1518 	} else {
1519 		con->v2.out_epil.middle_crc = data_len(msg) ? -1 : 0;
1520 	}
1521 
1522 	if (data_len(msg)) {
1523 		con->v2.out_state = OUT_S_QUEUE_DATA;
1524 	} else {
1525 		con->v2.out_epil.data_crc = 0;
1526 		prepare_epilogue_plain(con, false);
1527 		con->v2.out_state = OUT_S_FINISH_MESSAGE;
1528 	}
1529 }
1530 
1531 /*
1532  * Unfortunately the kernel crypto API doesn't support streaming
1533  * (piecewise) operation for AEAD algorithms, so we can't get away
1534  * with a fixed-size buffer and a couple of sgs.  Instead, we have to
1535  * allocate pages for the entire tail of the message (currently up
1536  * to ~32M) and two sg arrays (up to ~256K each)...
1537  */
1538 static int prepare_message_secure(struct ceph_connection *con)
1539 {
1540 	void *zerop = page_address(ceph_zero_page);
1541 	struct sg_table enc_sgt = {};
1542 	struct sg_table sgt = {};
1543 	struct page **enc_pages;
1544 	int enc_page_cnt;
1545 	int tail_len;
1546 	int ret;
1547 
1548 	ret = prepare_head_secure_small(con, con->v2.out_buf,
1549 					sizeof(struct ceph_msg_header2));
1550 	if (ret)
1551 		return ret;
1552 
1553 	tail_len = tail_onwire_len(con->out_msg, true);
1554 	if (!tail_len) {
1555 		/*
1556 		 * Empty message: once the head is written,
1557 		 * we are done -- there is no epilogue.
1558 		 */
1559 		con->v2.out_state = OUT_S_FINISH_MESSAGE;
1560 		return 0;
1561 	}
1562 
1563 	encode_epilogue_secure(con, false);
1564 	ret = setup_message_sgs(&sgt, con->out_msg, zerop, zerop, zerop,
1565 				&con->v2.out_epil, false);
1566 	if (ret)
1567 		goto out;
1568 
1569 	enc_page_cnt = calc_pages_for(0, tail_len);
1570 	enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
1571 	if (IS_ERR(enc_pages)) {
1572 		ret = PTR_ERR(enc_pages);
1573 		goto out;
1574 	}
1575 
1576 	WARN_ON(con->v2.out_enc_pages || con->v2.out_enc_page_cnt);
1577 	con->v2.out_enc_pages = enc_pages;
1578 	con->v2.out_enc_page_cnt = enc_page_cnt;
1579 	con->v2.out_enc_resid = tail_len;
1580 	con->v2.out_enc_i = 0;
1581 
1582 	ret = sg_alloc_table_from_pages(&enc_sgt, enc_pages, enc_page_cnt,
1583 					0, tail_len, GFP_NOIO);
1584 	if (ret)
1585 		goto out;
1586 
1587 	ret = gcm_crypt(con, true, sgt.sgl, enc_sgt.sgl,
1588 			tail_len - CEPH_GCM_TAG_LEN);
1589 	if (ret)
1590 		goto out;
1591 
1592 	dout("%s con %p msg %p sg_cnt %d enc_page_cnt %d\n", __func__, con,
1593 	     con->out_msg, sgt.orig_nents, enc_page_cnt);
1594 	con->v2.out_state = OUT_S_QUEUE_ENC_PAGE;
1595 
1596 out:
1597 	sg_free_table(&sgt);
1598 	sg_free_table(&enc_sgt);
1599 	return ret;
1600 }
1601 
1602 static int prepare_message(struct ceph_connection *con)
1603 {
1604 	int lens[] = {
1605 		sizeof(struct ceph_msg_header2),
1606 		front_len(con->out_msg),
1607 		middle_len(con->out_msg),
1608 		data_len(con->out_msg)
1609 	};
1610 	struct ceph_frame_desc desc;
1611 	int ret;
1612 
1613 	dout("%s con %p msg %p logical %d+%d+%d+%d\n", __func__, con,
1614 	     con->out_msg, lens[0], lens[1], lens[2], lens[3]);
1615 
1616 	if (con->in_seq > con->in_seq_acked) {
1617 		dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
1618 		     con->in_seq_acked, con->in_seq);
1619 		con->in_seq_acked = con->in_seq;
1620 	}
1621 
1622 	reset_out_kvecs(con);
1623 	init_frame_desc(&desc, FRAME_TAG_MESSAGE, lens, 4);
1624 	encode_preamble(&desc, con->v2.out_buf);
1625 	fill_header2(CTRL_BODY(con->v2.out_buf), &con->out_msg->hdr,
1626 		     con->in_seq_acked);
1627 
1628 	if (con_secure(con)) {
1629 		ret = prepare_message_secure(con);
1630 		if (ret)
1631 			return ret;
1632 	} else {
1633 		prepare_message_plain(con);
1634 	}
1635 
1636 	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1637 	return 0;
1638 }
1639 
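/*
 * prepare_read_*() helpers queue buffers on the input iterator and
 * set the connection/input state for the next chunk expected from
 * the peer.
 */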
1640 static int prepare_read_banner_prefix(struct ceph_connection *con)
1641 {
1642 	void *buf;
1643 
1644 	buf = alloc_conn_buf(con, CEPH_BANNER_V2_PREFIX_LEN);
1645 	if (!buf)
1646 		return -ENOMEM;
1647 
1648 	reset_in_kvecs(con);
1649 	add_in_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
1650 	add_in_sign_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
1651 	con->state = CEPH_CON_S_V2_BANNER_PREFIX;
1652 	return 0;
1653 }
1654 
1655 static int prepare_read_banner_payload(struct ceph_connection *con,
1656 				       int payload_len)
1657 {
1658 	void *buf;
1659 
1660 	buf = alloc_conn_buf(con, payload_len);
1661 	if (!buf)
1662 		return -ENOMEM;
1663 
1664 	reset_in_kvecs(con);
1665 	add_in_kvec(con, buf, payload_len);
1666 	add_in_sign_kvec(con, buf, payload_len);
1667 	con->state = CEPH_CON_S_V2_BANNER_PAYLOAD;
1668 	return 0;
1669 }
1670 
1671 static void prepare_read_preamble(struct ceph_connection *con)
1672 {
1673 	reset_in_kvecs(con);
1674 	add_in_kvec(con, con->v2.in_buf,
1675 		    con_secure(con) ? CEPH_PREAMBLE_SECURE_LEN :
1676 				      CEPH_PREAMBLE_PLAIN_LEN);
1677 	con->v2.in_state = IN_S_HANDLE_PREAMBLE;
1678 }
1679 
1680 static int prepare_read_control(struct ceph_connection *con)
1681 {
1682 	int ctrl_len = con->v2.in_desc.fd_lens[0];
1683 	int head_len;
1684 	void *buf;
1685 
1686 	reset_in_kvecs(con);
1687 	if (con->state == CEPH_CON_S_V2_HELLO ||
1688 	    con->state == CEPH_CON_S_V2_AUTH) {
1689 		head_len = head_onwire_len(ctrl_len, false);
1690 		buf = alloc_conn_buf(con, head_len);
1691 		if (!buf)
1692 			return -ENOMEM;
1693 
1694 		/* preserve preamble */
1695 		memcpy(buf, con->v2.in_buf, CEPH_PREAMBLE_LEN);
1696 
1697 		add_in_kvec(con, CTRL_BODY(buf), ctrl_len);
1698 		add_in_kvec(con, CTRL_BODY(buf) + ctrl_len, CEPH_CRC_LEN);
1699 		add_in_sign_kvec(con, buf, head_len);
1700 	} else {
1701 		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
1702 			buf = alloc_conn_buf(con, ctrl_len);
1703 			if (!buf)
1704 				return -ENOMEM;
1705 
1706 			add_in_kvec(con, buf, ctrl_len);
1707 		} else {
1708 			add_in_kvec(con, CTRL_BODY(con->v2.in_buf), ctrl_len);
1709 		}
1710 		add_in_kvec(con, con->v2.in_buf, CEPH_CRC_LEN);
1711 	}
1712 	con->v2.in_state = IN_S_HANDLE_CONTROL;
1713 	return 0;
1714 }
1715 
1716 static int prepare_read_control_remainder(struct ceph_connection *con)
1717 {
1718 	int ctrl_len = con->v2.in_desc.fd_lens[0];
1719 	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1720 	void *buf;
1721 
1722 	buf = alloc_conn_buf(con, ctrl_len);
1723 	if (!buf)
1724 		return -ENOMEM;
1725 
1726 	memcpy(buf, CTRL_BODY(con->v2.in_buf), CEPH_PREAMBLE_INLINE_LEN);
1727 
1728 	reset_in_kvecs(con);
1729 	add_in_kvec(con, buf + CEPH_PREAMBLE_INLINE_LEN, rem_len);
1730 	add_in_kvec(con, con->v2.in_buf,
1731 		    padding_len(rem_len) + CEPH_GCM_TAG_LEN);
1732 	con->v2.in_state = IN_S_HANDLE_CONTROL_REMAINDER;
1733 	return 0;
1734 }
1735 
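/*
 * Start reading the data segment into the message's data cursor, one
 * bvec at a time.  In plain (crc) mode the data crc is accumulated as
 * each bvec completes in prepare_read_data_cont().
 */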
1736 static void prepare_read_data(struct ceph_connection *con)
1737 {
1738 	struct bio_vec bv;
1739 
1740 	if (!con_secure(con))
1741 		con->in_data_crc = -1;
1742 	ceph_msg_data_cursor_init(&con->v2.in_cursor, con->in_msg,
1743 				  data_len(con->in_msg));
1744 
1745 	get_bvec_at(&con->v2.in_cursor, &bv);
1746 	set_in_bvec(con, &bv);
1747 	con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT;
1748 }
1749 
1750 static void prepare_read_data_cont(struct ceph_connection *con)
1751 {
1752 	struct bio_vec bv;
1753 
1754 	if (!con_secure(con))
1755 		con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
1756 						    con->v2.in_bvec.bv_page,
1757 						    con->v2.in_bvec.bv_offset,
1758 						    con->v2.in_bvec.bv_len);
1759 
1760 	ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len);
1761 	if (con->v2.in_cursor.total_resid) {
1762 		get_bvec_at(&con->v2.in_cursor, &bv);
1763 		set_in_bvec(con, &bv);
1764 		WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT);
1765 		return;
1766 	}
1767 
1768 	/*
1769 	 * We've read all data.  Prepare to read data padding (if any)
1770 	 * and epilogue.
1771 	 */
1772 	reset_in_kvecs(con);
1773 	if (con_secure(con)) {
1774 		if (need_padding(data_len(con->in_msg)))
1775 			add_in_kvec(con, DATA_PAD(con->v2.in_buf),
1776 				    padding_len(data_len(con->in_msg)));
1777 		add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_SECURE_LEN);
1778 	} else {
1779 		add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
1780 	}
1781 	con->v2.in_state = IN_S_HANDLE_EPILOGUE;
1782 }
1783 
1784 static void __finish_skip(struct ceph_connection *con)
1785 {
1786 	con->in_seq++;
1787 	prepare_read_preamble(con);
1788 }
1789 
1790 static void prepare_skip_message(struct ceph_connection *con)
1791 {
1792 	struct ceph_frame_desc *desc = &con->v2.in_desc;
1793 	int tail_len;
1794 
1795 	dout("%s con %p %d+%d+%d\n", __func__, con, desc->fd_lens[1],
1796 	     desc->fd_lens[2], desc->fd_lens[3]);
1797 
1798 	tail_len = __tail_onwire_len(desc->fd_lens[1], desc->fd_lens[2],
1799 				     desc->fd_lens[3], con_secure(con));
1800 	if (!tail_len) {
1801 		__finish_skip(con);
1802 	} else {
1803 		set_in_skip(con, tail_len);
1804 		con->v2.in_state = IN_S_FINISH_SKIP;
1805 	}
1806 }
1807 
1808 static int process_banner_prefix(struct ceph_connection *con)
1809 {
1810 	int payload_len;
1811 	void *p;
1812 
1813 	WARN_ON(con->v2.in_kvecs[0].iov_len != CEPH_BANNER_V2_PREFIX_LEN);
1814 
1815 	p = con->v2.in_kvecs[0].iov_base;
1816 	if (memcmp(p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN)) {
1817 		if (!memcmp(p, CEPH_BANNER, CEPH_BANNER_LEN))
1818 			con->error_msg = "server is speaking msgr1 protocol";
1819 		else
1820 			con->error_msg = "protocol error, bad banner";
1821 		return -EINVAL;
1822 	}
1823 
1824 	p += CEPH_BANNER_V2_LEN;
1825 	payload_len = ceph_decode_16(&p);
1826 	dout("%s con %p payload_len %d\n", __func__, con, payload_len);
1827 
1828 	return prepare_read_banner_payload(con, payload_len);
1829 }
1830 
1831 static int process_banner_payload(struct ceph_connection *con)
1832 {
1833 	void *end = con->v2.in_kvecs[0].iov_base + con->v2.in_kvecs[0].iov_len;
1834 	u64 feat = CEPH_MSGR2_SUPPORTED_FEATURES;
1835 	u64 req_feat = CEPH_MSGR2_REQUIRED_FEATURES;
1836 	u64 server_feat, server_req_feat;
1837 	void *p;
1838 	int ret;
1839 
1840 	p = con->v2.in_kvecs[0].iov_base;
1841 	ceph_decode_64_safe(&p, end, server_feat, bad);
1842 	ceph_decode_64_safe(&p, end, server_req_feat, bad);
1843 
1844 	dout("%s con %p server_feat 0x%llx server_req_feat 0x%llx\n",
1845 	     __func__, con, server_feat, server_req_feat);
1846 
1847 	if (req_feat & ~server_feat) {
1848 		pr_err("msgr2 feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
1849 		       server_feat, req_feat & ~server_feat);
1850 		con->error_msg = "missing required protocol features";
1851 		return -EINVAL;
1852 	}
1853 	if (server_req_feat & ~feat) {
1854 		pr_err("msgr2 feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
1855 		       feat, server_req_feat & ~feat);
1856 		con->error_msg = "missing required protocol features";
1857 		return -EINVAL;
1858 	}
1859 
1860 	/* no reset_out_kvecs() as our banner may still be pending */
1861 	ret = prepare_hello(con);
1862 	if (ret) {
1863 		pr_err("prepare_hello failed: %d\n", ret);
1864 		return ret;
1865 	}
1866 
1867 	con->state = CEPH_CON_S_V2_HELLO;
1868 	prepare_read_preamble(con);
1869 	return 0;
1870 
1871 bad:
1872 	pr_err("failed to decode banner payload\n");
1873 	return -EINVAL;
1874 }
1875 
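/*
 * HELLO carries the peer's entity type and the address it sees us
 * connecting from.  Verify the type against what was requested in
 * ceph_con_open() and, if our own address is still blank, adopt the
 * observed IP (the port is cleared, the nonce is left untouched).
 * Then queue the auth request.
 */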
1876 static int process_hello(struct ceph_connection *con, void *p, void *end)
1877 {
1878 	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
1879 	struct ceph_entity_addr addr_for_me;
1880 	u8 entity_type;
1881 	int ret;
1882 
1883 	if (con->state != CEPH_CON_S_V2_HELLO) {
1884 		con->error_msg = "protocol error, unexpected hello";
1885 		return -EINVAL;
1886 	}
1887 
1888 	ceph_decode_8_safe(&p, end, entity_type, bad);
1889 	ret = ceph_decode_entity_addr(&p, end, &addr_for_me);
1890 	if (ret) {
1891 		pr_err("failed to decode addr_for_me: %d\n", ret);
1892 		return ret;
1893 	}
1894 
1895 	dout("%s con %p entity_type %d addr_for_me %s\n", __func__, con,
1896 	     entity_type, ceph_pr_addr(&addr_for_me));
1897 
1898 	if (entity_type != con->peer_name.type) {
1899 		pr_err("bad peer type, want %d, got %d\n",
1900 		       con->peer_name.type, entity_type);
1901 		con->error_msg = "wrong peer at address";
1902 		return -EINVAL;
1903 	}
1904 
1905 	/*
1906 	 * Set our address to the address our first peer (i.e. monitor)
1907 	 * sees that we are connecting from.  If we are behind some sort
1908 	 * of NAT and want to be identified by some private (not NATed)
1909 	 * address, ip option should be used.
1910 	 * address, the ip option should be used.
1911 	if (ceph_addr_is_blank(my_addr)) {
1912 		memcpy(&my_addr->in_addr, &addr_for_me.in_addr,
1913 		       sizeof(my_addr->in_addr));
1914 		ceph_addr_set_port(my_addr, 0);
1915 		dout("%s con %p set my addr %s, as seen by peer %s\n",
1916 		     __func__, con, ceph_pr_addr(my_addr),
1917 		     ceph_pr_addr(&con->peer_addr));
1918 	} else {
1919 		dout("%s con %p my addr already set %s\n",
1920 		     __func__, con, ceph_pr_addr(my_addr));
1921 	}
1922 
1923 	WARN_ON(ceph_addr_is_blank(my_addr) || ceph_addr_port(my_addr));
1924 	WARN_ON(my_addr->type != CEPH_ENTITY_ADDR_TYPE_ANY);
1925 	WARN_ON(!my_addr->nonce);
1926 
1927 	/* no reset_out_kvecs() as our hello may still be pending */
1928 	ret = prepare_auth_request(con);
1929 	if (ret) {
1930 		if (ret != -EAGAIN)
1931 			pr_err("prepare_auth_request failed: %d\n", ret);
1932 		return ret;
1933 	}
1934 
1935 	con->state = CEPH_CON_S_V2_AUTH;
1936 	return 0;
1937 
1938 bad:
1939 	pr_err("failed to decode hello\n");
1940 	return -EINVAL;
1941 }
1942 
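/*
 * The server rejected our auth attempt.  Decode the protocol and result
 * codes together with the lists of allowed auth protocols and connection
 * modes, then let the upper layer decide (with con->mutex dropped)
 * whether and how to retry.
 */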
1943 static int process_auth_bad_method(struct ceph_connection *con,
1944 				   void *p, void *end)
1945 {
1946 	int allowed_protos[8], allowed_modes[8];
1947 	int allowed_proto_cnt, allowed_mode_cnt;
1948 	int used_proto, result;
1949 	int ret;
1950 	int i;
1951 
1952 	if (con->state != CEPH_CON_S_V2_AUTH) {
1953 		con->error_msg = "protocol error, unexpected auth_bad_method";
1954 		return -EINVAL;
1955 	}
1956 
1957 	ceph_decode_32_safe(&p, end, used_proto, bad);
1958 	ceph_decode_32_safe(&p, end, result, bad);
1959 	dout("%s con %p used_proto %d result %d\n", __func__, con, used_proto,
1960 	     result);
1961 
1962 	ceph_decode_32_safe(&p, end, allowed_proto_cnt, bad);
1963 	if (allowed_proto_cnt > ARRAY_SIZE(allowed_protos)) {
1964 		pr_err("allowed_protos too big %d\n", allowed_proto_cnt);
1965 		return -EINVAL;
1966 	}
1967 	for (i = 0; i < allowed_proto_cnt; i++) {
1968 		ceph_decode_32_safe(&p, end, allowed_protos[i], bad);
1969 		dout("%s con %p allowed_protos[%d] %d\n", __func__, con,
1970 		     i, allowed_protos[i]);
1971 	}
1972 
1973 	ceph_decode_32_safe(&p, end, allowed_mode_cnt, bad);
1974 	if (allowed_mode_cnt > ARRAY_SIZE(allowed_modes)) {
1975 		pr_err("allowed_modes too big %d\n", allowed_mode_cnt);
1976 		return -EINVAL;
1977 	}
1978 	for (i = 0; i < allowed_mode_cnt; i++) {
1979 		ceph_decode_32_safe(&p, end, allowed_modes[i], bad);
1980 		dout("%s con %p allowed_modes[%d] %d\n", __func__, con,
1981 		     i, allowed_modes[i]);
1982 	}
1983 
1984 	mutex_unlock(&con->mutex);
1985 	ret = con->ops->handle_auth_bad_method(con, used_proto, result,
1986 					       allowed_protos,
1987 					       allowed_proto_cnt,
1988 					       allowed_modes,
1989 					       allowed_mode_cnt);
1990 	mutex_lock(&con->mutex);
1991 	if (con->state != CEPH_CON_S_V2_AUTH) {
1992 		dout("%s con %p state changed to %d\n", __func__, con,
1993 		     con->state);
1994 		return -EAGAIN;
1995 	}
1996 
1997 	dout("%s con %p handle_auth_bad_method ret %d\n", __func__, con, ret);
1998 	return ret;
1999 
2000 bad:
2001 	pr_err("failed to decode auth_bad_method\n");
2002 	return -EINVAL;
2003 }
2004 
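/*
 * The auth exchange needs another round trip: hand the server's payload
 * to the auth layer and queue an auth_request_more frame in response.
 */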
2005 static int process_auth_reply_more(struct ceph_connection *con,
2006 				   void *p, void *end)
2007 {
2008 	int payload_len;
2009 	int ret;
2010 
2011 	if (con->state != CEPH_CON_S_V2_AUTH) {
2012 		con->error_msg = "protocol error, unexpected auth_reply_more";
2013 		return -EINVAL;
2014 	}
2015 
2016 	ceph_decode_32_safe(&p, end, payload_len, bad);
2017 	ceph_decode_need(&p, end, payload_len, bad);
2018 
2019 	dout("%s con %p payload_len %d\n", __func__, con, payload_len);
2020 
2021 	reset_out_kvecs(con);
2022 	ret = prepare_auth_request_more(con, p, payload_len);
2023 	if (ret) {
2024 		if (ret != -EAGAIN)
2025 			pr_err("prepare_auth_request_more failed: %d\n", ret);
2026 		return ret;
2027 	}
2028 
2029 	return 0;
2030 
2031 bad:
2032 	pr_err("failed to decode auth_reply_more\n");
2033 	return -EINVAL;
2034 }
2035 
2036 /*
2037  * Align session_key and con_secret to avoid GFP_ATOMIC allocation
2038  * inside crypto_shash_setkey() and crypto_aead_setkey() called from
2039  * setup_crypto().  __aligned(16) isn't guaranteed to work for stack
2040  * objects, so do it by hand.
2041  */
2042 static int process_auth_done(struct ceph_connection *con, void *p, void *end)
2043 {
2044 	u8 session_key_buf[CEPH_KEY_LEN + 16];
2045 	u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16];
2046 	u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16);
2047 	u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16);
2048 	int session_key_len, con_secret_len;
2049 	int payload_len;
2050 	u64 global_id;
2051 	int ret;
2052 
2053 	if (con->state != CEPH_CON_S_V2_AUTH) {
2054 		con->error_msg = "protocol error, unexpected auth_done";
2055 		return -EINVAL;
2056 	}
2057 
2058 	ceph_decode_64_safe(&p, end, global_id, bad);
2059 	ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);
2060 	ceph_decode_32_safe(&p, end, payload_len, bad);
2061 
2062 	dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
2063 	     __func__, con, global_id, con->v2.con_mode, payload_len);
2064 
2065 	mutex_unlock(&con->mutex);
2066 	session_key_len = 0;
2067 	con_secret_len = 0;
2068 	ret = con->ops->handle_auth_done(con, global_id, p, payload_len,
2069 					 session_key, &session_key_len,
2070 					 con_secret, &con_secret_len);
2071 	mutex_lock(&con->mutex);
2072 	if (con->state != CEPH_CON_S_V2_AUTH) {
2073 		dout("%s con %p state changed to %d\n", __func__, con,
2074 		     con->state);
2075 		return -EAGAIN;
2076 	}
2077 
2078 	dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
2079 	if (ret)
2080 		return ret;
2081 
2082 	ret = setup_crypto(con, session_key, session_key_len, con_secret,
2083 			   con_secret_len);
2084 	if (ret)
2085 		return ret;
2086 
2087 	reset_out_kvecs(con);
2088 	ret = prepare_auth_signature(con);
2089 	if (ret) {
2090 		pr_err("prepare_auth_signature failed: %d\n", ret);
2091 		return ret;
2092 	}
2093 
2094 	con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;
2095 	return 0;
2096 
2097 bad:
2098 	pr_err("failed to decode auth_done\n");
2099 	return -EINVAL;
2100 }
2101 
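/*
 * Verify the server's signature: recompute HMAC-SHA256 over what we
 * sent so far (out_sign_kvecs) and compare it with the digest the
 * server computed over what it received.  On success, proceed to
 * client_ident (new session) or session_reconnect (we already hold a
 * server cookie).
 */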
2102 static int process_auth_signature(struct ceph_connection *con,
2103 				  void *p, void *end)
2104 {
2105 	u8 hmac[SHA256_DIGEST_SIZE];
2106 	int ret;
2107 
2108 	if (con->state != CEPH_CON_S_V2_AUTH_SIGNATURE) {
2109 		con->error_msg = "protocol error, unexpected auth_signature";
2110 		return -EINVAL;
2111 	}
2112 
2113 	ret = hmac_sha256(con, con->v2.out_sign_kvecs,
2114 			  con->v2.out_sign_kvec_cnt, hmac);
2115 	if (ret)
2116 		return ret;
2117 
2118 	ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad);
2119 	if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) {
2120 		con->error_msg = "integrity error, bad auth signature";
2121 		return -EBADMSG;
2122 	}
2123 
2124 	dout("%s con %p auth signature ok\n", __func__, con);
2125 
2126 	/* no reset_out_kvecs() as our auth_signature may still be pending */
2127 	if (!con->v2.server_cookie) {
2128 		ret = prepare_client_ident(con);
2129 		if (ret) {
2130 			pr_err("prepare_client_ident failed: %d\n", ret);
2131 			return ret;
2132 		}
2133 
2134 		con->state = CEPH_CON_S_V2_SESSION_CONNECT;
2135 	} else {
2136 		ret = prepare_session_reconnect(con);
2137 		if (ret) {
2138 			pr_err("prepare_session_reconnect failed: %d\n", ret);
2139 			return ret;
2140 		}
2141 
2142 		con->state = CEPH_CON_S_V2_SESSION_RECONNECT;
2143 	}
2144 
2145 	return 0;
2146 
2147 bad:
2148 	pr_err("failed to decode auth_signature\n");
2149 	return -EINVAL;
2150 }
2151 
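/*
 * SERVER_IDENT concludes a fresh session: verify that the advertised
 * address matches the peer we dialed, check RADOS feature bits, record
 * the peer's global_id, global_seq, features and server cookie, and
 * move the connection to OPEN.
 */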
2152 static int process_server_ident(struct ceph_connection *con,
2153 				void *p, void *end)
2154 {
2155 	struct ceph_client *client = from_msgr(con->msgr);
2156 	u64 features, required_features;
2157 	struct ceph_entity_addr addr;
2158 	u64 global_seq;
2159 	u64 global_id;
2160 	u64 cookie;
2161 	u64 flags;
2162 	int ret;
2163 
2164 	if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2165 		con->error_msg = "protocol error, unexpected server_ident";
2166 		return -EINVAL;
2167 	}
2168 
2169 	ret = ceph_decode_entity_addrvec(&p, end, true, &addr);
2170 	if (ret) {
2171 		pr_err("failed to decode server addrs: %d\n", ret);
2172 		return ret;
2173 	}
2174 
2175 	ceph_decode_64_safe(&p, end, global_id, bad);
2176 	ceph_decode_64_safe(&p, end, global_seq, bad);
2177 	ceph_decode_64_safe(&p, end, features, bad);
2178 	ceph_decode_64_safe(&p, end, required_features, bad);
2179 	ceph_decode_64_safe(&p, end, flags, bad);
2180 	ceph_decode_64_safe(&p, end, cookie, bad);
2181 
2182 	dout("%s con %p addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx flags 0x%llx cookie 0x%llx\n",
2183 	     __func__, con, ceph_pr_addr(&addr), le32_to_cpu(addr.nonce),
2184 	     global_id, global_seq, features, required_features, flags, cookie);
2185 
2186 	/* is this who we intended to talk to? */
2187 	if (memcmp(&addr, &con->peer_addr, sizeof(con->peer_addr))) {
2188 		pr_err("bad peer addr/nonce, want %s/%u, got %s/%u\n",
2189 		       ceph_pr_addr(&con->peer_addr),
2190 		       le32_to_cpu(con->peer_addr.nonce),
2191 		       ceph_pr_addr(&addr), le32_to_cpu(addr.nonce));
2192 		con->error_msg = "wrong peer at address";
2193 		return -EINVAL;
2194 	}
2195 
2196 	if (client->required_features & ~features) {
2197 		pr_err("RADOS feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
2198 		       features, client->required_features & ~features);
2199 		con->error_msg = "missing required protocol features";
2200 		return -EINVAL;
2201 	}
2202 
2203 	/*
2204 	 * Both name->type and name->num are set in ceph_con_open() but
2205 	 * name->num may be bogus in the initial monmap.  name->type is
2206 	 * verified in process_hello().
2207 	 */
2208 	WARN_ON(!con->peer_name.type);
2209 	con->peer_name.num = cpu_to_le64(global_id);
2210 	con->v2.peer_global_seq = global_seq;
2211 	con->peer_features = features;
2212 	WARN_ON(required_features & ~client->supported_features);
2213 	con->v2.server_cookie = cookie;
2214 
2215 	if (flags & CEPH_MSG_CONNECT_LOSSY) {
2216 		ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX);
2217 		WARN_ON(con->v2.server_cookie);
2218 	} else {
2219 		WARN_ON(!con->v2.server_cookie);
2220 	}
2221 
2222 	clear_in_sign_kvecs(con);
2223 	clear_out_sign_kvecs(con);
2224 	free_conn_bufs(con);
2225 	con->delay = 0;  /* reset backoff memory */
2226 
2227 	con->state = CEPH_CON_S_OPEN;
2228 	con->v2.out_state = OUT_S_GET_NEXT;
2229 	return 0;
2230 
2231 bad:
2232 	pr_err("failed to decode server_ident\n");
2233 	return -EINVAL;
2234 }
2235 
2236 static int process_ident_missing_features(struct ceph_connection *con,
2237 					  void *p, void *end)
2238 {
2239 	struct ceph_client *client = from_msgr(con->msgr);
2240 	u64 missing_features;
2241 
2242 	if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2243 		con->error_msg = "protocol error, unexpected ident_missing_features";
2244 		return -EINVAL;
2245 	}
2246 
2247 	ceph_decode_64_safe(&p, end, missing_features, bad);
2248 	pr_err("RADOS feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
2249 	       client->supported_features, missing_features);
2250 	con->error_msg = "missing required protocol features";
2251 	return -EINVAL;
2252 
2253 bad:
2254 	pr_err("failed to decode ident_missing_features\n");
2255 	return -EINVAL;
2256 }
2257 
2258 static int process_session_reconnect_ok(struct ceph_connection *con,
2259 					void *p, void *end)
2260 {
2261 	u64 seq;
2262 
2263 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2264 		con->error_msg = "protocol error, unexpected session_reconnect_ok";
2265 		return -EINVAL;
2266 	}
2267 
2268 	ceph_decode_64_safe(&p, end, seq, bad);
2269 
2270 	dout("%s con %p seq %llu\n", __func__, con, seq);
2271 	ceph_con_discard_requeued(con, seq);
2272 
2273 	clear_in_sign_kvecs(con);
2274 	clear_out_sign_kvecs(con);
2275 	free_conn_bufs(con);
2276 	con->delay = 0;  /* reset backoff memory */
2277 
2278 	con->state = CEPH_CON_S_OPEN;
2279 	con->v2.out_state = OUT_S_GET_NEXT;
2280 	return 0;
2281 
2282 bad:
2283 	pr_err("failed to decode session_reconnect_ok\n");
2284 	return -EINVAL;
2285 }
2286 
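/*
 * The server asked us to retry with a higher connect_seq: bump past the
 * value it reported and queue a new session_reconnect.
 */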
2287 static int process_session_retry(struct ceph_connection *con,
2288 				 void *p, void *end)
2289 {
2290 	u64 connect_seq;
2291 	int ret;
2292 
2293 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2294 		con->error_msg = "protocol error, unexpected session_retry";
2295 		return -EINVAL;
2296 	}
2297 
2298 	ceph_decode_64_safe(&p, end, connect_seq, bad);
2299 
2300 	dout("%s con %p connect_seq %llu\n", __func__, con, connect_seq);
2301 	WARN_ON(connect_seq <= con->v2.connect_seq);
2302 	con->v2.connect_seq = connect_seq + 1;
2303 
2304 	free_conn_bufs(con);
2305 
2306 	reset_out_kvecs(con);
2307 	ret = prepare_session_reconnect(con);
2308 	if (ret) {
2309 		pr_err("prepare_session_reconnect (cseq) failed: %d\n", ret);
2310 		return ret;
2311 	}
2312 
2313 	return 0;
2314 
2315 bad:
2316 	pr_err("failed to decode session_retry\n");
2317 	return -EINVAL;
2318 }
2319 
2320 static int process_session_retry_global(struct ceph_connection *con,
2321 					void *p, void *end)
2322 {
2323 	u64 global_seq;
2324 	int ret;
2325 
2326 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2327 		con->error_msg = "protocol error, unexpected session_retry_global";
2328 		return -EINVAL;
2329 	}
2330 
2331 	ceph_decode_64_safe(&p, end, global_seq, bad);
2332 
2333 	dout("%s con %p global_seq %llu\n", __func__, con, global_seq);
2334 	WARN_ON(global_seq <= con->v2.global_seq);
2335 	con->v2.global_seq = ceph_get_global_seq(con->msgr, global_seq);
2336 
2337 	free_conn_bufs(con);
2338 
2339 	reset_out_kvecs(con);
2340 	ret = prepare_session_reconnect(con);
2341 	if (ret) {
2342 		pr_err("prepare_session_reconnect (gseq) failed: %d\n", ret);
2343 		return ret;
2344 	}
2345 
2346 	return 0;
2347 
2348 bad:
2349 	pr_err("failed to decode session_retry_global\n");
2350 	return -EINVAL;
2351 }
2352 
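/*
 * The server lost our session: reset the session state (dropping
 * con->mutex around the peer_reset callback) and start over with a
 * fresh client_ident.
 */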
2353 static int process_session_reset(struct ceph_connection *con,
2354 				 void *p, void *end)
2355 {
2356 	bool full;
2357 	int ret;
2358 
2359 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2360 		con->error_msg = "protocol error, unexpected session_reset";
2361 		return -EINVAL;
2362 	}
2363 
2364 	ceph_decode_8_safe(&p, end, full, bad);
2365 	if (!full) {
2366 		con->error_msg = "protocol error, bad session_reset";
2367 		return -EINVAL;
2368 	}
2369 
2370 	pr_info("%s%lld %s session reset\n", ENTITY_NAME(con->peer_name),
2371 		ceph_pr_addr(&con->peer_addr));
2372 	ceph_con_reset_session(con);
2373 
2374 	mutex_unlock(&con->mutex);
2375 	if (con->ops->peer_reset)
2376 		con->ops->peer_reset(con);
2377 	mutex_lock(&con->mutex);
2378 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2379 		dout("%s con %p state changed to %d\n", __func__, con,
2380 		     con->state);
2381 		return -EAGAIN;
2382 	}
2383 
2384 	free_conn_bufs(con);
2385 
2386 	reset_out_kvecs(con);
2387 	ret = prepare_client_ident(con);
2388 	if (ret) {
2389 		pr_err("prepare_client_ident (rst) failed: %d\n", ret);
2390 		return ret;
2391 	}
2392 
2393 	con->state = CEPH_CON_S_V2_SESSION_CONNECT;
2394 	return 0;
2395 
2396 bad:
2397 	pr_err("failed to decode session_reset\n");
2398 	return -EINVAL;
2399 }
2400 
2401 static int process_keepalive2_ack(struct ceph_connection *con,
2402 				  void *p, void *end)
2403 {
2404 	if (con->state != CEPH_CON_S_OPEN) {
2405 		con->error_msg = "protocol error, unexpected keepalive2_ack";
2406 		return -EINVAL;
2407 	}
2408 
2409 	ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad);
2410 	ceph_decode_timespec64(&con->last_keepalive_ack, p);
2411 
2412 	dout("%s con %p timestamp %lld.%09ld\n", __func__, con,
2413 	     con->last_keepalive_ack.tv_sec, con->last_keepalive_ack.tv_nsec);
2414 
2415 	return 0;
2416 
2417 bad:
2418 	pr_err("failed to decode keepalive2_ack\n");
2419 	return -EINVAL;
2420 }
2421 
2422 static int process_ack(struct ceph_connection *con, void *p, void *end)
2423 {
2424 	u64 seq;
2425 
2426 	if (con->state != CEPH_CON_S_OPEN) {
2427 		con->error_msg = "protocol error, unexpected ack";
2428 		return -EINVAL;
2429 	}
2430 
2431 	ceph_decode_64_safe(&p, end, seq, bad);
2432 
2433 	dout("%s con %p seq %llu\n", __func__, con, seq);
2434 	ceph_con_discard_sent(con, seq);
2435 	return 0;
2436 
2437 bad:
2438 	pr_err("failed to decode ack\n");
2439 	return -EINVAL;
2440 }
2441 
2442 static int process_control(struct ceph_connection *con, void *p, void *end)
2443 {
2444 	int tag = con->v2.in_desc.fd_tag;
2445 	int ret;
2446 
2447 	dout("%s con %p tag %d len %d\n", __func__, con, tag, (int)(end - p));
2448 
2449 	switch (tag) {
2450 	case FRAME_TAG_HELLO:
2451 		ret = process_hello(con, p, end);
2452 		break;
2453 	case FRAME_TAG_AUTH_BAD_METHOD:
2454 		ret = process_auth_bad_method(con, p, end);
2455 		break;
2456 	case FRAME_TAG_AUTH_REPLY_MORE:
2457 		ret = process_auth_reply_more(con, p, end);
2458 		break;
2459 	case FRAME_TAG_AUTH_DONE:
2460 		ret = process_auth_done(con, p, end);
2461 		break;
2462 	case FRAME_TAG_AUTH_SIGNATURE:
2463 		ret = process_auth_signature(con, p, end);
2464 		break;
2465 	case FRAME_TAG_SERVER_IDENT:
2466 		ret = process_server_ident(con, p, end);
2467 		break;
2468 	case FRAME_TAG_IDENT_MISSING_FEATURES:
2469 		ret = process_ident_missing_features(con, p, end);
2470 		break;
2471 	case FRAME_TAG_SESSION_RECONNECT_OK:
2472 		ret = process_session_reconnect_ok(con, p, end);
2473 		break;
2474 	case FRAME_TAG_SESSION_RETRY:
2475 		ret = process_session_retry(con, p, end);
2476 		break;
2477 	case FRAME_TAG_SESSION_RETRY_GLOBAL:
2478 		ret = process_session_retry_global(con, p, end);
2479 		break;
2480 	case FRAME_TAG_SESSION_RESET:
2481 		ret = process_session_reset(con, p, end);
2482 		break;
2483 	case FRAME_TAG_KEEPALIVE2_ACK:
2484 		ret = process_keepalive2_ack(con, p, end);
2485 		break;
2486 	case FRAME_TAG_ACK:
2487 		ret = process_ack(con, p, end);
2488 		break;
2489 	default:
2490 		pr_err("bad tag %d\n", tag);
2491 		con->error_msg = "protocol error, bad tag";
2492 		return -EINVAL;
2493 	}
2494 	if (ret) {
2495 		dout("%s con %p error %d\n", __func__, con, ret);
2496 		return ret;
2497 	}
2498 
2499 	prepare_read_preamble(con);
2500 	return 0;
2501 }
2502 
2503 /*
2504  * Return:
2505  *   1 - con->in_msg set, read message
2506  *   0 - skip message
2507  *  <0 - error
2508  */
2509 static int process_message_header(struct ceph_connection *con,
2510 				  void *p, void *end)
2511 {
2512 	struct ceph_frame_desc *desc = &con->v2.in_desc;
2513 	struct ceph_msg_header2 *hdr2 = p;
2514 	struct ceph_msg_header hdr;
2515 	int skip;
2516 	int ret;
2517 	u64 seq;
2518 
2519 	/* verify seq# */
2520 	seq = le64_to_cpu(hdr2->seq);
2521 	if ((s64)seq - (s64)con->in_seq < 1) {
2522 		pr_info("%s%lld %s skipping old message: seq %llu, expected %llu\n",
2523 			ENTITY_NAME(con->peer_name),
2524 			ceph_pr_addr(&con->peer_addr),
2525 			seq, con->in_seq + 1);
2526 		return 0;
2527 	}
2528 	if ((s64)seq - (s64)con->in_seq > 1) {
2529 		pr_err("bad seq %llu, expected %llu\n", seq, con->in_seq + 1);
2530 		con->error_msg = "bad message sequence # for incoming message";
2531 		return -EBADE;
2532 	}
2533 
2534 	ceph_con_discard_sent(con, le64_to_cpu(hdr2->ack_seq));
2535 
2536 	fill_header(&hdr, hdr2, desc->fd_lens[1], desc->fd_lens[2],
2537 		    desc->fd_lens[3], &con->peer_name);
2538 	ret = ceph_con_in_msg_alloc(con, &hdr, &skip);
2539 	if (ret)
2540 		return ret;
2541 
2542 	WARN_ON(!con->in_msg ^ skip);
2543 	if (skip)
2544 		return 0;
2545 
2546 	WARN_ON(!con->in_msg);
2547 	WARN_ON(con->in_msg->con != con);
2548 	return 1;
2549 }
2550 
2551 static int process_message(struct ceph_connection *con)
2552 {
2553 	ceph_con_process_message(con);
2554 
2555 	/*
2556 	 * We could have been closed by ceph_con_close() because
2557 	 * ceph_con_process_message() temporarily drops con->mutex.
2558 	 */
2559 	if (con->state != CEPH_CON_S_OPEN) {
2560 		dout("%s con %p state changed to %d\n", __func__, con,
2561 		     con->state);
2562 		return -EAGAIN;
2563 	}
2564 
2565 	prepare_read_preamble(con);
2566 	return 0;
2567 }
2568 
2569 static int __handle_control(struct ceph_connection *con, void *p)
2570 {
2571 	void *end = p + con->v2.in_desc.fd_lens[0];
2572 	struct ceph_msg *msg;
2573 	int ret;
2574 
2575 	if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE)
2576 		return process_control(con, p, end);
2577 
2578 	ret = process_message_header(con, p, end);
2579 	if (ret < 0)
2580 		return ret;
2581 	if (ret == 0) {
2582 		prepare_skip_message(con);
2583 		return 0;
2584 	}
2585 
2586 	msg = con->in_msg;  /* set in process_message_header() */
2587 	if (!front_len(msg) && !middle_len(msg)) {
2588 		if (!data_len(msg))
2589 			return process_message(con);
2590 
2591 		prepare_read_data(con);
2592 		return 0;
2593 	}
2594 
2595 	reset_in_kvecs(con);
2596 	if (front_len(msg)) {
2597 		WARN_ON(front_len(msg) > msg->front_alloc_len);
2598 		add_in_kvec(con, msg->front.iov_base, front_len(msg));
2599 		msg->front.iov_len = front_len(msg);
2600 
2601 		if (con_secure(con) && need_padding(front_len(msg)))
2602 			add_in_kvec(con, FRONT_PAD(con->v2.in_buf),
2603 				    padding_len(front_len(msg)));
2604 	} else {
2605 		msg->front.iov_len = 0;
2606 	}
2607 	if (middle_len(msg)) {
2608 		WARN_ON(middle_len(msg) > msg->middle->alloc_len);
2609 		add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
2610 		msg->middle->vec.iov_len = middle_len(msg);
2611 
2612 		if (con_secure(con) && need_padding(middle_len(msg)))
2613 			add_in_kvec(con, MIDDLE_PAD(con->v2.in_buf),
2614 				    padding_len(middle_len(msg)));
2615 	} else if (msg->middle) {
2616 		msg->middle->vec.iov_len = 0;
2617 	}
2618 
2619 	if (data_len(msg)) {
2620 		con->v2.in_state = IN_S_PREPARE_READ_DATA;
2621 	} else {
2622 		add_in_kvec(con, con->v2.in_buf,
2623 			    con_secure(con) ? CEPH_EPILOGUE_SECURE_LEN :
2624 					      CEPH_EPILOGUE_PLAIN_LEN);
2625 		con->v2.in_state = IN_S_HANDLE_EPILOGUE;
2626 	}
2627 	return 0;
2628 }
2629 
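/*
 * A full preamble has been read.  In secure mode it is decrypted and
 * authenticated first.  Decode the frame descriptor (tag and segment
 * lengths), then either queue reading the control segment (plain mode),
 * queue the remainder of an oversized control segment (secure mode) or
 * process the control data that fit inline in the preamble.
 */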
2630 static int handle_preamble(struct ceph_connection *con)
2631 {
2632 	struct ceph_frame_desc *desc = &con->v2.in_desc;
2633 	int ret;
2634 
2635 	if (con_secure(con)) {
2636 		ret = decrypt_preamble(con);
2637 		if (ret) {
2638 			if (ret == -EBADMSG)
2639 				con->error_msg = "integrity error, bad preamble auth tag";
2640 			return ret;
2641 		}
2642 	}
2643 
2644 	ret = decode_preamble(con->v2.in_buf, desc);
2645 	if (ret) {
2646 		if (ret == -EBADMSG)
2647 			con->error_msg = "integrity error, bad crc";
2648 		else
2649 			con->error_msg = "protocol error, bad preamble";
2650 		return ret;
2651 	}
2652 
2653 	dout("%s con %p tag %d seg_cnt %d %d+%d+%d+%d\n", __func__,
2654 	     con, desc->fd_tag, desc->fd_seg_cnt, desc->fd_lens[0],
2655 	     desc->fd_lens[1], desc->fd_lens[2], desc->fd_lens[3]);
2656 
2657 	if (!con_secure(con))
2658 		return prepare_read_control(con);
2659 
2660 	if (desc->fd_lens[0] > CEPH_PREAMBLE_INLINE_LEN)
2661 		return prepare_read_control_remainder(con);
2662 
2663 	return __handle_control(con, CTRL_BODY(con->v2.in_buf));
2664 }
2665 
2666 static int handle_control(struct ceph_connection *con)
2667 {
2668 	int ctrl_len = con->v2.in_desc.fd_lens[0];
2669 	void *buf;
2670 	int ret;
2671 
2672 	WARN_ON(con_secure(con));
2673 
2674 	ret = verify_control_crc(con);
2675 	if (ret) {
2676 		con->error_msg = "integrity error, bad crc";
2677 		return ret;
2678 	}
2679 
2680 	if (con->state == CEPH_CON_S_V2_AUTH) {
2681 		buf = alloc_conn_buf(con, ctrl_len);
2682 		if (!buf)
2683 			return -ENOMEM;
2684 
2685 		memcpy(buf, con->v2.in_kvecs[0].iov_base, ctrl_len);
2686 		return __handle_control(con, buf);
2687 	}
2688 
2689 	return __handle_control(con, con->v2.in_kvecs[0].iov_base);
2690 }
2691 
2692 static int handle_control_remainder(struct ceph_connection *con)
2693 {
2694 	int ret;
2695 
2696 	WARN_ON(!con_secure(con));
2697 
2698 	ret = decrypt_control_remainder(con);
2699 	if (ret) {
2700 		if (ret == -EBADMSG)
2701 			con->error_msg = "integrity error, bad control remainder auth tag";
2702 		return ret;
2703 	}
2704 
2705 	return __handle_control(con, con->v2.in_kvecs[0].iov_base -
2706 				     CEPH_PREAMBLE_INLINE_LEN);
2707 }
2708 
2709 static int handle_epilogue(struct ceph_connection *con)
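/*
 * Everything up to and including the epilogue has been read.  Secure:
 * decrypt the message payload and check late_status only.  Plain:
 * decode the per-segment CRCs from the epilogue and verify them.  Then
 * hand the message up.
 */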
2710 {
2711 	u32 front_crc, middle_crc, data_crc;
2712 	int ret;
2713 
2714 	if (con_secure(con)) {
2715 		ret = decrypt_message(con);
2716 		if (ret) {
2717 			if (ret == -EBADMSG)
2718 				con->error_msg = "integrity error, bad epilogue auth tag";
2719 			return ret;
2720 		}
2721 
2722 		/* just late_status */
2723 		ret = decode_epilogue(con->v2.in_buf, NULL, NULL, NULL);
2724 		if (ret) {
2725 			con->error_msg = "protocol error, bad epilogue";
2726 			return ret;
2727 		}
2728 	} else {
2729 		ret = decode_epilogue(con->v2.in_buf, &front_crc,
2730 				      &middle_crc, &data_crc);
2731 		if (ret) {
2732 			con->error_msg = "protocol error, bad epilogue";
2733 			return ret;
2734 		}
2735 
2736 		ret = verify_epilogue_crcs(con, front_crc, middle_crc,
2737 					   data_crc);
2738 		if (ret) {
2739 			con->error_msg = "integrity error, bad crc";
2740 			return ret;
2741 		}
2742 	}
2743 
2744 	return process_message(con);
2745 }
2746 
2747 static void finish_skip(struct ceph_connection *con)
2748 {
2749 	dout("%s con %p\n", __func__, con);
2750 
2751 	if (con_secure(con))
2752 		gcm_inc_nonce(&con->v2.in_gcm_nonce);
2753 
2754 	__finish_skip(con);
2755 }
2756 
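/*
 * Everything queued in in_iter has been received: act on it according
 * to the current state and queue the next thing to read.  Returns 1 if
 * in_iter was repopulated, or a negative error.
 */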
2757 static int populate_in_iter(struct ceph_connection *con)
2758 {
2759 	int ret;
2760 
2761 	dout("%s con %p state %d in_state %d\n", __func__, con, con->state,
2762 	     con->v2.in_state);
2763 	WARN_ON(iov_iter_count(&con->v2.in_iter));
2764 
2765 	if (con->state == CEPH_CON_S_V2_BANNER_PREFIX) {
2766 		ret = process_banner_prefix(con);
2767 	} else if (con->state == CEPH_CON_S_V2_BANNER_PAYLOAD) {
2768 		ret = process_banner_payload(con);
2769 	} else if ((con->state >= CEPH_CON_S_V2_HELLO &&
2770 		    con->state <= CEPH_CON_S_V2_SESSION_RECONNECT) ||
2771 		   con->state == CEPH_CON_S_OPEN) {
2772 		switch (con->v2.in_state) {
2773 		case IN_S_HANDLE_PREAMBLE:
2774 			ret = handle_preamble(con);
2775 			break;
2776 		case IN_S_HANDLE_CONTROL:
2777 			ret = handle_control(con);
2778 			break;
2779 		case IN_S_HANDLE_CONTROL_REMAINDER:
2780 			ret = handle_control_remainder(con);
2781 			break;
2782 		case IN_S_PREPARE_READ_DATA:
2783 			prepare_read_data(con);
2784 			ret = 0;
2785 			break;
2786 		case IN_S_PREPARE_READ_DATA_CONT:
2787 			prepare_read_data_cont(con);
2788 			ret = 0;
2789 			break;
2790 		case IN_S_HANDLE_EPILOGUE:
2791 			ret = handle_epilogue(con);
2792 			break;
2793 		case IN_S_FINISH_SKIP:
2794 			finish_skip(con);
2795 			ret = 0;
2796 			break;
2797 		default:
2798 			WARN(1, "bad in_state %d", con->v2.in_state);
2799 			return -EINVAL;
2800 		}
2801 	} else {
2802 		WARN(1, "bad state %d", con->state);
2803 		return -EINVAL;
2804 	}
2805 	if (ret) {
2806 		dout("%s con %p error %d\n", __func__, con, ret);
2807 		return ret;
2808 	}
2809 
2810 	if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
2811 		return -ENODATA;
2812 	dout("%s con %p populated %zu\n", __func__, con,
2813 	     iov_iter_count(&con->v2.in_iter));
2814 	return 1;
2815 }
2816 
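/*
 * Read loop: receive into in_iter and, each time it is fully consumed,
 * run populate_in_iter() to process what was read and queue the next
 * read.  Returns 0 when the socket is drained, <0 on error (-EAGAIN
 * means the connection state changed underneath us).
 */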
2817 int ceph_con_v2_try_read(struct ceph_connection *con)
2818 {
2819 	int ret;
2820 
2821 	dout("%s con %p state %d need %zu\n", __func__, con, con->state,
2822 	     iov_iter_count(&con->v2.in_iter));
2823 
2824 	if (con->state == CEPH_CON_S_PREOPEN)
2825 		return 0;
2826 
2827 	/*
2828 	 * We should always have something pending here.  If not,
2829 	 * avoid calling populate_in_iter() as if we had read something
2830 	 * (ceph_tcp_recv() would immediately return 1).
2831 	 */
2832 	if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
2833 		return -ENODATA;
2834 
2835 	for (;;) {
2836 		ret = ceph_tcp_recv(con);
2837 		if (ret <= 0)
2838 			return ret;
2839 
2840 		ret = populate_in_iter(con);
2841 		if (ret <= 0) {
2842 			if (ret && ret != -EAGAIN && !con->error_msg)
2843 				con->error_msg = "read processing error";
2844 			return ret;
2845 		}
2846 	}
2847 }
2848 
2849 static void queue_data(struct ceph_connection *con)
2850 {
2851 	struct bio_vec bv;
2852 
2853 	con->v2.out_epil.data_crc = -1;
2854 	ceph_msg_data_cursor_init(&con->v2.out_cursor, con->out_msg,
2855 				  data_len(con->out_msg));
2856 
2857 	get_bvec_at(&con->v2.out_cursor, &bv);
2858 	set_out_bvec(con, &bv, true);
2859 	con->v2.out_state = OUT_S_QUEUE_DATA_CONT;
2860 }
2861 
2862 static void queue_data_cont(struct ceph_connection *con)
2863 {
2864 	struct bio_vec bv;
2865 
2866 	con->v2.out_epil.data_crc = ceph_crc32c_page(
2867 		con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
2868 		con->v2.out_bvec.bv_offset, con->v2.out_bvec.bv_len);
2869 
2870 	ceph_msg_data_advance(&con->v2.out_cursor, con->v2.out_bvec.bv_len);
2871 	if (con->v2.out_cursor.total_resid) {
2872 		get_bvec_at(&con->v2.out_cursor, &bv);
2873 		set_out_bvec(con, &bv, true);
2874 		WARN_ON(con->v2.out_state != OUT_S_QUEUE_DATA_CONT);
2875 		return;
2876 	}
2877 
2878 	/*
2879 	 * We've written all data.  Queue epilogue.  Once it's written,
2880 	 * we are done.
2881 	 */
2882 	reset_out_kvecs(con);
2883 	prepare_epilogue_plain(con, false);
2884 	con->v2.out_state = OUT_S_FINISH_MESSAGE;
2885 }
2886 
2887 static void queue_enc_page(struct ceph_connection *con)
2888 {
2889 	struct bio_vec bv;
2890 
2891 	dout("%s con %p i %d resid %d\n", __func__, con, con->v2.out_enc_i,
2892 	     con->v2.out_enc_resid);
2893 	WARN_ON(!con->v2.out_enc_resid);
2894 
2895 	bv.bv_page = con->v2.out_enc_pages[con->v2.out_enc_i];
2896 	bv.bv_offset = 0;
2897 	bv.bv_len = min(con->v2.out_enc_resid, (int)PAGE_SIZE);
2898 
2899 	set_out_bvec(con, &bv, false);
2900 	con->v2.out_enc_i++;
2901 	con->v2.out_enc_resid -= bv.bv_len;
2902 
2903 	if (con->v2.out_enc_resid) {
2904 		WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE);
2905 		return;
2906 	}
2907 
2908 	/*
2909 	 * We've queued the last piece of ciphertext (ending with
2910 	 * epilogue) + auth tag.  Once it's written, we are done.
2911 	 */
2912 	WARN_ON(con->v2.out_enc_i != con->v2.out_enc_page_cnt);
2913 	con->v2.out_state = OUT_S_FINISH_MESSAGE;
2914 }
2915 
2916 static void queue_zeros(struct ceph_connection *con)
2917 {
2918 	dout("%s con %p out_zero %d\n", __func__, con, con->v2.out_zero);
2919 
2920 	if (con->v2.out_zero) {
2921 		set_out_bvec_zero(con);
2922 		con->v2.out_zero -= con->v2.out_bvec.bv_len;
2923 		con->v2.out_state = OUT_S_QUEUE_ZEROS;
2924 		return;
2925 	}
2926 
2927 	/*
2928 	 * We've zero-filled everything up to epilogue.  Queue epilogue
2929 	 * with late_status set to ABORTED and crcs adjusted for zeros.
2930 	 * Once it's written, we are done patching up for the revoke.
2931 	 */
2932 	reset_out_kvecs(con);
2933 	prepare_epilogue_plain(con, true);
2934 	con->v2.out_state = OUT_S_FINISH_MESSAGE;
2935 }
2936 
2937 static void finish_message(struct ceph_connection *con)
2938 {
2939 	dout("%s con %p msg %p\n", __func__, con, con->out_msg);
2940 
2941 	/* we end up here in both plain and secure modes */
2942 	if (con->v2.out_enc_pages) {
2943 		WARN_ON(!con->v2.out_enc_page_cnt);
2944 		ceph_release_page_vector(con->v2.out_enc_pages,
2945 					 con->v2.out_enc_page_cnt);
2946 		con->v2.out_enc_pages = NULL;
2947 		con->v2.out_enc_page_cnt = 0;
2948 	}
2949 	/* message may have been revoked */
2950 	if (con->out_msg) {
2951 		ceph_msg_put(con->out_msg);
2952 		con->out_msg = NULL;
2953 	}
2954 
2955 	con->v2.out_state = OUT_S_GET_NEXT;
2956 }
2957 
2958 static int populate_out_iter(struct ceph_connection *con)
2959 {
2960 	int ret;
2961 
2962 	dout("%s con %p state %d out_state %d\n", __func__, con, con->state,
2963 	     con->v2.out_state);
2964 	WARN_ON(iov_iter_count(&con->v2.out_iter));
2965 
2966 	if (con->state != CEPH_CON_S_OPEN) {
2967 		WARN_ON(con->state < CEPH_CON_S_V2_BANNER_PREFIX ||
2968 			con->state > CEPH_CON_S_V2_SESSION_RECONNECT);
2969 		goto nothing_pending;
2970 	}
2971 
2972 	switch (con->v2.out_state) {
2973 	case OUT_S_QUEUE_DATA:
2974 		WARN_ON(!con->out_msg);
2975 		queue_data(con);
2976 		goto populated;
2977 	case OUT_S_QUEUE_DATA_CONT:
2978 		WARN_ON(!con->out_msg);
2979 		queue_data_cont(con);
2980 		goto populated;
2981 	case OUT_S_QUEUE_ENC_PAGE:
2982 		queue_enc_page(con);
2983 		goto populated;
2984 	case OUT_S_QUEUE_ZEROS:
2985 		WARN_ON(con->out_msg);  /* revoked */
2986 		queue_zeros(con);
2987 		goto populated;
2988 	case OUT_S_FINISH_MESSAGE:
2989 		finish_message(con);
2990 		break;
2991 	case OUT_S_GET_NEXT:
2992 		break;
2993 	default:
2994 		WARN(1, "bad out_state %d", con->v2.out_state);
2995 		return -EINVAL;
2996 	}
2997 
2998 	WARN_ON(con->v2.out_state != OUT_S_GET_NEXT);
2999 	if (ceph_con_flag_test_and_clear(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
3000 		ret = prepare_keepalive2(con);
3001 		if (ret) {
3002 			pr_err("prepare_keepalive2 failed: %d\n", ret);
3003 			return ret;
3004 		}
3005 	} else if (!list_empty(&con->out_queue)) {
3006 		ceph_con_get_out_msg(con);
3007 		ret = prepare_message(con);
3008 		if (ret) {
3009 			pr_err("prepare_message failed: %d\n", ret);
3010 			return ret;
3011 		}
3012 	} else if (con->in_seq > con->in_seq_acked) {
3013 		ret = prepare_ack(con);
3014 		if (ret) {
3015 			pr_err("prepare_ack failed: %d\n", ret);
3016 			return ret;
3017 		}
3018 	} else {
3019 		goto nothing_pending;
3020 	}
3021 
3022 populated:
3023 	if (WARN_ON(!iov_iter_count(&con->v2.out_iter)))
3024 		return -ENODATA;
3025 	dout("%s con %p populated %zu\n", __func__, con,
3026 	     iov_iter_count(&con->v2.out_iter));
3027 	return 1;
3028 
3029 nothing_pending:
3030 	WARN_ON(iov_iter_count(&con->v2.out_iter));
3031 	dout("%s con %p nothing pending\n", __func__, con);
3032 	ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
3033 	return 0;
3034 }
3035 
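/*
 * Write loop.  On the first call for a connection (PREOPEN) this also
 * queues the banner, primes the read side and initiates the TCP
 * connect.  Output is corked around the send loop so that kvecs queued
 * piecemeal by the out state machine can be coalesced.
 */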
3036 int ceph_con_v2_try_write(struct ceph_connection *con)
3037 {
3038 	int ret;
3039 
3040 	dout("%s con %p state %d have %zu\n", __func__, con, con->state,
3041 	     iov_iter_count(&con->v2.out_iter));
3042 
3043 	/* open the socket first? */
3044 	if (con->state == CEPH_CON_S_PREOPEN) {
3045 		WARN_ON(con->peer_addr.type != CEPH_ENTITY_ADDR_TYPE_MSGR2);
3046 
3047 		/*
3048 		 * Always bump global_seq.  Bump connect_seq only if
3049 		 * there is a session (i.e. we are reconnecting and will
3050 		 * send session_reconnect instead of client_ident).
3051 		 */
3052 		con->v2.global_seq = ceph_get_global_seq(con->msgr, 0);
3053 		if (con->v2.server_cookie)
3054 			con->v2.connect_seq++;
3055 
3056 		ret = prepare_read_banner_prefix(con);
3057 		if (ret) {
3058 			pr_err("prepare_read_banner_prefix failed: %d\n", ret);
3059 			con->error_msg = "connect error";
3060 			return ret;
3061 		}
3062 
3063 		reset_out_kvecs(con);
3064 		ret = prepare_banner(con);
3065 		if (ret) {
3066 			pr_err("prepare_banner failed: %d\n", ret);
3067 			con->error_msg = "connect error";
3068 			return ret;
3069 		}
3070 
3071 		ret = ceph_tcp_connect(con);
3072 		if (ret) {
3073 			pr_err("ceph_tcp_connect failed: %d\n", ret);
3074 			con->error_msg = "connect error";
3075 			return ret;
3076 		}
3077 	}
3078 
3079 	if (!iov_iter_count(&con->v2.out_iter)) {
3080 		ret = populate_out_iter(con);
3081 		if (ret <= 0) {
3082 			if (ret && ret != -EAGAIN && !con->error_msg)
3083 				con->error_msg = "write processing error";
3084 			return ret;
3085 		}
3086 	}
3087 
3088 	tcp_sock_set_cork(con->sock->sk, true);
3089 	for (;;) {
3090 		ret = ceph_tcp_send(con);
3091 		if (ret <= 0)
3092 			break;
3093 
3094 		ret = populate_out_iter(con);
3095 		if (ret <= 0) {
3096 			if (ret && ret != -EAGAIN && !con->error_msg)
3097 				con->error_msg = "write processing error";
3098 			break;
3099 		}
3100 	}
3101 
3102 	tcp_sock_set_cork(con->sock->sk, false);
3103 	return ret;
3104 }
3105 
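/*
 * crc32c over zero_len zero bytes, fed from the shared zero page one
 * page at a time.
 */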
3106 static u32 crc32c_zeros(u32 crc, int zero_len)
3107 {
3108 	int len;
3109 
3110 	while (zero_len) {
3111 		len = min(zero_len, (int)PAGE_SIZE);
3112 		crc = crc32c(crc, page_address(ceph_zero_page), len);
3113 		zero_len -= len;
3114 	}
3115 
3116 	return crc;
3117 }
3118 
3119 static void prepare_zero_front(struct ceph_connection *con, int resid)
3120 {
3121 	int sent;
3122 
3123 	WARN_ON(!resid || resid > front_len(con->out_msg));
3124 	sent = front_len(con->out_msg) - resid;
3125 	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3126 
3127 	if (sent) {
3128 		con->v2.out_epil.front_crc =
3129 			crc32c(-1, con->out_msg->front.iov_base, sent);
3130 		con->v2.out_epil.front_crc =
3131 			crc32c_zeros(con->v2.out_epil.front_crc, resid);
3132 	} else {
3133 		con->v2.out_epil.front_crc = crc32c_zeros(-1, resid);
3134 	}
3135 
3136 	con->v2.out_iter.count -= resid;
3137 	out_zero_add(con, resid);
3138 }
3139 
3140 static void prepare_zero_middle(struct ceph_connection *con, int resid)
3141 {
3142 	int sent;
3143 
3144 	WARN_ON(!resid || resid > middle_len(con->out_msg));
3145 	sent = middle_len(con->out_msg) - resid;
3146 	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3147 
3148 	if (sent) {
3149 		con->v2.out_epil.middle_crc =
3150 			crc32c(-1, con->out_msg->middle->vec.iov_base, sent);
3151 		con->v2.out_epil.middle_crc =
3152 			crc32c_zeros(con->v2.out_epil.middle_crc, resid);
3153 	} else {
3154 		con->v2.out_epil.middle_crc = crc32c_zeros(-1, resid);
3155 	}
3156 
3157 	con->v2.out_iter.count -= resid;
3158 	out_zero_add(con, resid);
3159 }
3160 
3161 static void prepare_zero_data(struct ceph_connection *con)
3162 {
3163 	dout("%s con %p\n", __func__, con);
3164 	con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(con->out_msg));
3165 	out_zero_add(con, data_len(con->out_msg));
3166 }
3167 
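/*
 * Message revoked while its head kvecs (header, front, middle) were
 * still being written: figure out which segment the cut-off falls in,
 * substitute zeros for the unsent parts of front/middle and all of data
 * (adjusting the epilogue CRCs accordingly), and switch to queueing
 * zeros.
 */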
3168 static void revoke_at_queue_data(struct ceph_connection *con)
3169 {
3170 	int boundary;
3171 	int resid;
3172 
3173 	WARN_ON(!data_len(con->out_msg));
3174 	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
3175 	resid = iov_iter_count(&con->v2.out_iter);
3176 
3177 	boundary = front_len(con->out_msg) + middle_len(con->out_msg);
3178 	if (resid > boundary) {
3179 		resid -= boundary;
3180 		WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
3181 		dout("%s con %p was sending head\n", __func__, con);
3182 		if (front_len(con->out_msg))
3183 			prepare_zero_front(con, front_len(con->out_msg));
3184 		if (middle_len(con->out_msg))
3185 			prepare_zero_middle(con, middle_len(con->out_msg));
3186 		prepare_zero_data(con);
3187 		WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
3188 		con->v2.out_state = OUT_S_QUEUE_ZEROS;
3189 		return;
3190 	}
3191 
3192 	boundary = middle_len(con->out_msg);
3193 	if (resid > boundary) {
3194 		resid -= boundary;
3195 		dout("%s con %p was sending front\n", __func__, con);
3196 		prepare_zero_front(con, resid);
3197 		if (middle_len(con->out_msg))
3198 			prepare_zero_middle(con, middle_len(con->out_msg));
3199 		prepare_zero_data(con);
3200 		queue_zeros(con);
3201 		return;
3202 	}
3203 
3204 	WARN_ON(!resid);
3205 	dout("%s con %p was sending middle\n", __func__, con);
3206 	prepare_zero_middle(con, resid);
3207 	prepare_zero_data(con);
3208 	queue_zeros(con);
3209 }
3210 
3211 static void revoke_at_queue_data_cont(struct ceph_connection *con)
3212 {
3213 	int sent, resid;  /* current piece of data */
3214 
3215 	WARN_ON(!data_len(con->out_msg));
3216 	WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter));
3217 	resid = iov_iter_count(&con->v2.out_iter);
3218 	WARN_ON(!resid || resid > con->v2.out_bvec.bv_len);
3219 	sent = con->v2.out_bvec.bv_len - resid;
3220 	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3221 
3222 	if (sent) {
3223 		con->v2.out_epil.data_crc = ceph_crc32c_page(
3224 			con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
3225 			con->v2.out_bvec.bv_offset, sent);
3226 		ceph_msg_data_advance(&con->v2.out_cursor, sent);
3227 	}
3228 	WARN_ON(resid > con->v2.out_cursor.total_resid);
3229 	con->v2.out_epil.data_crc = crc32c_zeros(con->v2.out_epil.data_crc,
3230 						con->v2.out_cursor.total_resid);
3231 
3232 	con->v2.out_iter.count -= resid;
3233 	out_zero_add(con, con->v2.out_cursor.total_resid);
3234 	queue_zeros(con);
3235 }
3236 
3237 static void revoke_at_finish_message(struct ceph_connection *con)
3238 {
3239 	int boundary;
3240 	int resid;
3241 
3242 	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
3243 	resid = iov_iter_count(&con->v2.out_iter);
3244 
3245 	if (!front_len(con->out_msg) && !middle_len(con->out_msg) &&
3246 	    !data_len(con->out_msg)) {
3247 		WARN_ON(!resid || resid > MESSAGE_HEAD_PLAIN_LEN);
3248 		dout("%s con %p was sending head (empty message) - noop\n",
3249 		     __func__, con);
3250 		return;
3251 	}
3252 
3253 	boundary = front_len(con->out_msg) + middle_len(con->out_msg) +
3254 		   CEPH_EPILOGUE_PLAIN_LEN;
3255 	if (resid > boundary) {
3256 		resid -= boundary;
3257 		WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
3258 		dout("%s con %p was sending head\n", __func__, con);
3259 		if (front_len(con->out_msg))
3260 			prepare_zero_front(con, front_len(con->out_msg));
3261 		if (middle_len(con->out_msg))
3262 			prepare_zero_middle(con, middle_len(con->out_msg));
3263 		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3264 		WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
3265 		con->v2.out_state = OUT_S_QUEUE_ZEROS;
3266 		return;
3267 	}
3268 
3269 	boundary = middle_len(con->out_msg) + CEPH_EPILOGUE_PLAIN_LEN;
3270 	if (resid > boundary) {
3271 		resid -= boundary;
3272 		dout("%s con %p was sending front\n", __func__, con);
3273 		prepare_zero_front(con, resid);
3274 		if (middle_len(con->out_msg))
3275 			prepare_zero_middle(con, middle_len(con->out_msg));
3276 		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3277 		queue_zeros(con);
3278 		return;
3279 	}
3280 
3281 	boundary = CEPH_EPILOGUE_PLAIN_LEN;
3282 	if (resid > boundary) {
3283 		resid -= boundary;
3284 		dout("%s con %p was sending middle\n", __func__, con);
3285 		prepare_zero_middle(con, resid);
3286 		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3287 		queue_zeros(con);
3288 		return;
3289 	}
3290 
3291 	WARN_ON(!resid);
3292 	dout("%s con %p was sending epilogue - noop\n", __func__, con);
3293 }
3294 
3295 void ceph_con_v2_revoke(struct ceph_connection *con)
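/*
 * The message currently being written was revoked.  In secure mode the
 * encrypted frame was prepared in full up front and cannot be patched,
 * so it simply goes out as is.  In plain mode, substitute zeros for the
 * unsent parts of the message depending on how far we got.
 */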
3296 {
3297 	WARN_ON(con->v2.out_zero);
3298 
3299 	if (con_secure(con)) {
3300 		WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE &&
3301 			con->v2.out_state != OUT_S_FINISH_MESSAGE);
3302 		dout("%s con %p secure - noop\n", __func__, con);
3303 		return;
3304 	}
3305 
3306 	switch (con->v2.out_state) {
3307 	case OUT_S_QUEUE_DATA:
3308 		revoke_at_queue_data(con);
3309 		break;
3310 	case OUT_S_QUEUE_DATA_CONT:
3311 		revoke_at_queue_data_cont(con);
3312 		break;
3313 	case OUT_S_FINISH_MESSAGE:
3314 		revoke_at_finish_message(con);
3315 		break;
3316 	default:
3317 		WARN(1, "bad out_state %d", con->v2.out_state);
3318 		break;
3319 	}
3320 }
3321 
3322 static void revoke_at_prepare_read_data(struct ceph_connection *con)
3323 {
3324 	int remaining;  /* data + [data padding] + epilogue */
3325 	int resid;
3326 
3327 	WARN_ON(!data_len(con->in_msg));
3328 	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
3329 	resid = iov_iter_count(&con->v2.in_iter);
3330 	WARN_ON(!resid);
3331 
3332 	if (con_secure(con))
3333 		remaining = padded_len(data_len(con->in_msg)) +
3334 			    CEPH_EPILOGUE_SECURE_LEN;
3335 	else
3336 		remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;
3337 
3338 	dout("%s con %p resid %d remaining %d\n", __func__, con, resid,
3339 	     remaining);
3340 	con->v2.in_iter.count -= resid;
3341 	set_in_skip(con, resid + remaining);
3342 	con->v2.in_state = IN_S_FINISH_SKIP;
3343 }
3344 
3345 static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
3346 {
3347 	int recved, resid;  /* current piece of data */
3348 	int remaining;  /* [data padding] + epilogue */
3349 
3350 	WARN_ON(!data_len(con->in_msg));
3351 	WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
3352 	resid = iov_iter_count(&con->v2.in_iter);
3353 	WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
3354 	recved = con->v2.in_bvec.bv_len - resid;
3355 	dout("%s con %p recved %d resid %d\n", __func__, con, recved, resid);
3356 
3357 	if (recved)
3358 		ceph_msg_data_advance(&con->v2.in_cursor, recved);
3359 	WARN_ON(resid > con->v2.in_cursor.total_resid);
3360 
3361 	if (con_secure(con))
3362 		remaining = padding_len(data_len(con->in_msg)) +
3363 			    CEPH_EPILOGUE_SECURE_LEN;
3364 	else
3365 		remaining = CEPH_EPILOGUE_PLAIN_LEN;
3366 
3367 	dout("%s con %p total_resid %zu remaining %d\n", __func__, con,
3368 	     con->v2.in_cursor.total_resid, remaining);
3369 	con->v2.in_iter.count -= resid;
3370 	set_in_skip(con, con->v2.in_cursor.total_resid + remaining);
3371 	con->v2.in_state = IN_S_FINISH_SKIP;
3372 }
3373 
3374 static void revoke_at_handle_epilogue(struct ceph_connection *con)
3375 {
3376 	int resid;
3377 
3378 	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
3379 	resid = iov_iter_count(&con->v2.in_iter);
3380 	WARN_ON(!resid);
3381 
3382 	dout("%s con %p resid %d\n", __func__, con, resid);
3383 	con->v2.in_iter.count -= resid;
3384 	set_in_skip(con, resid);
3385 	con->v2.in_state = IN_S_FINISH_SKIP;
3386 }
3387 
3388 void ceph_con_v2_revoke_incoming(struct ceph_connection *con)
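/*
 * The message being read into con->in_msg was revoked: discard whatever
 * remains of the current frame based on how far the receive got.
 */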
3389 {
3390 	switch (con->v2.in_state) {
3391 	case IN_S_PREPARE_READ_DATA:
3392 		revoke_at_prepare_read_data(con);
3393 		break;
3394 	case IN_S_PREPARE_READ_DATA_CONT:
3395 		revoke_at_prepare_read_data_cont(con);
3396 		break;
3397 	case IN_S_HANDLE_EPILOGUE:
3398 		revoke_at_handle_epilogue(con);
3399 		break;
3400 	default:
3401 		WARN(1, "bad in_state %d", con->v2.in_state);
3402 		break;
3403 	}
3404 }
3405 
3406 bool ceph_con_v2_opened(struct ceph_connection *con)
3407 {
3408 	return con->v2.peer_global_seq;
3409 }
3410 
3411 void ceph_con_v2_reset_session(struct ceph_connection *con)
3412 {
3413 	con->v2.client_cookie = 0;
3414 	con->v2.server_cookie = 0;
3415 	con->v2.global_seq = 0;
3416 	con->v2.connect_seq = 0;
3417 	con->v2.peer_global_seq = 0;
3418 }
3419 
3420 void ceph_con_v2_reset_protocol(struct ceph_connection *con)
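/*
 * Drop all per-connection protocol state: pending input/output, signing
 * kvecs, connection buffers, encrypted output pages and the crypto
 * transforms.  Session state (cookies, seqs) is reset separately by
 * ceph_con_v2_reset_session().
 */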
3421 {
3422 	iov_iter_truncate(&con->v2.in_iter, 0);
3423 	iov_iter_truncate(&con->v2.out_iter, 0);
3424 	con->v2.out_zero = 0;
3425 
3426 	clear_in_sign_kvecs(con);
3427 	clear_out_sign_kvecs(con);
3428 	free_conn_bufs(con);
3429 
3430 	if (con->v2.out_enc_pages) {
3431 		WARN_ON(!con->v2.out_enc_page_cnt);
3432 		ceph_release_page_vector(con->v2.out_enc_pages,
3433 					 con->v2.out_enc_page_cnt);
3434 		con->v2.out_enc_pages = NULL;
3435 		con->v2.out_enc_page_cnt = 0;
3436 	}
3437 
3438 	con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
3439 
3440 	if (con->v2.hmac_tfm) {
3441 		crypto_free_shash(con->v2.hmac_tfm);
3442 		con->v2.hmac_tfm = NULL;
3443 	}
3444 	if (con->v2.gcm_req) {
3445 		aead_request_free(con->v2.gcm_req);
3446 		con->v2.gcm_req = NULL;
3447 	}
3448 	if (con->v2.gcm_tfm) {
3449 		crypto_free_aead(con->v2.gcm_tfm);
3450 		con->v2.gcm_tfm = NULL;
3451 	}
3452 }
3453