xref: /openbmc/linux/net/ceph/messenger_v2.c (revision ec3bc567)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Ceph msgr2 protocol implementation
4  *
5  * Copyright (C) 2020 Ilya Dryomov <idryomov@gmail.com>
6  */
7 
8 #include <linux/ceph/ceph_debug.h>
9 
10 #include <crypto/aead.h>
11 #include <crypto/algapi.h>  /* for crypto_memneq() */
12 #include <crypto/hash.h>
13 #include <crypto/sha2.h>
14 #include <linux/bvec.h>
15 #include <linux/crc32c.h>
16 #include <linux/net.h>
17 #include <linux/scatterlist.h>
18 #include <linux/socket.h>
19 #include <linux/sched/mm.h>
20 #include <net/sock.h>
21 #include <net/tcp.h>
22 
23 #include <linux/ceph/ceph_features.h>
24 #include <linux/ceph/decode.h>
25 #include <linux/ceph/libceph.h>
26 #include <linux/ceph/messenger.h>
27 
28 #include "crypto.h"  /* for CEPH_KEY_LEN and CEPH_MAX_CON_SECRET_LEN */
29 
30 #define FRAME_TAG_HELLO			1
31 #define FRAME_TAG_AUTH_REQUEST		2
32 #define FRAME_TAG_AUTH_BAD_METHOD	3
33 #define FRAME_TAG_AUTH_REPLY_MORE	4
34 #define FRAME_TAG_AUTH_REQUEST_MORE	5
35 #define FRAME_TAG_AUTH_DONE		6
36 #define FRAME_TAG_AUTH_SIGNATURE	7
37 #define FRAME_TAG_CLIENT_IDENT		8
38 #define FRAME_TAG_SERVER_IDENT		9
39 #define FRAME_TAG_IDENT_MISSING_FEATURES 10
40 #define FRAME_TAG_SESSION_RECONNECT	11
41 #define FRAME_TAG_SESSION_RESET		12
42 #define FRAME_TAG_SESSION_RETRY		13
43 #define FRAME_TAG_SESSION_RETRY_GLOBAL	14
44 #define FRAME_TAG_SESSION_RECONNECT_OK	15
45 #define FRAME_TAG_WAIT			16
46 #define FRAME_TAG_MESSAGE		17
47 #define FRAME_TAG_KEEPALIVE2		18
48 #define FRAME_TAG_KEEPALIVE2_ACK	19
49 #define FRAME_TAG_ACK			20
50 
51 #define FRAME_LATE_STATUS_ABORTED	0x1
52 #define FRAME_LATE_STATUS_COMPLETE	0xe
53 #define FRAME_LATE_STATUS_ABORTED_MASK	0xf
54 
55 #define IN_S_HANDLE_PREAMBLE			1
56 #define IN_S_HANDLE_CONTROL			2
57 #define IN_S_HANDLE_CONTROL_REMAINDER		3
58 #define IN_S_PREPARE_READ_DATA			4
59 #define IN_S_PREPARE_READ_DATA_CONT		5
60 #define IN_S_PREPARE_READ_ENC_PAGE		6
61 #define IN_S_PREPARE_SPARSE_DATA		7
62 #define IN_S_PREPARE_SPARSE_DATA_CONT		8
63 #define IN_S_HANDLE_EPILOGUE			9
64 #define IN_S_FINISH_SKIP			10
65 
66 #define OUT_S_QUEUE_DATA		1
67 #define OUT_S_QUEUE_DATA_CONT		2
68 #define OUT_S_QUEUE_ENC_PAGE		3
69 #define OUT_S_QUEUE_ZEROS		4
70 #define OUT_S_FINISH_MESSAGE		5
71 #define OUT_S_GET_NEXT			6
72 
73 #define CTRL_BODY(p)	((void *)(p) + CEPH_PREAMBLE_LEN)
74 #define FRONT_PAD(p)	((void *)(p) + CEPH_EPILOGUE_SECURE_LEN)
75 #define MIDDLE_PAD(p)	(FRONT_PAD(p) + CEPH_GCM_BLOCK_LEN)
76 #define DATA_PAD(p)	(MIDDLE_PAD(p) + CEPH_GCM_BLOCK_LEN)
77 
78 #define CEPH_MSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
79 
80 static int do_recvmsg(struct socket *sock, struct iov_iter *it)
81 {
82 	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
83 	int ret;
84 
85 	msg.msg_iter = *it;
86 	while (iov_iter_count(it)) {
87 		ret = sock_recvmsg(sock, &msg, msg.msg_flags);
88 		if (ret <= 0) {
89 			if (ret == -EAGAIN)
90 				ret = 0;
91 			return ret;
92 		}
93 
94 		iov_iter_advance(it, ret);
95 	}
96 
97 	WARN_ON(msg_data_left(&msg));
98 	return 1;
99 }
100 
101 /*
102  * Read as much as possible.
103  *
104  * Return:
105  *   1 - done, nothing (else) to read
106  *   0 - socket is empty, need to wait
107  *  <0 - error
108  */
109 static int ceph_tcp_recv(struct ceph_connection *con)
110 {
111 	int ret;
112 
113 	dout("%s con %p %s %zu\n", __func__, con,
114 	     iov_iter_is_discard(&con->v2.in_iter) ? "discard" : "need",
115 	     iov_iter_count(&con->v2.in_iter));
116 	ret = do_recvmsg(con->sock, &con->v2.in_iter);
117 	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
118 	     iov_iter_count(&con->v2.in_iter));
119 	return ret;
120 }
121 
122 static int do_sendmsg(struct socket *sock, struct iov_iter *it)
123 {
124 	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
125 	int ret;
126 
127 	msg.msg_iter = *it;
128 	while (iov_iter_count(it)) {
129 		ret = sock_sendmsg(sock, &msg);
130 		if (ret <= 0) {
131 			if (ret == -EAGAIN)
132 				ret = 0;
133 			return ret;
134 		}
135 
136 		iov_iter_advance(it, ret);
137 	}
138 
139 	WARN_ON(msg_data_left(&msg));
140 	return 1;
141 }
142 
143 static int do_try_sendpage(struct socket *sock, struct iov_iter *it)
144 {
145 	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
146 	struct bio_vec bv;
147 	int ret;
148 
149 	if (WARN_ON(!iov_iter_is_bvec(it)))
150 		return -EINVAL;
151 
152 	while (iov_iter_count(it)) {
153 		/* iov_iter_iovec() for ITER_BVEC */
154 		bvec_set_page(&bv, it->bvec->bv_page,
155 			      min(iov_iter_count(it),
156 				  it->bvec->bv_len - it->iov_offset),
157 			      it->bvec->bv_offset + it->iov_offset);
158 
159 		/*
160 		 * MSG_SPLICE_PAGES cannot properly handle pages with
161 		 * page_count == 0, so we need to fall back to sendmsg
162 		 * if that's the case.
163 		 *
164 		 * The same goes for slab pages: skb_can_coalesce() allows
165 		 * coalescing neighboring slab objects into a single frag,
166 		 * which triggers one of the hardened usercopy checks.
167 		 */
168 		if (sendpage_ok(bv.bv_page))
169 			msg.msg_flags |= MSG_SPLICE_PAGES;
170 		else
171 			msg.msg_flags &= ~MSG_SPLICE_PAGES;
172 
173 		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len);
174 		ret = sock_sendmsg(sock, &msg);
175 		if (ret <= 0) {
176 			if (ret == -EAGAIN)
177 				ret = 0;
178 			return ret;
179 		}
180 
181 		iov_iter_advance(it, ret);
182 	}
183 
184 	return 1;
185 }
186 
187 /*
188  * Write as much as possible.  The socket is expected to be corked,
189  * so we don't bother with MSG_MORE here.
190  *
191  * Return:
192  *   1 - done, nothing (else) to write
193  *   0 - socket is full, need to wait
194  *  <0 - error
195  */
196 static int ceph_tcp_send(struct ceph_connection *con)
197 {
198 	int ret;
199 
200 	dout("%s con %p have %zu try_sendpage %d\n", __func__, con,
201 	     iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage);
202 	if (con->v2.out_iter_sendpage)
203 		ret = do_try_sendpage(con->sock, &con->v2.out_iter);
204 	else
205 		ret = do_sendmsg(con->sock, &con->v2.out_iter);
206 	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
207 	     iov_iter_count(&con->v2.out_iter));
208 	return ret;
209 }
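
/*
 * Both ceph_tcp_recv() and ceph_tcp_send() share the 1/0/<0 return
 * convention documented above.  A rough caller sketch (illustrative
 * only, not necessarily the actual state machine further down in this
 * file):
 *
 *	ret = ceph_tcp_send(con);
 *	if (ret < 0)
 *		goto fault;		(socket error)
 *	if (!ret)
 *		return;			(socket full, wait for write space)
 *	ret == 1: out_iter fully consumed, queue the next piece
 */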
210 
211 static void add_in_kvec(struct ceph_connection *con, void *buf, int len)
212 {
213 	BUG_ON(con->v2.in_kvec_cnt >= ARRAY_SIZE(con->v2.in_kvecs));
214 	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
215 
216 	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_base = buf;
217 	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_len = len;
218 	con->v2.in_kvec_cnt++;
219 
220 	con->v2.in_iter.nr_segs++;
221 	con->v2.in_iter.count += len;
222 }
223 
224 static void reset_in_kvecs(struct ceph_connection *con)
225 {
226 	WARN_ON(iov_iter_count(&con->v2.in_iter));
227 
228 	con->v2.in_kvec_cnt = 0;
229 	iov_iter_kvec(&con->v2.in_iter, ITER_DEST, con->v2.in_kvecs, 0, 0);
230 }
231 
232 static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
233 {
234 	WARN_ON(iov_iter_count(&con->v2.in_iter));
235 
236 	con->v2.in_bvec = *bv;
237 	iov_iter_bvec(&con->v2.in_iter, ITER_DEST, &con->v2.in_bvec, 1, bv->bv_len);
238 }
239 
240 static void set_in_skip(struct ceph_connection *con, int len)
241 {
242 	WARN_ON(iov_iter_count(&con->v2.in_iter));
243 
244 	dout("%s con %p len %d\n", __func__, con, len);
245 	iov_iter_discard(&con->v2.in_iter, ITER_DEST, len);
246 }
247 
248 static void add_out_kvec(struct ceph_connection *con, void *buf, int len)
249 {
250 	BUG_ON(con->v2.out_kvec_cnt >= ARRAY_SIZE(con->v2.out_kvecs));
251 	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
252 	WARN_ON(con->v2.out_zero);
253 
254 	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_base = buf;
255 	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_len = len;
256 	con->v2.out_kvec_cnt++;
257 
258 	con->v2.out_iter.nr_segs++;
259 	con->v2.out_iter.count += len;
260 }
261 
262 static void reset_out_kvecs(struct ceph_connection *con)
263 {
264 	WARN_ON(iov_iter_count(&con->v2.out_iter));
265 	WARN_ON(con->v2.out_zero);
266 
267 	con->v2.out_kvec_cnt = 0;
268 
269 	iov_iter_kvec(&con->v2.out_iter, ITER_SOURCE, con->v2.out_kvecs, 0, 0);
270 	con->v2.out_iter_sendpage = false;
271 }
272 
273 static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv,
274 			 bool zerocopy)
275 {
276 	WARN_ON(iov_iter_count(&con->v2.out_iter));
277 	WARN_ON(con->v2.out_zero);
278 
279 	con->v2.out_bvec = *bv;
280 	con->v2.out_iter_sendpage = zerocopy;
281 	iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
282 		      con->v2.out_bvec.bv_len);
283 }
284 
285 static void set_out_bvec_zero(struct ceph_connection *con)
286 {
287 	WARN_ON(iov_iter_count(&con->v2.out_iter));
288 	WARN_ON(!con->v2.out_zero);
289 
290 	bvec_set_page(&con->v2.out_bvec, ceph_zero_page,
291 		      min(con->v2.out_zero, (int)PAGE_SIZE), 0);
292 	con->v2.out_iter_sendpage = true;
293 	iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
294 		      con->v2.out_bvec.bv_len);
295 }
296 
297 static void out_zero_add(struct ceph_connection *con, int len)
298 {
299 	dout("%s con %p len %d\n", __func__, con, len);
300 	con->v2.out_zero += len;
301 }
302 
303 static void *alloc_conn_buf(struct ceph_connection *con, int len)
304 {
305 	void *buf;
306 
307 	dout("%s con %p len %d\n", __func__, con, len);
308 
309 	if (WARN_ON(con->v2.conn_buf_cnt >= ARRAY_SIZE(con->v2.conn_bufs)))
310 		return NULL;
311 
312 	buf = kvmalloc(len, GFP_NOIO);
313 	if (!buf)
314 		return NULL;
315 
316 	con->v2.conn_bufs[con->v2.conn_buf_cnt++] = buf;
317 	return buf;
318 }
319 
320 static void free_conn_bufs(struct ceph_connection *con)
321 {
322 	while (con->v2.conn_buf_cnt)
323 		kvfree(con->v2.conn_bufs[--con->v2.conn_buf_cnt]);
324 }
325 
326 static void add_in_sign_kvec(struct ceph_connection *con, void *buf, int len)
327 {
328 	BUG_ON(con->v2.in_sign_kvec_cnt >= ARRAY_SIZE(con->v2.in_sign_kvecs));
329 
330 	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_base = buf;
331 	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_len = len;
332 	con->v2.in_sign_kvec_cnt++;
333 }
334 
335 static void clear_in_sign_kvecs(struct ceph_connection *con)
336 {
337 	con->v2.in_sign_kvec_cnt = 0;
338 }
339 
340 static void add_out_sign_kvec(struct ceph_connection *con, void *buf, int len)
341 {
342 	BUG_ON(con->v2.out_sign_kvec_cnt >= ARRAY_SIZE(con->v2.out_sign_kvecs));
343 
344 	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_base = buf;
345 	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_len = len;
346 	con->v2.out_sign_kvec_cnt++;
347 }
348 
349 static void clear_out_sign_kvecs(struct ceph_connection *con)
350 {
351 	con->v2.out_sign_kvec_cnt = 0;
352 }
353 
354 static bool con_secure(struct ceph_connection *con)
355 {
356 	return con->v2.con_mode == CEPH_CON_MODE_SECURE;
357 }
358 
359 static int front_len(const struct ceph_msg *msg)
360 {
361 	return le32_to_cpu(msg->hdr.front_len);
362 }
363 
364 static int middle_len(const struct ceph_msg *msg)
365 {
366 	return le32_to_cpu(msg->hdr.middle_len);
367 }
368 
369 static int data_len(const struct ceph_msg *msg)
370 {
371 	return le32_to_cpu(msg->hdr.data_len);
372 }
373 
374 static bool need_padding(int len)
375 {
376 	return !IS_ALIGNED(len, CEPH_GCM_BLOCK_LEN);
377 }
378 
379 static int padded_len(int len)
380 {
381 	return ALIGN(len, CEPH_GCM_BLOCK_LEN);
382 }
383 
384 static int padding_len(int len)
385 {
386 	return padded_len(len) - len;
387 }
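
/*
 * Worked example, assuming CEPH_GCM_BLOCK_LEN is 16 (the AES-GCM
 * block size):
 *
 *	need_padding(48) == false   padded_len(48) == 48   padding_len(48) == 0
 *	need_padding(52) == true    padded_len(52) == 64   padding_len(52) == 12
 */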
388 
389 /* preamble + control segment */
390 static int head_onwire_len(int ctrl_len, bool secure)
391 {
392 	int head_len;
393 	int rem_len;
394 
395 	BUG_ON(ctrl_len < 0 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN);
396 
397 	if (secure) {
398 		head_len = CEPH_PREAMBLE_SECURE_LEN;
399 		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
400 			rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
401 			head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN;
402 		}
403 	} else {
404 		head_len = CEPH_PREAMBLE_PLAIN_LEN;
405 		if (ctrl_len)
406 			head_len += ctrl_len + CEPH_CRC_LEN;
407 	}
408 	return head_len;
409 }
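
/*
 * Rough worked example, assuming the usual msgr2 constants (32-byte
 * plain preamble, 4-byte crc, 48-byte inline area, 16-byte GCM tag,
 * 96-byte secure preamble):
 *
 *	head_onwire_len(100, false) == 32 + 100 + 4 == 136
 *	head_onwire_len(40, true)   == 96 (ctrl fully inlined)
 *	head_onwire_len(100, true)  == 96 + padded_len(52) + 16 == 176
 */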
410 
411 /* front, middle and data segments + epilogue */
412 static int __tail_onwire_len(int front_len, int middle_len, int data_len,
413 			     bool secure)
414 {
415 	BUG_ON(front_len < 0 || front_len > CEPH_MSG_MAX_FRONT_LEN ||
416 	       middle_len < 0 || middle_len > CEPH_MSG_MAX_MIDDLE_LEN ||
417 	       data_len < 0 || data_len > CEPH_MSG_MAX_DATA_LEN);
418 
419 	if (!front_len && !middle_len && !data_len)
420 		return 0;
421 
422 	if (!secure)
423 		return front_len + middle_len + data_len +
424 		       CEPH_EPILOGUE_PLAIN_LEN;
425 
426 	return padded_len(front_len) + padded_len(middle_len) +
427 	       padded_len(data_len) + CEPH_EPILOGUE_SECURE_LEN;
428 }
429 
430 static int tail_onwire_len(const struct ceph_msg *msg, bool secure)
431 {
432 	return __tail_onwire_len(front_len(msg), middle_len(msg),
433 				 data_len(msg), secure);
434 }
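
/*
 * Rough worked example for front_len 100, no middle and data_len 4096,
 * assuming a 13-byte plain epilogue and a 32-byte secure epilogue
 * (epilogue block + auth tag):
 *
 *	plain:  100 + 0 + 4096 + 13 == 4209
 *	secure: padded_len(100) + 0 + padded_len(4096) + 32 == 4240
 */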
435 
436 /* head_onwire_len(sizeof(struct ceph_msg_header2), false) */
437 #define MESSAGE_HEAD_PLAIN_LEN	(CEPH_PREAMBLE_PLAIN_LEN +		\
438 				 sizeof(struct ceph_msg_header2) +	\
439 				 CEPH_CRC_LEN)
440 
441 static const int frame_aligns[] = {
442 	sizeof(void *),
443 	sizeof(void *),
444 	sizeof(void *),
445 	PAGE_SIZE
446 };
447 
448 /*
449  * Discards trailing empty segments, unless there is just one segment.
450  * A frame always has at least one (possibly empty) segment.
451  */
452 static int calc_segment_count(const int *lens, int len_cnt)
453 {
454 	int i;
455 
456 	for (i = len_cnt - 1; i >= 0; i--) {
457 		if (lens[i])
458 			return i + 1;
459 	}
460 
461 	return 1;
462 }
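
/*
 * Worked example:
 *
 *	calc_segment_count((int[]){ 45, 222, 0, 4096 }, 4) == 4
 *	calc_segment_count((int[]){ 45, 222, 0, 0 }, 4)    == 2
 *	calc_segment_count((int[]){ 0, 0, 0, 0 }, 4)       == 1
 */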
463 
464 static void init_frame_desc(struct ceph_frame_desc *desc, int tag,
465 			    const int *lens, int len_cnt)
466 {
467 	int i;
468 
469 	memset(desc, 0, sizeof(*desc));
470 
471 	desc->fd_tag = tag;
472 	desc->fd_seg_cnt = calc_segment_count(lens, len_cnt);
473 	BUG_ON(desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT);
474 	for (i = 0; i < desc->fd_seg_cnt; i++) {
475 		desc->fd_lens[i] = lens[i];
476 		desc->fd_aligns[i] = frame_aligns[i];
477 	}
478 }
479 
480 /*
481  * Preamble crc covers everything up to itself (28 bytes) and
482  * is calculated and verified irrespective of the connection mode
483  * (i.e. even if the frame is encrypted).
484  */
485 static void encode_preamble(const struct ceph_frame_desc *desc, void *p)
486 {
487 	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
488 	void *start = p;
489 	int i;
490 
491 	memset(p, 0, CEPH_PREAMBLE_LEN);
492 
493 	ceph_encode_8(&p, desc->fd_tag);
494 	ceph_encode_8(&p, desc->fd_seg_cnt);
495 	for (i = 0; i < desc->fd_seg_cnt; i++) {
496 		ceph_encode_32(&p, desc->fd_lens[i]);
497 		ceph_encode_16(&p, desc->fd_aligns[i]);
498 	}
499 
500 	put_unaligned_le32(crc32c(0, start, crcp - start), crcp);
501 }
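
/*
 * Resulting on-wire preamble layout (assuming CEPH_PREAMBLE_LEN is 32):
 *
 *	u8    tag
 *	u8    segment count
 *	4 x { le32 segment length, le16 segment alignment }
 *	2     unused (zeroed)
 *	le32  crc32c over the preceding 28 bytes
 */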
502 
503 static int decode_preamble(void *p, struct ceph_frame_desc *desc)
504 {
505 	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
506 	u32 crc, expected_crc;
507 	int i;
508 
509 	crc = crc32c(0, p, crcp - p);
510 	expected_crc = get_unaligned_le32(crcp);
511 	if (crc != expected_crc) {
512 		pr_err("bad preamble crc, calculated %u, expected %u\n",
513 		       crc, expected_crc);
514 		return -EBADMSG;
515 	}
516 
517 	memset(desc, 0, sizeof(*desc));
518 
519 	desc->fd_tag = ceph_decode_8(&p);
520 	desc->fd_seg_cnt = ceph_decode_8(&p);
521 	if (desc->fd_seg_cnt < 1 ||
522 	    desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT) {
523 		pr_err("bad segment count %d\n", desc->fd_seg_cnt);
524 		return -EINVAL;
525 	}
526 	for (i = 0; i < desc->fd_seg_cnt; i++) {
527 		desc->fd_lens[i] = ceph_decode_32(&p);
528 		desc->fd_aligns[i] = ceph_decode_16(&p);
529 	}
530 
531 	if (desc->fd_lens[0] < 0 ||
532 	    desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
533 		pr_err("bad control segment length %d\n", desc->fd_lens[0]);
534 		return -EINVAL;
535 	}
536 	if (desc->fd_lens[1] < 0 ||
537 	    desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
538 		pr_err("bad front segment length %d\n", desc->fd_lens[1]);
539 		return -EINVAL;
540 	}
541 	if (desc->fd_lens[2] < 0 ||
542 	    desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
543 		pr_err("bad middle segment length %d\n", desc->fd_lens[2]);
544 		return -EINVAL;
545 	}
546 	if (desc->fd_lens[3] < 0 ||
547 	    desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
548 		pr_err("bad data segment length %d\n", desc->fd_lens[3]);
549 		return -EINVAL;
550 	}
551 
552 	/*
553 	 * This would fire for FRAME_TAG_WAIT (it has one empty
554 	 * segment), but we should never get it as a client.
555 	 */
556 	if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
557 		pr_err("last segment empty, segment count %d\n",
558 		       desc->fd_seg_cnt);
559 		return -EINVAL;
560 	}
561 
562 	return 0;
563 }
564 
565 static void encode_epilogue_plain(struct ceph_connection *con, bool aborted)
566 {
567 	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
568 						 FRAME_LATE_STATUS_COMPLETE;
569 	cpu_to_le32s(&con->v2.out_epil.front_crc);
570 	cpu_to_le32s(&con->v2.out_epil.middle_crc);
571 	cpu_to_le32s(&con->v2.out_epil.data_crc);
572 }
573 
574 static void encode_epilogue_secure(struct ceph_connection *con, bool aborted)
575 {
576 	memset(&con->v2.out_epil, 0, sizeof(con->v2.out_epil));
577 	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
578 						 FRAME_LATE_STATUS_COMPLETE;
579 }
580 
581 static int decode_epilogue(void *p, u32 *front_crc, u32 *middle_crc,
582 			   u32 *data_crc)
583 {
584 	u8 late_status;
585 
586 	late_status = ceph_decode_8(&p);
587 	if ((late_status & FRAME_LATE_STATUS_ABORTED_MASK) !=
588 			FRAME_LATE_STATUS_COMPLETE) {
589 		/* we should never get an aborted message as a client */
590 		pr_err("bad late_status 0x%x\n", late_status);
591 		return -EINVAL;
592 	}
593 
594 	if (front_crc && middle_crc && data_crc) {
595 		*front_crc = ceph_decode_32(&p);
596 		*middle_crc = ceph_decode_32(&p);
597 		*data_crc = ceph_decode_32(&p);
598 	}
599 
600 	return 0;
601 }
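
/*
 * For example, a completed frame carries late_status 0xe and
 * (0xe & FRAME_LATE_STATUS_ABORTED_MASK) == FRAME_LATE_STATUS_COMPLETE
 * passes the check above, while an aborted frame carries 0x1 and fails
 * it.
 */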
602 
603 static void fill_header(struct ceph_msg_header *hdr,
604 			const struct ceph_msg_header2 *hdr2,
605 			int front_len, int middle_len, int data_len,
606 			const struct ceph_entity_name *peer_name)
607 {
608 	hdr->seq = hdr2->seq;
609 	hdr->tid = hdr2->tid;
610 	hdr->type = hdr2->type;
611 	hdr->priority = hdr2->priority;
612 	hdr->version = hdr2->version;
613 	hdr->front_len = cpu_to_le32(front_len);
614 	hdr->middle_len = cpu_to_le32(middle_len);
615 	hdr->data_len = cpu_to_le32(data_len);
616 	hdr->data_off = hdr2->data_off;
617 	hdr->src = *peer_name;
618 	hdr->compat_version = hdr2->compat_version;
619 	hdr->reserved = 0;
620 	hdr->crc = 0;
621 }
622 
623 static void fill_header2(struct ceph_msg_header2 *hdr2,
624 			 const struct ceph_msg_header *hdr, u64 ack_seq)
625 {
626 	hdr2->seq = hdr->seq;
627 	hdr2->tid = hdr->tid;
628 	hdr2->type = hdr->type;
629 	hdr2->priority = hdr->priority;
630 	hdr2->version = hdr->version;
631 	hdr2->data_pre_padding_len = 0;
632 	hdr2->data_off = hdr->data_off;
633 	hdr2->ack_seq = cpu_to_le64(ack_seq);
634 	hdr2->flags = 0;
635 	hdr2->compat_version = hdr->compat_version;
636 	hdr2->reserved = 0;
637 }
638 
639 static int verify_control_crc(struct ceph_connection *con)
640 {
641 	int ctrl_len = con->v2.in_desc.fd_lens[0];
642 	u32 crc, expected_crc;
643 
644 	WARN_ON(con->v2.in_kvecs[0].iov_len != ctrl_len);
645 	WARN_ON(con->v2.in_kvecs[1].iov_len != CEPH_CRC_LEN);
646 
647 	crc = crc32c(-1, con->v2.in_kvecs[0].iov_base, ctrl_len);
648 	expected_crc = get_unaligned_le32(con->v2.in_kvecs[1].iov_base);
649 	if (crc != expected_crc) {
650 		pr_err("bad control crc, calculated %u, expected %u\n",
651 		       crc, expected_crc);
652 		return -EBADMSG;
653 	}
654 
655 	return 0;
656 }
657 
658 static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
659 				u32 middle_crc, u32 data_crc)
660 {
661 	if (front_len(con->in_msg)) {
662 		con->in_front_crc = crc32c(-1, con->in_msg->front.iov_base,
663 					   front_len(con->in_msg));
664 	} else {
665 		WARN_ON(!middle_len(con->in_msg) && !data_len(con->in_msg));
666 		con->in_front_crc = -1;
667 	}
668 
669 	if (middle_len(con->in_msg))
670 		con->in_middle_crc = crc32c(-1,
671 					    con->in_msg->middle->vec.iov_base,
672 					    middle_len(con->in_msg));
673 	else if (data_len(con->in_msg))
674 		con->in_middle_crc = -1;
675 	else
676 		con->in_middle_crc = 0;
677 
678 	if (!data_len(con->in_msg))
679 		con->in_data_crc = 0;
680 
681 	dout("%s con %p msg %p crcs %u %u %u\n", __func__, con, con->in_msg,
682 	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
683 
684 	if (con->in_front_crc != front_crc) {
685 		pr_err("bad front crc, calculated %u, expected %u\n",
686 		       con->in_front_crc, front_crc);
687 		return -EBADMSG;
688 	}
689 	if (con->in_middle_crc != middle_crc) {
690 		pr_err("bad middle crc, calculated %u, expected %u\n",
691 		       con->in_middle_crc, middle_crc);
692 		return -EBADMSG;
693 	}
694 	if (con->in_data_crc != data_crc) {
695 		pr_err("bad data crc, calculated %u, expected %u\n",
696 		       con->in_data_crc, data_crc);
697 		return -EBADMSG;
698 	}
699 
700 	return 0;
701 }
702 
703 static int setup_crypto(struct ceph_connection *con,
704 			const u8 *session_key, int session_key_len,
705 			const u8 *con_secret, int con_secret_len)
706 {
707 	unsigned int noio_flag;
708 	int ret;
709 
710 	dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
711 	     __func__, con, con->v2.con_mode, session_key_len, con_secret_len);
712 	WARN_ON(con->v2.hmac_tfm || con->v2.gcm_tfm || con->v2.gcm_req);
713 
714 	if (con->v2.con_mode != CEPH_CON_MODE_CRC &&
715 	    con->v2.con_mode != CEPH_CON_MODE_SECURE) {
716 		pr_err("bad con_mode %d\n", con->v2.con_mode);
717 		return -EINVAL;
718 	}
719 
720 	if (!session_key_len) {
721 		WARN_ON(con->v2.con_mode != CEPH_CON_MODE_CRC);
722 		WARN_ON(con_secret_len);
723 		return 0;  /* auth_none */
724 	}
725 
726 	noio_flag = memalloc_noio_save();
727 	con->v2.hmac_tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
728 	memalloc_noio_restore(noio_flag);
729 	if (IS_ERR(con->v2.hmac_tfm)) {
730 		ret = PTR_ERR(con->v2.hmac_tfm);
731 		con->v2.hmac_tfm = NULL;
732 		pr_err("failed to allocate hmac tfm context: %d\n", ret);
733 		return ret;
734 	}
735 
736 	WARN_ON((unsigned long)session_key &
737 		crypto_shash_alignmask(con->v2.hmac_tfm));
738 	ret = crypto_shash_setkey(con->v2.hmac_tfm, session_key,
739 				  session_key_len);
740 	if (ret) {
741 		pr_err("failed to set hmac key: %d\n", ret);
742 		return ret;
743 	}
744 
745 	if (con->v2.con_mode == CEPH_CON_MODE_CRC) {
746 		WARN_ON(con_secret_len);
747 		return 0;  /* auth_x, plain mode */
748 	}
749 
750 	if (con_secret_len < CEPH_GCM_KEY_LEN + 2 * CEPH_GCM_IV_LEN) {
751 		pr_err("con_secret too small %d\n", con_secret_len);
752 		return -EINVAL;
753 	}
754 
755 	noio_flag = memalloc_noio_save();
756 	con->v2.gcm_tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
757 	memalloc_noio_restore(noio_flag);
758 	if (IS_ERR(con->v2.gcm_tfm)) {
759 		ret = PTR_ERR(con->v2.gcm_tfm);
760 		con->v2.gcm_tfm = NULL;
761 		pr_err("failed to allocate gcm tfm context: %d\n", ret);
762 		return ret;
763 	}
764 
765 	WARN_ON((unsigned long)con_secret &
766 		crypto_aead_alignmask(con->v2.gcm_tfm));
767 	ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
768 	if (ret) {
769 		pr_err("failed to set gcm key: %d\n", ret);
770 		return ret;
771 	}
772 
773 	WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
774 	ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
775 	if (ret) {
776 		pr_err("failed to set gcm tag size: %d\n", ret);
777 		return ret;
778 	}
779 
780 	con->v2.gcm_req = aead_request_alloc(con->v2.gcm_tfm, GFP_NOIO);
781 	if (!con->v2.gcm_req) {
782 		pr_err("failed to allocate gcm request\n");
783 		return -ENOMEM;
784 	}
785 
786 	crypto_init_wait(&con->v2.gcm_wait);
787 	aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
788 				  crypto_req_done, &con->v2.gcm_wait);
789 
790 	memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
791 	       CEPH_GCM_IV_LEN);
792 	memcpy(&con->v2.out_gcm_nonce,
793 	       con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
794 	       CEPH_GCM_IV_LEN);
795 	return 0;  /* auth_x, secure mode */
796 }
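
/*
 * Layout of con_secret as consumed above in secure mode:
 *
 *	[0, CEPH_GCM_KEY_LEN)			AES-GCM key
 *	[KEY_LEN, KEY_LEN + CEPH_GCM_IV_LEN)	initial in (rx) nonce
 *	[KEY_LEN + IV_LEN, KEY_LEN + 2*IV_LEN)	initial out (tx) nonce
 */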
797 
798 static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
799 		       int kvec_cnt, u8 *hmac)
800 {
801 	SHASH_DESC_ON_STACK(desc, con->v2.hmac_tfm);  /* tfm arg is ignored */
802 	int ret;
803 	int i;
804 
805 	dout("%s con %p hmac_tfm %p kvec_cnt %d\n", __func__, con,
806 	     con->v2.hmac_tfm, kvec_cnt);
807 
808 	if (!con->v2.hmac_tfm) {
809 		memset(hmac, 0, SHA256_DIGEST_SIZE);
810 		return 0;  /* auth_none */
811 	}
812 
813 	desc->tfm = con->v2.hmac_tfm;
814 	ret = crypto_shash_init(desc);
815 	if (ret)
816 		goto out;
817 
818 	for (i = 0; i < kvec_cnt; i++) {
819 		WARN_ON((unsigned long)kvecs[i].iov_base &
820 			crypto_shash_alignmask(con->v2.hmac_tfm));
821 		ret = crypto_shash_update(desc, kvecs[i].iov_base,
822 					  kvecs[i].iov_len);
823 		if (ret)
824 			goto out;
825 	}
826 
827 	ret = crypto_shash_final(desc, hmac);
828 
829 out:
830 	shash_desc_zero(desc);
831 	return ret;  /* auth_x, both plain and secure modes */
832 }
833 
834 static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
835 {
836 	u64 counter;
837 
838 	counter = le64_to_cpu(nonce->counter);
839 	nonce->counter = cpu_to_le64(counter + 1);
840 }
841 
842 static int gcm_crypt(struct ceph_connection *con, bool encrypt,
843 		     struct scatterlist *src, struct scatterlist *dst,
844 		     int src_len)
845 {
846 	struct ceph_gcm_nonce *nonce;
847 	int ret;
848 
849 	nonce = encrypt ? &con->v2.out_gcm_nonce : &con->v2.in_gcm_nonce;
850 
851 	aead_request_set_ad(con->v2.gcm_req, 0);  /* no AAD */
852 	aead_request_set_crypt(con->v2.gcm_req, src, dst, src_len, (u8 *)nonce);
853 	ret = crypto_wait_req(encrypt ? crypto_aead_encrypt(con->v2.gcm_req) :
854 					crypto_aead_decrypt(con->v2.gcm_req),
855 			      &con->v2.gcm_wait);
856 	if (ret)
857 		return ret;
858 
859 	gcm_inc_nonce(nonce);
860 	return 0;
861 }
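
/*
 * Each successful gcm_crypt() call bumps the le64 counter part of the
 * corresponding nonce, so consecutive operations in the same direction
 * use nonces N, N+1, N+2, ...  For example, a big secure head (two
 * gcm_crypt() calls in prepare_head_secure_big()) consumes two
 * consecutive out_gcm_nonce values.
 */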
862 
863 static void get_bvec_at(struct ceph_msg_data_cursor *cursor,
864 			struct bio_vec *bv)
865 {
866 	struct page *page;
867 	size_t off, len;
868 
869 	WARN_ON(!cursor->total_resid);
870 
871 	/* skip zero-length data items */
872 	while (!cursor->resid)
873 		ceph_msg_data_advance(cursor, 0);
874 
875 	/* get a piece of data, cursor isn't advanced */
876 	page = ceph_msg_data_next(cursor, &off, &len);
877 	bvec_set_page(bv, page, len, off);
878 }
879 
880 static int calc_sg_cnt(void *buf, int buf_len)
881 {
882 	int sg_cnt;
883 
884 	if (!buf_len)
885 		return 0;
886 
887 	sg_cnt = need_padding(buf_len) ? 1 : 0;
888 	if (is_vmalloc_addr(buf)) {
889 		WARN_ON(offset_in_page(buf));
890 		sg_cnt += PAGE_ALIGN(buf_len) >> PAGE_SHIFT;
891 	} else {
892 		sg_cnt++;
893 	}
894 
895 	return sg_cnt;
896 }
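
/*
 * Worked example, assuming 4K pages and a 16-byte GCM block:
 *
 *	kmalloc'ed buffer, buf_len 4002:   1 sg + 1 pad sg       == 2
 *	kmalloc'ed buffer, buf_len 4096:   1 sg, no padding      == 1
 *	vmalloc'ed buffer, buf_len 9000:   3 page sgs + 1 pad sg == 4
 */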
897 
898 static int calc_sg_cnt_cursor(struct ceph_msg_data_cursor *cursor)
899 {
900 	int data_len = cursor->total_resid;
901 	struct bio_vec bv;
902 	int sg_cnt;
903 
904 	if (!data_len)
905 		return 0;
906 
907 	sg_cnt = need_padding(data_len) ? 1 : 0;
908 	do {
909 		get_bvec_at(cursor, &bv);
910 		sg_cnt++;
911 
912 		ceph_msg_data_advance(cursor, bv.bv_len);
913 	} while (cursor->total_resid);
914 
915 	return sg_cnt;
916 }
917 
918 static void init_sgs(struct scatterlist **sg, void *buf, int buf_len, u8 *pad)
919 {
920 	void *end = buf + buf_len;
921 	struct page *page;
922 	int len;
923 	void *p;
924 
925 	if (!buf_len)
926 		return;
927 
928 	if (is_vmalloc_addr(buf)) {
929 		p = buf;
930 		do {
931 			page = vmalloc_to_page(p);
932 			len = min_t(int, end - p, PAGE_SIZE);
933 			WARN_ON(!page || !len || offset_in_page(p));
934 			sg_set_page(*sg, page, len, 0);
935 			*sg = sg_next(*sg);
936 			p += len;
937 		} while (p != end);
938 	} else {
939 		sg_set_buf(*sg, buf, buf_len);
940 		*sg = sg_next(*sg);
941 	}
942 
943 	if (need_padding(buf_len)) {
944 		sg_set_buf(*sg, pad, padding_len(buf_len));
945 		*sg = sg_next(*sg);
946 	}
947 }
948 
949 static void init_sgs_cursor(struct scatterlist **sg,
950 			    struct ceph_msg_data_cursor *cursor, u8 *pad)
951 {
952 	int data_len = cursor->total_resid;
953 	struct bio_vec bv;
954 
955 	if (!data_len)
956 		return;
957 
958 	do {
959 		get_bvec_at(cursor, &bv);
960 		sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
961 		*sg = sg_next(*sg);
962 
963 		ceph_msg_data_advance(cursor, bv.bv_len);
964 	} while (cursor->total_resid);
965 
966 	if (need_padding(data_len)) {
967 		sg_set_buf(*sg, pad, padding_len(data_len));
968 		*sg = sg_next(*sg);
969 	}
970 }
971 
972 static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
973 			     u8 *front_pad, u8 *middle_pad, u8 *data_pad,
974 			     void *epilogue, bool add_tag)
975 {
976 	struct ceph_msg_data_cursor cursor;
977 	struct scatterlist *cur_sg;
978 	int sg_cnt;
979 	int ret;
980 
981 	if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
982 		return 0;
983 
984 	sg_cnt = 1;  /* epilogue + [auth tag] */
985 	if (front_len(msg))
986 		sg_cnt += calc_sg_cnt(msg->front.iov_base,
987 				      front_len(msg));
988 	if (middle_len(msg))
989 		sg_cnt += calc_sg_cnt(msg->middle->vec.iov_base,
990 				      middle_len(msg));
991 	if (data_len(msg)) {
992 		ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
993 		sg_cnt += calc_sg_cnt_cursor(&cursor);
994 	}
995 
996 	ret = sg_alloc_table(sgt, sg_cnt, GFP_NOIO);
997 	if (ret)
998 		return ret;
999 
1000 	cur_sg = sgt->sgl;
1001 	if (front_len(msg))
1002 		init_sgs(&cur_sg, msg->front.iov_base, front_len(msg),
1003 			 front_pad);
1004 	if (middle_len(msg))
1005 		init_sgs(&cur_sg, msg->middle->vec.iov_base, middle_len(msg),
1006 			 middle_pad);
1007 	if (data_len(msg)) {
1008 		ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
1009 		init_sgs_cursor(&cur_sg, &cursor, data_pad);
1010 	}
1011 
1012 	WARN_ON(!sg_is_last(cur_sg));
1013 	sg_set_buf(cur_sg, epilogue,
1014 		   CEPH_GCM_BLOCK_LEN + (add_tag ? CEPH_GCM_TAG_LEN : 0));
1015 	return 0;
1016 }
1017 
1018 static int decrypt_preamble(struct ceph_connection *con)
1019 {
1020 	struct scatterlist sg;
1021 
1022 	sg_init_one(&sg, con->v2.in_buf, CEPH_PREAMBLE_SECURE_LEN);
1023 	return gcm_crypt(con, false, &sg, &sg, CEPH_PREAMBLE_SECURE_LEN);
1024 }
1025 
1026 static int decrypt_control_remainder(struct ceph_connection *con)
1027 {
1028 	int ctrl_len = con->v2.in_desc.fd_lens[0];
1029 	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1030 	int pt_len = padding_len(rem_len) + CEPH_GCM_TAG_LEN;
1031 	struct scatterlist sgs[2];
1032 
1033 	WARN_ON(con->v2.in_kvecs[0].iov_len != rem_len);
1034 	WARN_ON(con->v2.in_kvecs[1].iov_len != pt_len);
1035 
1036 	sg_init_table(sgs, 2);
1037 	sg_set_buf(&sgs[0], con->v2.in_kvecs[0].iov_base, rem_len);
1038 	sg_set_buf(&sgs[1], con->v2.in_buf, pt_len);
1039 
1040 	return gcm_crypt(con, false, sgs, sgs,
1041 			 padded_len(rem_len) + CEPH_GCM_TAG_LEN);
1042 }
1043 
1044 static int decrypt_tail(struct ceph_connection *con)
1045 {
1046 	struct sg_table enc_sgt = {};
1047 	struct sg_table sgt = {};
1048 	int tail_len;
1049 	int ret;
1050 
1051 	tail_len = tail_onwire_len(con->in_msg, true);
1052 	ret = sg_alloc_table_from_pages(&enc_sgt, con->v2.in_enc_pages,
1053 					con->v2.in_enc_page_cnt, 0, tail_len,
1054 					GFP_NOIO);
1055 	if (ret)
1056 		goto out;
1057 
1058 	ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
1059 			MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
1060 			con->v2.in_buf, true);
1061 	if (ret)
1062 		goto out;
1063 
1064 	dout("%s con %p msg %p enc_page_cnt %d sg_cnt %d\n", __func__, con,
1065 	     con->in_msg, con->v2.in_enc_page_cnt, sgt.orig_nents);
1066 	ret = gcm_crypt(con, false, enc_sgt.sgl, sgt.sgl, tail_len);
1067 	if (ret)
1068 		goto out;
1069 
1070 	WARN_ON(!con->v2.in_enc_page_cnt);
1071 	ceph_release_page_vector(con->v2.in_enc_pages,
1072 				 con->v2.in_enc_page_cnt);
1073 	con->v2.in_enc_pages = NULL;
1074 	con->v2.in_enc_page_cnt = 0;
1075 
1076 out:
1077 	sg_free_table(&sgt);
1078 	sg_free_table(&enc_sgt);
1079 	return ret;
1080 }
1081 
1082 static int prepare_banner(struct ceph_connection *con)
1083 {
1084 	int buf_len = CEPH_BANNER_V2_LEN + 2 + 8 + 8;
1085 	void *buf, *p;
1086 
1087 	buf = alloc_conn_buf(con, buf_len);
1088 	if (!buf)
1089 		return -ENOMEM;
1090 
1091 	p = buf;
1092 	ceph_encode_copy(&p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN);
1093 	ceph_encode_16(&p, sizeof(u64) + sizeof(u64));
1094 	ceph_encode_64(&p, CEPH_MSGR2_SUPPORTED_FEATURES);
1095 	ceph_encode_64(&p, CEPH_MSGR2_REQUIRED_FEATURES);
1096 	WARN_ON(p != buf + buf_len);
1097 
1098 	add_out_kvec(con, buf, buf_len);
1099 	add_out_sign_kvec(con, buf, buf_len);
1100 	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1101 	return 0;
1102 }
1103 
1104 /*
1105  * base:
1106  *   preamble
1107  *   control body (ctrl_len bytes)
1108  *   space for control crc
1109  *
1110  * extdata (optional):
1111  *   control body (extdata_len bytes)
1112  *
1113  * Compute control crc and gather base and extdata into:
1114  *
1115  *   preamble
1116  *   control body (ctrl_len + extdata_len bytes)
1117  *   control crc
1118  *
1119  * Preamble should already be encoded at the start of base.
1120  */
1121 static void prepare_head_plain(struct ceph_connection *con, void *base,
1122 			       int ctrl_len, void *extdata, int extdata_len,
1123 			       bool to_be_signed)
1124 {
1125 	int base_len = CEPH_PREAMBLE_LEN + ctrl_len + CEPH_CRC_LEN;
1126 	void *crcp = base + base_len - CEPH_CRC_LEN;
1127 	u32 crc;
1128 
1129 	crc = crc32c(-1, CTRL_BODY(base), ctrl_len);
1130 	if (extdata_len)
1131 		crc = crc32c(crc, extdata, extdata_len);
1132 	put_unaligned_le32(crc, crcp);
1133 
1134 	if (!extdata_len) {
1135 		add_out_kvec(con, base, base_len);
1136 		if (to_be_signed)
1137 			add_out_sign_kvec(con, base, base_len);
1138 		return;
1139 	}
1140 
1141 	add_out_kvec(con, base, crcp - base);
1142 	add_out_kvec(con, extdata, extdata_len);
1143 	add_out_kvec(con, crcp, CEPH_CRC_LEN);
1144 	if (to_be_signed) {
1145 		add_out_sign_kvec(con, base, crcp - base);
1146 		add_out_sign_kvec(con, extdata, extdata_len);
1147 		add_out_sign_kvec(con, crcp, CEPH_CRC_LEN);
1148 	}
1149 }
1150 
1151 static int prepare_head_secure_small(struct ceph_connection *con,
1152 				     void *base, int ctrl_len)
1153 {
1154 	struct scatterlist sg;
1155 	int ret;
1156 
1157 	/* inline buffer padding? */
1158 	if (ctrl_len < CEPH_PREAMBLE_INLINE_LEN)
1159 		memset(CTRL_BODY(base) + ctrl_len, 0,
1160 		       CEPH_PREAMBLE_INLINE_LEN - ctrl_len);
1161 
1162 	sg_init_one(&sg, base, CEPH_PREAMBLE_SECURE_LEN);
1163 	ret = gcm_crypt(con, true, &sg, &sg,
1164 			CEPH_PREAMBLE_SECURE_LEN - CEPH_GCM_TAG_LEN);
1165 	if (ret)
1166 		return ret;
1167 
1168 	add_out_kvec(con, base, CEPH_PREAMBLE_SECURE_LEN);
1169 	return 0;
1170 }
1171 
1172 /*
1173  * base:
1174  *   preamble
1175  *   control body (ctrl_len bytes)
1176  *   space for padding, if needed
1177  *   space for control remainder auth tag
1178  *   space for preamble auth tag
1179  *
1180  * Encrypt preamble and the inline portion, then encrypt the remainder
1181  * and gather into:
1182  *
1183  *   preamble
1184  *   control body (48 bytes)
1185  *   preamble auth tag
1186  *   control body (ctrl_len - 48 bytes)
1187  *   zero padding, if needed
1188  *   control remainder auth tag
1189  *
1190  * Preamble should already be encoded at the start of base.
1191  */
1192 static int prepare_head_secure_big(struct ceph_connection *con,
1193 				   void *base, int ctrl_len)
1194 {
1195 	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1196 	void *rem = CTRL_BODY(base) + CEPH_PREAMBLE_INLINE_LEN;
1197 	void *rem_tag = rem + padded_len(rem_len);
1198 	void *pmbl_tag = rem_tag + CEPH_GCM_TAG_LEN;
1199 	struct scatterlist sgs[2];
1200 	int ret;
1201 
1202 	sg_init_table(sgs, 2);
1203 	sg_set_buf(&sgs[0], base, rem - base);
1204 	sg_set_buf(&sgs[1], pmbl_tag, CEPH_GCM_TAG_LEN);
1205 	ret = gcm_crypt(con, true, sgs, sgs, rem - base);
1206 	if (ret)
1207 		return ret;
1208 
1209 	/* control remainder padding? */
1210 	if (need_padding(rem_len))
1211 		memset(rem + rem_len, 0, padding_len(rem_len));
1212 
1213 	sg_init_one(&sgs[0], rem, pmbl_tag - rem);
1214 	ret = gcm_crypt(con, true, sgs, sgs, rem_tag - rem);
1215 	if (ret)
1216 		return ret;
1217 
1218 	add_out_kvec(con, base, rem - base);
1219 	add_out_kvec(con, pmbl_tag, CEPH_GCM_TAG_LEN);
1220 	add_out_kvec(con, rem, pmbl_tag - rem);
1221 	return 0;
1222 }
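
/*
 * Worked example for ctrl_len == 100 (assuming a 32-byte preamble,
 * 48-byte inline area and 16-byte GCM tag): rem_len == 52 and the
 * out kvecs queued above are
 *
 *	80 bytes   encrypted preamble + inline control body
 *	16 bytes   preamble auth tag
 *	80 bytes   encrypted remainder (padded_len(52) == 64) + its tag
 *
 * for a total of 176 == head_onwire_len(100, true).
 */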
1223 
1224 static int __prepare_control(struct ceph_connection *con, int tag,
1225 			     void *base, int ctrl_len, void *extdata,
1226 			     int extdata_len, bool to_be_signed)
1227 {
1228 	int total_len = ctrl_len + extdata_len;
1229 	struct ceph_frame_desc desc;
1230 	int ret;
1231 
1232 	dout("%s con %p tag %d len %d (%d+%d)\n", __func__, con, tag,
1233 	     total_len, ctrl_len, extdata_len);
1234 
1235 	/* extdata may be vmalloc'ed but not base */
1236 	if (WARN_ON(is_vmalloc_addr(base) || !ctrl_len))
1237 		return -EINVAL;
1238 
1239 	init_frame_desc(&desc, tag, &total_len, 1);
1240 	encode_preamble(&desc, base);
1241 
1242 	if (con_secure(con)) {
1243 		if (WARN_ON(extdata_len || to_be_signed))
1244 			return -EINVAL;
1245 
1246 		if (ctrl_len <= CEPH_PREAMBLE_INLINE_LEN)
1247 			/* fully inlined, inline buffer may need padding */
1248 			ret = prepare_head_secure_small(con, base, ctrl_len);
1249 		else
1250 			/* partially inlined, inline buffer is full */
1251 			ret = prepare_head_secure_big(con, base, ctrl_len);
1252 		if (ret)
1253 			return ret;
1254 	} else {
1255 		prepare_head_plain(con, base, ctrl_len, extdata, extdata_len,
1256 				   to_be_signed);
1257 	}
1258 
1259 	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1260 	return 0;
1261 }
1262 
1263 static int prepare_control(struct ceph_connection *con, int tag,
1264 			   void *base, int ctrl_len)
1265 {
1266 	return __prepare_control(con, tag, base, ctrl_len, NULL, 0, false);
1267 }
1268 
1269 static int prepare_hello(struct ceph_connection *con)
1270 {
1271 	void *buf, *p;
1272 	int ctrl_len;
1273 
1274 	ctrl_len = 1 + ceph_entity_addr_encoding_len(&con->peer_addr);
1275 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1276 	if (!buf)
1277 		return -ENOMEM;
1278 
1279 	p = CTRL_BODY(buf);
1280 	ceph_encode_8(&p, CEPH_ENTITY_TYPE_CLIENT);
1281 	ceph_encode_entity_addr(&p, &con->peer_addr);
1282 	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1283 
1284 	return __prepare_control(con, FRAME_TAG_HELLO, buf, ctrl_len,
1285 				 NULL, 0, true);
1286 }
1287 
1288 /* so that head_onwire_len(AUTH_BUF_LEN, false) is 512 */
1289 #define AUTH_BUF_LEN	(512 - CEPH_CRC_LEN - CEPH_PREAMBLE_PLAIN_LEN)
1290 
1291 static int prepare_auth_request(struct ceph_connection *con)
1292 {
1293 	void *authorizer, *authorizer_copy;
1294 	int ctrl_len, authorizer_len;
1295 	void *buf;
1296 	int ret;
1297 
1298 	ctrl_len = AUTH_BUF_LEN;
1299 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1300 	if (!buf)
1301 		return -ENOMEM;
1302 
1303 	mutex_unlock(&con->mutex);
1304 	ret = con->ops->get_auth_request(con, CTRL_BODY(buf), &ctrl_len,
1305 					 &authorizer, &authorizer_len);
1306 	mutex_lock(&con->mutex);
1307 	if (con->state != CEPH_CON_S_V2_HELLO) {
1308 		dout("%s con %p state changed to %d\n", __func__, con,
1309 		     con->state);
1310 		return -EAGAIN;
1311 	}
1312 
1313 	dout("%s con %p get_auth_request ret %d\n", __func__, con, ret);
1314 	if (ret)
1315 		return ret;
1316 
1317 	authorizer_copy = alloc_conn_buf(con, authorizer_len);
1318 	if (!authorizer_copy)
1319 		return -ENOMEM;
1320 
1321 	memcpy(authorizer_copy, authorizer, authorizer_len);
1322 
1323 	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST, buf, ctrl_len,
1324 				 authorizer_copy, authorizer_len, true);
1325 }
1326 
1327 static int prepare_auth_request_more(struct ceph_connection *con,
1328 				     void *reply, int reply_len)
1329 {
1330 	int ctrl_len, authorizer_len;
1331 	void *authorizer;
1332 	void *buf;
1333 	int ret;
1334 
1335 	ctrl_len = AUTH_BUF_LEN;
1336 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1337 	if (!buf)
1338 		return -ENOMEM;
1339 
1340 	mutex_unlock(&con->mutex);
1341 	ret = con->ops->handle_auth_reply_more(con, reply, reply_len,
1342 					       CTRL_BODY(buf), &ctrl_len,
1343 					       &authorizer, &authorizer_len);
1344 	mutex_lock(&con->mutex);
1345 	if (con->state != CEPH_CON_S_V2_AUTH) {
1346 		dout("%s con %p state changed to %d\n", __func__, con,
1347 		     con->state);
1348 		return -EAGAIN;
1349 	}
1350 
1351 	dout("%s con %p handle_auth_reply_more ret %d\n", __func__, con, ret);
1352 	if (ret)
1353 		return ret;
1354 
1355 	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST_MORE, buf,
1356 				 ctrl_len, authorizer, authorizer_len, true);
1357 }
1358 
1359 static int prepare_auth_signature(struct ceph_connection *con)
1360 {
1361 	void *buf;
1362 	int ret;
1363 
1364 	buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
1365 						  con_secure(con)));
1366 	if (!buf)
1367 		return -ENOMEM;
1368 
1369 	ret = hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
1370 			  CTRL_BODY(buf));
1371 	if (ret)
1372 		return ret;
1373 
1374 	return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
1375 			       SHA256_DIGEST_SIZE);
1376 }
1377 
1378 static int prepare_client_ident(struct ceph_connection *con)
1379 {
1380 	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
1381 	struct ceph_client *client = from_msgr(con->msgr);
1382 	u64 global_id = ceph_client_gid(client);
1383 	void *buf, *p;
1384 	int ctrl_len;
1385 
1386 	WARN_ON(con->v2.server_cookie);
1387 	WARN_ON(con->v2.connect_seq);
1388 	WARN_ON(con->v2.peer_global_seq);
1389 
1390 	if (!con->v2.client_cookie) {
1391 		do {
1392 			get_random_bytes(&con->v2.client_cookie,
1393 					 sizeof(con->v2.client_cookie));
1394 		} while (!con->v2.client_cookie);
1395 		dout("%s con %p generated cookie 0x%llx\n", __func__, con,
1396 		     con->v2.client_cookie);
1397 	} else {
1398 		dout("%s con %p cookie already set 0x%llx\n", __func__, con,
1399 		     con->v2.client_cookie);
1400 	}
1401 
1402 	dout("%s con %p my_addr %s/%u peer_addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx cookie 0x%llx\n",
1403 	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
1404 	     ceph_pr_addr(&con->peer_addr), le32_to_cpu(con->peer_addr.nonce),
1405 	     global_id, con->v2.global_seq, client->supported_features,
1406 	     client->required_features, con->v2.client_cookie);
1407 
1408 	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) +
1409 		   ceph_entity_addr_encoding_len(&con->peer_addr) + 6 * 8;
1410 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
1411 	if (!buf)
1412 		return -ENOMEM;
1413 
1414 	p = CTRL_BODY(buf);
1415 	ceph_encode_8(&p, 2);  /* addrvec marker */
1416 	ceph_encode_32(&p, 1);  /* addr_cnt */
1417 	ceph_encode_entity_addr(&p, my_addr);
1418 	ceph_encode_entity_addr(&p, &con->peer_addr);
1419 	ceph_encode_64(&p, global_id);
1420 	ceph_encode_64(&p, con->v2.global_seq);
1421 	ceph_encode_64(&p, client->supported_features);
1422 	ceph_encode_64(&p, client->required_features);
1423 	ceph_encode_64(&p, 0);  /* flags */
1424 	ceph_encode_64(&p, con->v2.client_cookie);
1425 	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1426 
1427 	return prepare_control(con, FRAME_TAG_CLIENT_IDENT, buf, ctrl_len);
1428 }
1429 
1430 static int prepare_session_reconnect(struct ceph_connection *con)
1431 {
1432 	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
1433 	void *buf, *p;
1434 	int ctrl_len;
1435 
1436 	WARN_ON(!con->v2.client_cookie);
1437 	WARN_ON(!con->v2.server_cookie);
1438 	WARN_ON(!con->v2.connect_seq);
1439 	WARN_ON(!con->v2.peer_global_seq);
1440 
1441 	dout("%s con %p my_addr %s/%u client_cookie 0x%llx server_cookie 0x%llx global_seq %llu connect_seq %llu in_seq %llu\n",
1442 	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
1443 	     con->v2.client_cookie, con->v2.server_cookie, con->v2.global_seq,
1444 	     con->v2.connect_seq, con->in_seq);
1445 
1446 	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) + 5 * 8;
1447 	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
1448 	if (!buf)
1449 		return -ENOMEM;
1450 
1451 	p = CTRL_BODY(buf);
1452 	ceph_encode_8(&p, 2);  /* entity_addrvec_t marker */
1453 	ceph_encode_32(&p, 1);  /* my_addrs len */
1454 	ceph_encode_entity_addr(&p, my_addr);
1455 	ceph_encode_64(&p, con->v2.client_cookie);
1456 	ceph_encode_64(&p, con->v2.server_cookie);
1457 	ceph_encode_64(&p, con->v2.global_seq);
1458 	ceph_encode_64(&p, con->v2.connect_seq);
1459 	ceph_encode_64(&p, con->in_seq);
1460 	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1461 
1462 	return prepare_control(con, FRAME_TAG_SESSION_RECONNECT, buf, ctrl_len);
1463 }
1464 
1465 static int prepare_keepalive2(struct ceph_connection *con)
1466 {
1467 	struct ceph_timespec *ts = CTRL_BODY(con->v2.out_buf);
1468 	struct timespec64 now;
1469 
1470 	ktime_get_real_ts64(&now);
1471 	dout("%s con %p timestamp %lld.%09ld\n", __func__, con, now.tv_sec,
1472 	     now.tv_nsec);
1473 
1474 	ceph_encode_timespec64(ts, &now);
1475 
1476 	reset_out_kvecs(con);
1477 	return prepare_control(con, FRAME_TAG_KEEPALIVE2, con->v2.out_buf,
1478 			       sizeof(struct ceph_timespec));
1479 }
1480 
1481 static int prepare_ack(struct ceph_connection *con)
1482 {
1483 	void *p;
1484 
1485 	dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
1486 	     con->in_seq_acked, con->in_seq);
1487 	con->in_seq_acked = con->in_seq;
1488 
1489 	p = CTRL_BODY(con->v2.out_buf);
1490 	ceph_encode_64(&p, con->in_seq_acked);
1491 
1492 	reset_out_kvecs(con);
1493 	return prepare_control(con, FRAME_TAG_ACK, con->v2.out_buf, 8);
1494 }
1495 
1496 static void prepare_epilogue_plain(struct ceph_connection *con, bool aborted)
1497 {
1498 	dout("%s con %p msg %p aborted %d crcs %u %u %u\n", __func__, con,
1499 	     con->out_msg, aborted, con->v2.out_epil.front_crc,
1500 	     con->v2.out_epil.middle_crc, con->v2.out_epil.data_crc);
1501 
1502 	encode_epilogue_plain(con, aborted);
1503 	add_out_kvec(con, &con->v2.out_epil, CEPH_EPILOGUE_PLAIN_LEN);
1504 }
1505 
1506 /*
1507  * For "used" empty segments, crc is -1.  For unused (trailing)
1508  * segments, crc is 0.
1509  */
1510 static void prepare_message_plain(struct ceph_connection *con)
1511 {
1512 	struct ceph_msg *msg = con->out_msg;
1513 
1514 	prepare_head_plain(con, con->v2.out_buf,
1515 			   sizeof(struct ceph_msg_header2), NULL, 0, false);
1516 
1517 	if (!front_len(msg) && !middle_len(msg)) {
1518 		if (!data_len(msg)) {
1519 			/*
1520 			 * Empty message: once the head is written,
1521 			 * we are done -- there is no epilogue.
1522 			 */
1523 			con->v2.out_state = OUT_S_FINISH_MESSAGE;
1524 			return;
1525 		}
1526 
1527 		con->v2.out_epil.front_crc = -1;
1528 		con->v2.out_epil.middle_crc = -1;
1529 		con->v2.out_state = OUT_S_QUEUE_DATA;
1530 		return;
1531 	}
1532 
1533 	if (front_len(msg)) {
1534 		con->v2.out_epil.front_crc = crc32c(-1, msg->front.iov_base,
1535 						    front_len(msg));
1536 		add_out_kvec(con, msg->front.iov_base, front_len(msg));
1537 	} else {
1538 		/* middle (at least) is there, checked above */
1539 		con->v2.out_epil.front_crc = -1;
1540 	}
1541 
1542 	if (middle_len(msg)) {
1543 		con->v2.out_epil.middle_crc =
1544 			crc32c(-1, msg->middle->vec.iov_base, middle_len(msg));
1545 		add_out_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
1546 	} else {
1547 		con->v2.out_epil.middle_crc = data_len(msg) ? -1 : 0;
1548 	}
1549 
1550 	if (data_len(msg)) {
1551 		con->v2.out_state = OUT_S_QUEUE_DATA;
1552 	} else {
1553 		con->v2.out_epil.data_crc = 0;
1554 		prepare_epilogue_plain(con, false);
1555 		con->v2.out_state = OUT_S_FINISH_MESSAGE;
1556 	}
1557 }
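
/*
 * For example, a plain message with only data queues front_crc == -1
 * and middle_crc == -1 (used but empty segments) and goes on to
 * OUT_S_QUEUE_DATA, while a message with only front queues the real
 * front_crc, middle_crc == 0, data_crc == 0 (unused trailing segments)
 * and the epilogue right away.
 */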
1558 
1559 /*
1560  * Unfortunately the kernel crypto API doesn't support streaming
1561  * (piecewise) operation for AEAD algorithms, so we can't get away
1562  * with a fixed-size buffer and a couple of sgs.  Instead, we have to
1563  * allocate pages for the entire tail of the message (currently up
1564  * to ~32M) and two sgs arrays (up to ~256K each)...
1565  */
1566 static int prepare_message_secure(struct ceph_connection *con)
1567 {
1568 	void *zerop = page_address(ceph_zero_page);
1569 	struct sg_table enc_sgt = {};
1570 	struct sg_table sgt = {};
1571 	struct page **enc_pages;
1572 	int enc_page_cnt;
1573 	int tail_len;
1574 	int ret;
1575 
1576 	ret = prepare_head_secure_small(con, con->v2.out_buf,
1577 					sizeof(struct ceph_msg_header2));
1578 	if (ret)
1579 		return ret;
1580 
1581 	tail_len = tail_onwire_len(con->out_msg, true);
1582 	if (!tail_len) {
1583 		/*
1584 		 * Empty message: once the head is written,
1585 		 * we are done -- there is no epilogue.
1586 		 */
1587 		con->v2.out_state = OUT_S_FINISH_MESSAGE;
1588 		return 0;
1589 	}
1590 
1591 	encode_epilogue_secure(con, false);
1592 	ret = setup_message_sgs(&sgt, con->out_msg, zerop, zerop, zerop,
1593 				&con->v2.out_epil, false);
1594 	if (ret)
1595 		goto out;
1596 
1597 	enc_page_cnt = calc_pages_for(0, tail_len);
1598 	enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
1599 	if (IS_ERR(enc_pages)) {
1600 		ret = PTR_ERR(enc_pages);
1601 		goto out;
1602 	}
1603 
1604 	WARN_ON(con->v2.out_enc_pages || con->v2.out_enc_page_cnt);
1605 	con->v2.out_enc_pages = enc_pages;
1606 	con->v2.out_enc_page_cnt = enc_page_cnt;
1607 	con->v2.out_enc_resid = tail_len;
1608 	con->v2.out_enc_i = 0;
1609 
1610 	ret = sg_alloc_table_from_pages(&enc_sgt, enc_pages, enc_page_cnt,
1611 					0, tail_len, GFP_NOIO);
1612 	if (ret)
1613 		goto out;
1614 
1615 	ret = gcm_crypt(con, true, sgt.sgl, enc_sgt.sgl,
1616 			tail_len - CEPH_GCM_TAG_LEN);
1617 	if (ret)
1618 		goto out;
1619 
1620 	dout("%s con %p msg %p sg_cnt %d enc_page_cnt %d\n", __func__, con,
1621 	     con->out_msg, sgt.orig_nents, enc_page_cnt);
1622 	con->v2.out_state = OUT_S_QUEUE_ENC_PAGE;
1623 
1624 out:
1625 	sg_free_table(&sgt);
1626 	sg_free_table(&enc_sgt);
1627 	return ret;
1628 }
1629 
1630 static int prepare_message(struct ceph_connection *con)
1631 {
1632 	int lens[] = {
1633 		sizeof(struct ceph_msg_header2),
1634 		front_len(con->out_msg),
1635 		middle_len(con->out_msg),
1636 		data_len(con->out_msg)
1637 	};
1638 	struct ceph_frame_desc desc;
1639 	int ret;
1640 
1641 	dout("%s con %p msg %p logical %d+%d+%d+%d\n", __func__, con,
1642 	     con->out_msg, lens[0], lens[1], lens[2], lens[3]);
1643 
1644 	if (con->in_seq > con->in_seq_acked) {
1645 		dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
1646 		     con->in_seq_acked, con->in_seq);
1647 		con->in_seq_acked = con->in_seq;
1648 	}
1649 
1650 	reset_out_kvecs(con);
1651 	init_frame_desc(&desc, FRAME_TAG_MESSAGE, lens, 4);
1652 	encode_preamble(&desc, con->v2.out_buf);
1653 	fill_header2(CTRL_BODY(con->v2.out_buf), &con->out_msg->hdr,
1654 		     con->in_seq_acked);
1655 
1656 	if (con_secure(con)) {
1657 		ret = prepare_message_secure(con);
1658 		if (ret)
1659 			return ret;
1660 	} else {
1661 		prepare_message_plain(con);
1662 	}
1663 
1664 	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1665 	return 0;
1666 }
1667 
1668 static int prepare_read_banner_prefix(struct ceph_connection *con)
1669 {
1670 	void *buf;
1671 
1672 	buf = alloc_conn_buf(con, CEPH_BANNER_V2_PREFIX_LEN);
1673 	if (!buf)
1674 		return -ENOMEM;
1675 
1676 	reset_in_kvecs(con);
1677 	add_in_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
1678 	add_in_sign_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
1679 	con->state = CEPH_CON_S_V2_BANNER_PREFIX;
1680 	return 0;
1681 }
1682 
1683 static int prepare_read_banner_payload(struct ceph_connection *con,
1684 				       int payload_len)
1685 {
1686 	void *buf;
1687 
1688 	buf = alloc_conn_buf(con, payload_len);
1689 	if (!buf)
1690 		return -ENOMEM;
1691 
1692 	reset_in_kvecs(con);
1693 	add_in_kvec(con, buf, payload_len);
1694 	add_in_sign_kvec(con, buf, payload_len);
1695 	con->state = CEPH_CON_S_V2_BANNER_PAYLOAD;
1696 	return 0;
1697 }
1698 
1699 static void prepare_read_preamble(struct ceph_connection *con)
1700 {
1701 	reset_in_kvecs(con);
1702 	add_in_kvec(con, con->v2.in_buf,
1703 		    con_secure(con) ? CEPH_PREAMBLE_SECURE_LEN :
1704 				      CEPH_PREAMBLE_PLAIN_LEN);
1705 	con->v2.in_state = IN_S_HANDLE_PREAMBLE;
1706 }
1707 
1708 static int prepare_read_control(struct ceph_connection *con)
1709 {
1710 	int ctrl_len = con->v2.in_desc.fd_lens[0];
1711 	int head_len;
1712 	void *buf;
1713 
1714 	reset_in_kvecs(con);
1715 	if (con->state == CEPH_CON_S_V2_HELLO ||
1716 	    con->state == CEPH_CON_S_V2_AUTH) {
1717 		head_len = head_onwire_len(ctrl_len, false);
1718 		buf = alloc_conn_buf(con, head_len);
1719 		if (!buf)
1720 			return -ENOMEM;
1721 
1722 		/* preserve preamble */
1723 		memcpy(buf, con->v2.in_buf, CEPH_PREAMBLE_LEN);
1724 
1725 		add_in_kvec(con, CTRL_BODY(buf), ctrl_len);
1726 		add_in_kvec(con, CTRL_BODY(buf) + ctrl_len, CEPH_CRC_LEN);
1727 		add_in_sign_kvec(con, buf, head_len);
1728 	} else {
1729 		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
1730 			buf = alloc_conn_buf(con, ctrl_len);
1731 			if (!buf)
1732 				return -ENOMEM;
1733 
1734 			add_in_kvec(con, buf, ctrl_len);
1735 		} else {
1736 			add_in_kvec(con, CTRL_BODY(con->v2.in_buf), ctrl_len);
1737 		}
1738 		add_in_kvec(con, con->v2.in_buf, CEPH_CRC_LEN);
1739 	}
1740 	con->v2.in_state = IN_S_HANDLE_CONTROL;
1741 	return 0;
1742 }
1743 
1744 static int prepare_read_control_remainder(struct ceph_connection *con)
1745 {
1746 	int ctrl_len = con->v2.in_desc.fd_lens[0];
1747 	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1748 	void *buf;
1749 
1750 	buf = alloc_conn_buf(con, ctrl_len);
1751 	if (!buf)
1752 		return -ENOMEM;
1753 
1754 	memcpy(buf, CTRL_BODY(con->v2.in_buf), CEPH_PREAMBLE_INLINE_LEN);
1755 
1756 	reset_in_kvecs(con);
1757 	add_in_kvec(con, buf + CEPH_PREAMBLE_INLINE_LEN, rem_len);
1758 	add_in_kvec(con, con->v2.in_buf,
1759 		    padding_len(rem_len) + CEPH_GCM_TAG_LEN);
1760 	con->v2.in_state = IN_S_HANDLE_CONTROL_REMAINDER;
1761 	return 0;
1762 }
1763 
1764 static int prepare_read_data(struct ceph_connection *con)
1765 {
1766 	struct bio_vec bv;
1767 
1768 	con->in_data_crc = -1;
1769 	ceph_msg_data_cursor_init(&con->v2.in_cursor, con->in_msg,
1770 				  data_len(con->in_msg));
1771 
1772 	get_bvec_at(&con->v2.in_cursor, &bv);
1773 	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1774 		if (unlikely(!con->bounce_page)) {
1775 			con->bounce_page = alloc_page(GFP_NOIO);
1776 			if (!con->bounce_page) {
1777 				pr_err("failed to allocate bounce page\n");
1778 				return -ENOMEM;
1779 			}
1780 		}
1781 
1782 		bv.bv_page = con->bounce_page;
1783 		bv.bv_offset = 0;
1784 	}
1785 	set_in_bvec(con, &bv);
1786 	con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT;
1787 	return 0;
1788 }
1789 
1790 static void prepare_read_data_cont(struct ceph_connection *con)
1791 {
1792 	struct bio_vec bv;
1793 
1794 	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1795 		con->in_data_crc = crc32c(con->in_data_crc,
1796 					  page_address(con->bounce_page),
1797 					  con->v2.in_bvec.bv_len);
1798 
1799 		get_bvec_at(&con->v2.in_cursor, &bv);
1800 		memcpy_to_page(bv.bv_page, bv.bv_offset,
1801 			       page_address(con->bounce_page),
1802 			       con->v2.in_bvec.bv_len);
1803 	} else {
1804 		con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
1805 						    con->v2.in_bvec.bv_page,
1806 						    con->v2.in_bvec.bv_offset,
1807 						    con->v2.in_bvec.bv_len);
1808 	}
1809 
1810 	ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len);
1811 	if (con->v2.in_cursor.total_resid) {
1812 		get_bvec_at(&con->v2.in_cursor, &bv);
1813 		if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1814 			bv.bv_page = con->bounce_page;
1815 			bv.bv_offset = 0;
1816 		}
1817 		set_in_bvec(con, &bv);
1818 		WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT);
1819 		return;
1820 	}
1821 
1822 	/*
1823 	 * We've read all data.  Prepare to read epilogue.
1824 	 */
1825 	reset_in_kvecs(con);
1826 	add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
1827 	con->v2.in_state = IN_S_HANDLE_EPILOGUE;
1828 }
1829 
1830 static int prepare_sparse_read_cont(struct ceph_connection *con)
1831 {
1832 	int ret;
1833 	struct bio_vec bv;
1834 	char *buf = NULL;
1835 	struct ceph_msg_data_cursor *cursor = &con->v2.in_cursor;
1836 
1837 	WARN_ON(con->v2.in_state != IN_S_PREPARE_SPARSE_DATA_CONT);
1838 
1839 	if (iov_iter_is_bvec(&con->v2.in_iter)) {
1840 		if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1841 			con->in_data_crc = crc32c(con->in_data_crc,
1842 						  page_address(con->bounce_page),
1843 						  con->v2.in_bvec.bv_len);
1844 			get_bvec_at(cursor, &bv);
1845 			memcpy_to_page(bv.bv_page, bv.bv_offset,
1846 				       page_address(con->bounce_page),
1847 				       con->v2.in_bvec.bv_len);
1848 		} else {
1849 			con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
1850 							    con->v2.in_bvec.bv_page,
1851 							    con->v2.in_bvec.bv_offset,
1852 							    con->v2.in_bvec.bv_len);
1853 		}
1854 
1855 		ceph_msg_data_advance(cursor, con->v2.in_bvec.bv_len);
1856 		cursor->sr_resid -= con->v2.in_bvec.bv_len;
1857 		dout("%s: advance by 0x%x sr_resid 0x%x\n", __func__,
1858 		     con->v2.in_bvec.bv_len, cursor->sr_resid);
1859 		WARN_ON_ONCE(cursor->sr_resid > cursor->total_resid);
1860 		if (cursor->sr_resid) {
1861 			get_bvec_at(cursor, &bv);
1862 			if (bv.bv_len > cursor->sr_resid)
1863 				bv.bv_len = cursor->sr_resid;
1864 			if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1865 				bv.bv_page = con->bounce_page;
1866 				bv.bv_offset = 0;
1867 			}
1868 			set_in_bvec(con, &bv);
1869 			con->v2.data_len_remain -= bv.bv_len;
1870 			return 0;
1871 		}
1872 	} else if (iov_iter_is_kvec(&con->v2.in_iter)) {
1873 		/* On first call, we have no kvec so don't compute crc */
1874 		if (con->v2.in_kvec_cnt) {
1875 			WARN_ON_ONCE(con->v2.in_kvec_cnt > 1);
1876 			con->in_data_crc = crc32c(con->in_data_crc,
1877 						  con->v2.in_kvecs[0].iov_base,
1878 						  con->v2.in_kvecs[0].iov_len);
1879 		}
1880 	} else {
1881 		return -EIO;
1882 	}
1883 
1884 	/* get next extent */
1885 	ret = con->ops->sparse_read(con, cursor, &buf);
1886 	if (ret <= 0) {
1887 		if (ret < 0)
1888 			return ret;
1889 
1890 		reset_in_kvecs(con);
1891 		add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
1892 		con->v2.in_state = IN_S_HANDLE_EPILOGUE;
1893 		return 0;
1894 	}
1895 
1896 	if (buf) {
1897 		/* receive into buffer */
1898 		reset_in_kvecs(con);
1899 		add_in_kvec(con, buf, ret);
1900 		con->v2.data_len_remain -= ret;
1901 		return 0;
1902 	}
1903 
1904 	if (ret > cursor->total_resid) {
1905 		pr_warn("%s: ret 0x%x total_resid 0x%zx resid 0x%zx\n",
1906 			__func__, ret, cursor->total_resid, cursor->resid);
1907 		return -EIO;
1908 	}
1909 	get_bvec_at(cursor, &bv);
1910 	if (bv.bv_len > cursor->sr_resid)
1911 		bv.bv_len = cursor->sr_resid;
1912 	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1913 		if (unlikely(!con->bounce_page)) {
1914 			con->bounce_page = alloc_page(GFP_NOIO);
1915 			if (!con->bounce_page) {
1916 				pr_err("failed to allocate bounce page\n");
1917 				return -ENOMEM;
1918 			}
1919 		}
1920 
1921 		bv.bv_page = con->bounce_page;
1922 		bv.bv_offset = 0;
1923 	}
1924 	set_in_bvec(con, &bv);
1925 	con->v2.data_len_remain -= ret;
1926 	return ret;
1927 }
1928 
1929 static int prepare_sparse_read_data(struct ceph_connection *con)
1930 {
1931 	struct ceph_msg *msg = con->in_msg;
1932 
1933 	dout("%s: starting sparse read\n", __func__);
1934 
1935 	if (WARN_ON_ONCE(!con->ops->sparse_read))
1936 		return -EOPNOTSUPP;
1937 
1938 	if (!con_secure(con))
1939 		con->in_data_crc = -1;
1940 
1941 	reset_in_kvecs(con);
1942 	con->v2.in_state = IN_S_PREPARE_SPARSE_DATA_CONT;
1943 	con->v2.data_len_remain = data_len(msg);
1944 	return prepare_sparse_read_cont(con);
1945 }
1946 
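/*
 * Plain (crc) mode: queue kvecs for the front and middle segments and
 * decide how the rest of the tail is read -- data (regular or sparse)
 * followed by the epilogue, or the epilogue directly if there is no
 * data segment.
 */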
1947 static int prepare_read_tail_plain(struct ceph_connection *con)
1948 {
1949 	struct ceph_msg *msg = con->in_msg;
1950 
1951 	if (!front_len(msg) && !middle_len(msg)) {
1952 		WARN_ON(!data_len(msg));
1953 		return prepare_read_data(con);
1954 	}
1955 
1956 	reset_in_kvecs(con);
1957 	if (front_len(msg)) {
1958 		add_in_kvec(con, msg->front.iov_base, front_len(msg));
1959 		WARN_ON(msg->front.iov_len != front_len(msg));
1960 	}
1961 	if (middle_len(msg)) {
1962 		add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
1963 		WARN_ON(msg->middle->vec.iov_len != middle_len(msg));
1964 	}
1965 
1966 	if (data_len(msg)) {
1967 		if (msg->sparse_read)
1968 			con->v2.in_state = IN_S_PREPARE_SPARSE_DATA;
1969 		else
1970 			con->v2.in_state = IN_S_PREPARE_READ_DATA;
1971 	} else {
1972 		add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
1973 		con->v2.in_state = IN_S_HANDLE_EPILOGUE;
1974 	}
1975 	return 0;
1976 }
1977 
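/*
 * Secure mode: queue the next page of ciphertext.  Once the last page
 * (which also carries the epilogue and auth tag) has been queued,
 * switch to IN_S_HANDLE_EPILOGUE.
 */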
1978 static void prepare_read_enc_page(struct ceph_connection *con)
1979 {
1980 	struct bio_vec bv;
1981 
1982 	dout("%s con %p i %d resid %d\n", __func__, con, con->v2.in_enc_i,
1983 	     con->v2.in_enc_resid);
1984 	WARN_ON(!con->v2.in_enc_resid);
1985 
1986 	bvec_set_page(&bv, con->v2.in_enc_pages[con->v2.in_enc_i],
1987 		      min(con->v2.in_enc_resid, (int)PAGE_SIZE), 0);
1988 
1989 	set_in_bvec(con, &bv);
1990 	con->v2.in_enc_i++;
1991 	con->v2.in_enc_resid -= bv.bv_len;
1992 
1993 	if (con->v2.in_enc_resid) {
1994 		con->v2.in_state = IN_S_PREPARE_READ_ENC_PAGE;
1995 		return;
1996 	}
1997 
1998 	/*
1999 	 * We are set to read the last piece of ciphertext (ending
2000 	 * with epilogue) + auth tag.
2001 	 */
2002 	WARN_ON(con->v2.in_enc_i != con->v2.in_enc_page_cnt);
2003 	con->v2.in_state = IN_S_HANDLE_EPILOGUE;
2004 }
2005 
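/*
 * Secure mode: allocate a page vector big enough for the entire
 * encrypted tail and start reading it in page by page.
 */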
2006 static int prepare_read_tail_secure(struct ceph_connection *con)
2007 {
2008 	struct page **enc_pages;
2009 	int enc_page_cnt;
2010 	int tail_len;
2011 
2012 	tail_len = tail_onwire_len(con->in_msg, true);
2013 	WARN_ON(!tail_len);
2014 
2015 	enc_page_cnt = calc_pages_for(0, tail_len);
2016 	enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
2017 	if (IS_ERR(enc_pages))
2018 		return PTR_ERR(enc_pages);
2019 
2020 	WARN_ON(con->v2.in_enc_pages || con->v2.in_enc_page_cnt);
2021 	con->v2.in_enc_pages = enc_pages;
2022 	con->v2.in_enc_page_cnt = enc_page_cnt;
2023 	con->v2.in_enc_resid = tail_len;
2024 	con->v2.in_enc_i = 0;
2025 
2026 	prepare_read_enc_page(con);
2027 	return 0;
2028 }
2029 
2030 static void __finish_skip(struct ceph_connection *con)
2031 {
2032 	con->in_seq++;
2033 	prepare_read_preamble(con);
2034 }
2035 
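/*
 * The incoming message is to be skipped (no ceph_msg was set up for
 * it).  Discard its tail, if any, and go back to reading preambles.
 */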
2036 static void prepare_skip_message(struct ceph_connection *con)
2037 {
2038 	struct ceph_frame_desc *desc = &con->v2.in_desc;
2039 	int tail_len;
2040 
2041 	dout("%s con %p %d+%d+%d\n", __func__, con, desc->fd_lens[1],
2042 	     desc->fd_lens[2], desc->fd_lens[3]);
2043 
2044 	tail_len = __tail_onwire_len(desc->fd_lens[1], desc->fd_lens[2],
2045 				     desc->fd_lens[3], con_secure(con));
2046 	if (!tail_len) {
2047 		__finish_skip(con);
2048 	} else {
2049 		set_in_skip(con, tail_len);
2050 		con->v2.in_state = IN_S_FINISH_SKIP;
2051 	}
2052 }
2053 
2054 static int process_banner_prefix(struct ceph_connection *con)
2055 {
2056 	int payload_len;
2057 	void *p;
2058 
2059 	WARN_ON(con->v2.in_kvecs[0].iov_len != CEPH_BANNER_V2_PREFIX_LEN);
2060 
2061 	p = con->v2.in_kvecs[0].iov_base;
2062 	if (memcmp(p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN)) {
2063 		if (!memcmp(p, CEPH_BANNER, CEPH_BANNER_LEN))
2064 			con->error_msg = "server is speaking msgr1 protocol";
2065 		else
2066 			con->error_msg = "protocol error, bad banner";
2067 		return -EINVAL;
2068 	}
2069 
2070 	p += CEPH_BANNER_V2_LEN;
2071 	payload_len = ceph_decode_16(&p);
2072 	dout("%s con %p payload_len %d\n", __func__, con, payload_len);
2073 
2074 	return prepare_read_banner_payload(con, payload_len);
2075 }
2076 
2077 static int process_banner_payload(struct ceph_connection *con)
2078 {
2079 	void *end = con->v2.in_kvecs[0].iov_base + con->v2.in_kvecs[0].iov_len;
2080 	u64 feat = CEPH_MSGR2_SUPPORTED_FEATURES;
2081 	u64 req_feat = CEPH_MSGR2_REQUIRED_FEATURES;
2082 	u64 server_feat, server_req_feat;
2083 	void *p;
2084 	int ret;
2085 
2086 	p = con->v2.in_kvecs[0].iov_base;
2087 	ceph_decode_64_safe(&p, end, server_feat, bad);
2088 	ceph_decode_64_safe(&p, end, server_req_feat, bad);
2089 
2090 	dout("%s con %p server_feat 0x%llx server_req_feat 0x%llx\n",
2091 	     __func__, con, server_feat, server_req_feat);
2092 
2093 	if (req_feat & ~server_feat) {
2094 		pr_err("msgr2 feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
2095 		       server_feat, req_feat & ~server_feat);
2096 		con->error_msg = "missing required protocol features";
2097 		return -EINVAL;
2098 	}
2099 	if (server_req_feat & ~feat) {
2100 		pr_err("msgr2 feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
2101 		       feat, server_req_feat & ~feat);
2102 		con->error_msg = "missing required protocol features";
2103 		return -EINVAL;
2104 	}
2105 
2106 	/* no reset_out_kvecs() as our banner may still be pending */
2107 	ret = prepare_hello(con);
2108 	if (ret) {
2109 		pr_err("prepare_hello failed: %d\n", ret);
2110 		return ret;
2111 	}
2112 
2113 	con->state = CEPH_CON_S_V2_HELLO;
2114 	prepare_read_preamble(con);
2115 	return 0;
2116 
2117 bad:
2118 	pr_err("failed to decode banner payload\n");
2119 	return -EINVAL;
2120 }
2121 
2122 static int process_hello(struct ceph_connection *con, void *p, void *end)
2123 {
2124 	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
2125 	struct ceph_entity_addr addr_for_me;
2126 	u8 entity_type;
2127 	int ret;
2128 
2129 	if (con->state != CEPH_CON_S_V2_HELLO) {
2130 		con->error_msg = "protocol error, unexpected hello";
2131 		return -EINVAL;
2132 	}
2133 
2134 	ceph_decode_8_safe(&p, end, entity_type, bad);
2135 	ret = ceph_decode_entity_addr(&p, end, &addr_for_me);
2136 	if (ret) {
2137 		pr_err("failed to decode addr_for_me: %d\n", ret);
2138 		return ret;
2139 	}
2140 
2141 	dout("%s con %p entity_type %d addr_for_me %s\n", __func__, con,
2142 	     entity_type, ceph_pr_addr(&addr_for_me));
2143 
2144 	if (entity_type != con->peer_name.type) {
2145 		pr_err("bad peer type, want %d, got %d\n",
2146 		       con->peer_name.type, entity_type);
2147 		con->error_msg = "wrong peer at address";
2148 		return -EINVAL;
2149 	}
2150 
2151 	/*
2152 	 * Set our address to the address our first peer (i.e. monitor)
2153 	 * sees us connecting from.  If we are behind some sort of NAT
2154 	 * and want to be identified by a private (not NATed) address,
2155 	 * the ip option should be used.
2156 	 */
2157 	if (ceph_addr_is_blank(my_addr)) {
2158 		memcpy(&my_addr->in_addr, &addr_for_me.in_addr,
2159 		       sizeof(my_addr->in_addr));
2160 		ceph_addr_set_port(my_addr, 0);
2161 		dout("%s con %p set my addr %s, as seen by peer %s\n",
2162 		     __func__, con, ceph_pr_addr(my_addr),
2163 		     ceph_pr_addr(&con->peer_addr));
2164 	} else {
2165 		dout("%s con %p my addr already set %s\n",
2166 		     __func__, con, ceph_pr_addr(my_addr));
2167 	}
2168 
2169 	WARN_ON(ceph_addr_is_blank(my_addr) || ceph_addr_port(my_addr));
2170 	WARN_ON(my_addr->type != CEPH_ENTITY_ADDR_TYPE_ANY);
2171 	WARN_ON(!my_addr->nonce);
2172 
2173 	/* no reset_out_kvecs() as our hello may still be pending */
2174 	ret = prepare_auth_request(con);
2175 	if (ret) {
2176 		if (ret != -EAGAIN)
2177 			pr_err("prepare_auth_request failed: %d\n", ret);
2178 		return ret;
2179 	}
2180 
2181 	con->state = CEPH_CON_S_V2_AUTH;
2182 	return 0;
2183 
2184 bad:
2185 	pr_err("failed to decode hello\n");
2186 	return -EINVAL;
2187 }
2188 
2189 static int process_auth_bad_method(struct ceph_connection *con,
2190 				   void *p, void *end)
2191 {
2192 	int allowed_protos[8], allowed_modes[8];
2193 	int allowed_proto_cnt, allowed_mode_cnt;
2194 	int used_proto, result;
2195 	int ret;
2196 	int i;
2197 
2198 	if (con->state != CEPH_CON_S_V2_AUTH) {
2199 		con->error_msg = "protocol error, unexpected auth_bad_method";
2200 		return -EINVAL;
2201 	}
2202 
2203 	ceph_decode_32_safe(&p, end, used_proto, bad);
2204 	ceph_decode_32_safe(&p, end, result, bad);
2205 	dout("%s con %p used_proto %d result %d\n", __func__, con, used_proto,
2206 	     result);
2207 
2208 	ceph_decode_32_safe(&p, end, allowed_proto_cnt, bad);
2209 	if (allowed_proto_cnt > ARRAY_SIZE(allowed_protos)) {
2210 		pr_err("allowed_protos too big %d\n", allowed_proto_cnt);
2211 		return -EINVAL;
2212 	}
2213 	for (i = 0; i < allowed_proto_cnt; i++) {
2214 		ceph_decode_32_safe(&p, end, allowed_protos[i], bad);
2215 		dout("%s con %p allowed_protos[%d] %d\n", __func__, con,
2216 		     i, allowed_protos[i]);
2217 	}
2218 
2219 	ceph_decode_32_safe(&p, end, allowed_mode_cnt, bad);
2220 	if (allowed_mode_cnt > ARRAY_SIZE(allowed_modes)) {
2221 		pr_err("allowed_modes too big %d\n", allowed_mode_cnt);
2222 		return -EINVAL;
2223 	}
2224 	for (i = 0; i < allowed_mode_cnt; i++) {
2225 		ceph_decode_32_safe(&p, end, allowed_modes[i], bad);
2226 		dout("%s con %p allowed_modes[%d] %d\n", __func__, con,
2227 		     i, allowed_modes[i]);
2228 	}
2229 
2230 	mutex_unlock(&con->mutex);
2231 	ret = con->ops->handle_auth_bad_method(con, used_proto, result,
2232 					       allowed_protos,
2233 					       allowed_proto_cnt,
2234 					       allowed_modes,
2235 					       allowed_mode_cnt);
2236 	mutex_lock(&con->mutex);
2237 	if (con->state != CEPH_CON_S_V2_AUTH) {
2238 		dout("%s con %p state changed to %d\n", __func__, con,
2239 		     con->state);
2240 		return -EAGAIN;
2241 	}
2242 
2243 	dout("%s con %p handle_auth_bad_method ret %d\n", __func__, con, ret);
2244 	return ret;
2245 
2246 bad:
2247 	pr_err("failed to decode auth_bad_method\n");
2248 	return -EINVAL;
2249 }
2250 
2251 static int process_auth_reply_more(struct ceph_connection *con,
2252 				   void *p, void *end)
2253 {
2254 	int payload_len;
2255 	int ret;
2256 
2257 	if (con->state != CEPH_CON_S_V2_AUTH) {
2258 		con->error_msg = "protocol error, unexpected auth_reply_more";
2259 		return -EINVAL;
2260 	}
2261 
2262 	ceph_decode_32_safe(&p, end, payload_len, bad);
2263 	ceph_decode_need(&p, end, payload_len, bad);
2264 
2265 	dout("%s con %p payload_len %d\n", __func__, con, payload_len);
2266 
2267 	reset_out_kvecs(con);
2268 	ret = prepare_auth_request_more(con, p, payload_len);
2269 	if (ret) {
2270 		if (ret != -EAGAIN)
2271 			pr_err("prepare_auth_request_more failed: %d\n", ret);
2272 		return ret;
2273 	}
2274 
2275 	return 0;
2276 
2277 bad:
2278 	pr_err("failed to decode auth_reply_more\n");
2279 	return -EINVAL;
2280 }
2281 
2282 /*
2283  * Align session_key and con_secret to avoid GFP_ATOMIC allocation
2284  * inside crypto_shash_setkey() and crypto_aead_setkey() called from
2285  * setup_crypto().  __aligned(16) isn't guaranteed to work for stack
2286  * objects, so do it by hand.
2287  */
2288 static int process_auth_done(struct ceph_connection *con, void *p, void *end)
2289 {
2290 	u8 session_key_buf[CEPH_KEY_LEN + 16];
2291 	u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16];
2292 	u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16);
2293 	u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16);
2294 	int session_key_len, con_secret_len;
2295 	int payload_len;
2296 	u64 global_id;
2297 	int ret;
2298 
2299 	if (con->state != CEPH_CON_S_V2_AUTH) {
2300 		con->error_msg = "protocol error, unexpected auth_done";
2301 		return -EINVAL;
2302 	}
2303 
2304 	ceph_decode_64_safe(&p, end, global_id, bad);
2305 	ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);
2306 	ceph_decode_32_safe(&p, end, payload_len, bad);
2307 
2308 	dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
2309 	     __func__, con, global_id, con->v2.con_mode, payload_len);
2310 
2311 	mutex_unlock(&con->mutex);
2312 	session_key_len = 0;
2313 	con_secret_len = 0;
2314 	ret = con->ops->handle_auth_done(con, global_id, p, payload_len,
2315 					 session_key, &session_key_len,
2316 					 con_secret, &con_secret_len);
2317 	mutex_lock(&con->mutex);
2318 	if (con->state != CEPH_CON_S_V2_AUTH) {
2319 		dout("%s con %p state changed to %d\n", __func__, con,
2320 		     con->state);
2321 		ret = -EAGAIN;
2322 		goto out;
2323 	}
2324 
2325 	dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
2326 	if (ret)
2327 		goto out;
2328 
2329 	ret = setup_crypto(con, session_key, session_key_len, con_secret,
2330 			   con_secret_len);
2331 	if (ret)
2332 		goto out;
2333 
2334 	reset_out_kvecs(con);
2335 	ret = prepare_auth_signature(con);
2336 	if (ret) {
2337 		pr_err("prepare_auth_signature failed: %d\n", ret);
2338 		goto out;
2339 	}
2340 
2341 	con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;
2342 
2343 out:
2344 	memzero_explicit(session_key_buf, sizeof(session_key_buf));
2345 	memzero_explicit(con_secret_buf, sizeof(con_secret_buf));
2346 	return ret;
2347 
2348 bad:
2349 	pr_err("failed to decode auth_done\n");
2350 	return -EINVAL;
2351 }
2352 
2353 static int process_auth_signature(struct ceph_connection *con,
2354 				  void *p, void *end)
2355 {
2356 	u8 hmac[SHA256_DIGEST_SIZE];
2357 	int ret;
2358 
2359 	if (con->state != CEPH_CON_S_V2_AUTH_SIGNATURE) {
2360 		con->error_msg = "protocol error, unexpected auth_signature";
2361 		return -EINVAL;
2362 	}
2363 
2364 	ret = hmac_sha256(con, con->v2.out_sign_kvecs,
2365 			  con->v2.out_sign_kvec_cnt, hmac);
2366 	if (ret)
2367 		return ret;
2368 
2369 	ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad);
2370 	if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) {
2371 		con->error_msg = "integrity error, bad auth signature";
2372 		return -EBADMSG;
2373 	}
2374 
2375 	dout("%s con %p auth signature ok\n", __func__, con);
2376 
2377 	/* no reset_out_kvecs() as our auth_signature may still be pending */
2378 	if (!con->v2.server_cookie) {
2379 		ret = prepare_client_ident(con);
2380 		if (ret) {
2381 			pr_err("prepare_client_ident failed: %d\n", ret);
2382 			return ret;
2383 		}
2384 
2385 		con->state = CEPH_CON_S_V2_SESSION_CONNECT;
2386 	} else {
2387 		ret = prepare_session_reconnect(con);
2388 		if (ret) {
2389 			pr_err("prepare_session_reconnect failed: %d\n", ret);
2390 			return ret;
2391 		}
2392 
2393 		con->state = CEPH_CON_S_V2_SESSION_RECONNECT;
2394 	}
2395 
2396 	return 0;
2397 
2398 bad:
2399 	pr_err("failed to decode auth_signature\n");
2400 	return -EINVAL;
2401 }
2402 
2403 static int process_server_ident(struct ceph_connection *con,
2404 				void *p, void *end)
2405 {
2406 	struct ceph_client *client = from_msgr(con->msgr);
2407 	u64 features, required_features;
2408 	struct ceph_entity_addr addr;
2409 	u64 global_seq;
2410 	u64 global_id;
2411 	u64 cookie;
2412 	u64 flags;
2413 	int ret;
2414 
2415 	if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2416 		con->error_msg = "protocol error, unexpected server_ident";
2417 		return -EINVAL;
2418 	}
2419 
2420 	ret = ceph_decode_entity_addrvec(&p, end, true, &addr);
2421 	if (ret) {
2422 		pr_err("failed to decode server addrs: %d\n", ret);
2423 		return ret;
2424 	}
2425 
2426 	ceph_decode_64_safe(&p, end, global_id, bad);
2427 	ceph_decode_64_safe(&p, end, global_seq, bad);
2428 	ceph_decode_64_safe(&p, end, features, bad);
2429 	ceph_decode_64_safe(&p, end, required_features, bad);
2430 	ceph_decode_64_safe(&p, end, flags, bad);
2431 	ceph_decode_64_safe(&p, end, cookie, bad);
2432 
2433 	dout("%s con %p addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx flags 0x%llx cookie 0x%llx\n",
2434 	     __func__, con, ceph_pr_addr(&addr), le32_to_cpu(addr.nonce),
2435 	     global_id, global_seq, features, required_features, flags, cookie);
2436 
2437 	/* is this who we intended to talk to? */
2438 	if (memcmp(&addr, &con->peer_addr, sizeof(con->peer_addr))) {
2439 		pr_err("bad peer addr/nonce, want %s/%u, got %s/%u\n",
2440 		       ceph_pr_addr(&con->peer_addr),
2441 		       le32_to_cpu(con->peer_addr.nonce),
2442 		       ceph_pr_addr(&addr), le32_to_cpu(addr.nonce));
2443 		con->error_msg = "wrong peer at address";
2444 		return -EINVAL;
2445 	}
2446 
2447 	if (client->required_features & ~features) {
2448 		pr_err("RADOS feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
2449 		       features, client->required_features & ~features);
2450 		con->error_msg = "missing required protocol features";
2451 		return -EINVAL;
2452 	}
2453 
2454 	/*
2455 	 * Both name->type and name->num are set in ceph_con_open() but
2456 	 * name->num may be bogus in the initial monmap.  name->type is
2457 	 * verified in process_hello().
2458 	 */
2459 	WARN_ON(!con->peer_name.type);
2460 	con->peer_name.num = cpu_to_le64(global_id);
2461 	con->v2.peer_global_seq = global_seq;
2462 	con->peer_features = features;
2463 	WARN_ON(required_features & ~client->supported_features);
2464 	con->v2.server_cookie = cookie;
2465 
2466 	if (flags & CEPH_MSG_CONNECT_LOSSY) {
2467 		ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX);
2468 		WARN_ON(con->v2.server_cookie);
2469 	} else {
2470 		WARN_ON(!con->v2.server_cookie);
2471 	}
2472 
2473 	clear_in_sign_kvecs(con);
2474 	clear_out_sign_kvecs(con);
2475 	free_conn_bufs(con);
2476 	con->delay = 0;  /* reset backoff memory */
2477 
2478 	con->state = CEPH_CON_S_OPEN;
2479 	con->v2.out_state = OUT_S_GET_NEXT;
2480 	return 0;
2481 
2482 bad:
2483 	pr_err("failed to decode server_ident\n");
2484 	return -EINVAL;
2485 }
2486 
2487 static int process_ident_missing_features(struct ceph_connection *con,
2488 					  void *p, void *end)
2489 {
2490 	struct ceph_client *client = from_msgr(con->msgr);
2491 	u64 missing_features;
2492 
2493 	if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2494 		con->error_msg = "protocol error, unexpected ident_missing_features";
2495 		return -EINVAL;
2496 	}
2497 
2498 	ceph_decode_64_safe(&p, end, missing_features, bad);
2499 	pr_err("RADOS feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
2500 	       client->supported_features, missing_features);
2501 	con->error_msg = "missing required protocol features";
2502 	return -EINVAL;
2503 
2504 bad:
2505 	pr_err("failed to decode ident_missing_features\n");
2506 	return -EINVAL;
2507 }
2508 
2509 static int process_session_reconnect_ok(struct ceph_connection *con,
2510 					void *p, void *end)
2511 {
2512 	u64 seq;
2513 
2514 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2515 		con->error_msg = "protocol error, unexpected session_reconnect_ok";
2516 		return -EINVAL;
2517 	}
2518 
2519 	ceph_decode_64_safe(&p, end, seq, bad);
2520 
2521 	dout("%s con %p seq %llu\n", __func__, con, seq);
2522 	ceph_con_discard_requeued(con, seq);
2523 
2524 	clear_in_sign_kvecs(con);
2525 	clear_out_sign_kvecs(con);
2526 	free_conn_bufs(con);
2527 	con->delay = 0;  /* reset backoff memory */
2528 
2529 	con->state = CEPH_CON_S_OPEN;
2530 	con->v2.out_state = OUT_S_GET_NEXT;
2531 	return 0;
2532 
2533 bad:
2534 	pr_err("failed to decode session_reconnect_ok\n");
2535 	return -EINVAL;
2536 }
2537 
2538 static int process_session_retry(struct ceph_connection *con,
2539 				 void *p, void *end)
2540 {
2541 	u64 connect_seq;
2542 	int ret;
2543 
2544 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2545 		con->error_msg = "protocol error, unexpected session_retry";
2546 		return -EINVAL;
2547 	}
2548 
2549 	ceph_decode_64_safe(&p, end, connect_seq, bad);
2550 
2551 	dout("%s con %p connect_seq %llu\n", __func__, con, connect_seq);
2552 	WARN_ON(connect_seq <= con->v2.connect_seq);
2553 	con->v2.connect_seq = connect_seq + 1;
2554 
2555 	free_conn_bufs(con);
2556 
2557 	reset_out_kvecs(con);
2558 	ret = prepare_session_reconnect(con);
2559 	if (ret) {
2560 		pr_err("prepare_session_reconnect (cseq) failed: %d\n", ret);
2561 		return ret;
2562 	}
2563 
2564 	return 0;
2565 
2566 bad:
2567 	pr_err("failed to decode session_retry\n");
2568 	return -EINVAL;
2569 }
2570 
2571 static int process_session_retry_global(struct ceph_connection *con,
2572 					void *p, void *end)
2573 {
2574 	u64 global_seq;
2575 	int ret;
2576 
2577 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2578 		con->error_msg = "protocol error, unexpected session_retry_global";
2579 		return -EINVAL;
2580 	}
2581 
2582 	ceph_decode_64_safe(&p, end, global_seq, bad);
2583 
2584 	dout("%s con %p global_seq %llu\n", __func__, con, global_seq);
2585 	WARN_ON(global_seq <= con->v2.global_seq);
2586 	con->v2.global_seq = ceph_get_global_seq(con->msgr, global_seq);
2587 
2588 	free_conn_bufs(con);
2589 
2590 	reset_out_kvecs(con);
2591 	ret = prepare_session_reconnect(con);
2592 	if (ret) {
2593 		pr_err("prepare_session_reconnect (gseq) failed: %d\n", ret);
2594 		return ret;
2595 	}
2596 
2597 	return 0;
2598 
2599 bad:
2600 	pr_err("failed to decode session_retry_global\n");
2601 	return -EINVAL;
2602 }
2603 
2604 static int process_session_reset(struct ceph_connection *con,
2605 				 void *p, void *end)
2606 {
2607 	bool full;
2608 	int ret;
2609 
2610 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2611 		con->error_msg = "protocol error, unexpected session_reset";
2612 		return -EINVAL;
2613 	}
2614 
2615 	ceph_decode_8_safe(&p, end, full, bad);
2616 	if (!full) {
2617 		con->error_msg = "protocol error, bad session_reset";
2618 		return -EINVAL;
2619 	}
2620 
2621 	pr_info("%s%lld %s session reset\n", ENTITY_NAME(con->peer_name),
2622 		ceph_pr_addr(&con->peer_addr));
2623 	ceph_con_reset_session(con);
2624 
2625 	mutex_unlock(&con->mutex);
2626 	if (con->ops->peer_reset)
2627 		con->ops->peer_reset(con);
2628 	mutex_lock(&con->mutex);
2629 	if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2630 		dout("%s con %p state changed to %d\n", __func__, con,
2631 		     con->state);
2632 		return -EAGAIN;
2633 	}
2634 
2635 	free_conn_bufs(con);
2636 
2637 	reset_out_kvecs(con);
2638 	ret = prepare_client_ident(con);
2639 	if (ret) {
2640 		pr_err("prepare_client_ident (rst) failed: %d\n", ret);
2641 		return ret;
2642 	}
2643 
2644 	con->state = CEPH_CON_S_V2_SESSION_CONNECT;
2645 	return 0;
2646 
2647 bad:
2648 	pr_err("failed to decode session_reset\n");
2649 	return -EINVAL;
2650 }
2651 
2652 static int process_keepalive2_ack(struct ceph_connection *con,
2653 				  void *p, void *end)
2654 {
2655 	if (con->state != CEPH_CON_S_OPEN) {
2656 		con->error_msg = "protocol error, unexpected keepalive2_ack";
2657 		return -EINVAL;
2658 	}
2659 
2660 	ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad);
2661 	ceph_decode_timespec64(&con->last_keepalive_ack, p);
2662 
2663 	dout("%s con %p timestamp %lld.%09ld\n", __func__, con,
2664 	     con->last_keepalive_ack.tv_sec, con->last_keepalive_ack.tv_nsec);
2665 
2666 	return 0;
2667 
2668 bad:
2669 	pr_err("failed to decode keepalive2_ack\n");
2670 	return -EINVAL;
2671 }
2672 
2673 static int process_ack(struct ceph_connection *con, void *p, void *end)
2674 {
2675 	u64 seq;
2676 
2677 	if (con->state != CEPH_CON_S_OPEN) {
2678 		con->error_msg = "protocol error, unexpected ack";
2679 		return -EINVAL;
2680 	}
2681 
2682 	ceph_decode_64_safe(&p, end, seq, bad);
2683 
2684 	dout("%s con %p seq %llu\n", __func__, con, seq);
2685 	ceph_con_discard_sent(con, seq);
2686 	return 0;
2687 
2688 bad:
2689 	pr_err("failed to decode ack\n");
2690 	return -EINVAL;
2691 }
2692 
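/*
 * Dispatch a control frame to its handler based on the frame tag and
 * arm the next preamble read on success.
 */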
2693 static int process_control(struct ceph_connection *con, void *p, void *end)
2694 {
2695 	int tag = con->v2.in_desc.fd_tag;
2696 	int ret;
2697 
2698 	dout("%s con %p tag %d len %d\n", __func__, con, tag, (int)(end - p));
2699 
2700 	switch (tag) {
2701 	case FRAME_TAG_HELLO:
2702 		ret = process_hello(con, p, end);
2703 		break;
2704 	case FRAME_TAG_AUTH_BAD_METHOD:
2705 		ret = process_auth_bad_method(con, p, end);
2706 		break;
2707 	case FRAME_TAG_AUTH_REPLY_MORE:
2708 		ret = process_auth_reply_more(con, p, end);
2709 		break;
2710 	case FRAME_TAG_AUTH_DONE:
2711 		ret = process_auth_done(con, p, end);
2712 		break;
2713 	case FRAME_TAG_AUTH_SIGNATURE:
2714 		ret = process_auth_signature(con, p, end);
2715 		break;
2716 	case FRAME_TAG_SERVER_IDENT:
2717 		ret = process_server_ident(con, p, end);
2718 		break;
2719 	case FRAME_TAG_IDENT_MISSING_FEATURES:
2720 		ret = process_ident_missing_features(con, p, end);
2721 		break;
2722 	case FRAME_TAG_SESSION_RECONNECT_OK:
2723 		ret = process_session_reconnect_ok(con, p, end);
2724 		break;
2725 	case FRAME_TAG_SESSION_RETRY:
2726 		ret = process_session_retry(con, p, end);
2727 		break;
2728 	case FRAME_TAG_SESSION_RETRY_GLOBAL:
2729 		ret = process_session_retry_global(con, p, end);
2730 		break;
2731 	case FRAME_TAG_SESSION_RESET:
2732 		ret = process_session_reset(con, p, end);
2733 		break;
2734 	case FRAME_TAG_KEEPALIVE2_ACK:
2735 		ret = process_keepalive2_ack(con, p, end);
2736 		break;
2737 	case FRAME_TAG_ACK:
2738 		ret = process_ack(con, p, end);
2739 		break;
2740 	default:
2741 		pr_err("bad tag %d\n", tag);
2742 		con->error_msg = "protocol error, bad tag";
2743 		return -EINVAL;
2744 	}
2745 	if (ret) {
2746 		dout("%s con %p error %d\n", __func__, con, ret);
2747 		return ret;
2748 	}
2749 
2750 	prepare_read_preamble(con);
2751 	return 0;
2752 }
2753 
2754 /*
2755  * Return:
2756  *   1 - con->in_msg set, read message
2757  *   0 - skip message
2758  *  <0 - error
2759  */
2760 static int process_message_header(struct ceph_connection *con,
2761 				  void *p, void *end)
2762 {
2763 	struct ceph_frame_desc *desc = &con->v2.in_desc;
2764 	struct ceph_msg_header2 *hdr2 = p;
2765 	struct ceph_msg_header hdr;
2766 	int skip;
2767 	int ret;
2768 	u64 seq;
2769 
2770 	/* verify seq# */
2771 	seq = le64_to_cpu(hdr2->seq);
2772 	if ((s64)seq - (s64)con->in_seq < 1) {
2773 		pr_info("%s%lld %s skipping old message: seq %llu, expected %llu\n",
2774 			ENTITY_NAME(con->peer_name),
2775 			ceph_pr_addr(&con->peer_addr),
2776 			seq, con->in_seq + 1);
2777 		return 0;
2778 	}
2779 	if ((s64)seq - (s64)con->in_seq > 1) {
2780 		pr_err("bad seq %llu, expected %llu\n", seq, con->in_seq + 1);
2781 		con->error_msg = "bad message sequence # for incoming message";
2782 		return -EBADE;
2783 	}
2784 
2785 	ceph_con_discard_sent(con, le64_to_cpu(hdr2->ack_seq));
2786 
2787 	fill_header(&hdr, hdr2, desc->fd_lens[1], desc->fd_lens[2],
2788 		    desc->fd_lens[3], &con->peer_name);
2789 	ret = ceph_con_in_msg_alloc(con, &hdr, &skip);
2790 	if (ret)
2791 		return ret;
2792 
2793 	WARN_ON(!con->in_msg ^ skip);
2794 	if (skip)
2795 		return 0;
2796 
2797 	WARN_ON(!con->in_msg);
2798 	WARN_ON(con->in_msg->con != con);
2799 	return 1;
2800 }
2801 
2802 static int process_message(struct ceph_connection *con)
2803 {
2804 	ceph_con_process_message(con);
2805 
2806 	/*
2807 	 * We could have been closed by ceph_con_close() because
2808 	 * ceph_con_process_message() temporarily drops con->mutex.
2809 	 */
2810 	if (con->state != CEPH_CON_S_OPEN) {
2811 		dout("%s con %p state changed to %d\n", __func__, con,
2812 		     con->state);
2813 		return -EAGAIN;
2814 	}
2815 
2816 	prepare_read_preamble(con);
2817 	return 0;
2818 }
2819 
2820 static int __handle_control(struct ceph_connection *con, void *p)
2821 {
2822 	void *end = p + con->v2.in_desc.fd_lens[0];
2823 	struct ceph_msg *msg;
2824 	int ret;
2825 
2826 	if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE)
2827 		return process_control(con, p, end);
2828 
2829 	ret = process_message_header(con, p, end);
2830 	if (ret < 0)
2831 		return ret;
2832 	if (ret == 0) {
2833 		prepare_skip_message(con);
2834 		return 0;
2835 	}
2836 
2837 	msg = con->in_msg;  /* set in process_message_header() */
2838 	if (front_len(msg)) {
2839 		WARN_ON(front_len(msg) > msg->front_alloc_len);
2840 		msg->front.iov_len = front_len(msg);
2841 	} else {
2842 		msg->front.iov_len = 0;
2843 	}
2844 	if (middle_len(msg)) {
2845 		WARN_ON(middle_len(msg) > msg->middle->alloc_len);
2846 		msg->middle->vec.iov_len = middle_len(msg);
2847 	} else if (msg->middle) {
2848 		msg->middle->vec.iov_len = 0;
2849 	}
2850 
2851 	if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
2852 		return process_message(con);
2853 
2854 	if (con_secure(con))
2855 		return prepare_read_tail_secure(con);
2856 
2857 	return prepare_read_tail_plain(con);
2858 }
2859 
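/*
 * A preamble has arrived: decrypt it if in secure mode, decode the
 * frame descriptor and set up the control segment read (or, in secure
 * mode, handle the control segment right away if it fit entirely
 * inline with the preamble).
 */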
2860 static int handle_preamble(struct ceph_connection *con)
2861 {
2862 	struct ceph_frame_desc *desc = &con->v2.in_desc;
2863 	int ret;
2864 
2865 	if (con_secure(con)) {
2866 		ret = decrypt_preamble(con);
2867 		if (ret) {
2868 			if (ret == -EBADMSG)
2869 				con->error_msg = "integrity error, bad preamble auth tag";
2870 			return ret;
2871 		}
2872 	}
2873 
2874 	ret = decode_preamble(con->v2.in_buf, desc);
2875 	if (ret) {
2876 		if (ret == -EBADMSG)
2877 			con->error_msg = "integrity error, bad crc";
2878 		else
2879 			con->error_msg = "protocol error, bad preamble";
2880 		return ret;
2881 	}
2882 
2883 	dout("%s con %p tag %d seg_cnt %d %d+%d+%d+%d\n", __func__,
2884 	     con, desc->fd_tag, desc->fd_seg_cnt, desc->fd_lens[0],
2885 	     desc->fd_lens[1], desc->fd_lens[2], desc->fd_lens[3]);
2886 
2887 	if (!con_secure(con))
2888 		return prepare_read_control(con);
2889 
2890 	if (desc->fd_lens[0] > CEPH_PREAMBLE_INLINE_LEN)
2891 		return prepare_read_control_remainder(con);
2892 
2893 	return __handle_control(con, CTRL_BODY(con->v2.in_buf));
2894 }
2895 
2896 static int handle_control(struct ceph_connection *con)
2897 {
2898 	int ctrl_len = con->v2.in_desc.fd_lens[0];
2899 	void *buf;
2900 	int ret;
2901 
2902 	WARN_ON(con_secure(con));
2903 
2904 	ret = verify_control_crc(con);
2905 	if (ret) {
2906 		con->error_msg = "integrity error, bad crc";
2907 		return ret;
2908 	}
2909 
2910 	if (con->state == CEPH_CON_S_V2_AUTH) {
2911 		buf = alloc_conn_buf(con, ctrl_len);
2912 		if (!buf)
2913 			return -ENOMEM;
2914 
2915 		memcpy(buf, con->v2.in_kvecs[0].iov_base, ctrl_len);
2916 		return __handle_control(con, buf);
2917 	}
2918 
2919 	return __handle_control(con, con->v2.in_kvecs[0].iov_base);
2920 }
2921 
2922 static int handle_control_remainder(struct ceph_connection *con)
2923 {
2924 	int ret;
2925 
2926 	WARN_ON(!con_secure(con));
2927 
2928 	ret = decrypt_control_remainder(con);
2929 	if (ret) {
2930 		if (ret == -EBADMSG)
2931 			con->error_msg = "integrity error, bad control remainder auth tag";
2932 		return ret;
2933 	}
2934 
2935 	return __handle_control(con, con->v2.in_kvecs[0].iov_base -
2936 				     CEPH_PREAMBLE_INLINE_LEN);
2937 }
2938 
2939 static int handle_epilogue(struct ceph_connection *con)
2940 {
2941 	u32 front_crc, middle_crc, data_crc;
2942 	int ret;
2943 
2944 	if (con_secure(con)) {
2945 		ret = decrypt_tail(con);
2946 		if (ret) {
2947 			if (ret == -EBADMSG)
2948 				con->error_msg = "integrity error, bad epilogue auth tag";
2949 			return ret;
2950 		}
2951 
2952 		/* just late_status */
2953 		ret = decode_epilogue(con->v2.in_buf, NULL, NULL, NULL);
2954 		if (ret) {
2955 			con->error_msg = "protocol error, bad epilogue";
2956 			return ret;
2957 		}
2958 	} else {
2959 		ret = decode_epilogue(con->v2.in_buf, &front_crc,
2960 				      &middle_crc, &data_crc);
2961 		if (ret) {
2962 			con->error_msg = "protocol error, bad epilogue";
2963 			return ret;
2964 		}
2965 
2966 		ret = verify_epilogue_crcs(con, front_crc, middle_crc,
2967 					   data_crc);
2968 		if (ret) {
2969 			con->error_msg = "integrity error, bad crc";
2970 			return ret;
2971 		}
2972 	}
2973 
2974 	return process_message(con);
2975 }
2976 
2977 static void finish_skip(struct ceph_connection *con)
2978 {
2979 	dout("%s con %p\n", __func__, con);
2980 
2981 	if (con_secure(con))
2982 		gcm_inc_nonce(&con->v2.in_gcm_nonce);
2983 
2984 	__finish_skip(con);
2985 }
2986 
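/*
 * Incoming state machine: act on what was just received and populate
 * con->v2.in_iter for the next chunk to be read.
 */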
2987 static int populate_in_iter(struct ceph_connection *con)
2988 {
2989 	int ret;
2990 
2991 	dout("%s con %p state %d in_state %d\n", __func__, con, con->state,
2992 	     con->v2.in_state);
2993 	WARN_ON(iov_iter_count(&con->v2.in_iter));
2994 
2995 	if (con->state == CEPH_CON_S_V2_BANNER_PREFIX) {
2996 		ret = process_banner_prefix(con);
2997 	} else if (con->state == CEPH_CON_S_V2_BANNER_PAYLOAD) {
2998 		ret = process_banner_payload(con);
2999 	} else if ((con->state >= CEPH_CON_S_V2_HELLO &&
3000 		    con->state <= CEPH_CON_S_V2_SESSION_RECONNECT) ||
3001 		   con->state == CEPH_CON_S_OPEN) {
3002 		switch (con->v2.in_state) {
3003 		case IN_S_HANDLE_PREAMBLE:
3004 			ret = handle_preamble(con);
3005 			break;
3006 		case IN_S_HANDLE_CONTROL:
3007 			ret = handle_control(con);
3008 			break;
3009 		case IN_S_HANDLE_CONTROL_REMAINDER:
3010 			ret = handle_control_remainder(con);
3011 			break;
3012 		case IN_S_PREPARE_READ_DATA:
3013 			ret = prepare_read_data(con);
3014 			break;
3015 		case IN_S_PREPARE_READ_DATA_CONT:
3016 			prepare_read_data_cont(con);
3017 			ret = 0;
3018 			break;
3019 		case IN_S_PREPARE_READ_ENC_PAGE:
3020 			prepare_read_enc_page(con);
3021 			ret = 0;
3022 			break;
3023 		case IN_S_PREPARE_SPARSE_DATA:
3024 			ret = prepare_sparse_read_data(con);
3025 			break;
3026 		case IN_S_PREPARE_SPARSE_DATA_CONT:
3027 			ret = prepare_sparse_read_cont(con);
3028 			break;
3029 		case IN_S_HANDLE_EPILOGUE:
3030 			ret = handle_epilogue(con);
3031 			break;
3032 		case IN_S_FINISH_SKIP:
3033 			finish_skip(con);
3034 			ret = 0;
3035 			break;
3036 		default:
3037 			WARN(1, "bad in_state %d", con->v2.in_state);
3038 			return -EINVAL;
3039 		}
3040 	} else {
3041 		WARN(1, "bad state %d", con->state);
3042 		return -EINVAL;
3043 	}
3044 	if (ret) {
3045 		dout("%s con %p error %d\n", __func__, con, ret);
3046 		return ret;
3047 	}
3048 
3049 	if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
3050 		return -ENODATA;
3051 	dout("%s con %p populated %zu\n", __func__, con,
3052 	     iov_iter_count(&con->v2.in_iter));
3053 	return 1;
3054 }
3055 
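/*
 * Read loop: receive into con->v2.in_iter and run the incoming state
 * machine until the socket has nothing more to give (0) or an error
 * occurs (<0).
 */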
3056 int ceph_con_v2_try_read(struct ceph_connection *con)
3057 {
3058 	int ret;
3059 
3060 	dout("%s con %p state %d need %zu\n", __func__, con, con->state,
3061 	     iov_iter_count(&con->v2.in_iter));
3062 
3063 	if (con->state == CEPH_CON_S_PREOPEN)
3064 		return 0;
3065 
3066 	/*
3067 	 * We should always have something pending here.  If not, bail
3068 	 * out: ceph_tcp_recv() would immediately return 1 and we would
3069 	 * call populate_in_iter() as if we had actually read something.
3070 	 */
3071 	if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
3072 		return -ENODATA;
3073 
3074 	for (;;) {
3075 		ret = ceph_tcp_recv(con);
3076 		if (ret <= 0)
3077 			return ret;
3078 
3079 		ret = populate_in_iter(con);
3080 		if (ret <= 0) {
3081 			if (ret && ret != -EAGAIN && !con->error_msg)
3082 				con->error_msg = "read processing error";
3083 			return ret;
3084 		}
3085 	}
3086 }
3087 
3088 static void queue_data(struct ceph_connection *con)
3089 {
3090 	struct bio_vec bv;
3091 
3092 	con->v2.out_epil.data_crc = -1;
3093 	ceph_msg_data_cursor_init(&con->v2.out_cursor, con->out_msg,
3094 				  data_len(con->out_msg));
3095 
3096 	get_bvec_at(&con->v2.out_cursor, &bv);
3097 	set_out_bvec(con, &bv, true);
3098 	con->v2.out_state = OUT_S_QUEUE_DATA_CONT;
3099 }
3100 
3101 static void queue_data_cont(struct ceph_connection *con)
3102 {
3103 	struct bio_vec bv;
3104 
3105 	con->v2.out_epil.data_crc = ceph_crc32c_page(
3106 		con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
3107 		con->v2.out_bvec.bv_offset, con->v2.out_bvec.bv_len);
3108 
3109 	ceph_msg_data_advance(&con->v2.out_cursor, con->v2.out_bvec.bv_len);
3110 	if (con->v2.out_cursor.total_resid) {
3111 		get_bvec_at(&con->v2.out_cursor, &bv);
3112 		set_out_bvec(con, &bv, true);
3113 		WARN_ON(con->v2.out_state != OUT_S_QUEUE_DATA_CONT);
3114 		return;
3115 	}
3116 
3117 	/*
3118 	 * We've written all data.  Queue epilogue.  Once it's written,
3119 	 * we are done.
3120 	 */
3121 	reset_out_kvecs(con);
3122 	prepare_epilogue_plain(con, false);
3123 	con->v2.out_state = OUT_S_FINISH_MESSAGE;
3124 }
3125 
3126 static void queue_enc_page(struct ceph_connection *con)
3127 {
3128 	struct bio_vec bv;
3129 
3130 	dout("%s con %p i %d resid %d\n", __func__, con, con->v2.out_enc_i,
3131 	     con->v2.out_enc_resid);
3132 	WARN_ON(!con->v2.out_enc_resid);
3133 
3134 	bvec_set_page(&bv, con->v2.out_enc_pages[con->v2.out_enc_i],
3135 		      min(con->v2.out_enc_resid, (int)PAGE_SIZE), 0);
3136 
3137 	set_out_bvec(con, &bv, false);
3138 	con->v2.out_enc_i++;
3139 	con->v2.out_enc_resid -= bv.bv_len;
3140 
3141 	if (con->v2.out_enc_resid) {
3142 		WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE);
3143 		return;
3144 	}
3145 
3146 	/*
3147 	 * We've queued the last piece of ciphertext (ending with
3148 	 * epilogue) + auth tag.  Once it's written, we are done.
3149 	 */
3150 	WARN_ON(con->v2.out_enc_i != con->v2.out_enc_page_cnt);
3151 	con->v2.out_state = OUT_S_FINISH_MESSAGE;
3152 }
3153 
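/*
 * Revoke path: keep queueing zeros in place of the revoked message
 * until everything up to the epilogue has been zero-filled, then
 * queue an epilogue with late_status set to ABORTED.
 */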
3154 static void queue_zeros(struct ceph_connection *con)
3155 {
3156 	dout("%s con %p out_zero %d\n", __func__, con, con->v2.out_zero);
3157 
3158 	if (con->v2.out_zero) {
3159 		set_out_bvec_zero(con);
3160 		con->v2.out_zero -= con->v2.out_bvec.bv_len;
3161 		con->v2.out_state = OUT_S_QUEUE_ZEROS;
3162 		return;
3163 	}
3164 
3165 	/*
3166 	 * We've zero-filled everything up to epilogue.  Queue epilogue
3167 	 * with late_status set to ABORTED and crcs adjusted for zeros.
3168 	 * Once it's written, we are done patching up for the revoke.
3169 	 */
3170 	reset_out_kvecs(con);
3171 	prepare_epilogue_plain(con, true);
3172 	con->v2.out_state = OUT_S_FINISH_MESSAGE;
3173 }
3174 
3175 static void finish_message(struct ceph_connection *con)
3176 {
3177 	dout("%s con %p msg %p\n", __func__, con, con->out_msg);
3178 
3179 	/* we end up here in both plain and secure modes */
3180 	if (con->v2.out_enc_pages) {
3181 		WARN_ON(!con->v2.out_enc_page_cnt);
3182 		ceph_release_page_vector(con->v2.out_enc_pages,
3183 					 con->v2.out_enc_page_cnt);
3184 		con->v2.out_enc_pages = NULL;
3185 		con->v2.out_enc_page_cnt = 0;
3186 	}
3187 	/* message may have been revoked */
3188 	if (con->out_msg) {
3189 		ceph_msg_put(con->out_msg);
3190 		con->out_msg = NULL;
3191 	}
3192 
3193 	con->v2.out_state = OUT_S_GET_NEXT;
3194 }
3195 
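/*
 * Outgoing state machine: finish the frame currently in flight and
 * pick the next thing to send -- a keepalive2, the next queued
 * message or a standalone ack -- populating con->v2.out_iter along
 * the way.
 */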
3196 static int populate_out_iter(struct ceph_connection *con)
3197 {
3198 	int ret;
3199 
3200 	dout("%s con %p state %d out_state %d\n", __func__, con, con->state,
3201 	     con->v2.out_state);
3202 	WARN_ON(iov_iter_count(&con->v2.out_iter));
3203 
3204 	if (con->state != CEPH_CON_S_OPEN) {
3205 		WARN_ON(con->state < CEPH_CON_S_V2_BANNER_PREFIX ||
3206 			con->state > CEPH_CON_S_V2_SESSION_RECONNECT);
3207 		goto nothing_pending;
3208 	}
3209 
3210 	switch (con->v2.out_state) {
3211 	case OUT_S_QUEUE_DATA:
3212 		WARN_ON(!con->out_msg);
3213 		queue_data(con);
3214 		goto populated;
3215 	case OUT_S_QUEUE_DATA_CONT:
3216 		WARN_ON(!con->out_msg);
3217 		queue_data_cont(con);
3218 		goto populated;
3219 	case OUT_S_QUEUE_ENC_PAGE:
3220 		queue_enc_page(con);
3221 		goto populated;
3222 	case OUT_S_QUEUE_ZEROS:
3223 		WARN_ON(con->out_msg);  /* revoked */
3224 		queue_zeros(con);
3225 		goto populated;
3226 	case OUT_S_FINISH_MESSAGE:
3227 		finish_message(con);
3228 		break;
3229 	case OUT_S_GET_NEXT:
3230 		break;
3231 	default:
3232 		WARN(1, "bad out_state %d", con->v2.out_state);
3233 		return -EINVAL;
3234 	}
3235 
3236 	WARN_ON(con->v2.out_state != OUT_S_GET_NEXT);
3237 	if (ceph_con_flag_test_and_clear(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
3238 		ret = prepare_keepalive2(con);
3239 		if (ret) {
3240 			pr_err("prepare_keepalive2 failed: %d\n", ret);
3241 			return ret;
3242 		}
3243 	} else if (!list_empty(&con->out_queue)) {
3244 		ceph_con_get_out_msg(con);
3245 		ret = prepare_message(con);
3246 		if (ret) {
3247 			pr_err("prepare_message failed: %d\n", ret);
3248 			return ret;
3249 		}
3250 	} else if (con->in_seq > con->in_seq_acked) {
3251 		ret = prepare_ack(con);
3252 		if (ret) {
3253 			pr_err("prepare_ack failed: %d\n", ret);
3254 			return ret;
3255 		}
3256 	} else {
3257 		goto nothing_pending;
3258 	}
3259 
3260 populated:
3261 	if (WARN_ON(!iov_iter_count(&con->v2.out_iter)))
3262 		return -ENODATA;
3263 	dout("%s con %p populated %zu\n", __func__, con,
3264 	     iov_iter_count(&con->v2.out_iter));
3265 	return 1;
3266 
3267 nothing_pending:
3268 	WARN_ON(iov_iter_count(&con->v2.out_iter));
3269 	dout("%s con %p nothing pending\n", __func__, con);
3270 	ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
3271 	return 0;
3272 }
3273 
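/*
 * Write loop: on the first call (PREOPEN) open the socket and queue
 * the banner, then keep sending from con->v2.out_iter and refilling
 * it via populate_out_iter() until the socket backs up or there is
 * nothing left to send.
 */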
3274 int ceph_con_v2_try_write(struct ceph_connection *con)
3275 {
3276 	int ret;
3277 
3278 	dout("%s con %p state %d have %zu\n", __func__, con, con->state,
3279 	     iov_iter_count(&con->v2.out_iter));
3280 
3281 	/* open the socket first? */
3282 	if (con->state == CEPH_CON_S_PREOPEN) {
3283 		WARN_ON(con->peer_addr.type != CEPH_ENTITY_ADDR_TYPE_MSGR2);
3284 
3285 		/*
3286 		 * Always bump global_seq.  Bump connect_seq only if
3287 		 * there is a session (i.e. we are reconnecting and will
3288 		 * send session_reconnect instead of client_ident).
3289 		 */
3290 		con->v2.global_seq = ceph_get_global_seq(con->msgr, 0);
3291 		if (con->v2.server_cookie)
3292 			con->v2.connect_seq++;
3293 
3294 		ret = prepare_read_banner_prefix(con);
3295 		if (ret) {
3296 			pr_err("prepare_read_banner_prefix failed: %d\n", ret);
3297 			con->error_msg = "connect error";
3298 			return ret;
3299 		}
3300 
3301 		reset_out_kvecs(con);
3302 		ret = prepare_banner(con);
3303 		if (ret) {
3304 			pr_err("prepare_banner failed: %d\n", ret);
3305 			con->error_msg = "connect error";
3306 			return ret;
3307 		}
3308 
3309 		ret = ceph_tcp_connect(con);
3310 		if (ret) {
3311 			pr_err("ceph_tcp_connect failed: %d\n", ret);
3312 			con->error_msg = "connect error";
3313 			return ret;
3314 		}
3315 	}
3316 
3317 	if (!iov_iter_count(&con->v2.out_iter)) {
3318 		ret = populate_out_iter(con);
3319 		if (ret <= 0) {
3320 			if (ret && ret != -EAGAIN && !con->error_msg)
3321 				con->error_msg = "write processing error";
3322 			return ret;
3323 		}
3324 	}
3325 
3326 	tcp_sock_set_cork(con->sock->sk, true);
3327 	for (;;) {
3328 		ret = ceph_tcp_send(con);
3329 		if (ret <= 0)
3330 			break;
3331 
3332 		ret = populate_out_iter(con);
3333 		if (ret <= 0) {
3334 			if (ret && ret != -EAGAIN && !con->error_msg)
3335 				con->error_msg = "write processing error";
3336 			break;
3337 		}
3338 	}
3339 
3340 	tcp_sock_set_cork(con->sock->sk, false);
3341 	return ret;
3342 }
3343 
3344 static u32 crc32c_zeros(u32 crc, int zero_len)
3345 {
3346 	int len;
3347 
3348 	while (zero_len) {
3349 		len = min(zero_len, (int)PAGE_SIZE);
3350 		crc = crc32c(crc, page_address(ceph_zero_page), len);
3351 		zero_len -= len;
3352 	}
3353 
3354 	return crc;
3355 }
3356 
3357 static void prepare_zero_front(struct ceph_connection *con, int resid)
3358 {
3359 	int sent;
3360 
3361 	WARN_ON(!resid || resid > front_len(con->out_msg));
3362 	sent = front_len(con->out_msg) - resid;
3363 	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3364 
3365 	if (sent) {
3366 		con->v2.out_epil.front_crc =
3367 			crc32c(-1, con->out_msg->front.iov_base, sent);
3368 		con->v2.out_epil.front_crc =
3369 			crc32c_zeros(con->v2.out_epil.front_crc, resid);
3370 	} else {
3371 		con->v2.out_epil.front_crc = crc32c_zeros(-1, resid);
3372 	}
3373 
3374 	con->v2.out_iter.count -= resid;
3375 	out_zero_add(con, resid);
3376 }
3377 
3378 static void prepare_zero_middle(struct ceph_connection *con, int resid)
3379 {
3380 	int sent;
3381 
3382 	WARN_ON(!resid || resid > middle_len(con->out_msg));
3383 	sent = middle_len(con->out_msg) - resid;
3384 	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3385 
3386 	if (sent) {
3387 		con->v2.out_epil.middle_crc =
3388 			crc32c(-1, con->out_msg->middle->vec.iov_base, sent);
3389 		con->v2.out_epil.middle_crc =
3390 			crc32c_zeros(con->v2.out_epil.middle_crc, resid);
3391 	} else {
3392 		con->v2.out_epil.middle_crc = crc32c_zeros(-1, resid);
3393 	}
3394 
3395 	con->v2.out_iter.count -= resid;
3396 	out_zero_add(con, resid);
3397 }
3398 
3399 static void prepare_zero_data(struct ceph_connection *con)
3400 {
3401 	dout("%s con %p\n", __func__, con);
3402 	con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(con->out_msg));
3403 	out_zero_add(con, data_len(con->out_msg));
3404 }
3405 
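/*
 * Message revoked while its head was being sent as kvecs and before
 * any data was queued.  Account for the unsent parts of front and
 * middle (and all of the data) as zeros, with crcs adjusted to match,
 * and switch to queueing those zeros.
 */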
3406 static void revoke_at_queue_data(struct ceph_connection *con)
3407 {
3408 	int boundary;
3409 	int resid;
3410 
3411 	WARN_ON(!data_len(con->out_msg));
3412 	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
3413 	resid = iov_iter_count(&con->v2.out_iter);
3414 
3415 	boundary = front_len(con->out_msg) + middle_len(con->out_msg);
3416 	if (resid > boundary) {
3417 		resid -= boundary;
3418 		WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
3419 		dout("%s con %p was sending head\n", __func__, con);
3420 		if (front_len(con->out_msg))
3421 			prepare_zero_front(con, front_len(con->out_msg));
3422 		if (middle_len(con->out_msg))
3423 			prepare_zero_middle(con, middle_len(con->out_msg));
3424 		prepare_zero_data(con);
3425 		WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
3426 		con->v2.out_state = OUT_S_QUEUE_ZEROS;
3427 		return;
3428 	}
3429 
3430 	boundary = middle_len(con->out_msg);
3431 	if (resid > boundary) {
3432 		resid -= boundary;
3433 		dout("%s con %p was sending front\n", __func__, con);
3434 		prepare_zero_front(con, resid);
3435 		if (middle_len(con->out_msg))
3436 			prepare_zero_middle(con, middle_len(con->out_msg));
3437 		prepare_zero_data(con);
3438 		queue_zeros(con);
3439 		return;
3440 	}
3441 
3442 	WARN_ON(!resid);
3443 	dout("%s con %p was sending middle\n", __func__, con);
3444 	prepare_zero_middle(con, resid);
3445 	prepare_zero_data(con);
3446 	queue_zeros(con);
3447 }
3448 
3449 static void revoke_at_queue_data_cont(struct ceph_connection *con)
3450 {
3451 	int sent, resid;  /* current piece of data */
3452 
3453 	WARN_ON(!data_len(con->out_msg));
3454 	WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter));
3455 	resid = iov_iter_count(&con->v2.out_iter);
3456 	WARN_ON(!resid || resid > con->v2.out_bvec.bv_len);
3457 	sent = con->v2.out_bvec.bv_len - resid;
3458 	dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3459 
3460 	if (sent) {
3461 		con->v2.out_epil.data_crc = ceph_crc32c_page(
3462 			con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
3463 			con->v2.out_bvec.bv_offset, sent);
3464 		ceph_msg_data_advance(&con->v2.out_cursor, sent);
3465 	}
3466 	WARN_ON(resid > con->v2.out_cursor.total_resid);
3467 	con->v2.out_epil.data_crc = crc32c_zeros(con->v2.out_epil.data_crc,
3468 						con->v2.out_cursor.total_resid);
3469 
3470 	con->v2.out_iter.count -= resid;
3471 	out_zero_add(con, con->v2.out_cursor.total_resid);
3472 	queue_zeros(con);
3473 }
3474 
3475 static void revoke_at_finish_message(struct ceph_connection *con)
3476 {
3477 	int boundary;
3478 	int resid;
3479 
3480 	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
3481 	resid = iov_iter_count(&con->v2.out_iter);
3482 
3483 	if (!front_len(con->out_msg) && !middle_len(con->out_msg) &&
3484 	    !data_len(con->out_msg)) {
3485 		WARN_ON(!resid || resid > MESSAGE_HEAD_PLAIN_LEN);
3486 		dout("%s con %p was sending head (empty message) - noop\n",
3487 		     __func__, con);
3488 		return;
3489 	}
3490 
3491 	boundary = front_len(con->out_msg) + middle_len(con->out_msg) +
3492 		   CEPH_EPILOGUE_PLAIN_LEN;
3493 	if (resid > boundary) {
3494 		resid -= boundary;
3495 		WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
3496 		dout("%s con %p was sending head\n", __func__, con);
3497 		if (front_len(con->out_msg))
3498 			prepare_zero_front(con, front_len(con->out_msg));
3499 		if (middle_len(con->out_msg))
3500 			prepare_zero_middle(con, middle_len(con->out_msg));
3501 		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3502 		WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
3503 		con->v2.out_state = OUT_S_QUEUE_ZEROS;
3504 		return;
3505 	}
3506 
3507 	boundary = middle_len(con->out_msg) + CEPH_EPILOGUE_PLAIN_LEN;
3508 	if (resid > boundary) {
3509 		resid -= boundary;
3510 		dout("%s con %p was sending front\n", __func__, con);
3511 		prepare_zero_front(con, resid);
3512 		if (middle_len(con->out_msg))
3513 			prepare_zero_middle(con, middle_len(con->out_msg));
3514 		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3515 		queue_zeros(con);
3516 		return;
3517 	}
3518 
3519 	boundary = CEPH_EPILOGUE_PLAIN_LEN;
3520 	if (resid > boundary) {
3521 		resid -= boundary;
3522 		dout("%s con %p was sending middle\n", __func__, con);
3523 		prepare_zero_middle(con, resid);
3524 		con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3525 		queue_zeros(con);
3526 		return;
3527 	}
3528 
3529 	WARN_ON(!resid);
3530 	dout("%s con %p was sending epilogue - noop\n", __func__, con);
3531 }
3532 
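/*
 * An outgoing message has been revoked.  In secure mode the whole
 * frame is already encrypted and queued, so just let it go out.  In
 * plain mode substitute zeros for the unsent portions, adjusting the
 * crcs, so that a well-formed frame still goes out on the wire.
 */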
3533 void ceph_con_v2_revoke(struct ceph_connection *con)
3534 {
3535 	WARN_ON(con->v2.out_zero);
3536 
3537 	if (con_secure(con)) {
3538 		WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE &&
3539 			con->v2.out_state != OUT_S_FINISH_MESSAGE);
3540 		dout("%s con %p secure - noop\n", __func__, con);
3541 		return;
3542 	}
3543 
3544 	switch (con->v2.out_state) {
3545 	case OUT_S_QUEUE_DATA:
3546 		revoke_at_queue_data(con);
3547 		break;
3548 	case OUT_S_QUEUE_DATA_CONT:
3549 		revoke_at_queue_data_cont(con);
3550 		break;
3551 	case OUT_S_FINISH_MESSAGE:
3552 		revoke_at_finish_message(con);
3553 		break;
3554 	default:
3555 		WARN(1, "bad out_state %d", con->v2.out_state);
3556 		break;
3557 	}
3558 }
3559 
3560 static void revoke_at_prepare_read_data(struct ceph_connection *con)
3561 {
3562 	int remaining;
3563 	int resid;
3564 
3565 	WARN_ON(con_secure(con));
3566 	WARN_ON(!data_len(con->in_msg));
3567 	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
3568 	resid = iov_iter_count(&con->v2.in_iter);
3569 	WARN_ON(!resid);
3570 
3571 	remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;
3572 	dout("%s con %p resid %d remaining %d\n", __func__, con, resid,
3573 	     remaining);
3574 	con->v2.in_iter.count -= resid;
3575 	set_in_skip(con, resid + remaining);
3576 	con->v2.in_state = IN_S_FINISH_SKIP;
3577 }
3578 
3579 static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
3580 {
3581 	int recved, resid;  /* current piece of data */
3582 	int remaining;
3583 
3584 	WARN_ON(con_secure(con));
3585 	WARN_ON(!data_len(con->in_msg));
3586 	WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
3587 	resid = iov_iter_count(&con->v2.in_iter);
3588 	WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
3589 	recved = con->v2.in_bvec.bv_len - resid;
3590 	dout("%s con %p recved %d resid %d\n", __func__, con, recved, resid);
3591 
3592 	if (recved)
3593 		ceph_msg_data_advance(&con->v2.in_cursor, recved);
3594 	WARN_ON(resid > con->v2.in_cursor.total_resid);
3595 
3596 	remaining = CEPH_EPILOGUE_PLAIN_LEN;
3597 	dout("%s con %p total_resid %zu remaining %d\n", __func__, con,
3598 	     con->v2.in_cursor.total_resid, remaining);
3599 	con->v2.in_iter.count -= resid;
3600 	set_in_skip(con, con->v2.in_cursor.total_resid + remaining);
3601 	con->v2.in_state = IN_S_FINISH_SKIP;
3602 }
3603 
3604 static void revoke_at_prepare_read_enc_page(struct ceph_connection *con)
3605 {
3606 	int resid;  /* current enc page (not necessarily data) */
3607 
3608 	WARN_ON(!con_secure(con));
3609 	WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
3610 	resid = iov_iter_count(&con->v2.in_iter);
3611 	WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
3612 
3613 	dout("%s con %p resid %d enc_resid %d\n", __func__, con, resid,
3614 	     con->v2.in_enc_resid);
3615 	con->v2.in_iter.count -= resid;
3616 	set_in_skip(con, resid + con->v2.in_enc_resid);
3617 	con->v2.in_state = IN_S_FINISH_SKIP;
3618 }
3619 
3620 static void revoke_at_prepare_sparse_data(struct ceph_connection *con)
3621 {
3622 	int resid;  /* current piece of data */
3623 	int remaining;
3624 
3625 	WARN_ON(con_secure(con));
3626 	WARN_ON(!data_len(con->in_msg));
3627 	WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
3628 	resid = iov_iter_count(&con->v2.in_iter);
3629 	dout("%s con %p resid %d\n", __func__, con, resid);
3630 
3631 	remaining = CEPH_EPILOGUE_PLAIN_LEN + con->v2.data_len_remain;
3632 	con->v2.in_iter.count -= resid;
3633 	set_in_skip(con, resid + remaining);
3634 	con->v2.in_state = IN_S_FINISH_SKIP;
3635 }
3636 
3637 static void revoke_at_handle_epilogue(struct ceph_connection *con)
3638 {
3639 	int resid;
3640 
3641 	resid = iov_iter_count(&con->v2.in_iter);
3642 	WARN_ON(!resid);
3643 
3644 	dout("%s con %p resid %d\n", __func__, con, resid);
3645 	con->v2.in_iter.count -= resid;
3646 	set_in_skip(con, resid);
3647 	con->v2.in_state = IN_S_FINISH_SKIP;
3648 }
3649 
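/*
 * The message currently being received has been revoked.  Arrange to
 * skip whatever is left of it on the wire.
 */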
3650 void ceph_con_v2_revoke_incoming(struct ceph_connection *con)
3651 {
3652 	switch (con->v2.in_state) {
3653 	case IN_S_PREPARE_SPARSE_DATA:
3654 	case IN_S_PREPARE_READ_DATA:
3655 		revoke_at_prepare_read_data(con);
3656 		break;
3657 	case IN_S_PREPARE_READ_DATA_CONT:
3658 		revoke_at_prepare_read_data_cont(con);
3659 		break;
3660 	case IN_S_PREPARE_READ_ENC_PAGE:
3661 		revoke_at_prepare_read_enc_page(con);
3662 		break;
3663 	case IN_S_PREPARE_SPARSE_DATA_CONT:
3664 		revoke_at_prepare_sparse_data(con);
3665 		break;
3666 	case IN_S_HANDLE_EPILOGUE:
3667 		revoke_at_handle_epilogue(con);
3668 		break;
3669 	default:
3670 		WARN(1, "bad in_state %d", con->v2.in_state);
3671 		break;
3672 	}
3673 }
3674 
3675 bool ceph_con_v2_opened(struct ceph_connection *con)
3676 {
3677 	return con->v2.peer_global_seq;
3678 }
3679 
3680 void ceph_con_v2_reset_session(struct ceph_connection *con)
3681 {
3682 	con->v2.client_cookie = 0;
3683 	con->v2.server_cookie = 0;
3684 	con->v2.global_seq = 0;
3685 	con->v2.connect_seq = 0;
3686 	con->v2.peer_global_seq = 0;
3687 }
3688 
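/*
 * Drop all in-flight protocol state: pending iterators, connection
 * buffers, encrypted page vectors, nonces and crypto handles.
 */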
3689 void ceph_con_v2_reset_protocol(struct ceph_connection *con)
3690 {
3691 	iov_iter_truncate(&con->v2.in_iter, 0);
3692 	iov_iter_truncate(&con->v2.out_iter, 0);
3693 	con->v2.out_zero = 0;
3694 
3695 	clear_in_sign_kvecs(con);
3696 	clear_out_sign_kvecs(con);
3697 	free_conn_bufs(con);
3698 
3699 	if (con->v2.in_enc_pages) {
3700 		WARN_ON(!con->v2.in_enc_page_cnt);
3701 		ceph_release_page_vector(con->v2.in_enc_pages,
3702 					 con->v2.in_enc_page_cnt);
3703 		con->v2.in_enc_pages = NULL;
3704 		con->v2.in_enc_page_cnt = 0;
3705 	}
3706 	if (con->v2.out_enc_pages) {
3707 		WARN_ON(!con->v2.out_enc_page_cnt);
3708 		ceph_release_page_vector(con->v2.out_enc_pages,
3709 					 con->v2.out_enc_page_cnt);
3710 		con->v2.out_enc_pages = NULL;
3711 		con->v2.out_enc_page_cnt = 0;
3712 	}
3713 
3714 	con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
3715 	memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
3716 	memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);
3717 
3718 	if (con->v2.hmac_tfm) {
3719 		crypto_free_shash(con->v2.hmac_tfm);
3720 		con->v2.hmac_tfm = NULL;
3721 	}
3722 	if (con->v2.gcm_req) {
3723 		aead_request_free(con->v2.gcm_req);
3724 		con->v2.gcm_req = NULL;
3725 	}
3726 	if (con->v2.gcm_tfm) {
3727 		crypto_free_aead(con->v2.gcm_tfm);
3728 		con->v2.gcm_tfm = NULL;
3729 	}
3730 }
3731