1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Ceph msgr2 protocol implementation
4 *
5 * Copyright (C) 2020 Ilya Dryomov <idryomov@gmail.com>
6 */
7
8 #include <linux/ceph/ceph_debug.h>
9
10 #include <crypto/aead.h>
11 #include <crypto/hash.h>
12 #include <crypto/sha2.h>
13 #include <crypto/utils.h>
14 #include <linux/bvec.h>
15 #include <linux/crc32c.h>
16 #include <linux/net.h>
17 #include <linux/scatterlist.h>
18 #include <linux/socket.h>
19 #include <linux/sched/mm.h>
20 #include <net/sock.h>
21 #include <net/tcp.h>
22
23 #include <linux/ceph/ceph_features.h>
24 #include <linux/ceph/decode.h>
25 #include <linux/ceph/libceph.h>
26 #include <linux/ceph/messenger.h>
27
28 #include "crypto.h" /* for CEPH_KEY_LEN and CEPH_MAX_CON_SECRET_LEN */
29
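/*
 * Tags identifying the different msgr2 control frame types.  The values
 * are dictated by the msgr2 wire protocol and must match what the peer
 * (ceph-mon, ceph-osd, etc.) expects.
 */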
30 #define FRAME_TAG_HELLO 1
31 #define FRAME_TAG_AUTH_REQUEST 2
32 #define FRAME_TAG_AUTH_BAD_METHOD 3
33 #define FRAME_TAG_AUTH_REPLY_MORE 4
34 #define FRAME_TAG_AUTH_REQUEST_MORE 5
35 #define FRAME_TAG_AUTH_DONE 6
36 #define FRAME_TAG_AUTH_SIGNATURE 7
37 #define FRAME_TAG_CLIENT_IDENT 8
38 #define FRAME_TAG_SERVER_IDENT 9
39 #define FRAME_TAG_IDENT_MISSING_FEATURES 10
40 #define FRAME_TAG_SESSION_RECONNECT 11
41 #define FRAME_TAG_SESSION_RESET 12
42 #define FRAME_TAG_SESSION_RETRY 13
43 #define FRAME_TAG_SESSION_RETRY_GLOBAL 14
44 #define FRAME_TAG_SESSION_RECONNECT_OK 15
45 #define FRAME_TAG_WAIT 16
46 #define FRAME_TAG_MESSAGE 17
47 #define FRAME_TAG_KEEPALIVE2 18
48 #define FRAME_TAG_KEEPALIVE2_ACK 19
49 #define FRAME_TAG_ACK 20
50
51 #define FRAME_LATE_STATUS_ABORTED 0x1
52 #define FRAME_LATE_STATUS_COMPLETE 0xe
53 #define FRAME_LATE_STATUS_ABORTED_MASK 0xf
54
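/* incoming frame (in_state) state machine states */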
55 #define IN_S_HANDLE_PREAMBLE 1
56 #define IN_S_HANDLE_CONTROL 2
57 #define IN_S_HANDLE_CONTROL_REMAINDER 3
58 #define IN_S_PREPARE_READ_DATA 4
59 #define IN_S_PREPARE_READ_DATA_CONT 5
60 #define IN_S_PREPARE_READ_ENC_PAGE 6
61 #define IN_S_PREPARE_SPARSE_DATA 7
62 #define IN_S_PREPARE_SPARSE_DATA_CONT 8
63 #define IN_S_HANDLE_EPILOGUE 9
64 #define IN_S_FINISH_SKIP 10
65
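/* outgoing frame (out_state) state machine states */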
66 #define OUT_S_QUEUE_DATA 1
67 #define OUT_S_QUEUE_DATA_CONT 2
68 #define OUT_S_QUEUE_ENC_PAGE 3
69 #define OUT_S_QUEUE_ZEROS 4
70 #define OUT_S_FINISH_MESSAGE 5
71 #define OUT_S_GET_NEXT 6
72
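/*
 * Helpers for addressing pieces of a connection buffer: CTRL_BODY()
 * skips the preamble, while FRONT_PAD(), MIDDLE_PAD() and DATA_PAD()
 * point at the per-segment zero-padding scratch areas used when
 * decrypting in secure mode.
 */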
73 #define CTRL_BODY(p) ((void *)(p) + CEPH_PREAMBLE_LEN)
74 #define FRONT_PAD(p) ((void *)(p) + CEPH_EPILOGUE_SECURE_LEN)
75 #define MIDDLE_PAD(p) (FRONT_PAD(p) + CEPH_GCM_BLOCK_LEN)
76 #define DATA_PAD(p) (MIDDLE_PAD(p) + CEPH_GCM_BLOCK_LEN)
77
78 #define CEPH_MSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
79
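/*
 * Receive as much of the iterator as the socket allows.  Returns 1 if
 * everything was read, 0 if the socket would block (-EAGAIN) and a
 * negative error otherwise; the iterator is advanced past whatever was
 * consumed.
 */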
static int do_recvmsg(struct socket *sock, struct iov_iter *it)
81 {
82 struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
83 int ret;
84
85 msg.msg_iter = *it;
86 while (iov_iter_count(it)) {
87 ret = sock_recvmsg(sock, &msg, msg.msg_flags);
88 if (ret <= 0) {
89 if (ret == -EAGAIN)
90 ret = 0;
91 return ret;
92 }
93
94 iov_iter_advance(it, ret);
95 }
96
97 WARN_ON(msg_data_left(&msg));
98 return 1;
99 }
100
101 /*
102 * Read as much as possible.
103 *
104 * Return:
105 * 1 - done, nothing (else) to read
106 * 0 - socket is empty, need to wait
107 * <0 - error
108 */
static int ceph_tcp_recv(struct ceph_connection *con)
110 {
111 int ret;
112
113 dout("%s con %p %s %zu\n", __func__, con,
114 iov_iter_is_discard(&con->v2.in_iter) ? "discard" : "need",
115 iov_iter_count(&con->v2.in_iter));
116 ret = do_recvmsg(con->sock, &con->v2.in_iter);
117 dout("%s con %p ret %d left %zu\n", __func__, con, ret,
118 iov_iter_count(&con->v2.in_iter));
119 return ret;
120 }
121
static int do_sendmsg(struct socket *sock, struct iov_iter *it)
123 {
124 struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
125 int ret;
126
127 msg.msg_iter = *it;
128 while (iov_iter_count(it)) {
129 ret = sock_sendmsg(sock, &msg);
130 if (ret <= 0) {
131 if (ret == -EAGAIN)
132 ret = 0;
133 return ret;
134 }
135
136 iov_iter_advance(it, ret);
137 }
138
139 WARN_ON(msg_data_left(&msg));
140 return 1;
141 }
142
static int do_try_sendpage(struct socket *sock, struct iov_iter *it)
144 {
145 struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };
146 struct bio_vec bv;
147 int ret;
148
149 if (WARN_ON(!iov_iter_is_bvec(it)))
150 return -EINVAL;
151
152 while (iov_iter_count(it)) {
153 /* iov_iter_iovec() for ITER_BVEC */
154 bvec_set_page(&bv, it->bvec->bv_page,
155 min(iov_iter_count(it),
156 it->bvec->bv_len - it->iov_offset),
157 it->bvec->bv_offset + it->iov_offset);
158
		/*
		 * MSG_SPLICE_PAGES cannot properly handle pages with
		 * page_count == 0; we need to fall back to sendmsg in
		 * that case.
		 *
		 * The same goes for slab pages: skb_can_coalesce() allows
		 * coalescing neighboring slab objects into a single frag,
		 * which triggers one of the hardened usercopy checks.
		 */
168 if (sendpage_ok(bv.bv_page))
169 msg.msg_flags |= MSG_SPLICE_PAGES;
170 else
171 msg.msg_flags &= ~MSG_SPLICE_PAGES;
172
173 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len);
174 ret = sock_sendmsg(sock, &msg);
175 if (ret <= 0) {
176 if (ret == -EAGAIN)
177 ret = 0;
178 return ret;
179 }
180
181 iov_iter_advance(it, ret);
182 }
183
184 return 1;
185 }
186
187 /*
188 * Write as much as possible. The socket is expected to be corked,
189 * so we don't bother with MSG_MORE here.
190 *
191 * Return:
192 * 1 - done, nothing (else) to write
193 * 0 - socket is full, need to wait
194 * <0 - error
195 */
static int ceph_tcp_send(struct ceph_connection *con)
197 {
198 int ret;
199
200 dout("%s con %p have %zu try_sendpage %d\n", __func__, con,
201 iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage);
202 if (con->v2.out_iter_sendpage)
203 ret = do_try_sendpage(con->sock, &con->v2.out_iter);
204 else
205 ret = do_sendmsg(con->sock, &con->v2.out_iter);
206 dout("%s con %p ret %d left %zu\n", __func__, con, ret,
207 iov_iter_count(&con->v2.out_iter));
208 return ret;
209 }
210
static void add_in_kvec(struct ceph_connection *con, void *buf, int len)
212 {
213 BUG_ON(con->v2.in_kvec_cnt >= ARRAY_SIZE(con->v2.in_kvecs));
214 WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
215
216 con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_base = buf;
217 con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_len = len;
218 con->v2.in_kvec_cnt++;
219
220 con->v2.in_iter.nr_segs++;
221 con->v2.in_iter.count += len;
222 }
223
static void reset_in_kvecs(struct ceph_connection *con)
225 {
226 WARN_ON(iov_iter_count(&con->v2.in_iter));
227
228 con->v2.in_kvec_cnt = 0;
229 iov_iter_kvec(&con->v2.in_iter, ITER_DEST, con->v2.in_kvecs, 0, 0);
230 }
231
static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
233 {
234 WARN_ON(iov_iter_count(&con->v2.in_iter));
235
236 con->v2.in_bvec = *bv;
237 iov_iter_bvec(&con->v2.in_iter, ITER_DEST, &con->v2.in_bvec, 1, bv->bv_len);
238 }
239
static void set_in_skip(struct ceph_connection *con, int len)
241 {
242 WARN_ON(iov_iter_count(&con->v2.in_iter));
243
244 dout("%s con %p len %d\n", __func__, con, len);
245 iov_iter_discard(&con->v2.in_iter, ITER_DEST, len);
246 }
247
static void add_out_kvec(struct ceph_connection *con, void *buf, int len)
249 {
250 BUG_ON(con->v2.out_kvec_cnt >= ARRAY_SIZE(con->v2.out_kvecs));
251 WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
252 WARN_ON(con->v2.out_zero);
253
254 con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_base = buf;
255 con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_len = len;
256 con->v2.out_kvec_cnt++;
257
258 con->v2.out_iter.nr_segs++;
259 con->v2.out_iter.count += len;
260 }
261
static void reset_out_kvecs(struct ceph_connection *con)
263 {
264 WARN_ON(iov_iter_count(&con->v2.out_iter));
265 WARN_ON(con->v2.out_zero);
266
267 con->v2.out_kvec_cnt = 0;
268
269 iov_iter_kvec(&con->v2.out_iter, ITER_SOURCE, con->v2.out_kvecs, 0, 0);
270 con->v2.out_iter_sendpage = false;
271 }
272
static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv,
			 bool zerocopy)
275 {
276 WARN_ON(iov_iter_count(&con->v2.out_iter));
277 WARN_ON(con->v2.out_zero);
278
279 con->v2.out_bvec = *bv;
280 con->v2.out_iter_sendpage = zerocopy;
281 iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
282 con->v2.out_bvec.bv_len);
283 }
284
static void set_out_bvec_zero(struct ceph_connection *con)
286 {
287 WARN_ON(iov_iter_count(&con->v2.out_iter));
288 WARN_ON(!con->v2.out_zero);
289
290 bvec_set_page(&con->v2.out_bvec, ceph_zero_page,
291 min(con->v2.out_zero, (int)PAGE_SIZE), 0);
292 con->v2.out_iter_sendpage = true;
293 iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
294 con->v2.out_bvec.bv_len);
295 }
296
static void out_zero_add(struct ceph_connection *con, int len)
298 {
299 dout("%s con %p len %d\n", __func__, con, len);
300 con->v2.out_zero += len;
301 }
302
static void *alloc_conn_buf(struct ceph_connection *con, int len)
304 {
305 void *buf;
306
307 dout("%s con %p len %d\n", __func__, con, len);
308
309 if (WARN_ON(con->v2.conn_buf_cnt >= ARRAY_SIZE(con->v2.conn_bufs)))
310 return NULL;
311
312 buf = kvmalloc(len, GFP_NOIO);
313 if (!buf)
314 return NULL;
315
316 con->v2.conn_bufs[con->v2.conn_buf_cnt++] = buf;
317 return buf;
318 }
319
static void free_conn_bufs(struct ceph_connection *con)
321 {
322 while (con->v2.conn_buf_cnt)
323 kvfree(con->v2.conn_bufs[--con->v2.conn_buf_cnt]);
324 }
325
static void add_in_sign_kvec(struct ceph_connection *con, void *buf, int len)
327 {
328 BUG_ON(con->v2.in_sign_kvec_cnt >= ARRAY_SIZE(con->v2.in_sign_kvecs));
329
330 con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_base = buf;
331 con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_len = len;
332 con->v2.in_sign_kvec_cnt++;
333 }
334
static void clear_in_sign_kvecs(struct ceph_connection *con)
336 {
337 con->v2.in_sign_kvec_cnt = 0;
338 }
339
static void add_out_sign_kvec(struct ceph_connection *con, void *buf, int len)
341 {
342 BUG_ON(con->v2.out_sign_kvec_cnt >= ARRAY_SIZE(con->v2.out_sign_kvecs));
343
344 con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_base = buf;
345 con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_len = len;
346 con->v2.out_sign_kvec_cnt++;
347 }
348
static void clear_out_sign_kvecs(struct ceph_connection *con)
350 {
351 con->v2.out_sign_kvec_cnt = 0;
352 }
353
static bool con_secure(struct ceph_connection *con)
355 {
356 return con->v2.con_mode == CEPH_CON_MODE_SECURE;
357 }
358
static int front_len(const struct ceph_msg *msg)
360 {
361 return le32_to_cpu(msg->hdr.front_len);
362 }
363
static int middle_len(const struct ceph_msg *msg)
365 {
366 return le32_to_cpu(msg->hdr.middle_len);
367 }
368
static int data_len(const struct ceph_msg *msg)
370 {
371 return le32_to_cpu(msg->hdr.data_len);
372 }
373
static bool need_padding(int len)
375 {
376 return !IS_ALIGNED(len, CEPH_GCM_BLOCK_LEN);
377 }
378
static int padded_len(int len)
380 {
381 return ALIGN(len, CEPH_GCM_BLOCK_LEN);
382 }
383
static int padding_len(int len)
385 {
386 return padded_len(len) - len;
387 }
388
389 /* preamble + control segment */
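/*
 * In secure mode the preamble and the inlined portion of the control
 * segment are encrypted as one unit (CEPH_PREAMBLE_SECURE_LEN), and any
 * control bytes that don't fit inline are encrypted separately:
 *
 *   CEPH_PREAMBLE_SECURE_LEN [+ padded_len(ctrl_len - inline) + GCM tag]
 */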
static int head_onwire_len(int ctrl_len, bool secure)
391 {
392 int head_len;
393 int rem_len;
394
395 BUG_ON(ctrl_len < 0 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN);
396
397 if (secure) {
398 head_len = CEPH_PREAMBLE_SECURE_LEN;
399 if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
400 rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
401 head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN;
402 }
403 } else {
404 head_len = CEPH_PREAMBLE_PLAIN_LEN;
405 if (ctrl_len)
406 head_len += ctrl_len + CEPH_CRC_LEN;
407 }
408 return head_len;
409 }
410
411 /* front, middle and data segments + epilogue */
static int __tail_onwire_len(int front_len, int middle_len, int data_len,
			     bool secure)
414 {
415 BUG_ON(front_len < 0 || front_len > CEPH_MSG_MAX_FRONT_LEN ||
416 middle_len < 0 || middle_len > CEPH_MSG_MAX_MIDDLE_LEN ||
417 data_len < 0 || data_len > CEPH_MSG_MAX_DATA_LEN);
418
419 if (!front_len && !middle_len && !data_len)
420 return 0;
421
422 if (!secure)
423 return front_len + middle_len + data_len +
424 CEPH_EPILOGUE_PLAIN_LEN;
425
426 return padded_len(front_len) + padded_len(middle_len) +
427 padded_len(data_len) + CEPH_EPILOGUE_SECURE_LEN;
428 }
429
static int tail_onwire_len(const struct ceph_msg *msg, bool secure)
431 {
432 return __tail_onwire_len(front_len(msg), middle_len(msg),
433 data_len(msg), secure);
434 }
435
436 /* head_onwire_len(sizeof(struct ceph_msg_header2), false) */
437 #define MESSAGE_HEAD_PLAIN_LEN (CEPH_PREAMBLE_PLAIN_LEN + \
438 sizeof(struct ceph_msg_header2) + \
439 CEPH_CRC_LEN)
440
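/*
 * Segment alignments advertised in the preamble: pointer-sized for the
 * control, front and middle segments, page-sized for the data segment.
 */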
441 static const int frame_aligns[] = {
442 sizeof(void *),
443 sizeof(void *),
444 sizeof(void *),
445 PAGE_SIZE
446 };
447
448 /*
449 * Discards trailing empty segments, unless there is just one segment.
450 * A frame always has at least one (possibly empty) segment.
451 */
static int calc_segment_count(const int *lens, int len_cnt)
453 {
454 int i;
455
456 for (i = len_cnt - 1; i >= 0; i--) {
457 if (lens[i])
458 return i + 1;
459 }
460
461 return 1;
462 }
463
static void init_frame_desc(struct ceph_frame_desc *desc, int tag,
			    const int *lens, int len_cnt)
466 {
467 int i;
468
469 memset(desc, 0, sizeof(*desc));
470
471 desc->fd_tag = tag;
472 desc->fd_seg_cnt = calc_segment_count(lens, len_cnt);
473 BUG_ON(desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT);
474 for (i = 0; i < desc->fd_seg_cnt; i++) {
475 desc->fd_lens[i] = lens[i];
476 desc->fd_aligns[i] = frame_aligns[i];
477 }
478 }
479
480 /*
481 * Preamble crc covers everything up to itself (28 bytes) and
482 * is calculated and verified irrespective of the connection mode
483 * (i.e. even if the frame is encrypted).
484 */
static void encode_preamble(const struct ceph_frame_desc *desc, void *p)
486 {
487 void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
488 void *start = p;
489 int i;
490
491 memset(p, 0, CEPH_PREAMBLE_LEN);
492
493 ceph_encode_8(&p, desc->fd_tag);
494 ceph_encode_8(&p, desc->fd_seg_cnt);
495 for (i = 0; i < desc->fd_seg_cnt; i++) {
496 ceph_encode_32(&p, desc->fd_lens[i]);
497 ceph_encode_16(&p, desc->fd_aligns[i]);
498 }
499
500 put_unaligned_le32(crc32c(0, start, crcp - start), crcp);
501 }
502
static int decode_preamble(void *p, struct ceph_frame_desc *desc)
504 {
505 void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
506 u32 crc, expected_crc;
507 int i;
508
509 crc = crc32c(0, p, crcp - p);
510 expected_crc = get_unaligned_le32(crcp);
511 if (crc != expected_crc) {
512 pr_err("bad preamble crc, calculated %u, expected %u\n",
513 crc, expected_crc);
514 return -EBADMSG;
515 }
516
517 memset(desc, 0, sizeof(*desc));
518
519 desc->fd_tag = ceph_decode_8(&p);
520 desc->fd_seg_cnt = ceph_decode_8(&p);
521 if (desc->fd_seg_cnt < 1 ||
522 desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT) {
523 pr_err("bad segment count %d\n", desc->fd_seg_cnt);
524 return -EINVAL;
525 }
526 for (i = 0; i < desc->fd_seg_cnt; i++) {
527 desc->fd_lens[i] = ceph_decode_32(&p);
528 desc->fd_aligns[i] = ceph_decode_16(&p);
529 }
530
531 if (desc->fd_lens[0] < 0 ||
532 desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
533 pr_err("bad control segment length %d\n", desc->fd_lens[0]);
534 return -EINVAL;
535 }
536 if (desc->fd_lens[1] < 0 ||
537 desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
538 pr_err("bad front segment length %d\n", desc->fd_lens[1]);
539 return -EINVAL;
540 }
541 if (desc->fd_lens[2] < 0 ||
542 desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
543 pr_err("bad middle segment length %d\n", desc->fd_lens[2]);
544 return -EINVAL;
545 }
546 if (desc->fd_lens[3] < 0 ||
547 desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
548 pr_err("bad data segment length %d\n", desc->fd_lens[3]);
549 return -EINVAL;
550 }
551
	/*
	 * This would fire for FRAME_TAG_WAIT (it has one empty
	 * segment), but we should never get it as a client.
	 */
556 if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
557 pr_err("last segment empty, segment count %d\n",
558 desc->fd_seg_cnt);
559 return -EINVAL;
560 }
561
562 return 0;
563 }
564
static void encode_epilogue_plain(struct ceph_connection *con, bool aborted)
566 {
567 con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
568 FRAME_LATE_STATUS_COMPLETE;
569 cpu_to_le32s(&con->v2.out_epil.front_crc);
570 cpu_to_le32s(&con->v2.out_epil.middle_crc);
571 cpu_to_le32s(&con->v2.out_epil.data_crc);
572 }
573
static void encode_epilogue_secure(struct ceph_connection *con, bool aborted)
575 {
576 memset(&con->v2.out_epil, 0, sizeof(con->v2.out_epil));
577 con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
578 FRAME_LATE_STATUS_COMPLETE;
579 }
580
static int decode_epilogue(void *p, u32 *front_crc, u32 *middle_crc,
			   u32 *data_crc)
583 {
584 u8 late_status;
585
586 late_status = ceph_decode_8(&p);
587 if ((late_status & FRAME_LATE_STATUS_ABORTED_MASK) !=
588 FRAME_LATE_STATUS_COMPLETE) {
		/* we should never get an aborted message as a client */
590 pr_err("bad late_status 0x%x\n", late_status);
591 return -EINVAL;
592 }
593
594 if (front_crc && middle_crc && data_crc) {
595 *front_crc = ceph_decode_32(&p);
596 *middle_crc = ceph_decode_32(&p);
597 *data_crc = ceph_decode_32(&p);
598 }
599
600 return 0;
601 }
602
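/*
 * Convert between the msgr2 wire header (ceph_msg_header2) and the
 * in-memory ceph_msg_header used by the rest of the messenger.  Segment
 * lengths and the source entity aren't carried in ceph_msg_header2 --
 * they come from the frame descriptor and the connection, so the caller
 * passes them in explicitly.
 */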
static void fill_header(struct ceph_msg_header *hdr,
			const struct ceph_msg_header2 *hdr2,
			int front_len, int middle_len, int data_len,
			const struct ceph_entity_name *peer_name)
607 {
608 hdr->seq = hdr2->seq;
609 hdr->tid = hdr2->tid;
610 hdr->type = hdr2->type;
611 hdr->priority = hdr2->priority;
612 hdr->version = hdr2->version;
613 hdr->front_len = cpu_to_le32(front_len);
614 hdr->middle_len = cpu_to_le32(middle_len);
615 hdr->data_len = cpu_to_le32(data_len);
616 hdr->data_off = hdr2->data_off;
617 hdr->src = *peer_name;
618 hdr->compat_version = hdr2->compat_version;
619 hdr->reserved = 0;
620 hdr->crc = 0;
621 }
622
static void fill_header2(struct ceph_msg_header2 *hdr2,
			 const struct ceph_msg_header *hdr, u64 ack_seq)
625 {
626 hdr2->seq = hdr->seq;
627 hdr2->tid = hdr->tid;
628 hdr2->type = hdr->type;
629 hdr2->priority = hdr->priority;
630 hdr2->version = hdr->version;
631 hdr2->data_pre_padding_len = 0;
632 hdr2->data_off = hdr->data_off;
633 hdr2->ack_seq = cpu_to_le64(ack_seq);
634 hdr2->flags = 0;
635 hdr2->compat_version = hdr->compat_version;
636 hdr2->reserved = 0;
637 }
638
static int verify_control_crc(struct ceph_connection *con)
640 {
641 int ctrl_len = con->v2.in_desc.fd_lens[0];
642 u32 crc, expected_crc;
643
644 WARN_ON(con->v2.in_kvecs[0].iov_len != ctrl_len);
645 WARN_ON(con->v2.in_kvecs[1].iov_len != CEPH_CRC_LEN);
646
647 crc = crc32c(-1, con->v2.in_kvecs[0].iov_base, ctrl_len);
648 expected_crc = get_unaligned_le32(con->v2.in_kvecs[1].iov_base);
649 if (crc != expected_crc) {
650 pr_err("bad control crc, calculated %u, expected %u\n",
651 crc, expected_crc);
652 return -EBADMSG;
653 }
654
655 return 0;
656 }
657
static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
				u32 middle_crc, u32 data_crc)
660 {
661 if (front_len(con->in_msg)) {
662 con->in_front_crc = crc32c(-1, con->in_msg->front.iov_base,
663 front_len(con->in_msg));
664 } else {
665 WARN_ON(!middle_len(con->in_msg) && !data_len(con->in_msg));
666 con->in_front_crc = -1;
667 }
668
669 if (middle_len(con->in_msg))
670 con->in_middle_crc = crc32c(-1,
671 con->in_msg->middle->vec.iov_base,
672 middle_len(con->in_msg));
673 else if (data_len(con->in_msg))
674 con->in_middle_crc = -1;
675 else
676 con->in_middle_crc = 0;
677
678 if (!data_len(con->in_msg))
679 con->in_data_crc = 0;
680
681 dout("%s con %p msg %p crcs %u %u %u\n", __func__, con, con->in_msg,
682 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
683
684 if (con->in_front_crc != front_crc) {
685 pr_err("bad front crc, calculated %u, expected %u\n",
686 con->in_front_crc, front_crc);
687 return -EBADMSG;
688 }
689 if (con->in_middle_crc != middle_crc) {
690 pr_err("bad middle crc, calculated %u, expected %u\n",
691 con->in_middle_crc, middle_crc);
692 return -EBADMSG;
693 }
694 if (con->in_data_crc != data_crc) {
695 pr_err("bad data crc, calculated %u, expected %u\n",
696 con->in_data_crc, data_crc);
697 return -EBADMSG;
698 }
699
700 return 0;
701 }
702
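/*
 * Set up HMAC-SHA256 for auth signatures and, in secure mode, AES-GCM
 * for frame encryption.  con_secret is expected to be laid out as
 *
 *   key (CEPH_GCM_KEY_LEN) | rx nonce (CEPH_GCM_IV_LEN) | tx nonce (CEPH_GCM_IV_LEN)
 *
 * seeding in_gcm_nonce and out_gcm_nonce respectively.
 */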
static int setup_crypto(struct ceph_connection *con,
			const u8 *session_key, int session_key_len,
			const u8 *con_secret, int con_secret_len)
706 {
707 unsigned int noio_flag;
708 int ret;
709
710 dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
711 __func__, con, con->v2.con_mode, session_key_len, con_secret_len);
712 WARN_ON(con->v2.hmac_tfm || con->v2.gcm_tfm || con->v2.gcm_req);
713
714 if (con->v2.con_mode != CEPH_CON_MODE_CRC &&
715 con->v2.con_mode != CEPH_CON_MODE_SECURE) {
716 pr_err("bad con_mode %d\n", con->v2.con_mode);
717 return -EINVAL;
718 }
719
720 if (!session_key_len) {
721 WARN_ON(con->v2.con_mode != CEPH_CON_MODE_CRC);
722 WARN_ON(con_secret_len);
723 return 0; /* auth_none */
724 }
725
726 noio_flag = memalloc_noio_save();
727 con->v2.hmac_tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
728 memalloc_noio_restore(noio_flag);
729 if (IS_ERR(con->v2.hmac_tfm)) {
730 ret = PTR_ERR(con->v2.hmac_tfm);
731 con->v2.hmac_tfm = NULL;
732 pr_err("failed to allocate hmac tfm context: %d\n", ret);
733 return ret;
734 }
735
736 WARN_ON((unsigned long)session_key &
737 crypto_shash_alignmask(con->v2.hmac_tfm));
738 ret = crypto_shash_setkey(con->v2.hmac_tfm, session_key,
739 session_key_len);
740 if (ret) {
741 pr_err("failed to set hmac key: %d\n", ret);
742 return ret;
743 }
744
745 if (con->v2.con_mode == CEPH_CON_MODE_CRC) {
746 WARN_ON(con_secret_len);
747 return 0; /* auth_x, plain mode */
748 }
749
750 if (con_secret_len < CEPH_GCM_KEY_LEN + 2 * CEPH_GCM_IV_LEN) {
751 pr_err("con_secret too small %d\n", con_secret_len);
752 return -EINVAL;
753 }
754
755 noio_flag = memalloc_noio_save();
756 con->v2.gcm_tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
757 memalloc_noio_restore(noio_flag);
758 if (IS_ERR(con->v2.gcm_tfm)) {
759 ret = PTR_ERR(con->v2.gcm_tfm);
760 con->v2.gcm_tfm = NULL;
761 pr_err("failed to allocate gcm tfm context: %d\n", ret);
762 return ret;
763 }
764
765 WARN_ON((unsigned long)con_secret &
766 crypto_aead_alignmask(con->v2.gcm_tfm));
767 ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
768 if (ret) {
769 pr_err("failed to set gcm key: %d\n", ret);
770 return ret;
771 }
772
773 WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
774 ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
775 if (ret) {
776 pr_err("failed to set gcm tag size: %d\n", ret);
777 return ret;
778 }
779
780 con->v2.gcm_req = aead_request_alloc(con->v2.gcm_tfm, GFP_NOIO);
781 if (!con->v2.gcm_req) {
782 pr_err("failed to allocate gcm request\n");
783 return -ENOMEM;
784 }
785
786 crypto_init_wait(&con->v2.gcm_wait);
787 aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
788 crypto_req_done, &con->v2.gcm_wait);
789
790 memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
791 CEPH_GCM_IV_LEN);
792 memcpy(&con->v2.out_gcm_nonce,
793 con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
794 CEPH_GCM_IV_LEN);
795 return 0; /* auth_x, secure mode */
796 }
797
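/*
 * Compute the auth signature over the gathered kvecs.  With auth_none
 * there is no session key and the "signature" is all zeros.
 */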
static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
		       int kvec_cnt, u8 *hmac)
800 {
801 SHASH_DESC_ON_STACK(desc, con->v2.hmac_tfm); /* tfm arg is ignored */
802 int ret;
803 int i;
804
805 dout("%s con %p hmac_tfm %p kvec_cnt %d\n", __func__, con,
806 con->v2.hmac_tfm, kvec_cnt);
807
808 if (!con->v2.hmac_tfm) {
809 memset(hmac, 0, SHA256_DIGEST_SIZE);
810 return 0; /* auth_none */
811 }
812
813 desc->tfm = con->v2.hmac_tfm;
814 ret = crypto_shash_init(desc);
815 if (ret)
816 goto out;
817
818 for (i = 0; i < kvec_cnt; i++) {
819 WARN_ON((unsigned long)kvecs[i].iov_base &
820 crypto_shash_alignmask(con->v2.hmac_tfm));
821 ret = crypto_shash_update(desc, kvecs[i].iov_base,
822 kvecs[i].iov_len);
823 if (ret)
824 goto out;
825 }
826
827 ret = crypto_shash_final(desc, hmac);
828
829 out:
830 shash_desc_zero(desc);
831 return ret; /* auth_x, both plain and secure modes */
832 }
833
static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
835 {
836 u64 counter;
837
838 counter = le64_to_cpu(nonce->counter);
839 nonce->counter = cpu_to_le64(counter + 1);
840 }
841
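/*
 * Perform a single AEAD operation; src and dst may describe the same
 * buffers for in-place en/decryption.  The per-direction nonce counter
 * is only incremented on success so that both sides stay in sync.
 */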
static int gcm_crypt(struct ceph_connection *con, bool encrypt,
		     struct scatterlist *src, struct scatterlist *dst,
		     int src_len)
845 {
846 struct ceph_gcm_nonce *nonce;
847 int ret;
848
849 nonce = encrypt ? &con->v2.out_gcm_nonce : &con->v2.in_gcm_nonce;
850
851 aead_request_set_ad(con->v2.gcm_req, 0); /* no AAD */
852 aead_request_set_crypt(con->v2.gcm_req, src, dst, src_len, (u8 *)nonce);
853 ret = crypto_wait_req(encrypt ? crypto_aead_encrypt(con->v2.gcm_req) :
854 crypto_aead_decrypt(con->v2.gcm_req),
855 &con->v2.gcm_wait);
856 if (ret)
857 return ret;
858
859 gcm_inc_nonce(nonce);
860 return 0;
861 }
862
static void get_bvec_at(struct ceph_msg_data_cursor *cursor,
			struct bio_vec *bv)
865 {
866 struct page *page;
867 size_t off, len;
868
869 WARN_ON(!cursor->total_resid);
870
871 /* skip zero-length data items */
872 while (!cursor->resid)
873 ceph_msg_data_advance(cursor, 0);
874
875 /* get a piece of data, cursor isn't advanced */
876 page = ceph_msg_data_next(cursor, &off, &len);
877 bvec_set_page(bv, page, len, off);
878 }
879
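/*
 * Number of sg entries needed to describe buf: one per page for
 * vmalloc'ed buffers, one for the whole buffer otherwise, plus one for
 * the zero padding if the length isn't a multiple of CEPH_GCM_BLOCK_LEN.
 */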
static int calc_sg_cnt(void *buf, int buf_len)
881 {
882 int sg_cnt;
883
884 if (!buf_len)
885 return 0;
886
887 sg_cnt = need_padding(buf_len) ? 1 : 0;
888 if (is_vmalloc_addr(buf)) {
889 WARN_ON(offset_in_page(buf));
890 sg_cnt += PAGE_ALIGN(buf_len) >> PAGE_SHIFT;
891 } else {
892 sg_cnt++;
893 }
894
895 return sg_cnt;
896 }
897
static int calc_sg_cnt_cursor(struct ceph_msg_data_cursor *cursor)
899 {
900 int data_len = cursor->total_resid;
901 struct bio_vec bv;
902 int sg_cnt;
903
904 if (!data_len)
905 return 0;
906
907 sg_cnt = need_padding(data_len) ? 1 : 0;
908 do {
909 get_bvec_at(cursor, &bv);
910 sg_cnt++;
911
912 ceph_msg_data_advance(cursor, bv.bv_len);
913 } while (cursor->total_resid);
914
915 return sg_cnt;
916 }
917
static void init_sgs(struct scatterlist **sg, void *buf, int buf_len, u8 *pad)
919 {
920 void *end = buf + buf_len;
921 struct page *page;
922 int len;
923 void *p;
924
925 if (!buf_len)
926 return;
927
928 if (is_vmalloc_addr(buf)) {
929 p = buf;
930 do {
931 page = vmalloc_to_page(p);
932 len = min_t(int, end - p, PAGE_SIZE);
933 WARN_ON(!page || !len || offset_in_page(p));
934 sg_set_page(*sg, page, len, 0);
935 *sg = sg_next(*sg);
936 p += len;
937 } while (p != end);
938 } else {
939 sg_set_buf(*sg, buf, buf_len);
940 *sg = sg_next(*sg);
941 }
942
943 if (need_padding(buf_len)) {
944 sg_set_buf(*sg, pad, padding_len(buf_len));
945 *sg = sg_next(*sg);
946 }
947 }
948
static void init_sgs_cursor(struct scatterlist **sg,
			    struct ceph_msg_data_cursor *cursor, u8 *pad)
951 {
952 int data_len = cursor->total_resid;
953 struct bio_vec bv;
954
955 if (!data_len)
956 return;
957
958 do {
959 get_bvec_at(cursor, &bv);
960 sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
961 *sg = sg_next(*sg);
962
963 ceph_msg_data_advance(cursor, bv.bv_len);
964 } while (cursor->total_resid);
965
966 if (need_padding(data_len)) {
967 sg_set_buf(*sg, pad, padding_len(data_len));
968 *sg = sg_next(*sg);
969 }
970 }
971
972 /**
973 * init_sgs_pages: set up scatterlist on an array of page pointers
974 * @sg: scatterlist to populate
975 * @pages: pointer to page array
976 * @dpos: position in the array to start (bytes)
977 * @dlen: len to add to sg (bytes)
978 * @pad: pointer to pad destination (if any)
979 *
980 * Populate the scatterlist from the page array, starting at an arbitrary
981 * byte in the array and running for a specified length.
982 */
static void init_sgs_pages(struct scatterlist **sg, struct page **pages,
			   int dpos, int dlen, u8 *pad)
985 {
986 int idx = dpos >> PAGE_SHIFT;
987 int off = offset_in_page(dpos);
988 int resid = dlen;
989
990 do {
991 int len = min(resid, (int)PAGE_SIZE - off);
992
993 sg_set_page(*sg, pages[idx], len, off);
994 *sg = sg_next(*sg);
995 off = 0;
996 ++idx;
997 resid -= len;
998 } while (resid);
999
1000 if (need_padding(dlen)) {
1001 sg_set_buf(*sg, pad, padding_len(dlen));
1002 *sg = sg_next(*sg);
1003 }
1004 }
1005
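/*
 * Build a scatterlist covering the logical tail of @msg: front, middle
 * and data segments (each followed by its zero padding) and finally the
 * epilogue, with room for the auth tag if @add_tag.  If @pages is given,
 * the data segment is taken from that page array starting at byte @dpos
 * instead of from the message's data cursor (sparse read case).
 */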
static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
			     u8 *front_pad, u8 *middle_pad, u8 *data_pad,
			     void *epilogue, struct page **pages, int dpos,
			     bool add_tag)
1010 {
1011 struct ceph_msg_data_cursor cursor;
1012 struct scatterlist *cur_sg;
1013 int dlen = data_len(msg);
1014 int sg_cnt;
1015 int ret;
1016
1017 if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
1018 return 0;
1019
1020 sg_cnt = 1; /* epilogue + [auth tag] */
1021 if (front_len(msg))
1022 sg_cnt += calc_sg_cnt(msg->front.iov_base,
1023 front_len(msg));
1024 if (middle_len(msg))
1025 sg_cnt += calc_sg_cnt(msg->middle->vec.iov_base,
1026 middle_len(msg));
1027 if (dlen) {
1028 if (pages) {
1029 sg_cnt += calc_pages_for(dpos, dlen);
1030 if (need_padding(dlen))
1031 sg_cnt++;
1032 } else {
1033 ceph_msg_data_cursor_init(&cursor, msg, dlen);
1034 sg_cnt += calc_sg_cnt_cursor(&cursor);
1035 }
1036 }
1037
1038 ret = sg_alloc_table(sgt, sg_cnt, GFP_NOIO);
1039 if (ret)
1040 return ret;
1041
1042 cur_sg = sgt->sgl;
1043 if (front_len(msg))
1044 init_sgs(&cur_sg, msg->front.iov_base, front_len(msg),
1045 front_pad);
1046 if (middle_len(msg))
1047 init_sgs(&cur_sg, msg->middle->vec.iov_base, middle_len(msg),
1048 middle_pad);
1049 if (dlen) {
1050 if (pages) {
1051 init_sgs_pages(&cur_sg, pages, dpos, dlen, data_pad);
1052 } else {
1053 ceph_msg_data_cursor_init(&cursor, msg, dlen);
1054 init_sgs_cursor(&cur_sg, &cursor, data_pad);
1055 }
1056 }
1057
1058 WARN_ON(!sg_is_last(cur_sg));
1059 sg_set_buf(cur_sg, epilogue,
1060 CEPH_GCM_BLOCK_LEN + (add_tag ? CEPH_GCM_TAG_LEN : 0));
1061 return 0;
1062 }
1063
static int decrypt_preamble(struct ceph_connection *con)
1065 {
1066 struct scatterlist sg;
1067
1068 sg_init_one(&sg, con->v2.in_buf, CEPH_PREAMBLE_SECURE_LEN);
1069 return gcm_crypt(con, false, &sg, &sg, CEPH_PREAMBLE_SECURE_LEN);
1070 }
1071
static int decrypt_control_remainder(struct ceph_connection *con)
1073 {
1074 int ctrl_len = con->v2.in_desc.fd_lens[0];
1075 int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1076 int pt_len = padding_len(rem_len) + CEPH_GCM_TAG_LEN;
1077 struct scatterlist sgs[2];
1078
1079 WARN_ON(con->v2.in_kvecs[0].iov_len != rem_len);
1080 WARN_ON(con->v2.in_kvecs[1].iov_len != pt_len);
1081
1082 sg_init_table(sgs, 2);
1083 sg_set_buf(&sgs[0], con->v2.in_kvecs[0].iov_base, rem_len);
1084 sg_set_buf(&sgs[1], con->v2.in_buf, pt_len);
1085
1086 return gcm_crypt(con, false, sgs, sgs,
1087 padded_len(rem_len) + CEPH_GCM_TAG_LEN);
1088 }
1089
1090 /* Process sparse read data that lives in a buffer */
static int process_v2_sparse_read(struct ceph_connection *con,
				  struct page **pages, int spos)
1093 {
1094 struct ceph_msg_data_cursor *cursor = &con->v2.in_cursor;
1095 int ret;
1096
1097 for (;;) {
1098 char *buf = NULL;
1099
1100 ret = con->ops->sparse_read(con, cursor, &buf);
1101 if (ret <= 0)
1102 return ret;
1103
1104 dout("%s: sparse_read return %x buf %p\n", __func__, ret, buf);
1105
1106 do {
1107 int idx = spos >> PAGE_SHIFT;
1108 int soff = offset_in_page(spos);
1109 struct page *spage = con->v2.in_enc_pages[idx];
1110 int len = min_t(int, ret, PAGE_SIZE - soff);
1111
1112 if (buf) {
1113 memcpy_from_page(buf, spage, soff, len);
1114 buf += len;
1115 } else {
1116 struct bio_vec bv;
1117
1118 get_bvec_at(cursor, &bv);
1119 len = min_t(int, len, bv.bv_len);
1120 memcpy_page(bv.bv_page, bv.bv_offset,
1121 spage, soff, len);
1122 ceph_msg_data_advance(cursor, len);
1123 }
1124 spos += len;
1125 ret -= len;
1126 } while (ret);
1127 }
1128 }
1129
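/*
 * Decrypt the tail of an incoming message from the in_enc_pages vector,
 * scattering the plaintext directly into the message's front, middle and
 * data buffers.  For sparse reads the data stays in the page vector and
 * is unpacked by process_v2_sparse_read() afterwards.
 */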
static int decrypt_tail(struct ceph_connection *con)
1131 {
1132 struct sg_table enc_sgt = {};
1133 struct sg_table sgt = {};
1134 struct page **pages = NULL;
1135 bool sparse = !!con->in_msg->sparse_read_total;
1136 int dpos = 0;
1137 int tail_len;
1138 int ret;
1139
1140 tail_len = tail_onwire_len(con->in_msg, true);
1141 ret = sg_alloc_table_from_pages(&enc_sgt, con->v2.in_enc_pages,
1142 con->v2.in_enc_page_cnt, 0, tail_len,
1143 GFP_NOIO);
1144 if (ret)
1145 goto out;
1146
1147 if (sparse) {
1148 dpos = padded_len(front_len(con->in_msg) + padded_len(middle_len(con->in_msg)));
1149 pages = con->v2.in_enc_pages;
1150 }
1151
1152 ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
1153 MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
1154 con->v2.in_buf, pages, dpos, true);
1155 if (ret)
1156 goto out;
1157
1158 dout("%s con %p msg %p enc_page_cnt %d sg_cnt %d\n", __func__, con,
1159 con->in_msg, con->v2.in_enc_page_cnt, sgt.orig_nents);
1160 ret = gcm_crypt(con, false, enc_sgt.sgl, sgt.sgl, tail_len);
1161 if (ret)
1162 goto out;
1163
1164 if (sparse && data_len(con->in_msg)) {
1165 ret = process_v2_sparse_read(con, con->v2.in_enc_pages, dpos);
1166 if (ret)
1167 goto out;
1168 }
1169
1170 WARN_ON(!con->v2.in_enc_page_cnt);
1171 ceph_release_page_vector(con->v2.in_enc_pages,
1172 con->v2.in_enc_page_cnt);
1173 con->v2.in_enc_pages = NULL;
1174 con->v2.in_enc_page_cnt = 0;
1175
1176 out:
1177 sg_free_table(&sgt);
1178 sg_free_table(&enc_sgt);
1179 return ret;
1180 }
1181
static int prepare_banner(struct ceph_connection *con)
1183 {
1184 int buf_len = CEPH_BANNER_V2_LEN + 2 + 8 + 8;
1185 void *buf, *p;
1186
1187 buf = alloc_conn_buf(con, buf_len);
1188 if (!buf)
1189 return -ENOMEM;
1190
1191 p = buf;
1192 ceph_encode_copy(&p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN);
1193 ceph_encode_16(&p, sizeof(u64) + sizeof(u64));
1194 ceph_encode_64(&p, CEPH_MSGR2_SUPPORTED_FEATURES);
1195 ceph_encode_64(&p, CEPH_MSGR2_REQUIRED_FEATURES);
1196 WARN_ON(p != buf + buf_len);
1197
1198 add_out_kvec(con, buf, buf_len);
1199 add_out_sign_kvec(con, buf, buf_len);
1200 ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1201 return 0;
1202 }
1203
1204 /*
1205 * base:
1206 * preamble
1207 * control body (ctrl_len bytes)
1208 * space for control crc
1209 *
1210 * extdata (optional):
1211 * control body (extdata_len bytes)
1212 *
1213 * Compute control crc and gather base and extdata into:
1214 *
1215 * preamble
1216 * control body (ctrl_len + extdata_len bytes)
1217 * control crc
1218 *
1219 * Preamble should already be encoded at the start of base.
1220 */
static void prepare_head_plain(struct ceph_connection *con, void *base,
			       int ctrl_len, void *extdata, int extdata_len,
			       bool to_be_signed)
1224 {
1225 int base_len = CEPH_PREAMBLE_LEN + ctrl_len + CEPH_CRC_LEN;
1226 void *crcp = base + base_len - CEPH_CRC_LEN;
1227 u32 crc;
1228
1229 crc = crc32c(-1, CTRL_BODY(base), ctrl_len);
1230 if (extdata_len)
1231 crc = crc32c(crc, extdata, extdata_len);
1232 put_unaligned_le32(crc, crcp);
1233
1234 if (!extdata_len) {
1235 add_out_kvec(con, base, base_len);
1236 if (to_be_signed)
1237 add_out_sign_kvec(con, base, base_len);
1238 return;
1239 }
1240
1241 add_out_kvec(con, base, crcp - base);
1242 add_out_kvec(con, extdata, extdata_len);
1243 add_out_kvec(con, crcp, CEPH_CRC_LEN);
1244 if (to_be_signed) {
1245 add_out_sign_kvec(con, base, crcp - base);
1246 add_out_sign_kvec(con, extdata, extdata_len);
1247 add_out_sign_kvec(con, crcp, CEPH_CRC_LEN);
1248 }
1249 }
1250
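/*
 * base:
 *   preamble
 *   control body (ctrl_len bytes, fits into the inline buffer)
 *   space for preamble auth tag
 *
 * Pad the inline buffer out to CEPH_PREAMBLE_INLINE_LEN if needed and
 * encrypt everything in one go.
 *
 * Preamble should already be encoded at the start of base.
 */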
static int prepare_head_secure_small(struct ceph_connection *con,
				     void *base, int ctrl_len)
1253 {
1254 struct scatterlist sg;
1255 int ret;
1256
1257 /* inline buffer padding? */
1258 if (ctrl_len < CEPH_PREAMBLE_INLINE_LEN)
1259 memset(CTRL_BODY(base) + ctrl_len, 0,
1260 CEPH_PREAMBLE_INLINE_LEN - ctrl_len);
1261
1262 sg_init_one(&sg, base, CEPH_PREAMBLE_SECURE_LEN);
1263 ret = gcm_crypt(con, true, &sg, &sg,
1264 CEPH_PREAMBLE_SECURE_LEN - CEPH_GCM_TAG_LEN);
1265 if (ret)
1266 return ret;
1267
1268 add_out_kvec(con, base, CEPH_PREAMBLE_SECURE_LEN);
1269 return 0;
1270 }
1271
1272 /*
1273 * base:
1274 * preamble
1275 * control body (ctrl_len bytes)
1276 * space for padding, if needed
1277 * space for control remainder auth tag
1278 * space for preamble auth tag
1279 *
1280 * Encrypt preamble and the inline portion, then encrypt the remainder
1281 * and gather into:
1282 *
1283 * preamble
1284 * control body (48 bytes)
1285 * preamble auth tag
1286 * control body (ctrl_len - 48 bytes)
1287 * zero padding, if needed
1288 * control remainder auth tag
1289 *
1290 * Preamble should already be encoded at the start of base.
1291 */
static int prepare_head_secure_big(struct ceph_connection *con,
				   void *base, int ctrl_len)
1294 {
1295 int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1296 void *rem = CTRL_BODY(base) + CEPH_PREAMBLE_INLINE_LEN;
1297 void *rem_tag = rem + padded_len(rem_len);
1298 void *pmbl_tag = rem_tag + CEPH_GCM_TAG_LEN;
1299 struct scatterlist sgs[2];
1300 int ret;
1301
1302 sg_init_table(sgs, 2);
1303 sg_set_buf(&sgs[0], base, rem - base);
1304 sg_set_buf(&sgs[1], pmbl_tag, CEPH_GCM_TAG_LEN);
1305 ret = gcm_crypt(con, true, sgs, sgs, rem - base);
1306 if (ret)
1307 return ret;
1308
1309 /* control remainder padding? */
1310 if (need_padding(rem_len))
1311 memset(rem + rem_len, 0, padding_len(rem_len));
1312
1313 sg_init_one(&sgs[0], rem, pmbl_tag - rem);
1314 ret = gcm_crypt(con, true, sgs, sgs, rem_tag - rem);
1315 if (ret)
1316 return ret;
1317
1318 add_out_kvec(con, base, rem - base);
1319 add_out_kvec(con, pmbl_tag, CEPH_GCM_TAG_LEN);
1320 add_out_kvec(con, rem, pmbl_tag - rem);
1321 return 0;
1322 }
1323
static int __prepare_control(struct ceph_connection *con, int tag,
			     void *base, int ctrl_len, void *extdata,
			     int extdata_len, bool to_be_signed)
1327 {
1328 int total_len = ctrl_len + extdata_len;
1329 struct ceph_frame_desc desc;
1330 int ret;
1331
1332 dout("%s con %p tag %d len %d (%d+%d)\n", __func__, con, tag,
1333 total_len, ctrl_len, extdata_len);
1334
1335 /* extdata may be vmalloc'ed but not base */
1336 if (WARN_ON(is_vmalloc_addr(base) || !ctrl_len))
1337 return -EINVAL;
1338
1339 init_frame_desc(&desc, tag, &total_len, 1);
1340 encode_preamble(&desc, base);
1341
1342 if (con_secure(con)) {
1343 if (WARN_ON(extdata_len || to_be_signed))
1344 return -EINVAL;
1345
1346 if (ctrl_len <= CEPH_PREAMBLE_INLINE_LEN)
1347 /* fully inlined, inline buffer may need padding */
1348 ret = prepare_head_secure_small(con, base, ctrl_len);
1349 else
1350 /* partially inlined, inline buffer is full */
1351 ret = prepare_head_secure_big(con, base, ctrl_len);
1352 if (ret)
1353 return ret;
1354 } else {
1355 prepare_head_plain(con, base, ctrl_len, extdata, extdata_len,
1356 to_be_signed);
1357 }
1358
1359 ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1360 return 0;
1361 }
1362
static int prepare_control(struct ceph_connection *con, int tag,
			   void *base, int ctrl_len)
1365 {
1366 return __prepare_control(con, tag, base, ctrl_len, NULL, 0, false);
1367 }
1368
static int prepare_hello(struct ceph_connection *con)
1370 {
1371 void *buf, *p;
1372 int ctrl_len;
1373
1374 ctrl_len = 1 + ceph_entity_addr_encoding_len(&con->peer_addr);
1375 buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1376 if (!buf)
1377 return -ENOMEM;
1378
1379 p = CTRL_BODY(buf);
1380 ceph_encode_8(&p, CEPH_ENTITY_TYPE_CLIENT);
1381 ceph_encode_entity_addr(&p, &con->peer_addr);
1382 WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1383
1384 return __prepare_control(con, FRAME_TAG_HELLO, buf, ctrl_len,
1385 NULL, 0, true);
1386 }
1387
1388 /* so that head_onwire_len(AUTH_BUF_LEN, false) is 512 */
1389 #define AUTH_BUF_LEN (512 - CEPH_CRC_LEN - CEPH_PREAMBLE_PLAIN_LEN)
1390
static int prepare_auth_request(struct ceph_connection *con)
1392 {
1393 void *authorizer, *authorizer_copy;
1394 int ctrl_len, authorizer_len;
1395 void *buf;
1396 int ret;
1397
1398 ctrl_len = AUTH_BUF_LEN;
1399 buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1400 if (!buf)
1401 return -ENOMEM;
1402
1403 mutex_unlock(&con->mutex);
1404 ret = con->ops->get_auth_request(con, CTRL_BODY(buf), &ctrl_len,
1405 &authorizer, &authorizer_len);
1406 mutex_lock(&con->mutex);
1407 if (con->state != CEPH_CON_S_V2_HELLO) {
1408 dout("%s con %p state changed to %d\n", __func__, con,
1409 con->state);
1410 return -EAGAIN;
1411 }
1412
1413 dout("%s con %p get_auth_request ret %d\n", __func__, con, ret);
1414 if (ret)
1415 return ret;
1416
1417 authorizer_copy = alloc_conn_buf(con, authorizer_len);
1418 if (!authorizer_copy)
1419 return -ENOMEM;
1420
1421 memcpy(authorizer_copy, authorizer, authorizer_len);
1422
1423 return __prepare_control(con, FRAME_TAG_AUTH_REQUEST, buf, ctrl_len,
1424 authorizer_copy, authorizer_len, true);
1425 }
1426
static int prepare_auth_request_more(struct ceph_connection *con,
				     void *reply, int reply_len)
1429 {
1430 int ctrl_len, authorizer_len;
1431 void *authorizer;
1432 void *buf;
1433 int ret;
1434
1435 ctrl_len = AUTH_BUF_LEN;
1436 buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));
1437 if (!buf)
1438 return -ENOMEM;
1439
1440 mutex_unlock(&con->mutex);
1441 ret = con->ops->handle_auth_reply_more(con, reply, reply_len,
1442 CTRL_BODY(buf), &ctrl_len,
1443 &authorizer, &authorizer_len);
1444 mutex_lock(&con->mutex);
1445 if (con->state != CEPH_CON_S_V2_AUTH) {
1446 dout("%s con %p state changed to %d\n", __func__, con,
1447 con->state);
1448 return -EAGAIN;
1449 }
1450
1451 dout("%s con %p handle_auth_reply_more ret %d\n", __func__, con, ret);
1452 if (ret)
1453 return ret;
1454
1455 return __prepare_control(con, FRAME_TAG_AUTH_REQUEST_MORE, buf,
1456 ctrl_len, authorizer, authorizer_len, true);
1457 }
1458
static int prepare_auth_signature(struct ceph_connection *con)
1460 {
1461 void *buf;
1462 int ret;
1463
1464 buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,
1465 con_secure(con)));
1466 if (!buf)
1467 return -ENOMEM;
1468
1469 ret = hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
1470 CTRL_BODY(buf));
1471 if (ret)
1472 return ret;
1473
1474 return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
1475 SHA256_DIGEST_SIZE);
1476 }
1477
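/*
 * Send our identity for a brand new session: addrvec, target address,
 * global_id, global_seq, feature bits and a freshly generated client
 * cookie that identifies this session if we later need to reconnect.
 */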
static int prepare_client_ident(struct ceph_connection *con)
1479 {
1480 struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
1481 struct ceph_client *client = from_msgr(con->msgr);
1482 u64 global_id = ceph_client_gid(client);
1483 void *buf, *p;
1484 int ctrl_len;
1485
1486 WARN_ON(con->v2.server_cookie);
1487 WARN_ON(con->v2.connect_seq);
1488 WARN_ON(con->v2.peer_global_seq);
1489
1490 if (!con->v2.client_cookie) {
1491 do {
1492 get_random_bytes(&con->v2.client_cookie,
1493 sizeof(con->v2.client_cookie));
1494 } while (!con->v2.client_cookie);
1495 dout("%s con %p generated cookie 0x%llx\n", __func__, con,
1496 con->v2.client_cookie);
1497 } else {
1498 dout("%s con %p cookie already set 0x%llx\n", __func__, con,
1499 con->v2.client_cookie);
1500 }
1501
1502 dout("%s con %p my_addr %s/%u peer_addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx cookie 0x%llx\n",
1503 __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
1504 ceph_pr_addr(&con->peer_addr), le32_to_cpu(con->peer_addr.nonce),
1505 global_id, con->v2.global_seq, client->supported_features,
1506 client->required_features, con->v2.client_cookie);
1507
1508 ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) +
1509 ceph_entity_addr_encoding_len(&con->peer_addr) + 6 * 8;
1510 buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
1511 if (!buf)
1512 return -ENOMEM;
1513
1514 p = CTRL_BODY(buf);
1515 ceph_encode_8(&p, 2); /* addrvec marker */
1516 ceph_encode_32(&p, 1); /* addr_cnt */
1517 ceph_encode_entity_addr(&p, my_addr);
1518 ceph_encode_entity_addr(&p, &con->peer_addr);
1519 ceph_encode_64(&p, global_id);
1520 ceph_encode_64(&p, con->v2.global_seq);
1521 ceph_encode_64(&p, client->supported_features);
1522 ceph_encode_64(&p, client->required_features);
1523 ceph_encode_64(&p, 0); /* flags */
1524 ceph_encode_64(&p, con->v2.client_cookie);
1525 WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1526
1527 return prepare_control(con, FRAME_TAG_CLIENT_IDENT, buf, ctrl_len);
1528 }
1529
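/*
 * Ask the server to resume an existing session: both cookies plus our
 * global_seq, connect_seq and the seq of the last message we received,
 * so that the server knows where to pick up.
 */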
static int prepare_session_reconnect(struct ceph_connection *con)
1531 {
1532 struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
1533 void *buf, *p;
1534 int ctrl_len;
1535
1536 WARN_ON(!con->v2.client_cookie);
1537 WARN_ON(!con->v2.server_cookie);
1538 WARN_ON(!con->v2.connect_seq);
1539 WARN_ON(!con->v2.peer_global_seq);
1540
1541 dout("%s con %p my_addr %s/%u client_cookie 0x%llx server_cookie 0x%llx global_seq %llu connect_seq %llu in_seq %llu\n",
1542 __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
1543 con->v2.client_cookie, con->v2.server_cookie, con->v2.global_seq,
1544 con->v2.connect_seq, con->in_seq);
1545
1546 ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) + 5 * 8;
1547 buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));
1548 if (!buf)
1549 return -ENOMEM;
1550
1551 p = CTRL_BODY(buf);
1552 ceph_encode_8(&p, 2); /* entity_addrvec_t marker */
1553 ceph_encode_32(&p, 1); /* my_addrs len */
1554 ceph_encode_entity_addr(&p, my_addr);
1555 ceph_encode_64(&p, con->v2.client_cookie);
1556 ceph_encode_64(&p, con->v2.server_cookie);
1557 ceph_encode_64(&p, con->v2.global_seq);
1558 ceph_encode_64(&p, con->v2.connect_seq);
1559 ceph_encode_64(&p, con->in_seq);
1560 WARN_ON(p != CTRL_BODY(buf) + ctrl_len);
1561
1562 return prepare_control(con, FRAME_TAG_SESSION_RECONNECT, buf, ctrl_len);
1563 }
1564
static int prepare_keepalive2(struct ceph_connection *con)
1566 {
1567 struct ceph_timespec *ts = CTRL_BODY(con->v2.out_buf);
1568 struct timespec64 now;
1569
1570 ktime_get_real_ts64(&now);
1571 dout("%s con %p timestamp %lld.%09ld\n", __func__, con, now.tv_sec,
1572 now.tv_nsec);
1573
1574 ceph_encode_timespec64(ts, &now);
1575
1576 reset_out_kvecs(con);
1577 return prepare_control(con, FRAME_TAG_KEEPALIVE2, con->v2.out_buf,
1578 sizeof(struct ceph_timespec));
1579 }
1580
static int prepare_ack(struct ceph_connection *con)
1582 {
1583 void *p;
1584
1585 dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
1586 con->in_seq_acked, con->in_seq);
1587 con->in_seq_acked = con->in_seq;
1588
1589 p = CTRL_BODY(con->v2.out_buf);
1590 ceph_encode_64(&p, con->in_seq_acked);
1591
1592 reset_out_kvecs(con);
1593 return prepare_control(con, FRAME_TAG_ACK, con->v2.out_buf, 8);
1594 }
1595
static void prepare_epilogue_plain(struct ceph_connection *con, bool aborted)
1597 {
1598 dout("%s con %p msg %p aborted %d crcs %u %u %u\n", __func__, con,
1599 con->out_msg, aborted, con->v2.out_epil.front_crc,
1600 con->v2.out_epil.middle_crc, con->v2.out_epil.data_crc);
1601
1602 encode_epilogue_plain(con, aborted);
1603 add_out_kvec(con, &con->v2.out_epil, CEPH_EPILOGUE_PLAIN_LEN);
1604 }
1605
1606 /*
1607 * For "used" empty segments, crc is -1. For unused (trailing)
1608 * segments, crc is 0.
1609 */
static void prepare_message_plain(struct ceph_connection *con)
1611 {
1612 struct ceph_msg *msg = con->out_msg;
1613
1614 prepare_head_plain(con, con->v2.out_buf,
1615 sizeof(struct ceph_msg_header2), NULL, 0, false);
1616
1617 if (!front_len(msg) && !middle_len(msg)) {
1618 if (!data_len(msg)) {
1619 /*
1620 * Empty message: once the head is written,
1621 * we are done -- there is no epilogue.
1622 */
1623 con->v2.out_state = OUT_S_FINISH_MESSAGE;
1624 return;
1625 }
1626
1627 con->v2.out_epil.front_crc = -1;
1628 con->v2.out_epil.middle_crc = -1;
1629 con->v2.out_state = OUT_S_QUEUE_DATA;
1630 return;
1631 }
1632
1633 if (front_len(msg)) {
1634 con->v2.out_epil.front_crc = crc32c(-1, msg->front.iov_base,
1635 front_len(msg));
1636 add_out_kvec(con, msg->front.iov_base, front_len(msg));
1637 } else {
1638 /* middle (at least) is there, checked above */
1639 con->v2.out_epil.front_crc = -1;
1640 }
1641
1642 if (middle_len(msg)) {
1643 con->v2.out_epil.middle_crc =
1644 crc32c(-1, msg->middle->vec.iov_base, middle_len(msg));
1645 add_out_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
1646 } else {
1647 con->v2.out_epil.middle_crc = data_len(msg) ? -1 : 0;
1648 }
1649
1650 if (data_len(msg)) {
1651 con->v2.out_state = OUT_S_QUEUE_DATA;
1652 } else {
1653 con->v2.out_epil.data_crc = 0;
1654 prepare_epilogue_plain(con, false);
1655 con->v2.out_state = OUT_S_FINISH_MESSAGE;
1656 }
1657 }
1658
/*
 * Unfortunately the kernel crypto API doesn't support streaming
 * (piecewise) operation for AEAD algorithms, so we can't get away
 * with a fixed-size buffer and a couple of sgs.  Instead, we have to
 * allocate pages for the entire tail of the message (currently up
 * to ~32M) and two sg arrays (up to ~256K each)...
 */
static int prepare_message_secure(struct ceph_connection *con)
1667 {
1668 void *zerop = page_address(ceph_zero_page);
1669 struct sg_table enc_sgt = {};
1670 struct sg_table sgt = {};
1671 struct page **enc_pages;
1672 int enc_page_cnt;
1673 int tail_len;
1674 int ret;
1675
1676 ret = prepare_head_secure_small(con, con->v2.out_buf,
1677 sizeof(struct ceph_msg_header2));
1678 if (ret)
1679 return ret;
1680
1681 tail_len = tail_onwire_len(con->out_msg, true);
1682 if (!tail_len) {
1683 /*
1684 * Empty message: once the head is written,
1685 * we are done -- there is no epilogue.
1686 */
1687 con->v2.out_state = OUT_S_FINISH_MESSAGE;
1688 return 0;
1689 }
1690
1691 encode_epilogue_secure(con, false);
1692 ret = setup_message_sgs(&sgt, con->out_msg, zerop, zerop, zerop,
1693 &con->v2.out_epil, NULL, 0, false);
1694 if (ret)
1695 goto out;
1696
1697 enc_page_cnt = calc_pages_for(0, tail_len);
1698 enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
1699 if (IS_ERR(enc_pages)) {
1700 ret = PTR_ERR(enc_pages);
1701 goto out;
1702 }
1703
1704 WARN_ON(con->v2.out_enc_pages || con->v2.out_enc_page_cnt);
1705 con->v2.out_enc_pages = enc_pages;
1706 con->v2.out_enc_page_cnt = enc_page_cnt;
1707 con->v2.out_enc_resid = tail_len;
1708 con->v2.out_enc_i = 0;
1709
1710 ret = sg_alloc_table_from_pages(&enc_sgt, enc_pages, enc_page_cnt,
1711 0, tail_len, GFP_NOIO);
1712 if (ret)
1713 goto out;
1714
1715 ret = gcm_crypt(con, true, sgt.sgl, enc_sgt.sgl,
1716 tail_len - CEPH_GCM_TAG_LEN);
1717 if (ret)
1718 goto out;
1719
1720 dout("%s con %p msg %p sg_cnt %d enc_page_cnt %d\n", __func__, con,
1721 con->out_msg, sgt.orig_nents, enc_page_cnt);
1722 con->v2.out_state = OUT_S_QUEUE_ENC_PAGE;
1723
1724 out:
1725 sg_free_table(&sgt);
1726 sg_free_table(&enc_sgt);
1727 return ret;
1728 }
1729
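/*
 * Queue up an outgoing MESSAGE frame: ceph_msg_header2 in the control
 * segment, then front/middle/data segments and the epilogue, going
 * through either the plain (CRC) or the secure (encrypt into a page
 * vector) path.
 */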
static int prepare_message(struct ceph_connection *con)
1731 {
1732 int lens[] = {
1733 sizeof(struct ceph_msg_header2),
1734 front_len(con->out_msg),
1735 middle_len(con->out_msg),
1736 data_len(con->out_msg)
1737 };
1738 struct ceph_frame_desc desc;
1739 int ret;
1740
1741 dout("%s con %p msg %p logical %d+%d+%d+%d\n", __func__, con,
1742 con->out_msg, lens[0], lens[1], lens[2], lens[3]);
1743
1744 if (con->in_seq > con->in_seq_acked) {
1745 dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
1746 con->in_seq_acked, con->in_seq);
1747 con->in_seq_acked = con->in_seq;
1748 }
1749
1750 reset_out_kvecs(con);
1751 init_frame_desc(&desc, FRAME_TAG_MESSAGE, lens, 4);
1752 encode_preamble(&desc, con->v2.out_buf);
1753 fill_header2(CTRL_BODY(con->v2.out_buf), &con->out_msg->hdr,
1754 con->in_seq_acked);
1755
1756 if (con_secure(con)) {
1757 ret = prepare_message_secure(con);
1758 if (ret)
1759 return ret;
1760 } else {
1761 prepare_message_plain(con);
1762 }
1763
1764 ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
1765 return 0;
1766 }
1767
static int prepare_read_banner_prefix(struct ceph_connection *con)
1769 {
1770 void *buf;
1771
1772 buf = alloc_conn_buf(con, CEPH_BANNER_V2_PREFIX_LEN);
1773 if (!buf)
1774 return -ENOMEM;
1775
1776 reset_in_kvecs(con);
1777 add_in_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
1778 add_in_sign_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
1779 con->state = CEPH_CON_S_V2_BANNER_PREFIX;
1780 return 0;
1781 }
1782
static int prepare_read_banner_payload(struct ceph_connection *con,
				       int payload_len)
1785 {
1786 void *buf;
1787
1788 buf = alloc_conn_buf(con, payload_len);
1789 if (!buf)
1790 return -ENOMEM;
1791
1792 reset_in_kvecs(con);
1793 add_in_kvec(con, buf, payload_len);
1794 add_in_sign_kvec(con, buf, payload_len);
1795 con->state = CEPH_CON_S_V2_BANNER_PAYLOAD;
1796 return 0;
1797 }
1798
1799 static void prepare_read_preamble(struct ceph_connection *con)
1800 {
1801 reset_in_kvecs(con);
1802 add_in_kvec(con, con->v2.in_buf,
1803 con_secure(con) ? CEPH_PREAMBLE_SECURE_LEN :
1804 CEPH_PREAMBLE_PLAIN_LEN);
1805 con->v2.in_state = IN_S_HANDLE_PREAMBLE;
1806 }
1807
1808 static int prepare_read_control(struct ceph_connection *con)
1809 {
1810 int ctrl_len = con->v2.in_desc.fd_lens[0];
1811 int head_len;
1812 void *buf;
1813
1814 reset_in_kvecs(con);
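/*
 * Plain mode.  During the HELLO and AUTH exchanges the entire head
 * (preamble, control body and CRC) is kept in a connection buffer and
 * registered in the signing kvecs so that it can later be covered by
 * the auth signature exchange.  Afterwards only the control body
 * needs a buffer: it goes into the preamble buffer if it fits the
 * inline area, into a dedicated connection buffer otherwise.
 */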
1815 if (con->state == CEPH_CON_S_V2_HELLO ||
1816 con->state == CEPH_CON_S_V2_AUTH) {
1817 head_len = head_onwire_len(ctrl_len, false);
1818 buf = alloc_conn_buf(con, head_len);
1819 if (!buf)
1820 return -ENOMEM;
1821
1822 /* preserve preamble */
1823 memcpy(buf, con->v2.in_buf, CEPH_PREAMBLE_LEN);
1824
1825 add_in_kvec(con, CTRL_BODY(buf), ctrl_len);
1826 add_in_kvec(con, CTRL_BODY(buf) + ctrl_len, CEPH_CRC_LEN);
1827 add_in_sign_kvec(con, buf, head_len);
1828 } else {
1829 if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
1830 buf = alloc_conn_buf(con, ctrl_len);
1831 if (!buf)
1832 return -ENOMEM;
1833
1834 add_in_kvec(con, buf, ctrl_len);
1835 } else {
1836 add_in_kvec(con, CTRL_BODY(con->v2.in_buf), ctrl_len);
1837 }
1838 add_in_kvec(con, con->v2.in_buf, CEPH_CRC_LEN);
1839 }
1840 con->v2.in_state = IN_S_HANDLE_CONTROL;
1841 return 0;
1842 }
1843
1844 static int prepare_read_control_remainder(struct ceph_connection *con)
1845 {
1846 int ctrl_len = con->v2.in_desc.fd_lens[0];
1847 int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
1848 void *buf;
1849
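/*
 * Secure mode: the first CEPH_PREAMBLE_INLINE_LEN bytes of the
 * control body arrived inline with the preamble and were decrypted
 * together with it.  Copy them out and read the remainder, followed
 * by its padding and auth tag.
 */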
1850 buf = alloc_conn_buf(con, ctrl_len);
1851 if (!buf)
1852 return -ENOMEM;
1853
1854 memcpy(buf, CTRL_BODY(con->v2.in_buf), CEPH_PREAMBLE_INLINE_LEN);
1855
1856 reset_in_kvecs(con);
1857 add_in_kvec(con, buf + CEPH_PREAMBLE_INLINE_LEN, rem_len);
1858 add_in_kvec(con, con->v2.in_buf,
1859 padding_len(rem_len) + CEPH_GCM_TAG_LEN);
1860 con->v2.in_state = IN_S_HANDLE_CONTROL_REMAINDER;
1861 return 0;
1862 }
1863
1864 static int prepare_read_data(struct ceph_connection *con)
1865 {
1866 struct bio_vec bv;
1867
1868 con->in_data_crc = -1;
1869 ceph_msg_data_cursor_init(&con->v2.in_cursor, con->in_msg,
1870 data_len(con->in_msg));
1871
1872 get_bvec_at(&con->v2.in_cursor, &bv);
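/*
 * With the rxbounce option, data is received into a single bounce
 * page and copied to its final destination in
 * prepare_read_data_cont() -- presumably so that the CRC is computed
 * over a stable copy even if the destination page is modified
 * concurrently.
 */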
1873 if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1874 if (unlikely(!con->bounce_page)) {
1875 con->bounce_page = alloc_page(GFP_NOIO);
1876 if (!con->bounce_page) {
1877 pr_err("failed to allocate bounce page\n");
1878 return -ENOMEM;
1879 }
1880 }
1881
1882 bv.bv_page = con->bounce_page;
1883 bv.bv_offset = 0;
1884 }
1885 set_in_bvec(con, &bv);
1886 con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT;
1887 return 0;
1888 }
1889
1890 static void prepare_read_data_cont(struct ceph_connection *con)
1891 {
1892 struct bio_vec bv;
1893
1894 if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1895 con->in_data_crc = crc32c(con->in_data_crc,
1896 page_address(con->bounce_page),
1897 con->v2.in_bvec.bv_len);
1898
1899 get_bvec_at(&con->v2.in_cursor, &bv);
1900 memcpy_to_page(bv.bv_page, bv.bv_offset,
1901 page_address(con->bounce_page),
1902 con->v2.in_bvec.bv_len);
1903 } else {
1904 con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
1905 con->v2.in_bvec.bv_page,
1906 con->v2.in_bvec.bv_offset,
1907 con->v2.in_bvec.bv_len);
1908 }
1909
1910 ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len);
1911 if (con->v2.in_cursor.total_resid) {
1912 get_bvec_at(&con->v2.in_cursor, &bv);
1913 if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1914 bv.bv_page = con->bounce_page;
1915 bv.bv_offset = 0;
1916 }
1917 set_in_bvec(con, &bv);
1918 WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT);
1919 return;
1920 }
1921
1922 /*
1923 * We've read all data. Prepare to read epilogue.
1924 */
1925 reset_in_kvecs(con);
1926 add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
1927 con->v2.in_state = IN_S_HANDLE_EPILOGUE;
1928 }
1929
1930 static int prepare_sparse_read_cont(struct ceph_connection *con)
1931 {
1932 int ret;
1933 struct bio_vec bv;
1934 char *buf = NULL;
1935 struct ceph_msg_data_cursor *cursor = &con->v2.in_cursor;
1936
1937 WARN_ON(con->v2.in_state != IN_S_PREPARE_SPARSE_DATA_CONT);
1938
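/*
 * Finish the extent that was just received (bvec: page data, kvec:
 * buffer handed out by the ->sparse_read() callback), then ask
 * ->sparse_read() for the next extent.  A return of 0 means all
 * extents have been received and we move on to the epilogue.
 */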
1939 if (iov_iter_is_bvec(&con->v2.in_iter)) {
1940 if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1941 con->in_data_crc = crc32c(con->in_data_crc,
1942 page_address(con->bounce_page),
1943 con->v2.in_bvec.bv_len);
1944 get_bvec_at(cursor, &bv);
1945 memcpy_to_page(bv.bv_page, bv.bv_offset,
1946 page_address(con->bounce_page),
1947 con->v2.in_bvec.bv_len);
1948 } else {
1949 con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
1950 con->v2.in_bvec.bv_page,
1951 con->v2.in_bvec.bv_offset,
1952 con->v2.in_bvec.bv_len);
1953 }
1954
1955 ceph_msg_data_advance(cursor, con->v2.in_bvec.bv_len);
1956 cursor->sr_resid -= con->v2.in_bvec.bv_len;
1957 dout("%s: advance by 0x%x sr_resid 0x%x\n", __func__,
1958 con->v2.in_bvec.bv_len, cursor->sr_resid);
1959 WARN_ON_ONCE(cursor->sr_resid > cursor->total_resid);
1960 if (cursor->sr_resid) {
1961 get_bvec_at(cursor, &bv);
1962 if (bv.bv_len > cursor->sr_resid)
1963 bv.bv_len = cursor->sr_resid;
1964 if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
1965 bv.bv_page = con->bounce_page;
1966 bv.bv_offset = 0;
1967 }
1968 set_in_bvec(con, &bv);
1969 con->v2.data_len_remain -= bv.bv_len;
1970 return 0;
1971 }
1972 } else if (iov_iter_is_kvec(&con->v2.in_iter)) {
1973 /* On first call, we have no kvec so don't compute crc */
1974 if (con->v2.in_kvec_cnt) {
1975 WARN_ON_ONCE(con->v2.in_kvec_cnt > 1);
1976 con->in_data_crc = crc32c(con->in_data_crc,
1977 con->v2.in_kvecs[0].iov_base,
1978 con->v2.in_kvecs[0].iov_len);
1979 }
1980 } else {
1981 return -EIO;
1982 }
1983
1984 /* get next extent */
1985 ret = con->ops->sparse_read(con, cursor, &buf);
1986 if (ret <= 0) {
1987 if (ret < 0)
1988 return ret;
1989
1990 reset_in_kvecs(con);
1991 add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
1992 con->v2.in_state = IN_S_HANDLE_EPILOGUE;
1993 return 0;
1994 }
1995
1996 if (buf) {
1997 /* receive into buffer */
1998 reset_in_kvecs(con);
1999 add_in_kvec(con, buf, ret);
2000 con->v2.data_len_remain -= ret;
2001 return 0;
2002 }
2003
2004 if (ret > cursor->total_resid) {
2005 pr_warn("%s: ret 0x%x total_resid 0x%zx resid 0x%zx\n",
2006 __func__, ret, cursor->total_resid, cursor->resid);
2007 return -EIO;
2008 }
2009 get_bvec_at(cursor, &bv);
2010 if (bv.bv_len > cursor->sr_resid)
2011 bv.bv_len = cursor->sr_resid;
2012 if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
2013 if (unlikely(!con->bounce_page)) {
2014 con->bounce_page = alloc_page(GFP_NOIO);
2015 if (!con->bounce_page) {
2016 pr_err("failed to allocate bounce page\n");
2017 return -ENOMEM;
2018 }
2019 }
2020
2021 bv.bv_page = con->bounce_page;
2022 bv.bv_offset = 0;
2023 }
2024 set_in_bvec(con, &bv);
2025 con->v2.data_len_remain -= ret;
2026 return ret;
2027 }
2028
2029 static int prepare_sparse_read_data(struct ceph_connection *con)
2030 {
2031 struct ceph_msg *msg = con->in_msg;
2032
2033 dout("%s: starting sparse read\n", __func__);
2034
2035 if (WARN_ON_ONCE(!con->ops->sparse_read))
2036 return -EOPNOTSUPP;
2037
2038 if (!con_secure(con))
2039 con->in_data_crc = -1;
2040
2041 ceph_msg_data_cursor_init(&con->v2.in_cursor, msg,
2042 msg->sparse_read_total);
2043
2044 reset_in_kvecs(con);
2045 con->v2.in_state = IN_S_PREPARE_SPARSE_DATA_CONT;
2046 con->v2.data_len_remain = data_len(msg);
2047 return prepare_sparse_read_cont(con);
2048 }
2049
2050 static int prepare_read_tail_plain(struct ceph_connection *con)
2051 {
2052 struct ceph_msg *msg = con->in_msg;
2053
2054 if (!front_len(msg) && !middle_len(msg)) {
2055 WARN_ON(!data_len(msg));
2056 return prepare_read_data(con);
2057 }
2058
2059 reset_in_kvecs(con);
2060 if (front_len(msg)) {
2061 add_in_kvec(con, msg->front.iov_base, front_len(msg));
2062 WARN_ON(msg->front.iov_len != front_len(msg));
2063 }
2064 if (middle_len(msg)) {
2065 add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
2066 WARN_ON(msg->middle->vec.iov_len != middle_len(msg));
2067 }
2068
2069 if (data_len(msg)) {
2070 if (msg->sparse_read_total)
2071 con->v2.in_state = IN_S_PREPARE_SPARSE_DATA;
2072 else
2073 con->v2.in_state = IN_S_PREPARE_READ_DATA;
2074 } else {
2075 add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
2076 con->v2.in_state = IN_S_HANDLE_EPILOGUE;
2077 }
2078 return 0;
2079 }
2080
2081 static void prepare_read_enc_page(struct ceph_connection *con)
2082 {
2083 struct bio_vec bv;
2084
2085 dout("%s con %p i %d resid %d\n", __func__, con, con->v2.in_enc_i,
2086 con->v2.in_enc_resid);
2087 WARN_ON(!con->v2.in_enc_resid);
2088
2089 bvec_set_page(&bv, con->v2.in_enc_pages[con->v2.in_enc_i],
2090 min(con->v2.in_enc_resid, (int)PAGE_SIZE), 0);
2091
2092 set_in_bvec(con, &bv);
2093 con->v2.in_enc_i++;
2094 con->v2.in_enc_resid -= bv.bv_len;
2095
2096 if (con->v2.in_enc_resid) {
2097 con->v2.in_state = IN_S_PREPARE_READ_ENC_PAGE;
2098 return;
2099 }
2100
2101 /*
2102 * We are set to read the last piece of ciphertext (ending
2103 * with epilogue) + auth tag.
2104 */
2105 WARN_ON(con->v2.in_enc_i != con->v2.in_enc_page_cnt);
2106 con->v2.in_state = IN_S_HANDLE_EPILOGUE;
2107 }
2108
2109 static int prepare_read_tail_secure(struct ceph_connection *con)
2110 {
2111 struct page **enc_pages;
2112 int enc_page_cnt;
2113 int tail_len;
2114
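/*
 * In secure mode the tail arrives as opaque ciphertext ending with
 * the epilogue and auth tag.  Read it into a temporary page vector;
 * it is decrypted in handle_epilogue().
 */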
2115 tail_len = tail_onwire_len(con->in_msg, true);
2116 WARN_ON(!tail_len);
2117
2118 enc_page_cnt = calc_pages_for(0, tail_len);
2119 enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
2120 if (IS_ERR(enc_pages))
2121 return PTR_ERR(enc_pages);
2122
2123 WARN_ON(con->v2.in_enc_pages || con->v2.in_enc_page_cnt);
2124 con->v2.in_enc_pages = enc_pages;
2125 con->v2.in_enc_page_cnt = enc_page_cnt;
2126 con->v2.in_enc_resid = tail_len;
2127 con->v2.in_enc_i = 0;
2128
2129 prepare_read_enc_page(con);
2130 return 0;
2131 }
2132
2133 static void __finish_skip(struct ceph_connection *con)
2134 {
2135 con->in_seq++;
2136 prepare_read_preamble(con);
2137 }
2138
2139 static void prepare_skip_message(struct ceph_connection *con)
2140 {
2141 struct ceph_frame_desc *desc = &con->v2.in_desc;
2142 int tail_len;
2143
2144 dout("%s con %p %d+%d+%d\n", __func__, con, desc->fd_lens[1],
2145 desc->fd_lens[2], desc->fd_lens[3]);
2146
2147 tail_len = __tail_onwire_len(desc->fd_lens[1], desc->fd_lens[2],
2148 desc->fd_lens[3], con_secure(con));
2149 if (!tail_len) {
2150 __finish_skip(con);
2151 } else {
2152 set_in_skip(con, tail_len);
2153 con->v2.in_state = IN_S_FINISH_SKIP;
2154 }
2155 }
2156
2157 static int process_banner_prefix(struct ceph_connection *con)
2158 {
2159 int payload_len;
2160 void *p;
2161
2162 WARN_ON(con->v2.in_kvecs[0].iov_len != CEPH_BANNER_V2_PREFIX_LEN);
2163
2164 p = con->v2.in_kvecs[0].iov_base;
2165 if (memcmp(p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN)) {
2166 if (!memcmp(p, CEPH_BANNER, CEPH_BANNER_LEN))
2167 con->error_msg = "server is speaking msgr1 protocol";
2168 else
2169 con->error_msg = "protocol error, bad banner";
2170 return -EINVAL;
2171 }
2172
2173 p += CEPH_BANNER_V2_LEN;
2174 payload_len = ceph_decode_16(&p);
2175 dout("%s con %p payload_len %d\n", __func__, con, payload_len);
2176
2177 return prepare_read_banner_payload(con, payload_len);
2178 }
2179
2180 static int process_banner_payload(struct ceph_connection *con)
2181 {
2182 void *end = con->v2.in_kvecs[0].iov_base + con->v2.in_kvecs[0].iov_len;
2183 u64 feat = CEPH_MSGR2_SUPPORTED_FEATURES;
2184 u64 req_feat = CEPH_MSGR2_REQUIRED_FEATURES;
2185 u64 server_feat, server_req_feat;
2186 void *p;
2187 int ret;
2188
2189 p = con->v2.in_kvecs[0].iov_base;
2190 ceph_decode_64_safe(&p, end, server_feat, bad);
2191 ceph_decode_64_safe(&p, end, server_req_feat, bad);
2192
2193 dout("%s con %p server_feat 0x%llx server_req_feat 0x%llx\n",
2194 __func__, con, server_feat, server_req_feat);
2195
2196 if (req_feat & ~server_feat) {
2197 pr_err("msgr2 feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
2198 server_feat, req_feat & ~server_feat);
2199 con->error_msg = "missing required protocol features";
2200 return -EINVAL;
2201 }
2202 if (server_req_feat & ~feat) {
2203 pr_err("msgr2 feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
2204 feat, server_req_feat & ~feat);
2205 con->error_msg = "missing required protocol features";
2206 return -EINVAL;
2207 }
2208
2209 /* no reset_out_kvecs() as our banner may still be pending */
2210 ret = prepare_hello(con);
2211 if (ret) {
2212 pr_err("prepare_hello failed: %d\n", ret);
2213 return ret;
2214 }
2215
2216 con->state = CEPH_CON_S_V2_HELLO;
2217 prepare_read_preamble(con);
2218 return 0;
2219
2220 bad:
2221 pr_err("failed to decode banner payload\n");
2222 return -EINVAL;
2223 }
2224
2225 static int process_hello(struct ceph_connection *con, void *p, void *end)
2226 {
2227 struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
2228 struct ceph_entity_addr addr_for_me;
2229 u8 entity_type;
2230 int ret;
2231
2232 if (con->state != CEPH_CON_S_V2_HELLO) {
2233 con->error_msg = "protocol error, unexpected hello";
2234 return -EINVAL;
2235 }
2236
2237 ceph_decode_8_safe(&p, end, entity_type, bad);
2238 ret = ceph_decode_entity_addr(&p, end, &addr_for_me);
2239 if (ret) {
2240 pr_err("failed to decode addr_for_me: %d\n", ret);
2241 return ret;
2242 }
2243
2244 dout("%s con %p entity_type %d addr_for_me %s\n", __func__, con,
2245 entity_type, ceph_pr_addr(&addr_for_me));
2246
2247 if (entity_type != con->peer_name.type) {
2248 pr_err("bad peer type, want %d, got %d\n",
2249 con->peer_name.type, entity_type);
2250 con->error_msg = "wrong peer at address";
2251 return -EINVAL;
2252 }
2253
2254 /*
2255 * Set our address to the address our first peer (i.e. monitor)
2256 * sees that we are connecting from. If we are behind some sort
2257 * of NAT and want to be identified by some private (not NATed)
2258 * address, the ip option should be used.
2259 */
2260 if (ceph_addr_is_blank(my_addr)) {
2261 memcpy(&my_addr->in_addr, &addr_for_me.in_addr,
2262 sizeof(my_addr->in_addr));
2263 ceph_addr_set_port(my_addr, 0);
2264 dout("%s con %p set my addr %s, as seen by peer %s\n",
2265 __func__, con, ceph_pr_addr(my_addr),
2266 ceph_pr_addr(&con->peer_addr));
2267 } else {
2268 dout("%s con %p my addr already set %s\n",
2269 __func__, con, ceph_pr_addr(my_addr));
2270 }
2271
2272 WARN_ON(ceph_addr_is_blank(my_addr) || ceph_addr_port(my_addr));
2273 WARN_ON(my_addr->type != CEPH_ENTITY_ADDR_TYPE_ANY);
2274 WARN_ON(!my_addr->nonce);
2275
2276 /* no reset_out_kvecs() as our hello may still be pending */
2277 ret = prepare_auth_request(con);
2278 if (ret) {
2279 if (ret != -EAGAIN)
2280 pr_err("prepare_auth_request failed: %d\n", ret);
2281 return ret;
2282 }
2283
2284 con->state = CEPH_CON_S_V2_AUTH;
2285 return 0;
2286
2287 bad:
2288 pr_err("failed to decode hello\n");
2289 return -EINVAL;
2290 }
2291
2292 static int process_auth_bad_method(struct ceph_connection *con,
2293 void *p, void *end)
2294 {
2295 int allowed_protos[8], allowed_modes[8];
2296 int allowed_proto_cnt, allowed_mode_cnt;
2297 int used_proto, result;
2298 int ret;
2299 int i;
2300
2301 if (con->state != CEPH_CON_S_V2_AUTH) {
2302 con->error_msg = "protocol error, unexpected auth_bad_method";
2303 return -EINVAL;
2304 }
2305
2306 ceph_decode_32_safe(&p, end, used_proto, bad);
2307 ceph_decode_32_safe(&p, end, result, bad);
2308 dout("%s con %p used_proto %d result %d\n", __func__, con, used_proto,
2309 result);
2310
2311 ceph_decode_32_safe(&p, end, allowed_proto_cnt, bad);
2312 if (allowed_proto_cnt > ARRAY_SIZE(allowed_protos)) {
2313 pr_err("allowed_protos too big %d\n", allowed_proto_cnt);
2314 return -EINVAL;
2315 }
2316 for (i = 0; i < allowed_proto_cnt; i++) {
2317 ceph_decode_32_safe(&p, end, allowed_protos[i], bad);
2318 dout("%s con %p allowed_protos[%d] %d\n", __func__, con,
2319 i, allowed_protos[i]);
2320 }
2321
2322 ceph_decode_32_safe(&p, end, allowed_mode_cnt, bad);
2323 if (allowed_mode_cnt > ARRAY_SIZE(allowed_modes)) {
2324 pr_err("allowed_modes too big %d\n", allowed_mode_cnt);
2325 return -EINVAL;
2326 }
2327 for (i = 0; i < allowed_mode_cnt; i++) {
2328 ceph_decode_32_safe(&p, end, allowed_modes[i], bad);
2329 dout("%s con %p allowed_modes[%d] %d\n", __func__, con,
2330 i, allowed_modes[i]);
2331 }
2332
2333 mutex_unlock(&con->mutex);
2334 ret = con->ops->handle_auth_bad_method(con, used_proto, result,
2335 allowed_protos,
2336 allowed_proto_cnt,
2337 allowed_modes,
2338 allowed_mode_cnt);
2339 mutex_lock(&con->mutex);
2340 if (con->state != CEPH_CON_S_V2_AUTH) {
2341 dout("%s con %p state changed to %d\n", __func__, con,
2342 con->state);
2343 return -EAGAIN;
2344 }
2345
2346 dout("%s con %p handle_auth_bad_method ret %d\n", __func__, con, ret);
2347 return ret;
2348
2349 bad:
2350 pr_err("failed to decode auth_bad_method\n");
2351 return -EINVAL;
2352 }
2353
2354 static int process_auth_reply_more(struct ceph_connection *con,
2355 void *p, void *end)
2356 {
2357 int payload_len;
2358 int ret;
2359
2360 if (con->state != CEPH_CON_S_V2_AUTH) {
2361 con->error_msg = "protocol error, unexpected auth_reply_more";
2362 return -EINVAL;
2363 }
2364
2365 ceph_decode_32_safe(&p, end, payload_len, bad);
2366 ceph_decode_need(&p, end, payload_len, bad);
2367
2368 dout("%s con %p payload_len %d\n", __func__, con, payload_len);
2369
2370 reset_out_kvecs(con);
2371 ret = prepare_auth_request_more(con, p, payload_len);
2372 if (ret) {
2373 if (ret != -EAGAIN)
2374 pr_err("prepare_auth_request_more failed: %d\n", ret);
2375 return ret;
2376 }
2377
2378 return 0;
2379
2380 bad:
2381 pr_err("failed to decode auth_reply_more\n");
2382 return -EINVAL;
2383 }
2384
2385 /*
2386 * Align session_key and con_secret to avoid GFP_ATOMIC allocation
2387 * inside crypto_shash_setkey() and crypto_aead_setkey() called from
2388 * setup_crypto(). __aligned(16) isn't guaranteed to work for stack
2389 * objects, so do it by hand.
2390 */
2391 static int process_auth_done(struct ceph_connection *con, void *p, void *end)
2392 {
2393 u8 session_key_buf[CEPH_KEY_LEN + 16];
2394 u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16];
2395 u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16);
2396 u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16);
2397 int session_key_len, con_secret_len;
2398 int payload_len;
2399 u64 global_id;
2400 int ret;
2401
2402 if (con->state != CEPH_CON_S_V2_AUTH) {
2403 con->error_msg = "protocol error, unexpected auth_done";
2404 return -EINVAL;
2405 }
2406
2407 ceph_decode_64_safe(&p, end, global_id, bad);
2408 ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);
2409 ceph_decode_32_safe(&p, end, payload_len, bad);
2410
2411 dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
2412 __func__, con, global_id, con->v2.con_mode, payload_len);
2413
2414 mutex_unlock(&con->mutex);
2415 session_key_len = 0;
2416 con_secret_len = 0;
2417 ret = con->ops->handle_auth_done(con, global_id, p, payload_len,
2418 session_key, &session_key_len,
2419 con_secret, &con_secret_len);
2420 mutex_lock(&con->mutex);
2421 if (con->state != CEPH_CON_S_V2_AUTH) {
2422 dout("%s con %p state changed to %d\n", __func__, con,
2423 con->state);
2424 ret = -EAGAIN;
2425 goto out;
2426 }
2427
2428 dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
2429 if (ret)
2430 goto out;
2431
2432 ret = setup_crypto(con, session_key, session_key_len, con_secret,
2433 con_secret_len);
2434 if (ret)
2435 goto out;
2436
2437 reset_out_kvecs(con);
2438 ret = prepare_auth_signature(con);
2439 if (ret) {
2440 pr_err("prepare_auth_signature failed: %d\n", ret);
2441 goto out;
2442 }
2443
2444 con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;
2445
2446 out:
2447 memzero_explicit(session_key_buf, sizeof(session_key_buf));
2448 memzero_explicit(con_secret_buf, sizeof(con_secret_buf));
2449 return ret;
2450
2451 bad:
2452 pr_err("failed to decode auth_done\n");
2453 return -EINVAL;
2454 }
2455
2456 static int process_auth_signature(struct ceph_connection *con,
2457 void *p, void *end)
2458 {
2459 u8 hmac[SHA256_DIGEST_SIZE];
2460 int ret;
2461
2462 if (con->state != CEPH_CON_S_V2_AUTH_SIGNATURE) {
2463 con->error_msg = "protocol error, unexpected auth_signature";
2464 return -EINVAL;
2465 }
2466
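/*
 * The received signature is an HMAC-SHA256 over what we have sent so
 * far during connection setup (accumulated in out_sign_kvecs).
 * Recompute it locally and compare to detect tampering.
 */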
2467 ret = hmac_sha256(con, con->v2.out_sign_kvecs,
2468 con->v2.out_sign_kvec_cnt, hmac);
2469 if (ret)
2470 return ret;
2471
2472 ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad);
2473 if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) {
2474 con->error_msg = "integrity error, bad auth signature";
2475 return -EBADMSG;
2476 }
2477
2478 dout("%s con %p auth signature ok\n", __func__, con);
2479
2480 /* no reset_out_kvecs() as our auth_signature may still be pending */
2481 if (!con->v2.server_cookie) {
2482 ret = prepare_client_ident(con);
2483 if (ret) {
2484 pr_err("prepare_client_ident failed: %d\n", ret);
2485 return ret;
2486 }
2487
2488 con->state = CEPH_CON_S_V2_SESSION_CONNECT;
2489 } else {
2490 ret = prepare_session_reconnect(con);
2491 if (ret) {
2492 pr_err("prepare_session_reconnect failed: %d\n", ret);
2493 return ret;
2494 }
2495
2496 con->state = CEPH_CON_S_V2_SESSION_RECONNECT;
2497 }
2498
2499 return 0;
2500
2501 bad:
2502 pr_err("failed to decode auth_signature\n");
2503 return -EINVAL;
2504 }
2505
2506 static int process_server_ident(struct ceph_connection *con,
2507 void *p, void *end)
2508 {
2509 struct ceph_client *client = from_msgr(con->msgr);
2510 u64 features, required_features;
2511 struct ceph_entity_addr addr;
2512 u64 global_seq;
2513 u64 global_id;
2514 u64 cookie;
2515 u64 flags;
2516 int ret;
2517
2518 if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2519 con->error_msg = "protocol error, unexpected server_ident";
2520 return -EINVAL;
2521 }
2522
2523 ret = ceph_decode_entity_addrvec(&p, end, true, &addr);
2524 if (ret) {
2525 pr_err("failed to decode server addrs: %d\n", ret);
2526 return ret;
2527 }
2528
2529 ceph_decode_64_safe(&p, end, global_id, bad);
2530 ceph_decode_64_safe(&p, end, global_seq, bad);
2531 ceph_decode_64_safe(&p, end, features, bad);
2532 ceph_decode_64_safe(&p, end, required_features, bad);
2533 ceph_decode_64_safe(&p, end, flags, bad);
2534 ceph_decode_64_safe(&p, end, cookie, bad);
2535
2536 dout("%s con %p addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx flags 0x%llx cookie 0x%llx\n",
2537 __func__, con, ceph_pr_addr(&addr), le32_to_cpu(addr.nonce),
2538 global_id, global_seq, features, required_features, flags, cookie);
2539
2540 /* is this who we intended to talk to? */
2541 if (memcmp(&addr, &con->peer_addr, sizeof(con->peer_addr))) {
2542 pr_err("bad peer addr/nonce, want %s/%u, got %s/%u\n",
2543 ceph_pr_addr(&con->peer_addr),
2544 le32_to_cpu(con->peer_addr.nonce),
2545 ceph_pr_addr(&addr), le32_to_cpu(addr.nonce));
2546 con->error_msg = "wrong peer at address";
2547 return -EINVAL;
2548 }
2549
2550 if (client->required_features & ~features) {
2551 pr_err("RADOS feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
2552 features, client->required_features & ~features);
2553 con->error_msg = "missing required protocol features";
2554 return -EINVAL;
2555 }
2556
2557 /*
2558 * Both name->type and name->num are set in ceph_con_open() but
2559 * name->num may be bogus in the initial monmap. name->type is
2560 * verified in handle_hello().
2561 */
2562 WARN_ON(!con->peer_name.type);
2563 con->peer_name.num = cpu_to_le64(global_id);
2564 con->v2.peer_global_seq = global_seq;
2565 con->peer_features = features;
2566 WARN_ON(required_features & ~client->supported_features);
2567 con->v2.server_cookie = cookie;
2568
2569 if (flags & CEPH_MSG_CONNECT_LOSSY) {
2570 ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX);
2571 WARN_ON(con->v2.server_cookie);
2572 } else {
2573 WARN_ON(!con->v2.server_cookie);
2574 }
2575
2576 clear_in_sign_kvecs(con);
2577 clear_out_sign_kvecs(con);
2578 free_conn_bufs(con);
2579 con->delay = 0; /* reset backoff memory */
2580
2581 con->state = CEPH_CON_S_OPEN;
2582 con->v2.out_state = OUT_S_GET_NEXT;
2583 return 0;
2584
2585 bad:
2586 pr_err("failed to decode server_ident\n");
2587 return -EINVAL;
2588 }
2589
2590 static int process_ident_missing_features(struct ceph_connection *con,
2591 void *p, void *end)
2592 {
2593 struct ceph_client *client = from_msgr(con->msgr);
2594 u64 missing_features;
2595
2596 if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2597 con->error_msg = "protocol error, unexpected ident_missing_features";
2598 return -EINVAL;
2599 }
2600
2601 ceph_decode_64_safe(&p, end, missing_features, bad);
2602 pr_err("RADOS feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
2603 client->supported_features, missing_features);
2604 con->error_msg = "missing required protocol features";
2605 return -EINVAL;
2606
2607 bad:
2608 pr_err("failed to decode ident_missing_features\n");
2609 return -EINVAL;
2610 }
2611
2612 static int process_session_reconnect_ok(struct ceph_connection *con,
2613 void *p, void *end)
2614 {
2615 u64 seq;
2616
2617 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2618 con->error_msg = "protocol error, unexpected session_reconnect_ok";
2619 return -EINVAL;
2620 }
2621
2622 ceph_decode_64_safe(&p, end, seq, bad);
2623
2624 dout("%s con %p seq %llu\n", __func__, con, seq);
2625 ceph_con_discard_requeued(con, seq);
2626
2627 clear_in_sign_kvecs(con);
2628 clear_out_sign_kvecs(con);
2629 free_conn_bufs(con);
2630 con->delay = 0; /* reset backoff memory */
2631
2632 con->state = CEPH_CON_S_OPEN;
2633 con->v2.out_state = OUT_S_GET_NEXT;
2634 return 0;
2635
2636 bad:
2637 pr_err("failed to decode session_reconnect_ok\n");
2638 return -EINVAL;
2639 }
2640
2641 static int process_session_retry(struct ceph_connection *con,
2642 void *p, void *end)
2643 {
2644 u64 connect_seq;
2645 int ret;
2646
2647 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2648 con->error_msg = "protocol error, unexpected session_retry";
2649 return -EINVAL;
2650 }
2651
2652 ceph_decode_64_safe(&p, end, connect_seq, bad);
2653
2654 dout("%s con %p connect_seq %llu\n", __func__, con, connect_seq);
2655 WARN_ON(connect_seq <= con->v2.connect_seq);
2656 con->v2.connect_seq = connect_seq + 1;
2657
2658 free_conn_bufs(con);
2659
2660 reset_out_kvecs(con);
2661 ret = prepare_session_reconnect(con);
2662 if (ret) {
2663 pr_err("prepare_session_reconnect (cseq) failed: %d\n", ret);
2664 return ret;
2665 }
2666
2667 return 0;
2668
2669 bad:
2670 pr_err("failed to decode session_retry\n");
2671 return -EINVAL;
2672 }
2673
2674 static int process_session_retry_global(struct ceph_connection *con,
2675 void *p, void *end)
2676 {
2677 u64 global_seq;
2678 int ret;
2679
2680 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2681 con->error_msg = "protocol error, unexpected session_retry_global";
2682 return -EINVAL;
2683 }
2684
2685 ceph_decode_64_safe(&p, end, global_seq, bad);
2686
2687 dout("%s con %p global_seq %llu\n", __func__, con, global_seq);
2688 WARN_ON(global_seq <= con->v2.global_seq);
2689 con->v2.global_seq = ceph_get_global_seq(con->msgr, global_seq);
2690
2691 free_conn_bufs(con);
2692
2693 reset_out_kvecs(con);
2694 ret = prepare_session_reconnect(con);
2695 if (ret) {
2696 pr_err("prepare_session_reconnect (gseq) failed: %d\n", ret);
2697 return ret;
2698 }
2699
2700 return 0;
2701
2702 bad:
2703 pr_err("failed to decode session_retry_global\n");
2704 return -EINVAL;
2705 }
2706
2707 static int process_session_reset(struct ceph_connection *con,
2708 void *p, void *end)
2709 {
2710 bool full;
2711 int ret;
2712
2713 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2714 con->error_msg = "protocol error, unexpected session_reset";
2715 return -EINVAL;
2716 }
2717
2718 ceph_decode_8_safe(&p, end, full, bad);
2719 if (!full) {
2720 con->error_msg = "protocol error, bad session_reset";
2721 return -EINVAL;
2722 }
2723
2724 pr_info("%s%lld %s session reset\n", ENTITY_NAME(con->peer_name),
2725 ceph_pr_addr(&con->peer_addr));
2726 ceph_con_reset_session(con);
2727
2728 mutex_unlock(&con->mutex);
2729 if (con->ops->peer_reset)
2730 con->ops->peer_reset(con);
2731 mutex_lock(&con->mutex);
2732 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2733 dout("%s con %p state changed to %d\n", __func__, con,
2734 con->state);
2735 return -EAGAIN;
2736 }
2737
2738 free_conn_bufs(con);
2739
2740 reset_out_kvecs(con);
2741 ret = prepare_client_ident(con);
2742 if (ret) {
2743 pr_err("prepare_client_ident (rst) failed: %d\n", ret);
2744 return ret;
2745 }
2746
2747 con->state = CEPH_CON_S_V2_SESSION_CONNECT;
2748 return 0;
2749
2750 bad:
2751 pr_err("failed to decode session_reset\n");
2752 return -EINVAL;
2753 }
2754
2755 static int process_keepalive2_ack(struct ceph_connection *con,
2756 void *p, void *end)
2757 {
2758 if (con->state != CEPH_CON_S_OPEN) {
2759 con->error_msg = "protocol error, unexpected keepalive2_ack";
2760 return -EINVAL;
2761 }
2762
2763 ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad);
2764 ceph_decode_timespec64(&con->last_keepalive_ack, p);
2765
2766 dout("%s con %p timestamp %lld.%09ld\n", __func__, con,
2767 con->last_keepalive_ack.tv_sec, con->last_keepalive_ack.tv_nsec);
2768
2769 return 0;
2770
2771 bad:
2772 pr_err("failed to decode keepalive2_ack\n");
2773 return -EINVAL;
2774 }
2775
2776 static int process_ack(struct ceph_connection *con, void *p, void *end)
2777 {
2778 u64 seq;
2779
2780 if (con->state != CEPH_CON_S_OPEN) {
2781 con->error_msg = "protocol error, unexpected ack";
2782 return -EINVAL;
2783 }
2784
2785 ceph_decode_64_safe(&p, end, seq, bad);
2786
2787 dout("%s con %p seq %llu\n", __func__, con, seq);
2788 ceph_con_discard_sent(con, seq);
2789 return 0;
2790
2791 bad:
2792 pr_err("failed to decode ack\n");
2793 return -EINVAL;
2794 }
2795
2796 static int process_control(struct ceph_connection *con, void *p, void *end)
2797 {
2798 int tag = con->v2.in_desc.fd_tag;
2799 int ret;
2800
2801 dout("%s con %p tag %d len %d\n", __func__, con, tag, (int)(end - p));
2802
2803 switch (tag) {
2804 case FRAME_TAG_HELLO:
2805 ret = process_hello(con, p, end);
2806 break;
2807 case FRAME_TAG_AUTH_BAD_METHOD:
2808 ret = process_auth_bad_method(con, p, end);
2809 break;
2810 case FRAME_TAG_AUTH_REPLY_MORE:
2811 ret = process_auth_reply_more(con, p, end);
2812 break;
2813 case FRAME_TAG_AUTH_DONE:
2814 ret = process_auth_done(con, p, end);
2815 break;
2816 case FRAME_TAG_AUTH_SIGNATURE:
2817 ret = process_auth_signature(con, p, end);
2818 break;
2819 case FRAME_TAG_SERVER_IDENT:
2820 ret = process_server_ident(con, p, end);
2821 break;
2822 case FRAME_TAG_IDENT_MISSING_FEATURES:
2823 ret = process_ident_missing_features(con, p, end);
2824 break;
2825 case FRAME_TAG_SESSION_RECONNECT_OK:
2826 ret = process_session_reconnect_ok(con, p, end);
2827 break;
2828 case FRAME_TAG_SESSION_RETRY:
2829 ret = process_session_retry(con, p, end);
2830 break;
2831 case FRAME_TAG_SESSION_RETRY_GLOBAL:
2832 ret = process_session_retry_global(con, p, end);
2833 break;
2834 case FRAME_TAG_SESSION_RESET:
2835 ret = process_session_reset(con, p, end);
2836 break;
2837 case FRAME_TAG_KEEPALIVE2_ACK:
2838 ret = process_keepalive2_ack(con, p, end);
2839 break;
2840 case FRAME_TAG_ACK:
2841 ret = process_ack(con, p, end);
2842 break;
2843 default:
2844 pr_err("bad tag %d\n", tag);
2845 con->error_msg = "protocol error, bad tag";
2846 return -EINVAL;
2847 }
2848 if (ret) {
2849 dout("%s con %p error %d\n", __func__, con, ret);
2850 return ret;
2851 }
2852
2853 prepare_read_preamble(con);
2854 return 0;
2855 }
2856
2857 /*
2858 * Return:
2859 * 1 - con->in_msg set, read message
2860 * 0 - skip message
2861 * <0 - error
2862 */
2863 static int process_message_header(struct ceph_connection *con,
2864 void *p, void *end)
2865 {
2866 struct ceph_frame_desc *desc = &con->v2.in_desc;
2867 struct ceph_msg_header2 *hdr2 = p;
2868 struct ceph_msg_header hdr;
2869 int skip;
2870 int ret;
2871 u64 seq;
2872
2873 /* verify seq# */
2874 seq = le64_to_cpu(hdr2->seq);
2875 if ((s64)seq - (s64)con->in_seq < 1) {
2876 pr_info("%s%lld %s skipping old message: seq %llu, expected %llu\n",
2877 ENTITY_NAME(con->peer_name),
2878 ceph_pr_addr(&con->peer_addr),
2879 seq, con->in_seq + 1);
2880 return 0;
2881 }
2882 if ((s64)seq - (s64)con->in_seq > 1) {
2883 pr_err("bad seq %llu, expected %llu\n", seq, con->in_seq + 1);
2884 con->error_msg = "bad message sequence # for incoming message";
2885 return -EBADE;
2886 }
2887
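/*
 * seq checks out (exactly in_seq + 1).  The header also acks what
 * the peer has received from us, so everything up to ack_seq can be
 * discarded from the sent list.
 */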
2888 ceph_con_discard_sent(con, le64_to_cpu(hdr2->ack_seq));
2889
2890 fill_header(&hdr, hdr2, desc->fd_lens[1], desc->fd_lens[2],
2891 desc->fd_lens[3], &con->peer_name);
2892 ret = ceph_con_in_msg_alloc(con, &hdr, &skip);
2893 if (ret)
2894 return ret;
2895
2896 WARN_ON(!con->in_msg ^ skip);
2897 if (skip)
2898 return 0;
2899
2900 WARN_ON(!con->in_msg);
2901 WARN_ON(con->in_msg->con != con);
2902 return 1;
2903 }
2904
2905 static int process_message(struct ceph_connection *con)
2906 {
2907 ceph_con_process_message(con);
2908
2909 /*
2910 * We could have been closed by ceph_con_close() because
2911 * ceph_con_process_message() temporarily drops con->mutex.
2912 */
2913 if (con->state != CEPH_CON_S_OPEN) {
2914 dout("%s con %p state changed to %d\n", __func__, con,
2915 con->state);
2916 return -EAGAIN;
2917 }
2918
2919 prepare_read_preamble(con);
2920 return 0;
2921 }
2922
2923 static int __handle_control(struct ceph_connection *con, void *p)
2924 {
2925 void *end = p + con->v2.in_desc.fd_lens[0];
2926 struct ceph_msg *msg;
2927 int ret;
2928
2929 if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE)
2930 return process_control(con, p, end);
2931
2932 ret = process_message_header(con, p, end);
2933 if (ret < 0)
2934 return ret;
2935 if (ret == 0) {
2936 prepare_skip_message(con);
2937 return 0;
2938 }
2939
2940 msg = con->in_msg; /* set in process_message_header() */
2941 if (front_len(msg)) {
2942 WARN_ON(front_len(msg) > msg->front_alloc_len);
2943 msg->front.iov_len = front_len(msg);
2944 } else {
2945 msg->front.iov_len = 0;
2946 }
2947 if (middle_len(msg)) {
2948 WARN_ON(middle_len(msg) > msg->middle->alloc_len);
2949 msg->middle->vec.iov_len = middle_len(msg);
2950 } else if (msg->middle) {
2951 msg->middle->vec.iov_len = 0;
2952 }
2953
2954 if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
2955 return process_message(con);
2956
2957 if (con_secure(con))
2958 return prepare_read_tail_secure(con);
2959
2960 return prepare_read_tail_plain(con);
2961 }
2962
2963 static int handle_preamble(struct ceph_connection *con)
2964 {
2965 struct ceph_frame_desc *desc = &con->v2.in_desc;
2966 int ret;
2967
2968 if (con_secure(con)) {
2969 ret = decrypt_preamble(con);
2970 if (ret) {
2971 if (ret == -EBADMSG)
2972 con->error_msg = "integrity error, bad preamble auth tag";
2973 return ret;
2974 }
2975 }
2976
2977 ret = decode_preamble(con->v2.in_buf, desc);
2978 if (ret) {
2979 if (ret == -EBADMSG)
2980 con->error_msg = "integrity error, bad crc";
2981 else
2982 con->error_msg = "protocol error, bad preamble";
2983 return ret;
2984 }
2985
2986 dout("%s con %p tag %d seg_cnt %d %d+%d+%d+%d\n", __func__,
2987 con, desc->fd_tag, desc->fd_seg_cnt, desc->fd_lens[0],
2988 desc->fd_lens[1], desc->fd_lens[2], desc->fd_lens[3]);
2989
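/*
 * Plain mode: the control body still needs to be read.  Secure mode:
 * up to CEPH_PREAMBLE_INLINE_LEN bytes of it were received and
 * decrypted together with the preamble, so either read the remainder
 * or process the control frame right away.
 */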
2990 if (!con_secure(con))
2991 return prepare_read_control(con);
2992
2993 if (desc->fd_lens[0] > CEPH_PREAMBLE_INLINE_LEN)
2994 return prepare_read_control_remainder(con);
2995
2996 return __handle_control(con, CTRL_BODY(con->v2.in_buf));
2997 }
2998
2999 static int handle_control(struct ceph_connection *con)
3000 {
3001 int ctrl_len = con->v2.in_desc.fd_lens[0];
3002 void *buf;
3003 int ret;
3004
3005 WARN_ON(con_secure(con));
3006
3007 ret = verify_control_crc(con);
3008 if (ret) {
3009 con->error_msg = "integrity error, bad crc";
3010 return ret;
3011 }
3012
3013 if (con->state == CEPH_CON_S_V2_AUTH) {
3014 buf = alloc_conn_buf(con, ctrl_len);
3015 if (!buf)
3016 return -ENOMEM;
3017
3018 memcpy(buf, con->v2.in_kvecs[0].iov_base, ctrl_len);
3019 return __handle_control(con, buf);
3020 }
3021
3022 return __handle_control(con, con->v2.in_kvecs[0].iov_base);
3023 }
3024
3025 static int handle_control_remainder(struct ceph_connection *con)
3026 {
3027 int ret;
3028
3029 WARN_ON(!con_secure(con));
3030
3031 ret = decrypt_control_remainder(con);
3032 if (ret) {
3033 if (ret == -EBADMSG)
3034 con->error_msg = "integrity error, bad control remainder auth tag";
3035 return ret;
3036 }
3037
3038 return __handle_control(con, con->v2.in_kvecs[0].iov_base -
3039 CEPH_PREAMBLE_INLINE_LEN);
3040 }
3041
3042 static int handle_epilogue(struct ceph_connection *con)
3043 {
3044 u32 front_crc, middle_crc, data_crc;
3045 int ret;
3046
3047 if (con_secure(con)) {
3048 ret = decrypt_tail(con);
3049 if (ret) {
3050 if (ret == -EBADMSG)
3051 con->error_msg = "integrity error, bad epilogue auth tag";
3052 return ret;
3053 }
3054
3055 /* just late_status */
3056 ret = decode_epilogue(con->v2.in_buf, NULL, NULL, NULL);
3057 if (ret) {
3058 con->error_msg = "protocol error, bad epilogue";
3059 return ret;
3060 }
3061 } else {
3062 ret = decode_epilogue(con->v2.in_buf, &front_crc,
3063 &middle_crc, &data_crc);
3064 if (ret) {
3065 con->error_msg = "protocol error, bad epilogue";
3066 return ret;
3067 }
3068
3069 ret = verify_epilogue_crcs(con, front_crc, middle_crc,
3070 data_crc);
3071 if (ret) {
3072 con->error_msg = "integrity error, bad crc";
3073 return ret;
3074 }
3075 }
3076
3077 return process_message(con);
3078 }
3079
3080 static void finish_skip(struct ceph_connection *con)
3081 {
3082 dout("%s con %p\n", __func__, con);
3083
3084 if (con_secure(con))
3085 gcm_inc_nonce(&con->v2.in_gcm_nonce);
3086
3087 __finish_skip(con);
3088 }
3089
3090 static int populate_in_iter(struct ceph_connection *con)
3091 {
3092 int ret;
3093
3094 dout("%s con %p state %d in_state %d\n", __func__, con, con->state,
3095 con->v2.in_state);
3096 WARN_ON(iov_iter_count(&con->v2.in_iter));
3097
3098 if (con->state == CEPH_CON_S_V2_BANNER_PREFIX) {
3099 ret = process_banner_prefix(con);
3100 } else if (con->state == CEPH_CON_S_V2_BANNER_PAYLOAD) {
3101 ret = process_banner_payload(con);
3102 } else if ((con->state >= CEPH_CON_S_V2_HELLO &&
3103 con->state <= CEPH_CON_S_V2_SESSION_RECONNECT) ||
3104 con->state == CEPH_CON_S_OPEN) {
3105 switch (con->v2.in_state) {
3106 case IN_S_HANDLE_PREAMBLE:
3107 ret = handle_preamble(con);
3108 break;
3109 case IN_S_HANDLE_CONTROL:
3110 ret = handle_control(con);
3111 break;
3112 case IN_S_HANDLE_CONTROL_REMAINDER:
3113 ret = handle_control_remainder(con);
3114 break;
3115 case IN_S_PREPARE_READ_DATA:
3116 ret = prepare_read_data(con);
3117 break;
3118 case IN_S_PREPARE_READ_DATA_CONT:
3119 prepare_read_data_cont(con);
3120 ret = 0;
3121 break;
3122 case IN_S_PREPARE_READ_ENC_PAGE:
3123 prepare_read_enc_page(con);
3124 ret = 0;
3125 break;
3126 case IN_S_PREPARE_SPARSE_DATA:
3127 ret = prepare_sparse_read_data(con);
3128 break;
3129 case IN_S_PREPARE_SPARSE_DATA_CONT:
3130 ret = prepare_sparse_read_cont(con);
3131 break;
3132 case IN_S_HANDLE_EPILOGUE:
3133 ret = handle_epilogue(con);
3134 break;
3135 case IN_S_FINISH_SKIP:
3136 finish_skip(con);
3137 ret = 0;
3138 break;
3139 default:
3140 WARN(1, "bad in_state %d", con->v2.in_state);
3141 return -EINVAL;
3142 }
3143 } else {
3144 WARN(1, "bad state %d", con->state);
3145 return -EINVAL;
3146 }
3147 if (ret) {
3148 dout("%s con %p error %d\n", __func__, con, ret);
3149 return ret;
3150 }
3151
3152 if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
3153 return -ENODATA;
3154 dout("%s con %p populated %zu\n", __func__, con,
3155 iov_iter_count(&con->v2.in_iter));
3156 return 1;
3157 }
3158
3159 int ceph_con_v2_try_read(struct ceph_connection *con)
3160 {
3161 int ret;
3162
3163 dout("%s con %p state %d need %zu\n", __func__, con, con->state,
3164 iov_iter_count(&con->v2.in_iter));
3165
3166 if (con->state == CEPH_CON_S_PREOPEN)
3167 return 0;
3168
3169 /*
3170 * We should always have something pending here.  If not, bail out
3171 * instead of calling populate_in_iter() as if something had been read
3172 * (with an empty iterator, ceph_tcp_recv() would immediately return 1).
3173 */
3174 if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
3175 return -ENODATA;
3176
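/*
 * Receive whatever is available for the current iterator, then let
 * populate_in_iter() run the state machine and set up the next read.
 * Loop until the socket has no more data (ceph_tcp_recv() returns 0)
 * or an error occurs.
 */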
3177 for (;;) {
3178 ret = ceph_tcp_recv(con);
3179 if (ret <= 0)
3180 return ret;
3181
3182 ret = populate_in_iter(con);
3183 if (ret <= 0) {
3184 if (ret && ret != -EAGAIN && !con->error_msg)
3185 con->error_msg = "read processing error";
3186 return ret;
3187 }
3188 }
3189 }
3190
3191 static void queue_data(struct ceph_connection *con)
3192 {
3193 struct bio_vec bv;
3194
3195 con->v2.out_epil.data_crc = -1;
3196 ceph_msg_data_cursor_init(&con->v2.out_cursor, con->out_msg,
3197 data_len(con->out_msg));
3198
3199 get_bvec_at(&con->v2.out_cursor, &bv);
3200 set_out_bvec(con, &bv, true);
3201 con->v2.out_state = OUT_S_QUEUE_DATA_CONT;
3202 }
3203
3204 static void queue_data_cont(struct ceph_connection *con)
3205 {
3206 struct bio_vec bv;
3207
3208 con->v2.out_epil.data_crc = ceph_crc32c_page(
3209 con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
3210 con->v2.out_bvec.bv_offset, con->v2.out_bvec.bv_len);
3211
3212 ceph_msg_data_advance(&con->v2.out_cursor, con->v2.out_bvec.bv_len);
3213 if (con->v2.out_cursor.total_resid) {
3214 get_bvec_at(&con->v2.out_cursor, &bv);
3215 set_out_bvec(con, &bv, true);
3216 WARN_ON(con->v2.out_state != OUT_S_QUEUE_DATA_CONT);
3217 return;
3218 }
3219
3220 /*
3221 * We've written all data. Queue epilogue. Once it's written,
3222 * we are done.
3223 */
3224 reset_out_kvecs(con);
3225 prepare_epilogue_plain(con, false);
3226 con->v2.out_state = OUT_S_FINISH_MESSAGE;
3227 }
3228
3229 static void queue_enc_page(struct ceph_connection *con)
3230 {
3231 struct bio_vec bv;
3232
3233 dout("%s con %p i %d resid %d\n", __func__, con, con->v2.out_enc_i,
3234 con->v2.out_enc_resid);
3235 WARN_ON(!con->v2.out_enc_resid);
3236
3237 bvec_set_page(&bv, con->v2.out_enc_pages[con->v2.out_enc_i],
3238 min(con->v2.out_enc_resid, (int)PAGE_SIZE), 0);
3239
3240 set_out_bvec(con, &bv, false);
3241 con->v2.out_enc_i++;
3242 con->v2.out_enc_resid -= bv.bv_len;
3243
3244 if (con->v2.out_enc_resid) {
3245 WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE);
3246 return;
3247 }
3248
3249 /*
3250 * We've queued the last piece of ciphertext (ending with
3251 * epilogue) + auth tag. Once it's written, we are done.
3252 */
3253 WARN_ON(con->v2.out_enc_i != con->v2.out_enc_page_cnt);
3254 con->v2.out_state = OUT_S_FINISH_MESSAGE;
3255 }
3256
3257 static void queue_zeros(struct ceph_connection *con)
3258 {
3259 dout("%s con %p out_zero %d\n", __func__, con, con->v2.out_zero);
3260
3261 if (con->v2.out_zero) {
3262 set_out_bvec_zero(con);
3263 con->v2.out_zero -= con->v2.out_bvec.bv_len;
3264 con->v2.out_state = OUT_S_QUEUE_ZEROS;
3265 return;
3266 }
3267
3268 /*
3269 * We've zero-filled everything up to epilogue. Queue epilogue
3270 * with late_status set to ABORTED and crcs adjusted for zeros.
3271 * Once it's written, we are done patching up for the revoke.
3272 */
3273 reset_out_kvecs(con);
3274 prepare_epilogue_plain(con, true);
3275 con->v2.out_state = OUT_S_FINISH_MESSAGE;
3276 }
3277
3278 static void finish_message(struct ceph_connection *con)
3279 {
3280 dout("%s con %p msg %p\n", __func__, con, con->out_msg);
3281
3282 /* we end up here in both plain and secure modes */
3283 if (con->v2.out_enc_pages) {
3284 WARN_ON(!con->v2.out_enc_page_cnt);
3285 ceph_release_page_vector(con->v2.out_enc_pages,
3286 con->v2.out_enc_page_cnt);
3287 con->v2.out_enc_pages = NULL;
3288 con->v2.out_enc_page_cnt = 0;
3289 }
3290 /* message may have been revoked */
3291 if (con->out_msg) {
3292 ceph_msg_put(con->out_msg);
3293 con->out_msg = NULL;
3294 }
3295
3296 con->v2.out_state = OUT_S_GET_NEXT;
3297 }
3298
3299 static int populate_out_iter(struct ceph_connection *con)
3300 {
3301 int ret;
3302
3303 dout("%s con %p state %d out_state %d\n", __func__, con, con->state,
3304 con->v2.out_state);
3305 WARN_ON(iov_iter_count(&con->v2.out_iter));
3306
3307 if (con->state != CEPH_CON_S_OPEN) {
3308 WARN_ON(con->state < CEPH_CON_S_V2_BANNER_PREFIX ||
3309 con->state > CEPH_CON_S_V2_SESSION_RECONNECT);
3310 goto nothing_pending;
3311 }
3312
3313 switch (con->v2.out_state) {
3314 case OUT_S_QUEUE_DATA:
3315 WARN_ON(!con->out_msg);
3316 queue_data(con);
3317 goto populated;
3318 case OUT_S_QUEUE_DATA_CONT:
3319 WARN_ON(!con->out_msg);
3320 queue_data_cont(con);
3321 goto populated;
3322 case OUT_S_QUEUE_ENC_PAGE:
3323 queue_enc_page(con);
3324 goto populated;
3325 case OUT_S_QUEUE_ZEROS:
3326 WARN_ON(con->out_msg); /* revoked */
3327 queue_zeros(con);
3328 goto populated;
3329 case OUT_S_FINISH_MESSAGE:
3330 finish_message(con);
3331 break;
3332 case OUT_S_GET_NEXT:
3333 break;
3334 default:
3335 WARN(1, "bad out_state %d", con->v2.out_state);
3336 return -EINVAL;
3337 }
3338
3339 WARN_ON(con->v2.out_state != OUT_S_GET_NEXT);
3340 if (ceph_con_flag_test_and_clear(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
3341 ret = prepare_keepalive2(con);
3342 if (ret) {
3343 pr_err("prepare_keepalive2 failed: %d\n", ret);
3344 return ret;
3345 }
3346 } else if (!list_empty(&con->out_queue)) {
3347 ceph_con_get_out_msg(con);
3348 ret = prepare_message(con);
3349 if (ret) {
3350 pr_err("prepare_message failed: %d\n", ret);
3351 return ret;
3352 }
3353 } else if (con->in_seq > con->in_seq_acked) {
3354 ret = prepare_ack(con);
3355 if (ret) {
3356 pr_err("prepare_ack failed: %d\n", ret);
3357 return ret;
3358 }
3359 } else {
3360 goto nothing_pending;
3361 }
3362
3363 populated:
3364 if (WARN_ON(!iov_iter_count(&con->v2.out_iter)))
3365 return -ENODATA;
3366 dout("%s con %p populated %zu\n", __func__, con,
3367 iov_iter_count(&con->v2.out_iter));
3368 return 1;
3369
3370 nothing_pending:
3371 WARN_ON(iov_iter_count(&con->v2.out_iter));
3372 dout("%s con %p nothing pending\n", __func__, con);
3373 ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
3374 return 0;
3375 }
3376
3377 int ceph_con_v2_try_write(struct ceph_connection *con)
3378 {
3379 int ret;
3380
3381 dout("%s con %p state %d have %zu\n", __func__, con, con->state,
3382 iov_iter_count(&con->v2.out_iter));
3383
3384 /* open the socket first? */
3385 if (con->state == CEPH_CON_S_PREOPEN) {
3386 WARN_ON(con->peer_addr.type != CEPH_ENTITY_ADDR_TYPE_MSGR2);
3387
3388 /*
3389 * Always bump global_seq. Bump connect_seq only if
3390 * there is a session (i.e. we are reconnecting and will
3391 * send session_reconnect instead of client_ident).
3392 */
3393 con->v2.global_seq = ceph_get_global_seq(con->msgr, 0);
3394 if (con->v2.server_cookie)
3395 con->v2.connect_seq++;
3396
3397 ret = prepare_read_banner_prefix(con);
3398 if (ret) {
3399 pr_err("prepare_read_banner_prefix failed: %d\n", ret);
3400 con->error_msg = "connect error";
3401 return ret;
3402 }
3403
3404 reset_out_kvecs(con);
3405 ret = prepare_banner(con);
3406 if (ret) {
3407 pr_err("prepare_banner failed: %d\n", ret);
3408 con->error_msg = "connect error";
3409 return ret;
3410 }
3411
3412 ret = ceph_tcp_connect(con);
3413 if (ret) {
3414 pr_err("ceph_tcp_connect failed: %d\n", ret);
3415 con->error_msg = "connect error";
3416 return ret;
3417 }
3418 }
3419
3420 if (!iov_iter_count(&con->v2.out_iter)) {
3421 ret = populate_out_iter(con);
3422 if (ret <= 0) {
3423 if (ret && ret != -EAGAIN && !con->error_msg)
3424 con->error_msg = "write processing error";
3425 return ret;
3426 }
3427 }
3428
3429 tcp_sock_set_cork(con->sock->sk, true);
3430 for (;;) {
3431 ret = ceph_tcp_send(con);
3432 if (ret <= 0)
3433 break;
3434
3435 ret = populate_out_iter(con);
3436 if (ret <= 0) {
3437 if (ret && ret != -EAGAIN && !con->error_msg)
3438 con->error_msg = "write processing error";
3439 break;
3440 }
3441 }
3442
3443 tcp_sock_set_cork(con->sock->sk, false);
3444 return ret;
3445 }
3446
3447 static u32 crc32c_zeros(u32 crc, int zero_len)
3448 {
3449 int len;
3450
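/* Compute crc32c over @zero_len zero bytes, one page at a time. */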
3451 while (zero_len) {
3452 len = min(zero_len, (int)PAGE_SIZE);
3453 crc = crc32c(crc, page_address(ceph_zero_page), len);
3454 zero_len -= len;
3455 }
3456
3457 return crc;
3458 }
3459
3460 static void prepare_zero_front(struct ceph_connection *con, int resid)
3461 {
3462 int sent;
3463
3464 WARN_ON(!resid || resid > front_len(con->out_msg));
3465 sent = front_len(con->out_msg) - resid;
3466 dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3467
3468 if (sent) {
3469 con->v2.out_epil.front_crc =
3470 crc32c(-1, con->out_msg->front.iov_base, sent);
3471 con->v2.out_epil.front_crc =
3472 crc32c_zeros(con->v2.out_epil.front_crc, resid);
3473 } else {
3474 con->v2.out_epil.front_crc = crc32c_zeros(-1, resid);
3475 }
3476
3477 con->v2.out_iter.count -= resid;
3478 out_zero_add(con, resid);
3479 }
3480
3481 static void prepare_zero_middle(struct ceph_connection *con, int resid)
3482 {
3483 int sent;
3484
3485 WARN_ON(!resid || resid > middle_len(con->out_msg));
3486 sent = middle_len(con->out_msg) - resid;
3487 dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3488
3489 if (sent) {
3490 con->v2.out_epil.middle_crc =
3491 crc32c(-1, con->out_msg->middle->vec.iov_base, sent);
3492 con->v2.out_epil.middle_crc =
3493 crc32c_zeros(con->v2.out_epil.middle_crc, resid);
3494 } else {
3495 con->v2.out_epil.middle_crc = crc32c_zeros(-1, resid);
3496 }
3497
3498 con->v2.out_iter.count -= resid;
3499 out_zero_add(con, resid);
3500 }
3501
3502 static void prepare_zero_data(struct ceph_connection *con)
3503 {
3504 dout("%s con %p\n", __func__, con);
3505 con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(con->out_msg));
3506 out_zero_add(con, data_len(con->out_msg));
3507 }
3508
3509 static void revoke_at_queue_data(struct ceph_connection *con)
3510 {
3511 int boundary;
3512 int resid;
3513
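/*
 * The message was revoked while its head (and front/middle kvecs)
 * were still being sent.  Substitute zeros for whatever has not been
 * sent yet, fix up the epilogue CRCs to match and switch to sending
 * zeros followed by an aborted epilogue.
 */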
3514 WARN_ON(!data_len(con->out_msg));
3515 WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
3516 resid = iov_iter_count(&con->v2.out_iter);
3517
3518 boundary = front_len(con->out_msg) + middle_len(con->out_msg);
3519 if (resid > boundary) {
3520 resid -= boundary;
3521 WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
3522 dout("%s con %p was sending head\n", __func__, con);
3523 if (front_len(con->out_msg))
3524 prepare_zero_front(con, front_len(con->out_msg));
3525 if (middle_len(con->out_msg))
3526 prepare_zero_middle(con, middle_len(con->out_msg));
3527 prepare_zero_data(con);
3528 WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
3529 con->v2.out_state = OUT_S_QUEUE_ZEROS;
3530 return;
3531 }
3532
3533 boundary = middle_len(con->out_msg);
3534 if (resid > boundary) {
3535 resid -= boundary;
3536 dout("%s con %p was sending front\n", __func__, con);
3537 prepare_zero_front(con, resid);
3538 if (middle_len(con->out_msg))
3539 prepare_zero_middle(con, middle_len(con->out_msg));
3540 prepare_zero_data(con);
3541 queue_zeros(con);
3542 return;
3543 }
3544
3545 WARN_ON(!resid);
3546 dout("%s con %p was sending middle\n", __func__, con);
3547 prepare_zero_middle(con, resid);
3548 prepare_zero_data(con);
3549 queue_zeros(con);
3550 }
3551
3552 static void revoke_at_queue_data_cont(struct ceph_connection *con)
3553 {
3554 int sent, resid; /* current piece of data */
3555
3556 WARN_ON(!data_len(con->out_msg));
3557 WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter));
3558 resid = iov_iter_count(&con->v2.out_iter);
3559 WARN_ON(!resid || resid > con->v2.out_bvec.bv_len);
3560 sent = con->v2.out_bvec.bv_len - resid;
3561 dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3562
3563 if (sent) {
3564 con->v2.out_epil.data_crc = ceph_crc32c_page(
3565 con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
3566 con->v2.out_bvec.bv_offset, sent);
3567 ceph_msg_data_advance(&con->v2.out_cursor, sent);
3568 }
3569 WARN_ON(resid > con->v2.out_cursor.total_resid);
3570 con->v2.out_epil.data_crc = crc32c_zeros(con->v2.out_epil.data_crc,
3571 con->v2.out_cursor.total_resid);
3572
3573 con->v2.out_iter.count -= resid;
3574 out_zero_add(con, con->v2.out_cursor.total_resid);
3575 queue_zeros(con);
3576 }
3577
3578 static void revoke_at_finish_message(struct ceph_connection *con)
3579 {
3580 int boundary;
3581 int resid;
3582
3583 WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
3584 resid = iov_iter_count(&con->v2.out_iter);
3585
3586 if (!front_len(con->out_msg) && !middle_len(con->out_msg) &&
3587 !data_len(con->out_msg)) {
3588 WARN_ON(!resid || resid > MESSAGE_HEAD_PLAIN_LEN);
3589 dout("%s con %p was sending head (empty message) - noop\n",
3590 __func__, con);
3591 return;
3592 }
3593
3594 boundary = front_len(con->out_msg) + middle_len(con->out_msg) +
3595 CEPH_EPILOGUE_PLAIN_LEN;
3596 if (resid > boundary) {
3597 resid -= boundary;
3598 WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
3599 dout("%s con %p was sending head\n", __func__, con);
3600 if (front_len(con->out_msg))
3601 prepare_zero_front(con, front_len(con->out_msg));
3602 if (middle_len(con->out_msg))
3603 prepare_zero_middle(con, middle_len(con->out_msg));
3604 con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3605 WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
3606 con->v2.out_state = OUT_S_QUEUE_ZEROS;
3607 return;
3608 }
3609
3610 boundary = middle_len(con->out_msg) + CEPH_EPILOGUE_PLAIN_LEN;
3611 if (resid > boundary) {
3612 resid -= boundary;
3613 dout("%s con %p was sending front\n", __func__, con);
3614 prepare_zero_front(con, resid);
3615 if (middle_len(con->out_msg))
3616 prepare_zero_middle(con, middle_len(con->out_msg));
3617 con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3618 queue_zeros(con);
3619 return;
3620 }
3621
3622 boundary = CEPH_EPILOGUE_PLAIN_LEN;
3623 if (resid > boundary) {
3624 resid -= boundary;
3625 dout("%s con %p was sending middle\n", __func__, con);
3626 prepare_zero_middle(con, resid);
3627 con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3628 queue_zeros(con);
3629 return;
3630 }
3631
3632 WARN_ON(!resid);
3633 dout("%s con %p was sending epilogue - noop\n", __func__, con);
3634 }
3635
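/*
 * Revoke con->out_msg while it is being written out.  In secure mode the
 * frame was encrypted into enc pages up front and goes out as is, so
 * revoking is a no-op; in crc mode the unsent portion is replaced with
 * zeros by the revoke_at_*() helper for the current out_state.
 */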
void ceph_con_v2_revoke(struct ceph_connection *con)
{
        WARN_ON(con->v2.out_zero);

        if (con_secure(con)) {
                WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE &&
                        con->v2.out_state != OUT_S_FINISH_MESSAGE);
                dout("%s con %p secure - noop\n", __func__, con);
                return;
        }

        switch (con->v2.out_state) {
        case OUT_S_QUEUE_DATA:
                revoke_at_queue_data(con);
                break;
        case OUT_S_QUEUE_DATA_CONT:
                revoke_at_queue_data_cont(con);
                break;
        case OUT_S_FINISH_MESSAGE:
                revoke_at_finish_message(con);
                break;
        default:
                WARN(1, "bad out_state %d", con->v2.out_state);
                break;
        }
}

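/*
 * con->in_msg was revoked before any of its data was read: skip what is
 * still queued in the iterator plus the entire data section and the
 * plain epilogue that follow it.
 */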
static void revoke_at_prepare_read_data(struct ceph_connection *con)
{
        int remaining;
        int resid;

        WARN_ON(con_secure(con));
        WARN_ON(!data_len(con->in_msg));
        WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
        resid = iov_iter_count(&con->v2.in_iter);
        WARN_ON(!resid);

        remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;
        dout("%s con %p resid %d remaining %d\n", __func__, con, resid,
             remaining);
        con->v2.in_iter.count -= resid;
        set_in_skip(con, resid + remaining);
        con->v2.in_state = IN_S_FINISH_SKIP;
}

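/*
 * Revoked while reading into a data bvec: advance the cursor past the
 * bytes already received and skip everything that remains of the data
 * section plus the plain epilogue.
 */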
static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
{
        int recved, resid;  /* current piece of data */
        int remaining;

        WARN_ON(con_secure(con));
        WARN_ON(!data_len(con->in_msg));
        WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
        resid = iov_iter_count(&con->v2.in_iter);
        WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
        recved = con->v2.in_bvec.bv_len - resid;
        dout("%s con %p recved %d resid %d\n", __func__, con, recved, resid);

        if (recved)
                ceph_msg_data_advance(&con->v2.in_cursor, recved);
        WARN_ON(resid > con->v2.in_cursor.total_resid);

        remaining = CEPH_EPILOGUE_PLAIN_LEN;
        dout("%s con %p total_resid %zu remaining %d\n", __func__, con,
             con->v2.in_cursor.total_resid, remaining);
        con->v2.in_iter.count -= resid;
        set_in_skip(con, con->v2.in_cursor.total_resid + remaining);
        con->v2.in_state = IN_S_FINISH_SKIP;
}

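/*
 * Secure mode: the message arrives in encrypted pages.  Skip the rest of
 * the current enc page and all remaining enc bytes (in_enc_resid).
 */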
static void revoke_at_prepare_read_enc_page(struct ceph_connection *con)
{
        int resid;  /* current enc page (not necessarily data) */

        WARN_ON(!con_secure(con));
        WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
        resid = iov_iter_count(&con->v2.in_iter);
        WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);

        dout("%s con %p resid %d enc_resid %d\n", __func__, con, resid,
             con->v2.in_enc_resid);
        con->v2.in_iter.count -= resid;
        set_in_skip(con, resid + con->v2.in_enc_resid);
        con->v2.in_state = IN_S_FINISH_SKIP;
}

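/*
 * Revoked while receiving a sparse-read data section: skip what is left
 * in the iterator, the remaining sparse data (data_len_remain) and the
 * plain epilogue.
 */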
static void revoke_at_prepare_sparse_data(struct ceph_connection *con)
{
        int resid;  /* current piece of data */
        int remaining;

        WARN_ON(con_secure(con));
        WARN_ON(!data_len(con->in_msg));
        WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
        resid = iov_iter_count(&con->v2.in_iter);
        dout("%s con %p resid %d\n", __func__, con, resid);

        remaining = CEPH_EPILOGUE_PLAIN_LEN + con->v2.data_len_remain;
        con->v2.in_iter.count -= resid;
        set_in_skip(con, resid + remaining);
        con->v2.in_state = IN_S_FINISH_SKIP;
}

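/*
 * Revoked with only the epilogue left to read: just skip it.
 */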
static void revoke_at_handle_epilogue(struct ceph_connection *con)
{
        int resid;

        resid = iov_iter_count(&con->v2.in_iter);
        WARN_ON(!resid);

        dout("%s con %p resid %d\n", __func__, con, resid);
        con->v2.in_iter.count -= resid;
        set_in_skip(con, resid);
        con->v2.in_state = IN_S_FINISH_SKIP;
}

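/*
 * Revoke con->in_msg while it is being received.  The rest of the frame
 * still has to be consumed off the socket, so each helper converts what
 * remains into a skip (IN_S_FINISH_SKIP) instead of reading it into the
 * revoked message.
 */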
void ceph_con_v2_revoke_incoming(struct ceph_connection *con)
{
        switch (con->v2.in_state) {
        case IN_S_PREPARE_SPARSE_DATA:
        case IN_S_PREPARE_READ_DATA:
                revoke_at_prepare_read_data(con);
                break;
        case IN_S_PREPARE_READ_DATA_CONT:
                revoke_at_prepare_read_data_cont(con);
                break;
        case IN_S_PREPARE_READ_ENC_PAGE:
                revoke_at_prepare_read_enc_page(con);
                break;
        case IN_S_PREPARE_SPARSE_DATA_CONT:
                revoke_at_prepare_sparse_data(con);
                break;
        case IN_S_HANDLE_EPILOGUE:
                revoke_at_handle_epilogue(con);
                break;
        default:
                WARN(1, "bad in_state %d", con->v2.in_state);
                break;
        }
}

bool ceph_con_v2_opened(struct ceph_connection *con)
{
        return con->v2.peer_global_seq;
}

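/*
 * Forget the session state: clear the client/server cookies and the
 * global, connect and peer global sequence numbers tracked for this
 * connection.
 */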
void ceph_con_v2_reset_session(struct ceph_connection *con)
{
        con->v2.client_cookie = 0;
        con->v2.server_cookie = 0;
        con->v2.global_seq = 0;
        con->v2.connect_seq = 0;
        con->v2.peer_global_seq = 0;
}

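/*
 * Tear down all per-connection msgr2 protocol state: in-flight iterators,
 * signing kvecs, connection buffers, encrypted page vectors, the
 * negotiated connection mode, the GCM nonces and the crypto transforms.
 */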
void ceph_con_v2_reset_protocol(struct ceph_connection *con)
{
        iov_iter_truncate(&con->v2.in_iter, 0);
        iov_iter_truncate(&con->v2.out_iter, 0);
        con->v2.out_zero = 0;

        clear_in_sign_kvecs(con);
        clear_out_sign_kvecs(con);
        free_conn_bufs(con);

        if (con->v2.in_enc_pages) {
                WARN_ON(!con->v2.in_enc_page_cnt);
                ceph_release_page_vector(con->v2.in_enc_pages,
                                         con->v2.in_enc_page_cnt);
                con->v2.in_enc_pages = NULL;
                con->v2.in_enc_page_cnt = 0;
        }
        if (con->v2.out_enc_pages) {
                WARN_ON(!con->v2.out_enc_page_cnt);
                ceph_release_page_vector(con->v2.out_enc_pages,
                                         con->v2.out_enc_page_cnt);
                con->v2.out_enc_pages = NULL;
                con->v2.out_enc_page_cnt = 0;
        }

        con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
        memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
        memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);

        if (con->v2.hmac_tfm) {
                crypto_free_shash(con->v2.hmac_tfm);
                con->v2.hmac_tfm = NULL;
        }
        if (con->v2.gcm_req) {
                aead_request_free(con->v2.gcm_req);
                con->v2.gcm_req = NULL;
        }
        if (con->v2.gcm_tfm) {
                crypto_free_aead(con->v2.gcm_tfm);
                con->v2.gcm_tfm = NULL;
        }
}