xref: /openbmc/linux/net/ceph/messenger.c (revision 83b975b5)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/crc32c.h>
5 #include <linux/ctype.h>
6 #include <linux/highmem.h>
7 #include <linux/inet.h>
8 #include <linux/kthread.h>
9 #include <linux/net.h>
10 #include <linux/nsproxy.h>
11 #include <linux/sched/mm.h>
12 #include <linux/slab.h>
13 #include <linux/socket.h>
14 #include <linux/string.h>
15 #ifdef	CONFIG_BLOCK
16 #include <linux/bio.h>
17 #endif	/* CONFIG_BLOCK */
18 #include <linux/dns_resolver.h>
19 #include <net/tcp.h>
20 
21 #include <linux/ceph/ceph_features.h>
22 #include <linux/ceph/libceph.h>
23 #include <linux/ceph/messenger.h>
24 #include <linux/ceph/decode.h>
25 #include <linux/ceph/pagelist.h>
26 #include <linux/export.h>
27 
28 /*
29  * Ceph uses the messenger to exchange ceph_msg messages with other
30  * hosts in the system.  The messenger provides ordered and reliable
31  * delivery.  We tolerate TCP disconnects by reconnecting (with
32  * exponential backoff) in the case of a fault (disconnection, bad
33  * crc, protocol error).  Acks allow sent messages to be discarded by
34  * the sender.
35  */
36 
37 /*
38  * We track the state of the socket on a given connection using
39  * values defined below.  The transition to a new socket state is
40  * handled by a function which verifies we aren't coming from an
41  * unexpected state.
42  *
43  *      --------
44  *      | NEW* |  transient initial state
45  *      --------
46  *          | con_sock_state_init()
47  *          v
48  *      ----------
49  *      | CLOSED |  initialized, but no socket (and no
50  *      ----------  TCP connection)
51  *       ^      \
52  *       |       \ con_sock_state_connecting()
53  *       |        ----------------------
54  *       |                              \
55  *       + con_sock_state_closed()       \
56  *       |+---------------------------    \
57  *       | \                          \    \
58  *       |  -----------                \    \
59  *       |  | CLOSING |  socket event;  \    \
60  *       |  -----------  await close     \    \
61  *       |       ^                        \   |
62  *       |       |                         \  |
63  *       |       + con_sock_state_closing() \ |
64  *       |      / \                         | |
65  *       |     /   ---------------          | |
66  *       |    /                   \         v v
67  *       |   /                    --------------
68  *       |  /    -----------------| CONNECTING |  socket created, TCP
69  *       |  |   /                 --------------  connect initiated
70  *       |  |   | con_sock_state_connected()
71  *       |  |   v
72  *      -------------
73  *      | CONNECTED |  TCP connection established
74  *      -------------
75  *
76  * State values for ceph_connection->sock_state; NEW is assumed to be 0.
77  */
78 
79 #define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
80 #define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
81 #define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
82 #define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
83 #define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */
84 
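/*
 * A minimal userspace sketch of the transition pattern used by the
 * con_sock_state_*() helpers further down: atomically swap in the new
 * state and warn if the previous state is not one we expected to come
 * from.  This is only an analogue of the kernel code above; the demo_*
 * names and the use of C11 atomics are illustrative assumptions.
 */
#include <stdatomic.h>
#include <stdio.h>

enum {
	DEMO_NEW,		/* transient initial state */
	DEMO_CLOSED,
	DEMO_CONNECTING,
	DEMO_CONNECTED,
	DEMO_CLOSING,
};

static _Atomic int demo_sock_state = DEMO_NEW;

static void demo_state_connecting(void)
{
	int old = atomic_exchange(&demo_sock_state, DEMO_CONNECTING);

	if (old != DEMO_CLOSED)			/* came from the wrong state */
		fprintf(stderr, "unexpected old state %d\n", old);
}

int main(void)
{
	atomic_store(&demo_sock_state, DEMO_CLOSED);	/* NEW -> CLOSED */
	demo_state_connecting();			/* CLOSED -> CONNECTING */
	return 0;
}
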
85 static bool con_flag_valid(unsigned long con_flag)
86 {
87 	switch (con_flag) {
88 	case CEPH_CON_F_LOSSYTX:
89 	case CEPH_CON_F_KEEPALIVE_PENDING:
90 	case CEPH_CON_F_WRITE_PENDING:
91 	case CEPH_CON_F_SOCK_CLOSED:
92 	case CEPH_CON_F_BACKOFF:
93 		return true;
94 	default:
95 		return false;
96 	}
97 }
98 
99 void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
100 {
101 	BUG_ON(!con_flag_valid(con_flag));
102 
103 	clear_bit(con_flag, &con->flags);
104 }
105 
106 void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag)
107 {
108 	BUG_ON(!con_flag_valid(con_flag));
109 
110 	set_bit(con_flag, &con->flags);
111 }
112 
113 bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag)
114 {
115 	BUG_ON(!con_flag_valid(con_flag));
116 
117 	return test_bit(con_flag, &con->flags);
118 }
119 
120 bool ceph_con_flag_test_and_clear(struct ceph_connection *con,
121 				  unsigned long con_flag)
122 {
123 	BUG_ON(!con_flag_valid(con_flag));
124 
125 	return test_and_clear_bit(con_flag, &con->flags);
126 }
127 
128 bool ceph_con_flag_test_and_set(struct ceph_connection *con,
129 				unsigned long con_flag)
130 {
131 	BUG_ON(!con_flag_valid(con_flag));
132 
133 	return test_and_set_bit(con_flag, &con->flags);
134 }
135 
136 /* Slab caches for frequently-allocated structures */
137 
138 static struct kmem_cache	*ceph_msg_cache;
139 
140 #ifdef CONFIG_LOCKDEP
141 static struct lock_class_key socket_class;
142 #endif
143 
144 static void queue_con(struct ceph_connection *con);
145 static void cancel_con(struct ceph_connection *con);
146 static void ceph_con_workfn(struct work_struct *);
147 static void con_fault(struct ceph_connection *con);
148 
149 /*
150  * Nicely render a sockaddr as a string.  An array of formatted
151  * strings is used, to approximate reentrancy.
152  */
153 #define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
154 #define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
155 #define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
156 #define MAX_ADDR_STR_LEN	64	/* 54 is enough */
157 
158 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
159 static atomic_t addr_str_seq = ATOMIC_INIT(0);
160 
161 struct page *ceph_zero_page;		/* used in certain error cases */
162 
163 const char *ceph_pr_addr(const struct ceph_entity_addr *addr)
164 {
165 	int i;
166 	char *s;
167 	struct sockaddr_storage ss = addr->in_addr; /* align */
168 	struct sockaddr_in *in4 = (struct sockaddr_in *)&ss;
169 	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)&ss;
170 
171 	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
172 	s = addr_str[i];
173 
174 	switch (ss.ss_family) {
175 	case AF_INET:
176 		snprintf(s, MAX_ADDR_STR_LEN, "(%d)%pI4:%hu",
177 			 le32_to_cpu(addr->type), &in4->sin_addr,
178 			 ntohs(in4->sin_port));
179 		break;
180 
181 	case AF_INET6:
182 		snprintf(s, MAX_ADDR_STR_LEN, "(%d)[%pI6c]:%hu",
183 			 le32_to_cpu(addr->type), &in6->sin6_addr,
184 			 ntohs(in6->sin6_port));
185 		break;
186 
187 	default:
188 		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
189 			 ss.ss_family);
190 	}
191 
192 	return s;
193 }
194 EXPORT_SYMBOL(ceph_pr_addr);
195 
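/*
 * Standalone sketch of the "array of formatted strings" trick used by
 * ceph_pr_addr() above: a small power-of-two ring of static buffers indexed
 * by an atomic counter, so that a handful of concurrent callers each get a
 * distinct buffer without allocating.  It only approximates reentrancy --
 * a returned string stays valid until the ring wraps.  The demo_* names are
 * illustrative assumptions.
 */
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_STR_COUNT	8		/* must be a power of two */
#define DEMO_STR_LEN	64

static char demo_str[DEMO_STR_COUNT][DEMO_STR_LEN];
static atomic_int demo_str_seq;

static const char *demo_pr_port(int port)
{
	int i = atomic_fetch_add(&demo_str_seq, 1) & (DEMO_STR_COUNT - 1);

	snprintf(demo_str[i], DEMO_STR_LEN, "port %d", port);
	return demo_str[i];
}

int main(void)
{
	/* each call lands in a different slot of the ring */
	printf("%s / %s\n", demo_pr_port(6789), demo_pr_port(3300));
	return 0;
}
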
196 void ceph_encode_my_addr(struct ceph_messenger *msgr)
197 {
198 	if (!ceph_msgr2(from_msgr(msgr))) {
199 		memcpy(&msgr->my_enc_addr, &msgr->inst.addr,
200 		       sizeof(msgr->my_enc_addr));
201 		ceph_encode_banner_addr(&msgr->my_enc_addr);
202 	}
203 }
204 
205 /*
206  * work queue for all reading and writing to/from the socket.
207  */
208 static struct workqueue_struct *ceph_msgr_wq;
209 
210 static int ceph_msgr_slab_init(void)
211 {
212 	BUG_ON(ceph_msg_cache);
213 	ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
214 	if (!ceph_msg_cache)
215 		return -ENOMEM;
216 
217 	return 0;
218 }
219 
220 static void ceph_msgr_slab_exit(void)
221 {
222 	BUG_ON(!ceph_msg_cache);
223 	kmem_cache_destroy(ceph_msg_cache);
224 	ceph_msg_cache = NULL;
225 }
226 
227 static void _ceph_msgr_exit(void)
228 {
229 	if (ceph_msgr_wq) {
230 		destroy_workqueue(ceph_msgr_wq);
231 		ceph_msgr_wq = NULL;
232 	}
233 
234 	BUG_ON(!ceph_zero_page);
235 	put_page(ceph_zero_page);
236 	ceph_zero_page = NULL;
237 
238 	ceph_msgr_slab_exit();
239 }
240 
241 int __init ceph_msgr_init(void)
242 {
243 	if (ceph_msgr_slab_init())
244 		return -ENOMEM;
245 
246 	BUG_ON(ceph_zero_page);
247 	ceph_zero_page = ZERO_PAGE(0);
248 	get_page(ceph_zero_page);
249 
250 	/*
251 	 * The number of active work items is limited by the number of
252 	 * connections, so leave @max_active at default.
253 	 */
254 	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
255 	if (ceph_msgr_wq)
256 		return 0;
257 
258 	pr_err("msgr_init failed to create workqueue\n");
259 	_ceph_msgr_exit();
260 
261 	return -ENOMEM;
262 }
263 
264 void ceph_msgr_exit(void)
265 {
266 	BUG_ON(ceph_msgr_wq == NULL);
267 
268 	_ceph_msgr_exit();
269 }
270 
271 void ceph_msgr_flush(void)
272 {
273 	flush_workqueue(ceph_msgr_wq);
274 }
275 EXPORT_SYMBOL(ceph_msgr_flush);
276 
277 /* Connection socket state transition functions */
278 
279 static void con_sock_state_init(struct ceph_connection *con)
280 {
281 	int old_state;
282 
283 	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
284 	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
285 		printk("%s: unexpected old state %d\n", __func__, old_state);
286 	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
287 	     CON_SOCK_STATE_CLOSED);
288 }
289 
290 static void con_sock_state_connecting(struct ceph_connection *con)
291 {
292 	int old_state;
293 
294 	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
295 	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
296 		printk("%s: unexpected old state %d\n", __func__, old_state);
297 	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
298 	     CON_SOCK_STATE_CONNECTING);
299 }
300 
301 static void con_sock_state_connected(struct ceph_connection *con)
302 {
303 	int old_state;
304 
305 	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
306 	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
307 		printk("%s: unexpected old state %d\n", __func__, old_state);
308 	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
309 	     CON_SOCK_STATE_CONNECTED);
310 }
311 
312 static void con_sock_state_closing(struct ceph_connection *con)
313 {
314 	int old_state;
315 
316 	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
317 	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
318 			old_state != CON_SOCK_STATE_CONNECTED &&
319 			old_state != CON_SOCK_STATE_CLOSING))
320 		printk("%s: unexpected old state %d\n", __func__, old_state);
321 	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
322 	     CON_SOCK_STATE_CLOSING);
323 }
324 
325 static void con_sock_state_closed(struct ceph_connection *con)
326 {
327 	int old_state;
328 
329 	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
330 	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
331 		    old_state != CON_SOCK_STATE_CLOSING &&
332 		    old_state != CON_SOCK_STATE_CONNECTING &&
333 		    old_state != CON_SOCK_STATE_CLOSED))
334 		printk("%s: unexpected old state %d\n", __func__, old_state);
335 	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
336 	     CON_SOCK_STATE_CLOSED);
337 }
338 
339 /*
340  * socket callback functions
341  */
342 
343 /* data available on socket, or listen socket received a connect */
344 static void ceph_sock_data_ready(struct sock *sk)
345 {
346 	struct ceph_connection *con = sk->sk_user_data;
347 	if (atomic_read(&con->msgr->stopping)) {
348 		return;
349 	}
350 
351 	if (sk->sk_state != TCP_CLOSE_WAIT) {
352 		dout("%s %p state = %d, queueing work\n", __func__,
353 		     con, con->state);
354 		queue_con(con);
355 	}
356 }
357 
358 /* socket has buffer space for writing */
359 static void ceph_sock_write_space(struct sock *sk)
360 {
361 	struct ceph_connection *con = sk->sk_user_data;
362 
363 	/* only queue to workqueue if there is data we want to write,
364 	 * and there is sufficient space in the socket buffer to accept
365 	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
366 	 * doesn't get called again until try_write() fills the socket
367 	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
368 	 * and net/core/stream.c:sk_stream_write_space().
369 	 */
370 	if (ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING)) {
371 		if (sk_stream_is_writeable(sk)) {
372 			dout("%s %p queueing write work\n", __func__, con);
373 			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
374 			queue_con(con);
375 		}
376 	} else {
377 		dout("%s %p nothing to write\n", __func__, con);
378 	}
379 }
380 
381 /* socket's state has changed */
382 static void ceph_sock_state_change(struct sock *sk)
383 {
384 	struct ceph_connection *con = sk->sk_user_data;
385 
386 	dout("%s %p state = %d sk_state = %u\n", __func__,
387 	     con, con->state, sk->sk_state);
388 
389 	switch (sk->sk_state) {
390 	case TCP_CLOSE:
391 		dout("%s TCP_CLOSE\n", __func__);
392 		fallthrough;
393 	case TCP_CLOSE_WAIT:
394 		dout("%s TCP_CLOSE_WAIT\n", __func__);
395 		con_sock_state_closing(con);
396 		ceph_con_flag_set(con, CEPH_CON_F_SOCK_CLOSED);
397 		queue_con(con);
398 		break;
399 	case TCP_ESTABLISHED:
400 		dout("%s TCP_ESTABLISHED\n", __func__);
401 		con_sock_state_connected(con);
402 		queue_con(con);
403 		break;
404 	default:	/* Everything else is uninteresting */
405 		break;
406 	}
407 }
408 
409 /*
410  * set up socket callbacks
411  */
412 static void set_sock_callbacks(struct socket *sock,
413 			       struct ceph_connection *con)
414 {
415 	struct sock *sk = sock->sk;
416 	sk->sk_user_data = con;
417 	sk->sk_data_ready = ceph_sock_data_ready;
418 	sk->sk_write_space = ceph_sock_write_space;
419 	sk->sk_state_change = ceph_sock_state_change;
420 }
421 
422 
423 /*
424  * socket helpers
425  */
426 
427 /*
428  * initiate connection to a remote socket.
429  */
430 int ceph_tcp_connect(struct ceph_connection *con)
431 {
432 	struct sockaddr_storage ss = con->peer_addr.in_addr; /* align */
433 	struct socket *sock;
434 	unsigned int noio_flag;
435 	int ret;
436 
437 	dout("%s con %p peer_addr %s\n", __func__, con,
438 	     ceph_pr_addr(&con->peer_addr));
439 	BUG_ON(con->sock);
440 
441 	/* sock_create_kern() allocates with GFP_KERNEL */
442 	noio_flag = memalloc_noio_save();
443 	ret = sock_create_kern(read_pnet(&con->msgr->net), ss.ss_family,
444 			       SOCK_STREAM, IPPROTO_TCP, &sock);
445 	memalloc_noio_restore(noio_flag);
446 	if (ret)
447 		return ret;
448 	sock->sk->sk_allocation = GFP_NOFS;
449 
450 #ifdef CONFIG_LOCKDEP
451 	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
452 #endif
453 
454 	set_sock_callbacks(sock, con);
455 
456 	con_sock_state_connecting(con);
457 	ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
458 				 O_NONBLOCK);
459 	if (ret == -EINPROGRESS) {
460 		dout("connect %s EINPROGRESS sk_state = %u\n",
461 		     ceph_pr_addr(&con->peer_addr),
462 		     sock->sk->sk_state);
463 	} else if (ret < 0) {
464 		pr_err("connect %s error %d\n",
465 		       ceph_pr_addr(&con->peer_addr), ret);
466 		sock_release(sock);
467 		return ret;
468 	}
469 
470 	if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY))
471 		tcp_sock_set_nodelay(sock->sk);
472 
473 	con->sock = sock;
474 	return 0;
475 }
476 
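/*
 * Userspace sketch of the non-blocking connect pattern in ceph_tcp_connect()
 * above: the kernel side passes O_NONBLOCK to ->connect() and treats
 * -EINPROGRESS as "still in flight", with completion reported later through
 * the socket state callbacks.  Here the same idea is expressed with
 * fcntl()/connect()/poll(); the 127.0.0.1:6789 endpoint is just a
 * placeholder.
 */
#include <arpa/inet.h>
#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in sin = { .sin_family = AF_INET,
				   .sin_port = htons(6789) };
	int fd, ret;

	inet_pton(AF_INET, "127.0.0.1", &sin.sin_addr);
	fd = socket(AF_INET, SOCK_STREAM, 0);
	fcntl(fd, F_SETFL, O_NONBLOCK);

	ret = connect(fd, (struct sockaddr *)&sin, sizeof(sin));
	if (ret < 0 && errno == EINPROGRESS) {
		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
		int err = 0;
		socklen_t len = sizeof(err);

		poll(&pfd, 1, 1000);		/* writable == connect finished */
		getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
		printf("connect result: %d\n", err);	/* 0 means established */
	}

	close(fd);
	return 0;
}
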
477 /*
478  * Shutdown/close the socket for the given connection.
479  */
480 int ceph_con_close_socket(struct ceph_connection *con)
481 {
482 	int rc = 0;
483 
484 	dout("%s con %p sock %p\n", __func__, con, con->sock);
485 	if (con->sock) {
486 		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
487 		sock_release(con->sock);
488 		con->sock = NULL;
489 	}
490 
491 	/*
492 	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
493 	 * independent of the connection mutex, and we could have
494 	 * received a socket close event before we had the chance to
495 	 * shut the socket down.
496 	 */
497 	ceph_con_flag_clear(con, CEPH_CON_F_SOCK_CLOSED);
498 
499 	con_sock_state_closed(con);
500 	return rc;
501 }
502 
503 static void ceph_con_reset_protocol(struct ceph_connection *con)
504 {
505 	dout("%s con %p\n", __func__, con);
506 
507 	ceph_con_close_socket(con);
508 	if (con->in_msg) {
509 		WARN_ON(con->in_msg->con != con);
510 		ceph_msg_put(con->in_msg);
511 		con->in_msg = NULL;
512 	}
513 	if (con->out_msg) {
514 		WARN_ON(con->out_msg->con != con);
515 		ceph_msg_put(con->out_msg);
516 		con->out_msg = NULL;
517 	}
518 	if (con->bounce_page) {
519 		__free_page(con->bounce_page);
520 		con->bounce_page = NULL;
521 	}
522 
523 	if (ceph_msgr2(from_msgr(con->msgr)))
524 		ceph_con_v2_reset_protocol(con);
525 	else
526 		ceph_con_v1_reset_protocol(con);
527 }
528 
529 /*
530  * Reset a connection.  Discard all incoming and outgoing messages
531  * and clear *_seq state.
532  */
533 static void ceph_msg_remove(struct ceph_msg *msg)
534 {
535 	list_del_init(&msg->list_head);
536 
537 	ceph_msg_put(msg);
538 }
539 
540 static void ceph_msg_remove_list(struct list_head *head)
541 {
542 	while (!list_empty(head)) {
543 		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
544 							list_head);
545 		ceph_msg_remove(msg);
546 	}
547 }
548 
549 void ceph_con_reset_session(struct ceph_connection *con)
550 {
551 	dout("%s con %p\n", __func__, con);
552 
553 	WARN_ON(con->in_msg);
554 	WARN_ON(con->out_msg);
555 	ceph_msg_remove_list(&con->out_queue);
556 	ceph_msg_remove_list(&con->out_sent);
557 	con->out_seq = 0;
558 	con->in_seq = 0;
559 	con->in_seq_acked = 0;
560 
561 	if (ceph_msgr2(from_msgr(con->msgr)))
562 		ceph_con_v2_reset_session(con);
563 	else
564 		ceph_con_v1_reset_session(con);
565 }
566 
567 /*
568  * mark a peer down.  drop any open connections.
569  */
570 void ceph_con_close(struct ceph_connection *con)
571 {
572 	mutex_lock(&con->mutex);
573 	dout("con_close %p peer %s\n", con, ceph_pr_addr(&con->peer_addr));
574 	con->state = CEPH_CON_S_CLOSED;
575 
576 	ceph_con_flag_clear(con, CEPH_CON_F_LOSSYTX);  /* so we retry next
577 							  connect */
578 	ceph_con_flag_clear(con, CEPH_CON_F_KEEPALIVE_PENDING);
579 	ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
580 	ceph_con_flag_clear(con, CEPH_CON_F_BACKOFF);
581 
582 	ceph_con_reset_protocol(con);
583 	ceph_con_reset_session(con);
584 	cancel_con(con);
585 	mutex_unlock(&con->mutex);
586 }
587 EXPORT_SYMBOL(ceph_con_close);
588 
589 /*
590  * Reopen a closed connection, with a new peer address.
591  */
592 void ceph_con_open(struct ceph_connection *con,
593 		   __u8 entity_type, __u64 entity_num,
594 		   struct ceph_entity_addr *addr)
595 {
596 	mutex_lock(&con->mutex);
597 	dout("con_open %p %s\n", con, ceph_pr_addr(addr));
598 
599 	WARN_ON(con->state != CEPH_CON_S_CLOSED);
600 	con->state = CEPH_CON_S_PREOPEN;
601 
602 	con->peer_name.type = (__u8) entity_type;
603 	con->peer_name.num = cpu_to_le64(entity_num);
604 
605 	memcpy(&con->peer_addr, addr, sizeof(*addr));
606 	con->delay = 0;      /* reset backoff memory */
607 	mutex_unlock(&con->mutex);
608 	queue_con(con);
609 }
610 EXPORT_SYMBOL(ceph_con_open);
611 
612 /*
613  * return true if this connection ever successfully opened
614  */
615 bool ceph_con_opened(struct ceph_connection *con)
616 {
617 	if (ceph_msgr2(from_msgr(con->msgr)))
618 		return ceph_con_v2_opened(con);
619 
620 	return ceph_con_v1_opened(con);
621 }
622 
623 /*
624  * initialize a new connection.
625  */
626 void ceph_con_init(struct ceph_connection *con, void *private,
627 	const struct ceph_connection_operations *ops,
628 	struct ceph_messenger *msgr)
629 {
630 	dout("con_init %p\n", con);
631 	memset(con, 0, sizeof(*con));
632 	con->private = private;
633 	con->ops = ops;
634 	con->msgr = msgr;
635 
636 	con_sock_state_init(con);
637 
638 	mutex_init(&con->mutex);
639 	INIT_LIST_HEAD(&con->out_queue);
640 	INIT_LIST_HEAD(&con->out_sent);
641 	INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
642 
643 	con->state = CEPH_CON_S_CLOSED;
644 }
645 EXPORT_SYMBOL(ceph_con_init);
646 
647 /*
648  * We maintain a global counter to order connection attempts.  Get
649  * a unique seq greater than @gt.
650  */
651 u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt)
652 {
653 	u32 ret;
654 
655 	spin_lock(&msgr->global_seq_lock);
656 	if (msgr->global_seq < gt)
657 		msgr->global_seq = gt;
658 	ret = ++msgr->global_seq;
659 	spin_unlock(&msgr->global_seq_lock);
660 	return ret;
661 }
662 
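/*
 * Userspace sketch of the global_seq rule implemented above: the counter
 * only ever moves forward and can be bumped past a peer-supplied floor, so
 * successive connection attempts always carry increasing sequence numbers.
 * The demo_* names are illustrative assumptions.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int demo_global_seq;

static unsigned int demo_get_global_seq(unsigned int gt)
{
	unsigned int ret;

	pthread_mutex_lock(&demo_lock);
	if (demo_global_seq < gt)
		demo_global_seq = gt;		/* jump past the floor */
	ret = ++demo_global_seq;
	pthread_mutex_unlock(&demo_lock);
	return ret;
}

int main(void)
{
	printf("%u\n", demo_get_global_seq(0));		/* 1 */
	printf("%u\n", demo_get_global_seq(10));	/* 11 */
	printf("%u\n", demo_get_global_seq(0));		/* 12 */
	return 0;
}
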
663 /*
664  * Discard messages that have been acked by the server.
665  */
666 void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq)
667 {
668 	struct ceph_msg *msg;
669 	u64 seq;
670 
671 	dout("%s con %p ack_seq %llu\n", __func__, con, ack_seq);
672 	while (!list_empty(&con->out_sent)) {
673 		msg = list_first_entry(&con->out_sent, struct ceph_msg,
674 				       list_head);
675 		WARN_ON(msg->needs_out_seq);
676 		seq = le64_to_cpu(msg->hdr.seq);
677 		if (seq > ack_seq)
678 			break;
679 
680 		dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
681 		     msg, seq);
682 		ceph_msg_remove(msg);
683 	}
684 }
685 
686 /*
687  * Discard messages that have been requeued in con_fault(), up to
688  * reconnect_seq.  This avoids gratuitously resending messages that
689  * the server had received and handled prior to reconnect.
690  */
691 void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq)
692 {
693 	struct ceph_msg *msg;
694 	u64 seq;
695 
696 	dout("%s con %p reconnect_seq %llu\n", __func__, con, reconnect_seq);
697 	while (!list_empty(&con->out_queue)) {
698 		msg = list_first_entry(&con->out_queue, struct ceph_msg,
699 				       list_head);
700 		if (msg->needs_out_seq)
701 			break;
702 		seq = le64_to_cpu(msg->hdr.seq);
703 		if (seq > reconnect_seq)
704 			break;
705 
706 		dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
707 		     msg, seq);
708 		ceph_msg_remove(msg);
709 	}
710 }
711 
712 #ifdef CONFIG_BLOCK
713 
714 /*
715  * For a bio data item, a piece is whatever remains of the next
716  * entry in the current bio iovec, or the first entry in the next
717  * bio in the list.
718  */
719 static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
720 					size_t length)
721 {
722 	struct ceph_msg_data *data = cursor->data;
723 	struct ceph_bio_iter *it = &cursor->bio_iter;
724 
725 	cursor->resid = min_t(size_t, length, data->bio_length);
726 	*it = data->bio_pos;
727 	if (cursor->resid < it->iter.bi_size)
728 		it->iter.bi_size = cursor->resid;
729 
730 	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
731 }
732 
733 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
734 						size_t *page_offset,
735 						size_t *length)
736 {
737 	struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
738 					   cursor->bio_iter.iter);
739 
740 	*page_offset = bv.bv_offset;
741 	*length = bv.bv_len;
742 	return bv.bv_page;
743 }
744 
745 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
746 					size_t bytes)
747 {
748 	struct ceph_bio_iter *it = &cursor->bio_iter;
749 	struct page *page = bio_iter_page(it->bio, it->iter);
750 
751 	BUG_ON(bytes > cursor->resid);
752 	BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
753 	cursor->resid -= bytes;
754 	bio_advance_iter(it->bio, &it->iter, bytes);
755 
756 	if (!cursor->resid)
757 		return false;   /* no more data */
758 
759 	if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
760 		       page == bio_iter_page(it->bio, it->iter)))
761 		return false;	/* more bytes to process in this segment */
762 
763 	if (!it->iter.bi_size) {
764 		it->bio = it->bio->bi_next;
765 		it->iter = it->bio->bi_iter;
766 		if (cursor->resid < it->iter.bi_size)
767 			it->iter.bi_size = cursor->resid;
768 	}
769 
770 	BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
771 	return true;
772 }
773 #endif /* CONFIG_BLOCK */
774 
775 static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor,
776 					size_t length)
777 {
778 	struct ceph_msg_data *data = cursor->data;
779 	struct bio_vec *bvecs = data->bvec_pos.bvecs;
780 
781 	cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
782 	cursor->bvec_iter = data->bvec_pos.iter;
783 	cursor->bvec_iter.bi_size = cursor->resid;
784 
785 	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
786 }
787 
788 static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor,
789 						size_t *page_offset,
790 						size_t *length)
791 {
792 	struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs,
793 					   cursor->bvec_iter);
794 
795 	*page_offset = bv.bv_offset;
796 	*length = bv.bv_len;
797 	return bv.bv_page;
798 }
799 
800 static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
801 					size_t bytes)
802 {
803 	struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
804 	struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
805 
806 	BUG_ON(bytes > cursor->resid);
807 	BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
808 	cursor->resid -= bytes;
809 	bvec_iter_advance(bvecs, &cursor->bvec_iter, bytes);
810 
811 	if (!cursor->resid)
812 		return false;   /* no more data */
813 
814 	if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
815 		       page == bvec_iter_page(bvecs, cursor->bvec_iter)))
816 		return false;	/* more bytes to process in this segment */
817 
818 	BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
819 	return true;
820 }
821 
822 /*
823  * For a page array, a piece comes from the first page in the array
824  * that has not already been fully consumed.
825  */
826 static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
827 					size_t length)
828 {
829 	struct ceph_msg_data *data = cursor->data;
830 	int page_count;
831 
832 	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
833 
834 	BUG_ON(!data->pages);
835 	BUG_ON(!data->length);
836 
837 	cursor->resid = min(length, data->length);
838 	page_count = calc_pages_for(data->alignment, (u64)data->length);
839 	cursor->page_offset = data->alignment & ~PAGE_MASK;
840 	cursor->page_index = 0;
841 	BUG_ON(page_count > (int)USHRT_MAX);
842 	cursor->page_count = (unsigned short)page_count;
843 	BUG_ON(length > SIZE_MAX - cursor->page_offset);
844 }
845 
846 static struct page *
847 ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
848 					size_t *page_offset, size_t *length)
849 {
850 	struct ceph_msg_data *data = cursor->data;
851 
852 	BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
853 
854 	BUG_ON(cursor->page_index >= cursor->page_count);
855 	BUG_ON(cursor->page_offset >= PAGE_SIZE);
856 
857 	*page_offset = cursor->page_offset;
858 	*length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
859 	return data->pages[cursor->page_index];
860 }
861 
862 static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
863 						size_t bytes)
864 {
865 	BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
866 
867 	BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
868 
869 	/* Advance the cursor page offset */
870 
871 	cursor->resid -= bytes;
872 	cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
873 	if (!bytes || cursor->page_offset)
874 		return false;	/* more bytes to process in the current page */
875 
876 	if (!cursor->resid)
877 		return false;   /* no more data */
878 
879 	/* Move on to the next page; offset is already at 0 */
880 
881 	BUG_ON(cursor->page_index >= cursor->page_count);
882 	cursor->page_index++;
883 	return true;
884 }
885 
886 /*
887  * For a pagelist, a piece is whatever remains to be consumed in the
888  * first page in the list, or the front of the next page.
889  */
890 static void
891 ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
892 					size_t length)
893 {
894 	struct ceph_msg_data *data = cursor->data;
895 	struct ceph_pagelist *pagelist;
896 	struct page *page;
897 
898 	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
899 
900 	pagelist = data->pagelist;
901 	BUG_ON(!pagelist);
902 
903 	if (!length)
904 		return;		/* pagelist can be assigned but empty */
905 
906 	BUG_ON(list_empty(&pagelist->head));
907 	page = list_first_entry(&pagelist->head, struct page, lru);
908 
909 	cursor->resid = min(length, pagelist->length);
910 	cursor->page = page;
911 	cursor->offset = 0;
912 }
913 
914 static struct page *
915 ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
916 				size_t *page_offset, size_t *length)
917 {
918 	struct ceph_msg_data *data = cursor->data;
919 	struct ceph_pagelist *pagelist;
920 
921 	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
922 
923 	pagelist = data->pagelist;
924 	BUG_ON(!pagelist);
925 
926 	BUG_ON(!cursor->page);
927 	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
928 
929 	/* offset of first page in pagelist is always 0 */
930 	*page_offset = cursor->offset & ~PAGE_MASK;
931 	*length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
932 	return cursor->page;
933 }
934 
935 static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
936 						size_t bytes)
937 {
938 	struct ceph_msg_data *data = cursor->data;
939 	struct ceph_pagelist *pagelist;
940 
941 	BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
942 
943 	pagelist = data->pagelist;
944 	BUG_ON(!pagelist);
945 
946 	BUG_ON(cursor->offset + cursor->resid != pagelist->length);
947 	BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
948 
949 	/* Advance the cursor offset */
950 
951 	cursor->resid -= bytes;
952 	cursor->offset += bytes;
953 	/* offset of first page in pagelist is always 0 */
954 	if (!bytes || cursor->offset & ~PAGE_MASK)
955 		return false;	/* more bytes to process in the current page */
956 
957 	if (!cursor->resid)
958 		return false;   /* no more data */
959 
960 	/* Move on to the next page */
961 
962 	BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
963 	cursor->page = list_next_entry(cursor->page, lru);
964 	return true;
965 }
966 
967 /*
968  * Message data is handled (sent or received) in pieces, where each
969  * piece resides on a single page.  The network layer might not
970  * consume an entire piece at once.  A data item's cursor keeps
971  * track of which piece is next to process and how much remains to
972  * be processed in that piece.  It also tracks whether the current
973  * piece is the last one in the data item.
974  */
975 static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
976 {
977 	size_t length = cursor->total_resid;
978 
979 	switch (cursor->data->type) {
980 	case CEPH_MSG_DATA_PAGELIST:
981 		ceph_msg_data_pagelist_cursor_init(cursor, length);
982 		break;
983 	case CEPH_MSG_DATA_PAGES:
984 		ceph_msg_data_pages_cursor_init(cursor, length);
985 		break;
986 #ifdef CONFIG_BLOCK
987 	case CEPH_MSG_DATA_BIO:
988 		ceph_msg_data_bio_cursor_init(cursor, length);
989 		break;
990 #endif /* CONFIG_BLOCK */
991 	case CEPH_MSG_DATA_BVECS:
992 		ceph_msg_data_bvecs_cursor_init(cursor, length);
993 		break;
994 	case CEPH_MSG_DATA_NONE:
995 	default:
996 		/* BUG(); */
997 		break;
998 	}
999 	cursor->need_crc = true;
1000 }
1001 
1002 void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
1003 			       struct ceph_msg *msg, size_t length)
1004 {
1005 	BUG_ON(!length);
1006 	BUG_ON(length > msg->data_length);
1007 	BUG_ON(!msg->num_data_items);
1008 
1009 	cursor->total_resid = length;
1010 	cursor->data = msg->data;
1011 
1012 	__ceph_msg_data_cursor_init(cursor);
1013 }
1014 
1015 /*
1016  * Return the page containing the next piece to process for a given
1017  * data item, and supply the page offset and length of that piece.
1018  * Indicate whether this is the last piece in this data item.
1019  */
1020 struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1021 				size_t *page_offset, size_t *length)
1022 {
1023 	struct page *page;
1024 
1025 	switch (cursor->data->type) {
1026 	case CEPH_MSG_DATA_PAGELIST:
1027 		page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
1028 		break;
1029 	case CEPH_MSG_DATA_PAGES:
1030 		page = ceph_msg_data_pages_next(cursor, page_offset, length);
1031 		break;
1032 #ifdef CONFIG_BLOCK
1033 	case CEPH_MSG_DATA_BIO:
1034 		page = ceph_msg_data_bio_next(cursor, page_offset, length);
1035 		break;
1036 #endif /* CONFIG_BLOCK */
1037 	case CEPH_MSG_DATA_BVECS:
1038 		page = ceph_msg_data_bvecs_next(cursor, page_offset, length);
1039 		break;
1040 	case CEPH_MSG_DATA_NONE:
1041 	default:
1042 		page = NULL;
1043 		break;
1044 	}
1045 
1046 	BUG_ON(!page);
1047 	BUG_ON(*page_offset + *length > PAGE_SIZE);
1048 	BUG_ON(!*length);
1049 	BUG_ON(*length > cursor->resid);
1050 
1051 	return page;
1052 }
1053 
1054 /*
1055  * Advance the cursor by @bytes.  If that consumes the current piece, the
1056  * cursor moves on to the next one and need_crc is set for the new piece.
1057  */
1058 void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes)
1059 {
1060 	bool new_piece;
1061 
1062 	BUG_ON(bytes > cursor->resid);
1063 	switch (cursor->data->type) {
1064 	case CEPH_MSG_DATA_PAGELIST:
1065 		new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
1066 		break;
1067 	case CEPH_MSG_DATA_PAGES:
1068 		new_piece = ceph_msg_data_pages_advance(cursor, bytes);
1069 		break;
1070 #ifdef CONFIG_BLOCK
1071 	case CEPH_MSG_DATA_BIO:
1072 		new_piece = ceph_msg_data_bio_advance(cursor, bytes);
1073 		break;
1074 #endif /* CONFIG_BLOCK */
1075 	case CEPH_MSG_DATA_BVECS:
1076 		new_piece = ceph_msg_data_bvecs_advance(cursor, bytes);
1077 		break;
1078 	case CEPH_MSG_DATA_NONE:
1079 	default:
1080 		BUG();
1081 		break;
1082 	}
1083 	cursor->total_resid -= bytes;
1084 
1085 	if (!cursor->resid && cursor->total_resid) {
1086 		cursor->data++;
1087 		__ceph_msg_data_cursor_init(cursor);
1088 		new_piece = true;
1089 	}
1090 	cursor->need_crc = new_piece;
1091 }
1092 
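/*
 * Minimal userspace model of the cursor machinery above: the data item is a
 * flat buffer carved into fixed-size "pages", next() reports the current
 * piece (offset within the page plus its length) and advance() consumes
 * some of it, eventually stepping onto the next page.  The demo_* names and
 * the 16-byte page size are illustrative assumptions.
 */
#include <stddef.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE	16

struct demo_cursor {
	const char	*buf;		/* backing data */
	size_t		resid;		/* bytes left in the data item */
	size_t		offset;		/* absolute offset into buf */
};

static const char *demo_next(struct demo_cursor *c, size_t *off, size_t *len)
{
	*off = c->offset % DEMO_PAGE_SIZE;
	*len = DEMO_PAGE_SIZE - *off;
	if (*len > c->resid)
		*len = c->resid;
	return c->buf + (c->offset - *off);	/* start of the current "page" */
}

static void demo_advance(struct demo_cursor *c, size_t bytes)
{
	c->resid -= bytes;
	c->offset += bytes;
}

int main(void)
{
	struct demo_cursor c = { "0123456789abcdefABCDEF", 22, 0 };
	size_t off, len;

	while (c.resid) {
		demo_next(&c, &off, &len);
		printf("piece: offset %zu length %zu\n", off, len);
		demo_advance(&c, len);		/* consume the whole piece */
	}
	return 0;
}
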
1093 u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
1094 		     unsigned int length)
1095 {
1096 	char *kaddr;
1097 
1098 	kaddr = kmap(page);
1099 	BUG_ON(kaddr == NULL);
1100 	crc = crc32c(crc, kaddr + page_offset, length);
1101 	kunmap(page);
1102 
1103 	return crc;
1104 }
1105 
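/*
 * Piecewise CRC sketch matching ceph_crc32c_page() above: feeding the
 * result of one call in as the seed of the next gives the same value as
 * hashing the whole buffer at once, which is why the messenger can
 * checksum message data one page (or piece) at a time.  A bit-at-a-time
 * crc32c (Castagnoli polynomial) stands in for the kernel's crc32c(); the
 * demo_* names are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t demo_crc32c(uint32_t crc, const void *data, size_t len)
{
	const unsigned char *p = data;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const char *msg = "ceph message payload";
	size_t len = strlen(msg);
	uint32_t whole, split;

	whole = demo_crc32c(0, msg, len);
	split = demo_crc32c(demo_crc32c(0, msg, len / 2),
			    msg + len / 2, len - len / 2);
	printf("whole %08x split %08x\n", whole, split);	/* identical */
	return 0;
}
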
1106 bool ceph_addr_is_blank(const struct ceph_entity_addr *addr)
1107 {
1108 	struct sockaddr_storage ss = addr->in_addr; /* align */
1109 	struct in_addr *addr4 = &((struct sockaddr_in *)&ss)->sin_addr;
1110 	struct in6_addr *addr6 = &((struct sockaddr_in6 *)&ss)->sin6_addr;
1111 
1112 	switch (ss.ss_family) {
1113 	case AF_INET:
1114 		return addr4->s_addr == htonl(INADDR_ANY);
1115 	case AF_INET6:
1116 		return ipv6_addr_any(addr6);
1117 	default:
1118 		return true;
1119 	}
1120 }
1121 
1122 int ceph_addr_port(const struct ceph_entity_addr *addr)
1123 {
1124 	switch (get_unaligned(&addr->in_addr.ss_family)) {
1125 	case AF_INET:
1126 		return ntohs(get_unaligned(&((struct sockaddr_in *)&addr->in_addr)->sin_port));
1127 	case AF_INET6:
1128 		return ntohs(get_unaligned(&((struct sockaddr_in6 *)&addr->in_addr)->sin6_port));
1129 	}
1130 	return 0;
1131 }
1132 
1133 void ceph_addr_set_port(struct ceph_entity_addr *addr, int p)
1134 {
1135 	switch (get_unaligned(&addr->in_addr.ss_family)) {
1136 	case AF_INET:
1137 		put_unaligned(htons(p), &((struct sockaddr_in *)&addr->in_addr)->sin_port);
1138 		break;
1139 	case AF_INET6:
1140 		put_unaligned(htons(p), &((struct sockaddr_in6 *)&addr->in_addr)->sin6_port);
1141 		break;
1142 	}
1143 }
1144 
1145 /*
1146  * Unlike other *_pton function semantics, zero indicates success.
1147  */
1148 static int ceph_pton(const char *str, size_t len, struct ceph_entity_addr *addr,
1149 		char delim, const char **ipend)
1150 {
1151 	memset(&addr->in_addr, 0, sizeof(addr->in_addr));
1152 
1153 	if (in4_pton(str, len, (u8 *)&((struct sockaddr_in *)&addr->in_addr)->sin_addr.s_addr, delim, ipend)) {
1154 		put_unaligned(AF_INET, &addr->in_addr.ss_family);
1155 		return 0;
1156 	}
1157 
1158 	if (in6_pton(str, len, (u8 *)&((struct sockaddr_in6 *)&addr->in_addr)->sin6_addr.s6_addr, delim, ipend)) {
1159 		put_unaligned(AF_INET6, &addr->in_addr.ss_family);
1160 		return 0;
1161 	}
1162 
1163 	return -EINVAL;
1164 }
1165 
1166 /*
1167  * Extract hostname string and resolve using kernel DNS facility.
1168  */
1169 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1170 static int ceph_dns_resolve_name(const char *name, size_t namelen,
1171 		struct ceph_entity_addr *addr, char delim, const char **ipend)
1172 {
1173 	const char *end, *delim_p;
1174 	char *colon_p, *ip_addr = NULL;
1175 	int ip_len, ret;
1176 
1177 	/*
1178 	 * The end of the hostname occurs immediately preceding the delimiter or
1179 	 * the port marker (':') where the delimiter takes precedence.
1180 	 */
1181 	delim_p = memchr(name, delim, namelen);
1182 	colon_p = memchr(name, ':', namelen);
1183 
1184 	if (delim_p && colon_p)
1185 		end = delim_p < colon_p ? delim_p : colon_p;
1186 	else if (!delim_p && colon_p)
1187 		end = colon_p;
1188 	else {
1189 		end = delim_p;
1190 		if (!end) /* case: hostname:/ */
1191 			end = name + namelen;
1192 	}
1193 
1194 	if (end <= name)
1195 		return -EINVAL;
1196 
1197 	/* do dns_resolve upcall */
1198 	ip_len = dns_query(current->nsproxy->net_ns,
1199 			   NULL, name, end - name, NULL, &ip_addr, NULL, false);
1200 	if (ip_len > 0)
1201 		ret = ceph_pton(ip_addr, ip_len, addr, -1, NULL);
1202 	else
1203 		ret = -ESRCH;
1204 
1205 	kfree(ip_addr);
1206 
1207 	*ipend = end;
1208 
1209 	pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1210 			ret, ret ? "failed" : ceph_pr_addr(addr));
1211 
1212 	return ret;
1213 }
1214 #else
1215 static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1216 		struct ceph_entity_addr *addr, char delim, const char **ipend)
1217 {
1218 	return -EINVAL;
1219 }
1220 #endif
1221 
1222 /*
1223  * Parse a server name (IP or hostname). If a valid IP address is not found
1224  * then try to extract a hostname to resolve using userspace DNS upcall.
1225  */
1226 static int ceph_parse_server_name(const char *name, size_t namelen,
1227 		struct ceph_entity_addr *addr, char delim, const char **ipend)
1228 {
1229 	int ret;
1230 
1231 	ret = ceph_pton(name, namelen, addr, delim, ipend);
1232 	if (ret)
1233 		ret = ceph_dns_resolve_name(name, namelen, addr, delim, ipend);
1234 
1235 	return ret;
1236 }
1237 
1238 /*
1239  * Parse an ip[:port] list into an addr array.  Use the default
1240  * monitor port if a port isn't specified.
1241  */
1242 int ceph_parse_ips(const char *c, const char *end,
1243 		   struct ceph_entity_addr *addr,
1244 		   int max_count, int *count, char delim)
1245 {
1246 	int i, ret = -EINVAL;
1247 	const char *p = c;
1248 
1249 	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1250 	for (i = 0; i < max_count; i++) {
1251 		char cur_delim = delim;
1252 		const char *ipend;
1253 		int port;
1254 
1255 		if (*p == '[') {
1256 			cur_delim = ']';
1257 			p++;
1258 		}
1259 
1260 		ret = ceph_parse_server_name(p, end - p, &addr[i], cur_delim,
1261 					     &ipend);
1262 		if (ret)
1263 			goto bad;
1264 		ret = -EINVAL;
1265 
1266 		p = ipend;
1267 
1268 		if (cur_delim == ']') {
1269 			if (*p != ']') {
1270 				dout("missing matching ']'\n");
1271 				goto bad;
1272 			}
1273 			p++;
1274 		}
1275 
1276 		/* port? */
1277 		if (p < end && *p == ':') {
1278 			port = 0;
1279 			p++;
1280 			while (p < end && *p >= '0' && *p <= '9') {
1281 				port = (port * 10) + (*p - '0');
1282 				p++;
1283 			}
1284 			if (port == 0)
1285 				port = CEPH_MON_PORT;
1286 			else if (port > 65535)
1287 				goto bad;
1288 		} else {
1289 			port = CEPH_MON_PORT;
1290 		}
1291 
1292 		ceph_addr_set_port(&addr[i], port);
1293 		/*
1294 		 * We want the type to be set according to ms_mode
1295 		 * option, but options are normally parsed after mon
1296 		 * addresses.  Rather than complicating parsing, set
1297 		 * to LEGACY and override in build_initial_monmap()
1298 		 * for mon addresses and ceph_messenger_init() for
1299 		 * ip option.
1300 		 */
1301 		addr[i].type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
1302 		addr[i].nonce = 0;
1303 
1304 		dout("%s got %s\n", __func__, ceph_pr_addr(&addr[i]));
1305 
1306 		if (p == end)
1307 			break;
1308 		if (*p != delim)
1309 			goto bad;
1310 		p++;
1311 	}
1312 
1313 	if (p != end)
1314 		goto bad;
1315 
1316 	if (count)
1317 		*count = i + 1;
1318 	return 0;
1319 
1320 bad:
1321 	return ret;
1322 }
1323 
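/*
 * Userspace sketch of the "ip[:port] list" format accepted by
 * ceph_parse_ips() above, e.g. "1.2.3.4:6789,[::1],10.0.0.1:3300":
 * bracketed IPv6 addresses, an optional :port suffix, and a default port
 * when none is given.  It only splits the string and picks the port;
 * address validation is left out.  The demo_* names and the 6789 default
 * are illustrative assumptions.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_DEFAULT_PORT	6789

int main(void)
{
	char list[] = "1.2.3.4:6789,[::1],10.0.0.1:3300";
	char *entry;

	for (entry = strtok(list, ","); entry; entry = strtok(NULL, ",")) {
		char *host = entry, *p;
		int port = DEMO_DEFAULT_PORT;

		if (*host == '[') {			/* [v6addr](:port)? */
			host++;
			p = strchr(host, ']');
			if (!p)
				continue;		/* missing ']' */
			*p++ = '\0';
			if (*p == ':')
				port = atoi(p + 1);
		} else {				/* v4addr(:port)? */
			p = strrchr(host, ':');
			if (p) {
				*p = '\0';
				port = atoi(p + 1);
			}
		}
		printf("addr '%s' port %d\n", host, port);
	}
	return 0;
}
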
1324 /*
1325  * Process message.  This happens in the worker thread.  The callback should
1326  * be careful not to do anything that waits on other incoming messages or it
1327  * may deadlock.
1328  */
1329 void ceph_con_process_message(struct ceph_connection *con)
1330 {
1331 	struct ceph_msg *msg = con->in_msg;
1332 
1333 	BUG_ON(con->in_msg->con != con);
1334 	con->in_msg = NULL;
1335 
1336 	/* if first message, set peer_name */
1337 	if (con->peer_name.type == 0)
1338 		con->peer_name = msg->hdr.src;
1339 
1340 	con->in_seq++;
1341 	mutex_unlock(&con->mutex);
1342 
1343 	dout("===== %p %llu from %s%lld %d=%s len %d+%d+%d (%u %u %u) =====\n",
1344 	     msg, le64_to_cpu(msg->hdr.seq),
1345 	     ENTITY_NAME(msg->hdr.src),
1346 	     le16_to_cpu(msg->hdr.type),
1347 	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1348 	     le32_to_cpu(msg->hdr.front_len),
1349 	     le32_to_cpu(msg->hdr.middle_len),
1350 	     le32_to_cpu(msg->hdr.data_len),
1351 	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
1352 	con->ops->dispatch(con, msg);
1353 
1354 	mutex_lock(&con->mutex);
1355 }
1356 
1357 /*
1358  * Atomically queue work on a connection after the specified delay.
1359  * Bump @con reference to avoid races with connection teardown.
1360  * Returns 0 if work was queued, or an error code otherwise.
1361  */
1362 static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
1363 {
1364 	if (!con->ops->get(con)) {
1365 		dout("%s %p ref count 0\n", __func__, con);
1366 		return -ENOENT;
1367 	}
1368 
1369 	if (delay >= HZ)
1370 		delay = round_jiffies_relative(delay);
1371 
1372 	dout("%s %p %lu\n", __func__, con, delay);
1373 	if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
1374 		dout("%s %p - already queued\n", __func__, con);
1375 		con->ops->put(con);
1376 		return -EBUSY;
1377 	}
1378 
1379 	return 0;
1380 }
1381 
1382 static void queue_con(struct ceph_connection *con)
1383 {
1384 	(void) queue_con_delay(con, 0);
1385 }
1386 
1387 static void cancel_con(struct ceph_connection *con)
1388 {
1389 	if (cancel_delayed_work(&con->work)) {
1390 		dout("%s %p\n", __func__, con);
1391 		con->ops->put(con);
1392 	}
1393 }
1394 
1395 static bool con_sock_closed(struct ceph_connection *con)
1396 {
1397 	if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_SOCK_CLOSED))
1398 		return false;
1399 
1400 #define CASE(x)								\
1401 	case CEPH_CON_S_ ## x:						\
1402 		con->error_msg = "socket closed (con state " #x ")";	\
1403 		break;
1404 
1405 	switch (con->state) {
1406 	CASE(CLOSED);
1407 	CASE(PREOPEN);
1408 	CASE(V1_BANNER);
1409 	CASE(V1_CONNECT_MSG);
1410 	CASE(V2_BANNER_PREFIX);
1411 	CASE(V2_BANNER_PAYLOAD);
1412 	CASE(V2_HELLO);
1413 	CASE(V2_AUTH);
1414 	CASE(V2_AUTH_SIGNATURE);
1415 	CASE(V2_SESSION_CONNECT);
1416 	CASE(V2_SESSION_RECONNECT);
1417 	CASE(OPEN);
1418 	CASE(STANDBY);
1419 	default:
1420 		BUG();
1421 	}
1422 #undef CASE
1423 
1424 	return true;
1425 }
1426 
1427 static bool con_backoff(struct ceph_connection *con)
1428 {
1429 	int ret;
1430 
1431 	if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_BACKOFF))
1432 		return false;
1433 
1434 	ret = queue_con_delay(con, con->delay);
1435 	if (ret) {
1436 		dout("%s: con %p FAILED to back off %lu\n", __func__,
1437 			con, con->delay);
1438 		BUG_ON(ret == -ENOENT);
1439 		ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
1440 	}
1441 
1442 	return true;
1443 }
1444 
1445 /* Finish fault handling; con->mutex must *not* be held here */
1446 
1447 static void con_fault_finish(struct ceph_connection *con)
1448 {
1449 	dout("%s %p\n", __func__, con);
1450 
1451 	/*
1452 	 * in case we faulted due to authentication, invalidate our
1453 	 * current tickets so that we can get new ones.
1454 	 */
1455 	if (con->v1.auth_retry) {
1456 		dout("auth_retry %d, invalidating\n", con->v1.auth_retry);
1457 		if (con->ops->invalidate_authorizer)
1458 			con->ops->invalidate_authorizer(con);
1459 		con->v1.auth_retry = 0;
1460 	}
1461 
1462 	if (con->ops->fault)
1463 		con->ops->fault(con);
1464 }
1465 
1466 /*
1467  * Do some work on a connection.  Drop a connection ref when we're done.
1468  */
1469 static void ceph_con_workfn(struct work_struct *work)
1470 {
1471 	struct ceph_connection *con = container_of(work, struct ceph_connection,
1472 						   work.work);
1473 	bool fault;
1474 
1475 	mutex_lock(&con->mutex);
1476 	while (true) {
1477 		int ret;
1478 
1479 		if ((fault = con_sock_closed(con))) {
1480 			dout("%s: con %p SOCK_CLOSED\n", __func__, con);
1481 			break;
1482 		}
1483 		if (con_backoff(con)) {
1484 			dout("%s: con %p BACKOFF\n", __func__, con);
1485 			break;
1486 		}
1487 		if (con->state == CEPH_CON_S_STANDBY) {
1488 			dout("%s: con %p STANDBY\n", __func__, con);
1489 			break;
1490 		}
1491 		if (con->state == CEPH_CON_S_CLOSED) {
1492 			dout("%s: con %p CLOSED\n", __func__, con);
1493 			BUG_ON(con->sock);
1494 			break;
1495 		}
1496 		if (con->state == CEPH_CON_S_PREOPEN) {
1497 			dout("%s: con %p PREOPEN\n", __func__, con);
1498 			BUG_ON(con->sock);
1499 		}
1500 
1501 		if (ceph_msgr2(from_msgr(con->msgr)))
1502 			ret = ceph_con_v2_try_read(con);
1503 		else
1504 			ret = ceph_con_v1_try_read(con);
1505 		if (ret < 0) {
1506 			if (ret == -EAGAIN)
1507 				continue;
1508 			if (!con->error_msg)
1509 				con->error_msg = "socket error on read";
1510 			fault = true;
1511 			break;
1512 		}
1513 
1514 		if (ceph_msgr2(from_msgr(con->msgr)))
1515 			ret = ceph_con_v2_try_write(con);
1516 		else
1517 			ret = ceph_con_v1_try_write(con);
1518 		if (ret < 0) {
1519 			if (ret == -EAGAIN)
1520 				continue;
1521 			if (!con->error_msg)
1522 				con->error_msg = "socket error on write";
1523 			fault = true;
1524 		}
1525 
1526 		break;	/* If we make it to here, we're done */
1527 	}
1528 	if (fault)
1529 		con_fault(con);
1530 	mutex_unlock(&con->mutex);
1531 
1532 	if (fault)
1533 		con_fault_finish(con);
1534 
1535 	con->ops->put(con);
1536 }
1537 
1538 /*
1539  * Generic error/fault handler.  A retry mechanism is used with
1540  * exponential backoff
1541  */
1542 static void con_fault(struct ceph_connection *con)
1543 {
1544 	dout("fault %p state %d to peer %s\n",
1545 	     con, con->state, ceph_pr_addr(&con->peer_addr));
1546 
1547 	pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
1548 		ceph_pr_addr(&con->peer_addr), con->error_msg);
1549 	con->error_msg = NULL;
1550 
1551 	WARN_ON(con->state == CEPH_CON_S_STANDBY ||
1552 		con->state == CEPH_CON_S_CLOSED);
1553 
1554 	ceph_con_reset_protocol(con);
1555 
1556 	if (ceph_con_flag_test(con, CEPH_CON_F_LOSSYTX)) {
1557 		dout("fault on LOSSYTX channel, marking CLOSED\n");
1558 		con->state = CEPH_CON_S_CLOSED;
1559 		return;
1560 	}
1561 
1562 	/* Requeue anything that hasn't been acked */
1563 	list_splice_init(&con->out_sent, &con->out_queue);
1564 
1565 	/* If there are no messages queued or keepalive pending, place
1566 	 * the connection in a STANDBY state */
1567 	if (list_empty(&con->out_queue) &&
1568 	    !ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
1569 		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
1570 		ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
1571 		con->state = CEPH_CON_S_STANDBY;
1572 	} else {
1573 		/* retry after a delay. */
1574 		con->state = CEPH_CON_S_PREOPEN;
1575 		if (!con->delay) {
1576 			con->delay = BASE_DELAY_INTERVAL;
1577 		} else if (con->delay < MAX_DELAY_INTERVAL) {
1578 			con->delay *= 2;
1579 			if (con->delay > MAX_DELAY_INTERVAL)
1580 				con->delay = MAX_DELAY_INTERVAL;
1581 		}
1582 		ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
1583 		queue_con(con);
1584 	}
1585 }
1586 
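/*
 * The retry delay sequence produced by con_fault() above: start at
 * BASE_DELAY_INTERVAL, double on every further fault and clamp at
 * MAX_DELAY_INTERVAL.  A userspace sketch with made-up millisecond values;
 * the real intervals are defined in jiffies elsewhere in libceph.
 */
#include <stdio.h>

#define DEMO_BASE_DELAY_MS	1000
#define DEMO_MAX_DELAY_MS	(5 * 60 * 1000)

int main(void)
{
	unsigned long delay = 0;

	for (int fault = 1; fault <= 12; fault++) {
		if (!delay) {
			delay = DEMO_BASE_DELAY_MS;
		} else if (delay < DEMO_MAX_DELAY_MS) {
			delay *= 2;
			if (delay > DEMO_MAX_DELAY_MS)
				delay = DEMO_MAX_DELAY_MS;
		}
		printf("fault %2d -> retry in %lu ms\n", fault, delay);
	}
	return 0;
}
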
1587 void ceph_messenger_reset_nonce(struct ceph_messenger *msgr)
1588 {
1589 	u32 nonce = le32_to_cpu(msgr->inst.addr.nonce) + 1000000;
1590 	msgr->inst.addr.nonce = cpu_to_le32(nonce);
1591 	ceph_encode_my_addr(msgr);
1592 }
1593 
1594 /*
1595  * initialize a new messenger instance
1596  */
1597 void ceph_messenger_init(struct ceph_messenger *msgr,
1598 			 struct ceph_entity_addr *myaddr)
1599 {
1600 	spin_lock_init(&msgr->global_seq_lock);
1601 
1602 	if (myaddr) {
1603 		memcpy(&msgr->inst.addr.in_addr, &myaddr->in_addr,
1604 		       sizeof(msgr->inst.addr.in_addr));
1605 		ceph_addr_set_port(&msgr->inst.addr, 0);
1606 	}
1607 
1608 	/*
1609 	 * Since nautilus, clients are identified using type ANY.
1610 	 * For msgr1, ceph_encode_banner_addr() munges it to NONE.
1611 	 */
1612 	msgr->inst.addr.type = CEPH_ENTITY_ADDR_TYPE_ANY;
1613 
1614 	/* generate a random non-zero nonce */
1615 	do {
1616 		get_random_bytes(&msgr->inst.addr.nonce,
1617 				 sizeof(msgr->inst.addr.nonce));
1618 	} while (!msgr->inst.addr.nonce);
1619 	ceph_encode_my_addr(msgr);
1620 
1621 	atomic_set(&msgr->stopping, 0);
1622 	write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
1623 
1624 	dout("%s %p\n", __func__, msgr);
1625 }
1626 
1627 void ceph_messenger_fini(struct ceph_messenger *msgr)
1628 {
1629 	put_net(read_pnet(&msgr->net));
1630 }
1631 
1632 static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
1633 {
1634 	if (msg->con)
1635 		msg->con->ops->put(msg->con);
1636 
1637 	msg->con = con ? con->ops->get(con) : NULL;
1638 	BUG_ON(msg->con != con);
1639 }
1640 
1641 static void clear_standby(struct ceph_connection *con)
1642 {
1643 	/* come back from STANDBY? */
1644 	if (con->state == CEPH_CON_S_STANDBY) {
1645 		dout("clear_standby %p and ++connect_seq\n", con);
1646 		con->state = CEPH_CON_S_PREOPEN;
1647 		con->v1.connect_seq++;
1648 		WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING));
1649 		WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING));
1650 	}
1651 }
1652 
1653 /*
1654  * Queue up an outgoing message on the given connection.
1655  *
1656  * Consumes a ref on @msg.
1657  */
1658 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
1659 {
1660 	/* set src+dst */
1661 	msg->hdr.src = con->msgr->inst.name;
1662 	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
1663 	msg->needs_out_seq = true;
1664 
1665 	mutex_lock(&con->mutex);
1666 
1667 	if (con->state == CEPH_CON_S_CLOSED) {
1668 		dout("con_send %p closed, dropping %p\n", con, msg);
1669 		ceph_msg_put(msg);
1670 		mutex_unlock(&con->mutex);
1671 		return;
1672 	}
1673 
1674 	msg_con_set(msg, con);
1675 
1676 	BUG_ON(!list_empty(&msg->list_head));
1677 	list_add_tail(&msg->list_head, &con->out_queue);
1678 	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
1679 	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
1680 	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1681 	     le32_to_cpu(msg->hdr.front_len),
1682 	     le32_to_cpu(msg->hdr.middle_len),
1683 	     le32_to_cpu(msg->hdr.data_len));
1684 
1685 	clear_standby(con);
1686 	mutex_unlock(&con->mutex);
1687 
1688 	/* if there wasn't anything waiting to send before, queue
1689 	 * new work */
1690 	if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
1691 		queue_con(con);
1692 }
1693 EXPORT_SYMBOL(ceph_con_send);
1694 
1695 /*
1696  * Revoke a message that was previously queued for send
1697  */
1698 void ceph_msg_revoke(struct ceph_msg *msg)
1699 {
1700 	struct ceph_connection *con = msg->con;
1701 
1702 	if (!con) {
1703 		dout("%s msg %p null con\n", __func__, msg);
1704 		return;		/* Message not in our possession */
1705 	}
1706 
1707 	mutex_lock(&con->mutex);
1708 	if (list_empty(&msg->list_head)) {
1709 		WARN_ON(con->out_msg == msg);
1710 		dout("%s con %p msg %p not linked\n", __func__, con, msg);
1711 		mutex_unlock(&con->mutex);
1712 		return;
1713 	}
1714 
1715 	dout("%s con %p msg %p was linked\n", __func__, con, msg);
1716 	msg->hdr.seq = 0;
1717 	ceph_msg_remove(msg);
1718 
1719 	if (con->out_msg == msg) {
1720 		WARN_ON(con->state != CEPH_CON_S_OPEN);
1721 		dout("%s con %p msg %p was sending\n", __func__, con, msg);
1722 		if (ceph_msgr2(from_msgr(con->msgr)))
1723 			ceph_con_v2_revoke(con);
1724 		else
1725 			ceph_con_v1_revoke(con);
1726 		ceph_msg_put(con->out_msg);
1727 		con->out_msg = NULL;
1728 	} else {
1729 		dout("%s con %p msg %p not current, out_msg %p\n", __func__,
1730 		     con, msg, con->out_msg);
1731 	}
1732 	mutex_unlock(&con->mutex);
1733 }
1734 
1735 /*
1736  * Revoke a message that we may be reading data into
1737  */
1738 void ceph_msg_revoke_incoming(struct ceph_msg *msg)
1739 {
1740 	struct ceph_connection *con = msg->con;
1741 
1742 	if (!con) {
1743 		dout("%s msg %p null con\n", __func__, msg);
1744 		return;		/* Message not in our possession */
1745 	}
1746 
1747 	mutex_lock(&con->mutex);
1748 	if (con->in_msg == msg) {
1749 		WARN_ON(con->state != CEPH_CON_S_OPEN);
1750 		dout("%s con %p msg %p was recving\n", __func__, con, msg);
1751 		if (ceph_msgr2(from_msgr(con->msgr)))
1752 			ceph_con_v2_revoke_incoming(con);
1753 		else
1754 			ceph_con_v1_revoke_incoming(con);
1755 		ceph_msg_put(con->in_msg);
1756 		con->in_msg = NULL;
1757 	} else {
1758 		dout("%s con %p msg %p not current, in_msg %p\n", __func__,
1759 		     con, msg, con->in_msg);
1760 	}
1761 	mutex_unlock(&con->mutex);
1762 }
1763 
1764 /*
1765  * Queue a keepalive byte to ensure the tcp connection is alive.
1766  */
1767 void ceph_con_keepalive(struct ceph_connection *con)
1768 {
1769 	dout("con_keepalive %p\n", con);
1770 	mutex_lock(&con->mutex);
1771 	clear_standby(con);
1772 	ceph_con_flag_set(con, CEPH_CON_F_KEEPALIVE_PENDING);
1773 	mutex_unlock(&con->mutex);
1774 
1775 	if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
1776 		queue_con(con);
1777 }
1778 EXPORT_SYMBOL(ceph_con_keepalive);
1779 
1780 bool ceph_con_keepalive_expired(struct ceph_connection *con,
1781 			       unsigned long interval)
1782 {
1783 	if (interval > 0 &&
1784 	    (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
1785 		struct timespec64 now;
1786 		struct timespec64 ts;
1787 		ktime_get_real_ts64(&now);
1788 		jiffies_to_timespec64(interval, &ts);
1789 		ts = timespec64_add(con->last_keepalive_ack, ts);
1790 		return timespec64_compare(&now, &ts) >= 0;
1791 	}
1792 	return false;
1793 }
1794 
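/*
 * Userspace sketch of the expiry test in ceph_con_keepalive_expired()
 * above: the connection is considered stale once "now" has moved past
 * last_ack + interval.  The demo_* names and the 45s/30s figures are
 * illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool demo_keepalive_expired(struct timespec last_ack, time_t interval)
{
	struct timespec now;

	clock_gettime(CLOCK_REALTIME, &now);
	if (now.tv_sec != last_ack.tv_sec + interval)
		return now.tv_sec > last_ack.tv_sec + interval;
	return now.tv_nsec >= last_ack.tv_nsec;
}

int main(void)
{
	struct timespec last;

	clock_gettime(CLOCK_REALTIME, &last);
	last.tv_sec -= 45;			/* last ack 45 seconds ago */
	printf("expired: %d\n", demo_keepalive_expired(last, 30));	/* 1 */
	return 0;
}
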
1795 static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
1796 {
1797 	BUG_ON(msg->num_data_items >= msg->max_data_items);
1798 	return &msg->data[msg->num_data_items++];
1799 }
1800 
1801 static void ceph_msg_data_destroy(struct ceph_msg_data *data)
1802 {
1803 	if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
1804 		int num_pages = calc_pages_for(data->alignment, data->length);
1805 		ceph_release_page_vector(data->pages, num_pages);
1806 	} else if (data->type == CEPH_MSG_DATA_PAGELIST) {
1807 		ceph_pagelist_release(data->pagelist);
1808 	}
1809 }
1810 
1811 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
1812 			     size_t length, size_t alignment, bool own_pages)
1813 {
1814 	struct ceph_msg_data *data;
1815 
1816 	BUG_ON(!pages);
1817 	BUG_ON(!length);
1818 
1819 	data = ceph_msg_data_add(msg);
1820 	data->type = CEPH_MSG_DATA_PAGES;
1821 	data->pages = pages;
1822 	data->length = length;
1823 	data->alignment = alignment & ~PAGE_MASK;
1824 	data->own_pages = own_pages;
1825 
1826 	msg->data_length += length;
1827 }
1828 EXPORT_SYMBOL(ceph_msg_data_add_pages);
1829 
1830 void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
1831 				struct ceph_pagelist *pagelist)
1832 {
1833 	struct ceph_msg_data *data;
1834 
1835 	BUG_ON(!pagelist);
1836 	BUG_ON(!pagelist->length);
1837 
1838 	data = ceph_msg_data_add(msg);
1839 	data->type = CEPH_MSG_DATA_PAGELIST;
1840 	refcount_inc(&pagelist->refcnt);
1841 	data->pagelist = pagelist;
1842 
1843 	msg->data_length += pagelist->length;
1844 }
1845 EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
1846 
1847 #ifdef	CONFIG_BLOCK
1848 void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
1849 			   u32 length)
1850 {
1851 	struct ceph_msg_data *data;
1852 
1853 	data = ceph_msg_data_add(msg);
1854 	data->type = CEPH_MSG_DATA_BIO;
1855 	data->bio_pos = *bio_pos;
1856 	data->bio_length = length;
1857 
1858 	msg->data_length += length;
1859 }
1860 EXPORT_SYMBOL(ceph_msg_data_add_bio);
1861 #endif	/* CONFIG_BLOCK */
1862 
1863 void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
1864 			     struct ceph_bvec_iter *bvec_pos)
1865 {
1866 	struct ceph_msg_data *data;
1867 
1868 	data = ceph_msg_data_add(msg);
1869 	data->type = CEPH_MSG_DATA_BVECS;
1870 	data->bvec_pos = *bvec_pos;
1871 
1872 	msg->data_length += bvec_pos->iter.bi_size;
1873 }
1874 EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
1875 
1876 /*
1877  * construct a new message with given type, size
1878  * the new msg has a ref count of 1.
1879  */
1880 struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
1881 			       gfp_t flags, bool can_fail)
1882 {
1883 	struct ceph_msg *m;
1884 
1885 	m = kmem_cache_zalloc(ceph_msg_cache, flags);
1886 	if (m == NULL)
1887 		goto out;
1888 
1889 	m->hdr.type = cpu_to_le16(type);
1890 	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
1891 	m->hdr.front_len = cpu_to_le32(front_len);
1892 
1893 	INIT_LIST_HEAD(&m->list_head);
1894 	kref_init(&m->kref);
1895 
1896 	/* front */
1897 	if (front_len) {
1898 		m->front.iov_base = kvmalloc(front_len, flags);
1899 		if (m->front.iov_base == NULL) {
1900 			dout("ceph_msg_new can't allocate %d bytes\n",
1901 			     front_len);
1902 			goto out2;
1903 		}
1904 	} else {
1905 		m->front.iov_base = NULL;
1906 	}
1907 	m->front_alloc_len = m->front.iov_len = front_len;
1908 
1909 	if (max_data_items) {
1910 		m->data = kmalloc_array(max_data_items, sizeof(*m->data),
1911 					flags);
1912 		if (!m->data)
1913 			goto out2;
1914 
1915 		m->max_data_items = max_data_items;
1916 	}
1917 
1918 	dout("ceph_msg_new %p front %d\n", m, front_len);
1919 	return m;
1920 
1921 out2:
1922 	ceph_msg_put(m);
1923 out:
1924 	if (!can_fail) {
1925 		pr_err("msg_new can't create type %d front %d\n", type,
1926 		       front_len);
1927 		WARN_ON(1);
1928 	} else {
1929 		dout("msg_new can't create type %d front %d\n", type,
1930 		     front_len);
1931 	}
1932 	return NULL;
1933 }
1934 EXPORT_SYMBOL(ceph_msg_new2);
1935 
1936 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
1937 			      bool can_fail)
1938 {
1939 	return ceph_msg_new2(type, front_len, 0, flags, can_fail);
1940 }
1941 EXPORT_SYMBOL(ceph_msg_new);
1942 
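/*
 * Illustrative sketch (not part of the original source): allocating and
 * releasing a message.  The message type, the 128-byte front and the
 * single data item slot are made-up values; with can_fail == false a
 * NULL return is still possible but is additionally reported via WARN.
 *
 *	struct ceph_msg *msg;
 *
 *	msg = ceph_msg_new2(CEPH_MSG_OSD_OP, 128, 1, GFP_NOFS, false);
 *	if (!msg)
 *		return -ENOMEM;
 *
 *	// ... fill msg->front.iov_base, attach data items ...
 *
 *	ceph_msg_put(msg);	// drops the initial reference
 */
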
1943 /*
1944  * Allocate "middle" portion of a message, if it is needed and wasn't
1945  * allocated by alloc_msg.  This allows us to read a small fixed-size
1946  * per-type header in the front and then gracefully fail (i.e.,
1947  * propagate the error to the caller based on info in the front) when
1948  * the middle is too large.
1949  */
1950 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
1951 {
1952 	int type = le16_to_cpu(msg->hdr.type);
1953 	int middle_len = le32_to_cpu(msg->hdr.middle_len);
1954 
1955 	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
1956 	     ceph_msg_type_name(type), middle_len);
1957 	BUG_ON(!middle_len);
1958 	BUG_ON(msg->middle);
1959 
1960 	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
1961 	if (!msg->middle)
1962 		return -ENOMEM;
1963 	return 0;
1964 }
1965 
1966 /*
1967  * Allocate a message for receiving an incoming message on a
1968  * connection, and save the result in con->in_msg.  Uses the
1969  * connection's private alloc_msg op if available.
1970  *
1971  * Returns 0 on success, or a negative error code.
1972  *
1973  * On success, if we set *skip = 1:
1974  *  - the next message should be skipped and ignored.
1975  *  - con->in_msg == NULL
1976  * or if we set *skip = 0:
1977  *  - con->in_msg is non-null.
1978  * On error (ENOMEM, EAGAIN, ...),
1979  *  - con->in_msg == NULL
1980  */
1981 int ceph_con_in_msg_alloc(struct ceph_connection *con,
1982 			  struct ceph_msg_header *hdr, int *skip)
1983 {
1984 	int middle_len = le32_to_cpu(hdr->middle_len);
1985 	struct ceph_msg *msg;
1986 	int ret = 0;
1987 
1988 	BUG_ON(con->in_msg != NULL);
1989 	BUG_ON(!con->ops->alloc_msg);
1990 
1991 	mutex_unlock(&con->mutex);
1992 	msg = con->ops->alloc_msg(con, hdr, skip);
1993 	mutex_lock(&con->mutex);
1994 	if (con->state != CEPH_CON_S_OPEN) {
1995 		if (msg)
1996 			ceph_msg_put(msg);
1997 		return -EAGAIN;
1998 	}
1999 	if (msg) {
2000 		BUG_ON(*skip);
2001 		msg_con_set(msg, con);
2002 		con->in_msg = msg;
2003 	} else {
2004 		/*
2005 		 * Null message pointer means either we should skip
2006 		 * this message or we couldn't allocate memory.  The
2007 		 * former is not an error.
2008 		 */
2009 		if (*skip)
2010 			return 0;
2011 
2012 		con->error_msg = "error allocating memory for incoming message";
2013 		return -ENOMEM;
2014 	}
2015 	memcpy(&con->in_msg->hdr, hdr, sizeof(*hdr));
2016 
2017 	if (middle_len && !con->in_msg->middle) {
2018 		ret = ceph_alloc_middle(con, con->in_msg);
2019 		if (ret < 0) {
2020 			ceph_msg_put(con->in_msg);
2021 			con->in_msg = NULL;
2022 		}
2023 	}
2024 
2025 	return ret;
2026 }
2027 
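/*
 * Illustrative sketch (not part of the original source): how a read
 * path might act on the ceph_con_in_msg_alloc() contract described
 * above.  The discard/receive helpers are placeholders, not messenger
 * functions.
 *
 *	int skip = 0;
 *	int ret = ceph_con_in_msg_alloc(con, hdr, &skip);
 *
 *	if (ret < 0)
 *		return ret;		// fault: -ENOMEM, -EAGAIN, ...
 *	if (skip)
 *		return discard_incoming_data(con);	// hypothetical
 *
 *	// ret == 0 && !skip: con->in_msg is set, receive into it
 *	return receive_data_into(con->in_msg);		// hypothetical
 */
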
2028 void ceph_con_get_out_msg(struct ceph_connection *con)
2029 {
2030 	struct ceph_msg *msg;
2031 
2032 	BUG_ON(list_empty(&con->out_queue));
2033 	msg = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
2034 	WARN_ON(msg->con != con);
2035 
2036 	/*
2037 	 * Put the message on the "sent" list using a ref from ceph_con_send().
2038 	 * It is put when the message is acked or revoked.
2039 	 */
2040 	list_move_tail(&msg->list_head, &con->out_sent);
2041 
2042 	/*
2043 	 * Only assign outgoing seq # if we haven't sent this message
2044 	 * yet.  If it is requeued, resend with its original seq.
2045 	 */
2046 	if (msg->needs_out_seq) {
2047 		msg->hdr.seq = cpu_to_le64(++con->out_seq);
2048 		msg->needs_out_seq = false;
2049 
2050 		if (con->ops->reencode_message)
2051 			con->ops->reencode_message(msg);
2052 	}
2053 
2054 	/*
2055 	 * Get a ref for out_msg.  It is put when we are done sending the
2056 	 * message or in case of a fault.
2057 	 */
2058 	WARN_ON(con->out_msg);
2059 	con->out_msg = ceph_msg_get(msg);
2060 }
2061 
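/*
 * Illustrative sketch (not part of the original source): the intended
 * out_msg life cycle, as described by the comments above.  Everything
 * other than ceph_con_get_out_msg() and ceph_msg_put() is a placeholder
 * for the messenger's write path.
 *
 *	ceph_con_get_out_msg(con);	// takes a ref, sets con->out_msg
 *	write_message(con->out_msg);	// hypothetical send step
 *	ceph_msg_put(con->out_msg);	// done sending (or faulted)
 *	con->out_msg = NULL;
 */
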
2062 /*
2063  * Free a generically kmalloc'd message.
2064  */
2065 static void ceph_msg_free(struct ceph_msg *m)
2066 {
2067 	dout("%s %p\n", __func__, m);
2068 	kvfree(m->front.iov_base);
2069 	kfree(m->data);
2070 	kmem_cache_free(ceph_msg_cache, m);
2071 }
2072 
2073 static void ceph_msg_release(struct kref *kref)
2074 {
2075 	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
2076 	int i;
2077 
2078 	dout("%s %p\n", __func__, m);
2079 	WARN_ON(!list_empty(&m->list_head));
2080 
2081 	msg_con_set(m, NULL);
2082 
2083 	/* drop middle, data, if any */
2084 	if (m->middle) {
2085 		ceph_buffer_put(m->middle);
2086 		m->middle = NULL;
2087 	}
2088 
2089 	for (i = 0; i < m->num_data_items; i++)
2090 		ceph_msg_data_destroy(&m->data[i]);
2091 
2092 	if (m->pool)
2093 		ceph_msgpool_put(m->pool, m);
2094 	else
2095 		ceph_msg_free(m);
2096 }
2097 
2098 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
2099 {
2100 	dout("%s %p (was %d)\n", __func__, msg,
2101 	     kref_read(&msg->kref));
2102 	kref_get(&msg->kref);
2103 	return msg;
2104 }
2105 EXPORT_SYMBOL(ceph_msg_get);
2106 
2107 void ceph_msg_put(struct ceph_msg *msg)
2108 {
2109 	dout("%s %p (was %d)\n", __func__, msg,
2110 	     kref_read(&msg->kref));
2111 	kref_put(&msg->kref, ceph_msg_release);
2112 }
2113 EXPORT_SYMBOL(ceph_msg_put);
2114 
2115 void ceph_msg_dump(struct ceph_msg *msg)
2116 {
2117 	pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
2118 		 msg->front_alloc_len, msg->data_length);
2119 	print_hex_dump(KERN_DEBUG, "header: ",
2120 		       DUMP_PREFIX_OFFSET, 16, 1,
2121 		       &msg->hdr, sizeof(msg->hdr), true);
2122 	print_hex_dump(KERN_DEBUG, " front: ",
2123 		       DUMP_PREFIX_OFFSET, 16, 1,
2124 		       msg->front.iov_base, msg->front.iov_len, true);
2125 	if (msg->middle)
2126 		print_hex_dump(KERN_DEBUG, "middle: ",
2127 			       DUMP_PREFIX_OFFSET, 16, 1,
2128 			       msg->middle->vec.iov_base,
2129 			       msg->middle->vec.iov_len, true);
2130 	print_hex_dump(KERN_DEBUG, "footer: ",
2131 		       DUMP_PREFIX_OFFSET, 16, 1,
2132 		       &msg->footer, sizeof(msg->footer), true);
2133 }
2134 EXPORT_SYMBOL(ceph_msg_dump);
2135