xref: /openbmc/linux/fs/dlm/lowcomms.c (revision 11976fe2)
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
 *
 */
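
/* For example, with the usual configfs mount point, the protocol can be
 * selected cluster-wide before lockspaces are created (illustrative
 * sketch; a cluster manager normally writes these values):
 *
 *	mount -t configfs none /sys/kernel/config
 *	echo 1 > /sys/kernel/config/dlm/cluster/protocol	# 1 = SCTP, 0 = TCP
 */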

#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <linux/slab.h>
#include <net/sctp/sctp.h>
#include <net/ipv6.h>

#include <trace/events/dlm.h>
#include <trace/events/sock.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "memory.h"
#include "config.h"

#define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(5000)
#define NEEDED_RMEM (4*1024*1024)

struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	/* this rw_semaphore is used to allow parallel recv/send in read
	 * lock mode. When we release a sock we need to hold the write lock.
	 *
	 * However, this locking code is not nice. When we remove the
	 * othercon handling we can look into other mechanisms to synchronize
	 * io handling and call sock_release() at the right time.
	 */
	struct rw_semaphore sock_lock;
	unsigned long flags;
#define CF_APP_LIMITED 0
#define CF_RECV_PENDING 1
#define CF_SEND_PENDING 2
#define CF_RECV_INTR 3
#define CF_IO_STOP 4
#define CF_IS_OTHERCON 5
	struct list_head writequeue;  /* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;
	int retries;
	struct hlist_node list;
	/* due to connect()/accept() races we can currently end up with this
	 * crossed-over second connection attempt for one node.
	 *
	 * There is a solution to avoid the race by introducing a connect
	 * rule, e.g. only our_nodeid > nodeid_to_connect is allowed to
	 * connect. The other side can still connect, but that is only taken
	 * as a signal that the other side wants to reconnect.
	 *
	 * However, changing to this behaviour would break backwards
	 * compatibility. In a DLM protocol major version upgrade we should
	 * remove this!
	 */
	struct connection *othercon;
	struct work_struct rwork; /* receive worker */
	struct work_struct swork; /* send worker */
	wait_queue_head_t shutdown_wait;
	unsigned char rx_leftover_buf[DLM_MAX_SOCKET_BUFSIZE];
	int rx_leftover;
	int mark;
	int addr_count;
	int curr_addr_index;
	struct sockaddr_storage addr[DLM_MAX_ADDR_COUNT];
	spinlock_t addrs_lock;
	struct rcu_head rcu;
};
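
/* A minimal sketch of the connect rule mentioned above (hypothetical,
 * not implemented for backwards compatibility reasons): only the node
 * with the higher nodeid would actively connect; the other side would
 * treat an incoming connect purely as a reconnect request.
 *
 *	static bool dlm_may_connect(int our_nodeid, int nodeid)
 *	{
 *		return our_nodeid > nodeid;
 *	}
 */
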
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

struct listen_connection {
	struct socket *sock;
	struct work_struct rwork;
};

#define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end)
#define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset)

/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;
	struct page *page;
	int offset;
	int len;
	int end;
	int users;
	bool dirty;
	struct connection *con;
	struct list_head msgs;
	struct kref ref;
};

struct dlm_msg {
	struct writequeue_entry *entry;
	struct dlm_msg *orig_msg;
	bool retransmit;
	void *ppc;
	int len;
	int idx; /* new()/commit() idx exchange */

	struct list_head list;
	struct kref ref;
};

struct processqueue_entry {
	unsigned char *buf;
	int nodeid;
	int buflen;

	struct list_head list;
};

struct dlm_proto_ops {
	bool try_new_addr;
	const char *name;
	int proto;

	int (*connect)(struct connection *con, struct socket *sock,
		       struct sockaddr *addr, int addr_len);
	void (*sockopts)(struct socket *sock);
	int (*bind)(struct socket *sock);
	int (*listen_validate)(void);
	void (*listen_sockopts)(struct socket *sock);
	int (*listen_bind)(struct socket *sock);
};

static struct listen_sock_callbacks {
	void (*sk_error_report)(struct sock *);
	void (*sk_data_ready)(struct sock *);
	void (*sk_state_change)(struct sock *);
	void (*sk_write_space)(struct sock *);
} listen_sock;

static struct listen_connection listen_con;
static struct sockaddr_storage dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;

/* Work queues */
static struct workqueue_struct *io_workqueue;
static struct workqueue_struct *process_workqueue;

static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_SPINLOCK(connections_lock);
DEFINE_STATIC_SRCU(connections_srcu);

static const struct dlm_proto_ops *dlm_proto_ops;

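/* Return codes of the io helpers below: DLM_IO_SUCCESS means keep
 * looping, DLM_IO_END means the corresponding pending flag was cleared
 * and there is nothing left to do, DLM_IO_EOF means the peer closed the
 * connection (receive path), and DLM_IO_RESCHED asks the worker to
 * cond_resched() and requeue itself.
 */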
#define DLM_IO_SUCCESS 0
#define DLM_IO_END 1
#define DLM_IO_EOF 2
#define DLM_IO_RESCHED 3

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);
static void process_dlm_messages(struct work_struct *work);

static DECLARE_WORK(process_work, process_dlm_messages);
static DEFINE_SPINLOCK(processqueue_lock);
static bool process_dlm_messages_pending;
static LIST_HEAD(processqueue);

bool dlm_lowcomms_is_running(void)
{
	return !!listen_con.sock;
}

static void lowcomms_queue_swork(struct connection *con)
{
	assert_spin_locked(&con->writequeue_lock);

	if (!test_bit(CF_IO_STOP, &con->flags) &&
	    !test_bit(CF_APP_LIMITED, &con->flags) &&
	    !test_and_set_bit(CF_SEND_PENDING, &con->flags))
		queue_work(io_workqueue, &con->swork);
}

static void lowcomms_queue_rwork(struct connection *con)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_sock_is_held(con->sock->sk));
#endif

	if (!test_bit(CF_IO_STOP, &con->flags) &&
	    !test_and_set_bit(CF_RECV_PENDING, &con->flags))
		queue_work(io_workqueue, &con->rwork);
}

static void writequeue_entry_ctor(void *data)
{
	struct writequeue_entry *entry = data;

	INIT_LIST_HEAD(&entry->msgs);
}

struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void)
{
	return kmem_cache_create("dlm_writequeue", sizeof(struct writequeue_entry),
				 0, 0, writequeue_entry_ctor);
}

struct kmem_cache *dlm_lowcomms_msg_cache_create(void)
{
	return kmem_cache_create("dlm_msg", sizeof(struct dlm_msg), 0, 0, NULL);
}

/* must be called with writequeue_lock held */
static struct writequeue_entry *con_next_wq(struct connection *con)
{
	struct writequeue_entry *e;

	e = list_first_entry_or_null(&con->writequeue, struct writequeue_entry,
				     list);
	/* if len is zero there is nothing to send; if there are users still
	 * filling buffers we wait until they are done so we can send more.
	 */
	if (!e || e->users || e->len == 0)
		return NULL;

	return e;
}

static struct connection *__find_con(int nodeid, int r)
{
	struct connection *con;

	hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
		if (con->nodeid == nodeid)
			return con;
	}

	return NULL;
}

static void dlm_con_init(struct connection *con, int nodeid)
{
	con->nodeid = nodeid;
	init_rwsem(&con->sock_lock);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);
	spin_lock_init(&con->addrs_lock);
	init_waitqueue_head(&con->shutdown_wait);
}

/*
 * If 'alloc' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con, *tmp;
	int r;

	r = nodeid_hash(nodeid);
	con = __find_con(nodeid, r);
	if (con || !alloc)
		return con;

	con = kzalloc(sizeof(*con), alloc);
	if (!con)
		return NULL;

	dlm_con_init(con, nodeid);

	spin_lock(&connections_lock);
	/* Because multiple workqueues/threads call this function it can
	 * race on multiple cpus. Instead of locking the hot path
	 * __find_con() we just check again, in the rare case of recently
	 * added nodes, under protection of connections_lock. If this is
	 * the case we abort our connection creation and return the
	 * existing connection.
	 */
	tmp = __find_con(nodeid, r);
	if (tmp) {
		spin_unlock(&connections_lock);
		kfree(con);
		return tmp;
	}

	hlist_add_head_rcu(&con->list, &connection_hash[r]);
	spin_unlock(&connections_lock);

	return con;
}

static int addr_compare(const struct sockaddr_storage *x,
			const struct sockaddr_storage *y)
{
	switch (x->ss_family) {
	case AF_INET: {
		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
		struct sockaddr_in *siny = (struct sockaddr_in *)y;
		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
			return 0;
		if (sinx->sin_port != siny->sin_port)
			return 0;
		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
			return 0;
		if (sinx->sin6_port != siny->sin6_port)
			return 0;
		break;
	}
	default:
		return 0;
	}
	return 1;
}

static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
			  struct sockaddr *sa_out, bool try_new_addr,
			  unsigned int *mark)
{
	struct sockaddr_storage sas;
	struct connection *con;
	int idx;

	if (!dlm_local_count)
		return -1;

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, 0);
	if (!con) {
		srcu_read_unlock(&connections_srcu, idx);
		return -ENOENT;
	}

	spin_lock(&con->addrs_lock);
	if (!con->addr_count) {
		spin_unlock(&con->addrs_lock);
		srcu_read_unlock(&connections_srcu, idx);
		return -ENOENT;
	}

	memcpy(&sas, &con->addr[con->curr_addr_index],
	       sizeof(struct sockaddr_storage));

	if (try_new_addr) {
		con->curr_addr_index++;
		if (con->curr_addr_index == con->addr_count)
			con->curr_addr_index = 0;
	}

	*mark = con->mark;
	spin_unlock(&con->addrs_lock);

	if (sas_out)
		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));

	if (!sa_out) {
		srcu_read_unlock(&connections_srcu, idx);
		return 0;
	}

	if (dlm_local_addr[0].ss_family == AF_INET) {
		struct sockaddr_in *in4  = (struct sockaddr_in *) &sas;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
	} else {
		struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &sas;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
		ret6->sin6_addr = in6->sin6_addr;
	}

	srcu_read_unlock(&connections_srcu, idx);
	return 0;
}

static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
			  unsigned int *mark)
{
	struct connection *con;
	int i, idx, addr_i;

	idx = srcu_read_lock(&connections_srcu);
	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
			WARN_ON_ONCE(!con->addr_count);

			spin_lock(&con->addrs_lock);
			for (addr_i = 0; addr_i < con->addr_count; addr_i++) {
				if (addr_compare(&con->addr[addr_i], addr)) {
					*nodeid = con->nodeid;
					*mark = con->mark;
					spin_unlock(&con->addrs_lock);
					srcu_read_unlock(&connections_srcu, idx);
					return 0;
				}
			}
			spin_unlock(&con->addrs_lock);
		}
	}
	srcu_read_unlock(&connections_srcu, idx);

	return -ENOENT;
}

static bool dlm_lowcomms_con_has_addr(const struct connection *con,
				      const struct sockaddr_storage *addr)
{
	int i;

	for (i = 0; i < con->addr_count; i++) {
		if (addr_compare(&con->addr[i], addr))
			return true;
	}

	return false;
}

int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
{
	struct connection *con;
	bool ret;
	int idx;

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, GFP_NOFS);
	if (!con) {
		srcu_read_unlock(&connections_srcu, idx);
		return -ENOMEM;
	}

	spin_lock(&con->addrs_lock);
	if (!con->addr_count) {
		memcpy(&con->addr[0], addr, sizeof(*addr));
		con->addr_count = 1;
		con->mark = dlm_config.ci_mark;
		spin_unlock(&con->addrs_lock);
		srcu_read_unlock(&connections_srcu, idx);
		return 0;
	}

	ret = dlm_lowcomms_con_has_addr(con, addr);
	if (ret) {
		spin_unlock(&con->addrs_lock);
		srcu_read_unlock(&connections_srcu, idx);
		return -EEXIST;
	}

	if (con->addr_count >= DLM_MAX_ADDR_COUNT) {
		spin_unlock(&con->addrs_lock);
		srcu_read_unlock(&connections_srcu, idx);
		return -ENOSPC;
	}

	memcpy(&con->addr[con->addr_count++], addr, sizeof(*addr));
	spin_unlock(&con->addrs_lock);
	srcu_read_unlock(&connections_srcu, idx);
	return 0;
}
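
/* Illustrative sketch of how a node address could be registered before
 * any lowcomms traffic (hypothetical values; in practice this is driven
 * from the dlm configfs interface by the cluster manager):
 *
 *	struct sockaddr_storage ss = {};
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = htonl(0xc0a80102);	// 192.168.1.2
 *	dlm_lowcomms_addr(2, &ss, sizeof(ss));		// nodeid 2
 */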

/* Data available on socket, or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
{
	struct connection *con = sock2con(sk);

	trace_sk_data_ready(sk);

	set_bit(CF_RECV_INTR, &con->flags);
	lowcomms_queue_rwork(con);
}

static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con = sock2con(sk);

	clear_bit(SOCK_NOSPACE, &con->sock->flags);

	spin_lock_bh(&con->writequeue_lock);
	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
		con->sock->sk->sk_write_pending--;
		clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
	}

	lowcomms_queue_swork(con);
	spin_unlock_bh(&con->writequeue_lock);
}

static void lowcomms_state_change(struct sock *sk)
{
	/* The SCTP layer does not call sk_data_ready when the connection
	 * is shut down, so we catch that signal through here.
	 */
	if (sk->sk_shutdown == RCV_SHUTDOWN)
		lowcomms_data_ready(sk);
}

static void lowcomms_listen_data_ready(struct sock *sk)
{
	trace_sk_data_ready(sk);

	queue_work(io_workqueue, &listen_con.rwork);
}

int dlm_lowcomms_connect_node(int nodeid)
{
	struct connection *con;
	int idx;

	if (nodeid == dlm_our_nodeid())
		return 0;

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, 0);
	if (WARN_ON_ONCE(!con)) {
		srcu_read_unlock(&connections_srcu, idx);
		return -ENOENT;
	}

	down_read(&con->sock_lock);
	if (!con->sock) {
		spin_lock_bh(&con->writequeue_lock);
		lowcomms_queue_swork(con);
		spin_unlock_bh(&con->writequeue_lock);
	}
	up_read(&con->sock_lock);
	srcu_read_unlock(&connections_srcu, idx);

	cond_resched();
	return 0;
}

int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
{
	struct connection *con;
	int idx;

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, 0);
	if (!con) {
		srcu_read_unlock(&connections_srcu, idx);
		return -ENOENT;
	}

	spin_lock(&con->addrs_lock);
	con->mark = mark;
	spin_unlock(&con->addrs_lock);
	srcu_read_unlock(&connections_srcu, idx);
	return 0;
}

static void lowcomms_error_report(struct sock *sk)
{
	struct connection *con = sock2con(sk);
	struct inet_sock *inet;

	inet = inet_sk(sk);
	switch (sk->sk_family) {
	case AF_INET:
		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %pI4, dport %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, &inet->inet_daddr,
				   ntohs(inet->inet_dport), sk->sk_err,
				   READ_ONCE(sk->sk_err_soft));
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %pI6c, "
				   "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, &sk->sk_v6_daddr,
				   ntohs(inet->inet_dport), sk->sk_err,
				   READ_ONCE(sk->sk_err_soft));
		break;
#endif
	default:
		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "invalid socket family %d set, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   sk->sk_family, sk->sk_err,
				   READ_ONCE(sk->sk_err_soft));
		break;
	}

	dlm_midcomms_unack_msg_resend(con->nodeid);

	listen_sock.sk_error_report(sk);
}

static void restore_callbacks(struct sock *sk)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(!lockdep_sock_is_held(sk));
#endif

	sk->sk_user_data = NULL;
	sk->sk_data_ready = listen_sock.sk_data_ready;
	sk->sk_state_change = listen_sock.sk_state_change;
	sk->sk_write_space = listen_sock.sk_write_space;
	sk->sk_error_report = listen_sock.sk_error_report;
}

/* Make a socket active */
static void add_sock(struct socket *sock, struct connection *con)
{
	struct sock *sk = sock->sk;

	lock_sock(sk);
	con->sock = sock;

	sk->sk_user_data = con;
	sk->sk_data_ready = lowcomms_data_ready;
	sk->sk_write_space = lowcomms_write_space;
	if (dlm_config.ci_protocol == DLM_PROTO_SCTP)
		sk->sk_state_change = lowcomms_state_change;
	sk->sk_allocation = GFP_NOFS;
	sk->sk_use_task_frag = false;
	sk->sk_error_report = lowcomms_error_report;
	release_sock(sk);
}

/* Add the port number to an IPv4 or IPv6 sockaddr and return the
   address length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	saddr->ss_family =  dlm_local_addr[0].ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);
	}
	memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
}

static void dlm_page_release(struct kref *kref)
{
	struct writequeue_entry *e = container_of(kref, struct writequeue_entry,
						  ref);

	__free_page(e->page);
	dlm_free_writequeue(e);
}

static void dlm_msg_release(struct kref *kref)
{
	struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref);

	kref_put(&msg->entry->ref, dlm_page_release);
	dlm_free_msg(msg);
}

static void free_entry(struct writequeue_entry *e)
{
	struct dlm_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &e->msgs, list) {
		if (msg->orig_msg) {
			msg->orig_msg->retransmit = false;
			kref_put(&msg->orig_msg->ref, dlm_msg_release);
		}

		list_del(&msg->list);
		kref_put(&msg->ref, dlm_msg_release);
	}

	list_del(&e->list);
	kref_put(&e->ref, dlm_page_release);
}

static void dlm_close_sock(struct socket **sock)
{
	lock_sock((*sock)->sk);
	restore_callbacks((*sock)->sk);
	release_sock((*sock)->sk);

	sock_release(*sock);
	*sock = NULL;
}

static void allow_connection_io(struct connection *con)
{
	if (con->othercon)
		clear_bit(CF_IO_STOP, &con->othercon->flags);
	clear_bit(CF_IO_STOP, &con->flags);
}

static void stop_connection_io(struct connection *con)
{
	if (con->othercon)
		stop_connection_io(con->othercon);

	down_write(&con->sock_lock);
	if (con->sock) {
		lock_sock(con->sock->sk);
		restore_callbacks(con->sock->sk);

		spin_lock_bh(&con->writequeue_lock);
		set_bit(CF_IO_STOP, &con->flags);
		spin_unlock_bh(&con->writequeue_lock);
		release_sock(con->sock->sk);
	} else {
		spin_lock_bh(&con->writequeue_lock);
		set_bit(CF_IO_STOP, &con->flags);
		spin_unlock_bh(&con->writequeue_lock);
	}
	up_write(&con->sock_lock);

	cancel_work_sync(&con->swork);
	cancel_work_sync(&con->rwork);
}

/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other)
{
	struct writequeue_entry *e;

	if (con->othercon && and_other)
		close_connection(con->othercon, false);

	down_write(&con->sock_lock);
	if (!con->sock) {
		up_write(&con->sock_lock);
		return;
	}

	dlm_close_sock(&con->sock);

	/* if we sent a writequeue entry only halfway, we drop the whole
	 * entry, because after a reconnect we must not start in the
	 * middle of a message, which would confuse the other end.
	 *
	 * we can always drop messages because of retransmits, but what we
	 * cannot allow is to transmit half messages which may be processed
	 * at the other side.
	 *
	 * our policy is to start from a clean state on disconnect; we don't
	 * know what was sent/received at the transport layer in this case.
	 */
	spin_lock_bh(&con->writequeue_lock);
	if (!list_empty(&con->writequeue)) {
		e = list_first_entry(&con->writequeue, struct writequeue_entry,
				     list);
		if (e->dirty)
			free_entry(e);
	}
	spin_unlock_bh(&con->writequeue_lock);

	con->rx_leftover = 0;
	con->retries = 0;
	clear_bit(CF_APP_LIMITED, &con->flags);
	clear_bit(CF_RECV_PENDING, &con->flags);
	clear_bit(CF_SEND_PENDING, &con->flags);
	up_write(&con->sock_lock);
}

static void shutdown_connection(struct connection *con, bool and_other)
{
	int ret;

	if (con->othercon && and_other)
		shutdown_connection(con->othercon, false);

	flush_workqueue(io_workqueue);
	down_read(&con->sock_lock);
	/* nothing to shutdown */
	if (!con->sock) {
		up_read(&con->sock_lock);
		return;
	}

	ret = kernel_sock_shutdown(con->sock, SHUT_WR);
	up_read(&con->sock_lock);
	if (ret) {
		log_print("Connection %p failed to shutdown: %d will force close",
			  con, ret);
		goto force_close;
	} else {
		ret = wait_event_timeout(con->shutdown_wait, !con->sock,
					 DLM_SHUTDOWN_WAIT_TIMEOUT);
		if (ret == 0) {
			log_print("Connection %p shutdown timed out, will force close",
				  con);
			goto force_close;
		}
	}

	return;

force_close:
	close_connection(con, false);
}

static struct processqueue_entry *new_processqueue_entry(int nodeid,
							 int buflen)
{
	struct processqueue_entry *pentry;

	pentry = kmalloc(sizeof(*pentry), GFP_NOFS);
	if (!pentry)
		return NULL;

	pentry->buf = kmalloc(buflen, GFP_NOFS);
	if (!pentry->buf) {
		kfree(pentry);
		return NULL;
	}

	pentry->nodeid = nodeid;
	return pentry;
}

static void free_processqueue_entry(struct processqueue_entry *pentry)
{
	kfree(pentry->buf);
	kfree(pentry);
}

struct dlm_processed_nodes {
	int nodeid;

	struct list_head list;
};

static void add_processed_node(int nodeid, struct list_head *processed_nodes)
{
	struct dlm_processed_nodes *n;

	list_for_each_entry(n, processed_nodes, list) {
		/* we already remembered this node */
		if (n->nodeid == nodeid)
			return;
	}

	/* if the allocation fails, in the worst case we simply don't send
	 * an ack back; we will try again next time.
	 */
	n = kmalloc(sizeof(*n), GFP_NOFS);
	if (!n)
		return;

	n->nodeid = nodeid;
	list_add(&n->list, processed_nodes);
}

static void process_dlm_messages(struct work_struct *work)
{
	struct dlm_processed_nodes *n, *n_tmp;
	struct processqueue_entry *pentry;
	LIST_HEAD(processed_nodes);

	spin_lock(&processqueue_lock);
	pentry = list_first_entry_or_null(&processqueue,
					  struct processqueue_entry, list);
	if (WARN_ON_ONCE(!pentry)) {
		spin_unlock(&processqueue_lock);
		return;
	}

	list_del(&pentry->list);
	spin_unlock(&processqueue_lock);

	for (;;) {
		dlm_process_incoming_buffer(pentry->nodeid, pentry->buf,
					    pentry->buflen);
		add_processed_node(pentry->nodeid, &processed_nodes);
		free_processqueue_entry(pentry);

		spin_lock(&processqueue_lock);
		pentry = list_first_entry_or_null(&processqueue,
						  struct processqueue_entry, list);
		if (!pentry) {
			process_dlm_messages_pending = false;
			spin_unlock(&processqueue_lock);
			break;
		}

		list_del(&pentry->list);
		spin_unlock(&processqueue_lock);
	}

	/* send acks back after we have processed a couple of messages */
	list_for_each_entry_safe(n, n_tmp, &processed_nodes, list) {
		list_del(&n->list);
		dlm_midcomms_receive_done(n->nodeid);
		kfree(n);
	}
}
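
/* For example (illustrative): if ten buffers from nodes 2 and 3 were
 * queued above, dlm_midcomms_receive_done() is called once per node,
 * not once per message, which keeps the ack traffic low.
 */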

/* Data received from remote end */
static int receive_from_sock(struct connection *con, int buflen)
{
	struct processqueue_entry *pentry;
	int ret, buflen_real;
	struct msghdr msg;
	struct kvec iov;

	pentry = new_processqueue_entry(con->nodeid, buflen);
	if (!pentry)
		return DLM_IO_RESCHED;

	memcpy(pentry->buf, con->rx_leftover_buf, con->rx_leftover);

	/* calculate new buffer parameters regarding the last receive and
	 * possible leftover bytes
	 */
	iov.iov_base = pentry->buf + con->rx_leftover;
	iov.iov_len = buflen - con->rx_leftover;

	memset(&msg, 0, sizeof(msg));
	msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	clear_bit(CF_RECV_INTR, &con->flags);
again:
	ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
			     msg.msg_flags);
	trace_dlm_recv(con->nodeid, ret);
	if (ret == -EAGAIN) {
		lock_sock(con->sock->sk);
		if (test_and_clear_bit(CF_RECV_INTR, &con->flags)) {
			release_sock(con->sock->sk);
			goto again;
		}

		clear_bit(CF_RECV_PENDING, &con->flags);
		release_sock(con->sock->sk);
		free_processqueue_entry(pentry);
		return DLM_IO_END;
	} else if (ret == 0) {
		/* close will clear CF_RECV_PENDING */
		free_processqueue_entry(pentry);
		return DLM_IO_EOF;
	} else if (ret < 0) {
		free_processqueue_entry(pentry);
		return ret;
	}

	/* new buflen according to bytes read plus leftover from the last
	 * receive
	 */
	buflen_real = ret + con->rx_leftover;
	ret = dlm_validate_incoming_buffer(con->nodeid, pentry->buf,
					   buflen_real);
	if (ret < 0) {
		free_processqueue_entry(pentry);
		return ret;
	}

	pentry->buflen = ret;

	/* move the leftover bytes from this receive to the beginning of the
	 * leftover buffer, so that on the next receive the full message is
	 * again assembled at the start address of the receive buffer.
	 */
	con->rx_leftover = buflen_real - ret;
	memmove(con->rx_leftover_buf, pentry->buf + ret,
		con->rx_leftover);
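
	/* For example (illustrative numbers): if this receive returned 100
	 * bytes but dlm_validate_incoming_buffer() accepted only 96 bytes
	 * of complete messages, the remaining 4 bytes are saved in
	 * rx_leftover_buf and prepended to the next receive.
	 */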

	spin_lock(&processqueue_lock);
	list_add_tail(&pentry->list, &processqueue);
	if (!process_dlm_messages_pending) {
		process_dlm_messages_pending = true;
		queue_work(process_workqueue, &process_work);
	}
	spin_unlock(&processqueue_lock);

	return DLM_IO_SUCCESS;
}

/* Listening socket is busy, accept a connection */
static int accept_from_sock(void)
{
	struct sockaddr_storage peeraddr;
	int len, idx, result, nodeid;
	struct connection *newcon;
	struct socket *newsock;
	unsigned int mark;

	result = kernel_accept(listen_con.sock, &newsock, O_NONBLOCK);
	if (result == -EAGAIN)
		return DLM_IO_END;
	else if (result < 0)
		goto accept_err;

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
	if (len < 0) {
		result = -ECONNABORTED;
		goto accept_err;
	}

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
		switch (peeraddr.ss_family) {
		case AF_INET: {
			struct sockaddr_in *sin = (struct sockaddr_in *)&peeraddr;

			log_print("connect from non cluster IPv4 node %pI4",
				  &sin->sin_addr);
			break;
		}
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6: {
			struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&peeraddr;

			log_print("connect from non cluster IPv6 node %pI6c",
				  &sin6->sin6_addr);
			break;
		}
#endif
		default:
			log_print("invalid family from non cluster node");
			break;
		}

		sock_release(newsock);
		return -1;
	}

	log_print("got connection from %d", nodeid);

	/*  Check to see if we already have a connection to this node. This
	 *  could happen if the two nodes initiate a connection at roughly
	 *  the same time and the connections cross on the wire.
	 *  In this case we store the incoming one in "othercon"
	 */
	idx = srcu_read_lock(&connections_srcu);
	newcon = nodeid2con(nodeid, 0);
	if (WARN_ON_ONCE(!newcon)) {
		srcu_read_unlock(&connections_srcu, idx);
		result = -ENOENT;
		goto accept_err;
	}

	sock_set_mark(newsock->sk, mark);

	down_write(&newcon->sock_lock);
	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kzalloc(sizeof(*othercon), GFP_NOFS);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				up_write(&newcon->sock_lock);
				srcu_read_unlock(&connections_srcu, idx);
				result = -ENOMEM;
				goto accept_err;
			}

			dlm_con_init(othercon, nodeid);
			lockdep_set_subclass(&othercon->sock_lock, 1);
			newcon->othercon = othercon;
			set_bit(CF_IS_OTHERCON, &othercon->flags);
		} else {
			/* close other sock con if we have something new */
			close_connection(othercon, false);
		}

		down_write(&othercon->sock_lock);
		add_sock(newsock, othercon);

		/* check if we received something while adding */
		lock_sock(othercon->sock->sk);
		lowcomms_queue_rwork(othercon);
		release_sock(othercon->sock->sk);
		up_write(&othercon->sock_lock);
	} else {
		/* accept copies the sk after we've saved the callbacks, so we
		   don't want to save them a second time or comm errors will
		   result in calling sk_error_report recursively. */
		add_sock(newsock, newcon);

		/* check if we received something while adding */
		lock_sock(newcon->sock->sk);
		lowcomms_queue_rwork(newcon);
		release_sock(newcon->sock->sk);
	}
	up_write(&newcon->sock_lock);
	srcu_read_unlock(&connections_srcu, idx);

	return DLM_IO_SUCCESS;

accept_err:
	if (newsock)
		sock_release(newsock);

	return result;
}

/*
 * writequeue_entry_complete - try to delete and free write queue entry
 * @e: write queue entry to try to delete
 * @completed: bytes completed
 *
 * writequeue_lock must be held.
 */
static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
{
	e->offset += completed;
	e->len -= completed;
	/* signal that the page was partially transmitted */
	e->dirty = true;

	if (e->len == 0 && e->users == 0)
		free_entry(e);
}
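
/* For example (illustrative numbers): an entry with offset 0 and len 100
 * for which kernel_sendpage() reported 60 bytes sent ends up with
 * offset 60, len 40 and dirty set; if the connection drops before the
 * remaining 40 bytes go out, close_connection() frees the dirty entry
 * instead of resuming in the middle of a message.
 */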

/*
 * sctp_bind_addrs - bind a SCTP socket to all our addresses
 */
static int sctp_bind_addrs(struct socket *sock, uint16_t port)
{
	struct sockaddr_storage localaddr;
	struct sockaddr *addr = (struct sockaddr *)&localaddr;
	int i, addr_len, result = 0;

	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, &dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, port, &addr_len);

		if (!i)
			result = kernel_bind(sock, addr, addr_len);
		else
			result = sock_bind_add(sock->sk, addr, addr_len);

		if (result < 0) {
			log_print("Can't bind to %d addr number %d, %d.\n",
				  port, i + 1, result);
			break;
		}
	}
	return result;
}

/* Get local addresses */
static void init_local(void)
{
	struct sockaddr_storage sas;
	int i;

	dlm_local_count = 0;
	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
		if (dlm_our_addr(&sas, i))
			break;

		memcpy(&dlm_local_addr[dlm_local_count++], &sas, sizeof(sas));
	}
}

static struct writequeue_entry *new_writequeue_entry(struct connection *con)
{
	struct writequeue_entry *entry;

	entry = dlm_allocate_writequeue();
	if (!entry)
		return NULL;

	entry->page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
	if (!entry->page) {
		dlm_free_writequeue(entry);
		return NULL;
	}

	entry->offset = 0;
	entry->len = 0;
	entry->end = 0;
	entry->dirty = false;
	entry->con = con;
	entry->users = 1;
	kref_init(&entry->ref);
	return entry;
}

static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
					     char **ppc, void (*cb)(void *data),
					     void *data)
{
	struct writequeue_entry *e;

	spin_lock_bh(&con->writequeue_lock);
	if (!list_empty(&con->writequeue)) {
		e = list_last_entry(&con->writequeue, struct writequeue_entry, list);
		if (DLM_WQ_REMAIN_BYTES(e) >= len) {
			kref_get(&e->ref);

			*ppc = page_address(e->page) + e->end;
			if (cb)
				cb(data);

			e->end += len;
			e->users++;
			goto out;
		}
	}

	e = new_writequeue_entry(con);
	if (!e)
		goto out;

	kref_get(&e->ref);
	*ppc = page_address(e->page);
	e->end += len;
	if (cb)
		cb(data);

	list_add_tail(&e->list, &con->writequeue);

out:
	spin_unlock_bh(&con->writequeue_lock);
	return e;
}
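
/* For example (illustrative numbers): two consecutive 64-byte messages
 * for the same node land in one page-backed entry; the first gets ppc
 * at page offset 0, the second at offset 64, and e->end grows to 128,
 * so both can go out in a single kernel_sendpage() call.
 */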

static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
						gfp_t allocation, char **ppc,
						void (*cb)(void *data),
						void *data)
{
	struct writequeue_entry *e;
	struct dlm_msg *msg;

	msg = dlm_allocate_msg(allocation);
	if (!msg)
		return NULL;

	kref_init(&msg->ref);

	e = new_wq_entry(con, len, ppc, cb, data);
	if (!e) {
		dlm_free_msg(msg);
		return NULL;
	}

	msg->retransmit = false;
	msg->orig_msg = NULL;
	msg->ppc = *ppc;
	msg->len = len;
	msg->entry = e;

	return msg;
}

/* avoid false positive for connections_srcu; the unlock happens in
 * dlm_lowcomms_commit_msg(), which must be called on success
 */
#ifndef __CHECKER__
struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
				     char **ppc, void (*cb)(void *data),
				     void *data)
{
	struct connection *con;
	struct dlm_msg *msg;
	int idx;

	if (len > DLM_MAX_SOCKET_BUFSIZE ||
	    len < sizeof(struct dlm_header)) {
		BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE);
		log_print("failed to allocate a buffer of size %d", len);
		WARN_ON_ONCE(1);
		return NULL;
	}

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, 0);
	if (WARN_ON_ONCE(!con)) {
		srcu_read_unlock(&connections_srcu, idx);
		return NULL;
	}

	msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, data);
	if (!msg) {
		srcu_read_unlock(&connections_srcu, idx);
		return NULL;
	}

	/* for dlm_lowcomms_commit_msg() */
	kref_get(&msg->ref);
	/* we assume that commit must be called on success */
	msg->idx = idx;
	return msg;
}
#endif

static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
{
	struct writequeue_entry *e = msg->entry;
	struct connection *con = e->con;
	int users;

	spin_lock_bh(&con->writequeue_lock);
	kref_get(&msg->ref);
	list_add(&msg->list, &e->msgs);

	users = --e->users;
	if (users)
		goto out;

	e->len = DLM_WQ_LENGTH_BYTES(e);

	lowcomms_queue_swork(con);

out:
	spin_unlock_bh(&con->writequeue_lock);
	return;
}

/* avoid false positive for connections_srcu; the lock was taken in
 * dlm_lowcomms_new_msg()
 */
#ifndef __CHECKER__
void dlm_lowcomms_commit_msg(struct dlm_msg *msg)
{
	_dlm_lowcomms_commit_msg(msg);
	srcu_read_unlock(&connections_srcu, msg->idx);
	/* because dlm_lowcomms_new_msg() */
	kref_put(&msg->ref, dlm_msg_release);
}
#endif
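
/* Illustrative sketch of the new/commit pattern used by callers such as
 * the midcomms layer (hypothetical caller; error handling elided):
 *
 *	char *ppc;
 *	struct dlm_msg *msg;
 *
 *	msg = dlm_lowcomms_new_msg(nodeid, len, GFP_NOFS, &ppc, NULL, NULL);
 *	if (msg) {
 *		memcpy(ppc, data, len);		// fill the reserved space
 *		dlm_lowcomms_commit_msg(msg);	// queue it for sending
 *		dlm_lowcomms_put_msg(msg);	// drop the caller's reference
 *	}
 */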

void dlm_lowcomms_put_msg(struct dlm_msg *msg)
{
	kref_put(&msg->ref, dlm_msg_release);
}

/* does not hold connections_srcu; for use from lowcomms_error_report only */
int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
{
	struct dlm_msg *msg_resend;
	char *ppc;

	if (msg->retransmit)
		return 1;

	msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len,
					      GFP_ATOMIC, &ppc, NULL, NULL);
	if (!msg_resend)
		return -ENOMEM;

	msg->retransmit = true;
	kref_get(&msg->ref);
	msg_resend->orig_msg = msg;

	memcpy(ppc, msg->ppc, msg->len);
	_dlm_lowcomms_commit_msg(msg_resend);
	dlm_lowcomms_put_msg(msg_resend);

	return 0;
}

/* Send a message */
static int send_to_sock(struct connection *con)
{
	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
	int len, offset, ret;

	spin_lock_bh(&con->writequeue_lock);
	e = con_next_wq(con);
	if (!e) {
		clear_bit(CF_SEND_PENDING, &con->flags);
		spin_unlock_bh(&con->writequeue_lock);
		return DLM_IO_END;
	}

	len = e->len;
	offset = e->offset;
	WARN_ON_ONCE(len == 0 && e->users == 0);
	spin_unlock_bh(&con->writequeue_lock);

	ret = kernel_sendpage(con->sock, e->page, offset, len,
			      msg_flags);
	trace_dlm_send(con->nodeid, ret);
	if (ret == -EAGAIN || ret == 0) {
		lock_sock(con->sock->sk);
		spin_lock_bh(&con->writequeue_lock);
		if (test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
		    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
			/* Notify TCP that we're limited by the
			 * application window size.
			 */
			set_bit(SOCK_NOSPACE, &con->sock->sk->sk_socket->flags);
			con->sock->sk->sk_write_pending++;

			clear_bit(CF_SEND_PENDING, &con->flags);
			spin_unlock_bh(&con->writequeue_lock);
			release_sock(con->sock->sk);

			/* wait for write_space() event */
			return DLM_IO_END;
		}
		spin_unlock_bh(&con->writequeue_lock);
		release_sock(con->sock->sk);

		return DLM_IO_RESCHED;
	} else if (ret < 0) {
		return ret;
	}

	spin_lock_bh(&con->writequeue_lock);
	writequeue_entry_complete(e, ret);
	spin_unlock_bh(&con->writequeue_lock);

	return DLM_IO_SUCCESS;
}

static void clean_one_writequeue(struct connection *con)
{
	struct writequeue_entry *e, *safe;

	spin_lock_bh(&con->writequeue_lock);
	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
		free_entry(e);
	}
	spin_unlock_bh(&con->writequeue_lock);
}

static void connection_release(struct rcu_head *rcu)
{
	struct connection *con = container_of(rcu, struct connection, rcu);

	WARN_ON_ONCE(!list_empty(&con->writequeue));
	WARN_ON_ONCE(con->sock);
	kfree(con);
}

/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
	struct connection *con;
	int idx;

	log_print("closing connection to node %d", nodeid);

	idx = srcu_read_lock(&connections_srcu);
	con = nodeid2con(nodeid, 0);
	if (WARN_ON_ONCE(!con)) {
		srcu_read_unlock(&connections_srcu, idx);
		return -ENOENT;
	}

	stop_connection_io(con);
	log_print("io handling for node: %d stopped", nodeid);
	close_connection(con, true);

	spin_lock(&connections_lock);
	hlist_del_rcu(&con->list);
	spin_unlock(&connections_lock);

	clean_one_writequeue(con);
	call_srcu(&connections_srcu, &con->rcu, connection_release);
	if (con->othercon) {
		clean_one_writequeue(con->othercon);
		call_srcu(&connections_srcu, &con->othercon->rcu, connection_release);
	}
	srcu_read_unlock(&connections_srcu, idx);

	/* for debugging we print when we are done, to compare with other
	 * messages in between. This function needs to be correctly
	 * synchronized with io handling.
	 */
	log_print("closing connection to node %d done", nodeid);

	return 0;
}

/* Receive worker function */
static void process_recv_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, rwork);
	int ret, buflen;

	down_read(&con->sock_lock);
	if (!con->sock) {
		up_read(&con->sock_lock);
		return;
	}

	buflen = READ_ONCE(dlm_config.ci_buffer_size);
	do {
		ret = receive_from_sock(con, buflen);
	} while (ret == DLM_IO_SUCCESS);
	up_read(&con->sock_lock);

	switch (ret) {
	case DLM_IO_END:
		/* CF_RECV_PENDING cleared */
		break;
	case DLM_IO_EOF:
		close_connection(con, false);
		wake_up(&con->shutdown_wait);
		/* CF_RECV_PENDING cleared */
		break;
	case DLM_IO_RESCHED:
		cond_resched();
		queue_work(io_workqueue, &con->rwork);
		/* CF_RECV_PENDING not cleared */
		break;
	default:
		if (ret < 0) {
			if (test_bit(CF_IS_OTHERCON, &con->flags)) {
				close_connection(con, false);
			} else {
				spin_lock_bh(&con->writequeue_lock);
				lowcomms_queue_swork(con);
				spin_unlock_bh(&con->writequeue_lock);
			}

			/* CF_RECV_PENDING cleared for othercon; otherwise
			 * we trigger the send queue if not already done
			 * and process_send_sockets will handle it
			 */
			break;
		}

		WARN_ON_ONCE(1);
		break;
	}
}

static void process_listen_recv_socket(struct work_struct *work)
{
	int ret;

	if (WARN_ON_ONCE(!listen_con.sock))
		return;

	do {
		ret = accept_from_sock();
	} while (ret == DLM_IO_SUCCESS);

	if (ret < 0)
		log_print("critical error accepting connection: %d", ret);
}

static int dlm_connect(struct connection *con)
{
	struct sockaddr_storage addr;
	int result, addr_len;
	struct socket *sock;
	unsigned int mark;

	memset(&addr, 0, sizeof(addr));
	result = nodeid_to_addr(con->nodeid, &addr, NULL,
				dlm_proto_ops->try_new_addr, &mark);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		return result;
	}

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family,
				  SOCK_STREAM, dlm_proto_ops->proto, &sock);
	if (result < 0)
		return result;

	sock_set_mark(sock->sk, mark);
	dlm_proto_ops->sockopts(sock);

	result = dlm_proto_ops->bind(sock);
	if (result < 0) {
		sock_release(sock);
		return result;
	}

	add_sock(sock, con);

	log_print_ratelimited("connecting to %d", con->nodeid);
	make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len);
	result = dlm_proto_ops->connect(con, sock, (struct sockaddr *)&addr,
					addr_len);
	switch (result) {
	case -EINPROGRESS:
		/* not an error */
		fallthrough;
	case 0:
		break;
	default:
		if (result < 0)
			dlm_close_sock(&con->sock);

		break;
	}

	return result;
}

/* Send worker function */
static void process_send_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, swork);
	int ret;

	WARN_ON_ONCE(test_bit(CF_IS_OTHERCON, &con->flags));

	down_read(&con->sock_lock);
	if (!con->sock) {
		up_read(&con->sock_lock);
		down_write(&con->sock_lock);
		if (!con->sock) {
			ret = dlm_connect(con);
			switch (ret) {
			case 0:
				break;
			case -EINPROGRESS:
				/* avoid spamming resched on connection
				 * attempts; we might switch to a
				 * state_change event based mechanism once
				 * established
				 */
				msleep(100);
				break;
			default:
				/* CF_SEND_PENDING not cleared */
				up_write(&con->sock_lock);
				log_print("connect to node %d try %d error %d",
					  con->nodeid, con->retries++, ret);
				msleep(1000);
				/* For now we try forever to reconnect. In
				 * the future we should send an event to the
				 * cluster manager to fence this node after a
				 * certain number of retries.
				 */
				queue_work(io_workqueue, &con->swork);
				return;
			}
		}
		downgrade_write(&con->sock_lock);
	}

	do {
		ret = send_to_sock(con);
	} while (ret == DLM_IO_SUCCESS);
	up_read(&con->sock_lock);

	switch (ret) {
	case DLM_IO_END:
		/* CF_SEND_PENDING cleared */
		break;
	case DLM_IO_RESCHED:
		/* CF_SEND_PENDING not cleared */
		cond_resched();
		queue_work(io_workqueue, &con->swork);
		break;
	default:
		if (ret < 0) {
			close_connection(con, false);

			/* CF_SEND_PENDING cleared */
			spin_lock_bh(&con->writequeue_lock);
			lowcomms_queue_swork(con);
			spin_unlock_bh(&con->writequeue_lock);
			break;
		}

		WARN_ON_ONCE(1);
		break;
	}
}

static void work_stop(void)
{
	if (io_workqueue) {
		destroy_workqueue(io_workqueue);
		io_workqueue = NULL;
	}

	if (process_workqueue) {
		destroy_workqueue(process_workqueue);
		process_workqueue = NULL;
	}
}

static int work_start(void)
{
	io_workqueue = alloc_workqueue("dlm_io", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				       WQ_UNBOUND, 0);
	if (!io_workqueue) {
		log_print("can't start dlm_io");
		return -ENOMEM;
	}

	/* ordered dlm message processing queue;
	 * should be converted to a tasklet
	 */
	process_workqueue = alloc_ordered_workqueue("dlm_process",
						    WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!process_workqueue) {
		log_print("can't start dlm_process");
		destroy_workqueue(io_workqueue);
		io_workqueue = NULL;
		return -ENOMEM;
	}

	return 0;
}

void dlm_lowcomms_shutdown(void)
{
	struct connection *con;
	int i, idx;

	/* stop lowcomms_listen_data_ready calls */
	lock_sock(listen_con.sock->sk);
	listen_con.sock->sk->sk_data_ready = listen_sock.sk_data_ready;
	release_sock(listen_con.sock->sk);

	cancel_work_sync(&listen_con.rwork);
	dlm_close_sock(&listen_con.sock);

	idx = srcu_read_lock(&connections_srcu);
	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
			shutdown_connection(con, true);
			stop_connection_io(con);
			flush_workqueue(process_workqueue);
			close_connection(con, true);

			clean_one_writequeue(con);
			if (con->othercon)
				clean_one_writequeue(con->othercon);
			allow_connection_io(con);
		}
	}
	srcu_read_unlock(&connections_srcu, idx);
}

void dlm_lowcomms_stop(void)
{
	work_stop();
	dlm_proto_ops = NULL;
}

static int dlm_listen_for_all(void)
{
	struct socket *sock;
	int result;

	log_print("Using %s for communications",
		  dlm_proto_ops->name);

	result = dlm_proto_ops->listen_validate();
	if (result < 0)
		return result;

	result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family,
				  SOCK_STREAM, dlm_proto_ops->proto, &sock);
	if (result < 0) {
		log_print("Can't create comms socket: %d", result);
		return result;
	}

	sock_set_mark(sock->sk, dlm_config.ci_mark);
	dlm_proto_ops->listen_sockopts(sock);

	result = dlm_proto_ops->listen_bind(sock);
	if (result < 0)
		goto out;

	lock_sock(sock->sk);
	listen_sock.sk_data_ready = sock->sk->sk_data_ready;
	listen_sock.sk_write_space = sock->sk->sk_write_space;
	listen_sock.sk_error_report = sock->sk->sk_error_report;
	listen_sock.sk_state_change = sock->sk->sk_state_change;

	listen_con.sock = sock;

	sock->sk->sk_allocation = GFP_NOFS;
	sock->sk->sk_use_task_frag = false;
	sock->sk->sk_data_ready = lowcomms_listen_data_ready;
	release_sock(sock->sk);

	result = sock->ops->listen(sock, 128);
	if (result < 0) {
		dlm_close_sock(&listen_con.sock);
		return result;
	}

	return 0;

out:
	sock_release(sock);
	return result;
}

static int dlm_tcp_bind(struct socket *sock)
{
	struct sockaddr_storage src_addr;
	int result, addr_len;

	/* Bind to our cluster-known address when connecting, to avoid
	 * routing problems.
	 */
	memcpy(&src_addr, &dlm_local_addr[0], sizeof(src_addr));
	make_sockaddr(&src_addr, 0, &addr_len);

	result = sock->ops->bind(sock, (struct sockaddr *)&src_addr,
				 addr_len);
	if (result < 0) {
		/* This *may* not indicate a critical error */
		log_print("could not bind for connect: %d", result);
	}

	return 0;
}

static int dlm_tcp_connect(struct connection *con, struct socket *sock,
			   struct sockaddr *addr, int addr_len)
{
	return sock->ops->connect(sock, addr, addr_len, O_NONBLOCK);
}

static int dlm_tcp_listen_validate(void)
{
	/* We don't support multi-homed hosts */
	if (dlm_local_count > 1) {
		log_print("TCP protocol can't handle multi-homed hosts, try SCTP");
		return -EINVAL;
	}

	return 0;
}

static void dlm_tcp_sockopts(struct socket *sock)
{
	/* Turn off Nagle's algorithm */
	tcp_sock_set_nodelay(sock->sk);
}

static void dlm_tcp_listen_sockopts(struct socket *sock)
{
	dlm_tcp_sockopts(sock);
	sock_set_reuseaddr(sock->sk);
}

static int dlm_tcp_listen_bind(struct socket *sock)
{
	int addr_len;

	/* Bind to our port */
	make_sockaddr(&dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
	return sock->ops->bind(sock, (struct sockaddr *)&dlm_local_addr[0],
			       addr_len);
}

static const struct dlm_proto_ops dlm_tcp_ops = {
	.name = "TCP",
	.proto = IPPROTO_TCP,
	.connect = dlm_tcp_connect,
	.sockopts = dlm_tcp_sockopts,
	.bind = dlm_tcp_bind,
	.listen_validate = dlm_tcp_listen_validate,
	.listen_sockopts = dlm_tcp_listen_sockopts,
	.listen_bind = dlm_tcp_listen_bind,
};

static int dlm_sctp_bind(struct socket *sock)
{
	return sctp_bind_addrs(sock, 0);
}

static int dlm_sctp_connect(struct connection *con, struct socket *sock,
			    struct sockaddr *addr, int addr_len)
{
	int ret;

	/*
	 * Make sock->ops->connect() return within a bounded time, since
	 * the O_NONBLOCK argument to connect() does not work here.
	 * Afterwards, restore the default value of this attribute.
	 */
	sock_set_sndtimeo(sock->sk, 5);
	ret = sock->ops->connect(sock, addr, addr_len, 0);
	sock_set_sndtimeo(sock->sk, 0);
	return ret;
}

static int dlm_sctp_listen_validate(void)
{
	if (!IS_ENABLED(CONFIG_IP_SCTP)) {
		log_print("SCTP is not enabled by this kernel");
		return -EOPNOTSUPP;
	}

	request_module("sctp");
	return 0;
}

static int dlm_sctp_bind_listen(struct socket *sock)
{
	return sctp_bind_addrs(sock, dlm_config.ci_tcp_port);
}

static void dlm_sctp_sockopts(struct socket *sock)
{
	/* Turn off Nagle's algorithm */
	sctp_sock_set_nodelay(sock->sk);
	sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
}

static const struct dlm_proto_ops dlm_sctp_ops = {
	.name = "SCTP",
	.proto = IPPROTO_SCTP,
	.try_new_addr = true,
	.connect = dlm_sctp_connect,
	.sockopts = dlm_sctp_sockopts,
	.bind = dlm_sctp_bind,
	.listen_validate = dlm_sctp_listen_validate,
	.listen_sockopts = dlm_sctp_sockopts,
	.listen_bind = dlm_sctp_bind_listen,
};

int dlm_lowcomms_start(void)
{
	int error;

	init_local();
	if (!dlm_local_count) {
		error = -ENOTCONN;
		log_print("no local IP address has been set");
		goto fail;
	}

	error = work_start();
	if (error)
		goto fail;

	/* Start listening */
	switch (dlm_config.ci_protocol) {
	case DLM_PROTO_TCP:
		dlm_proto_ops = &dlm_tcp_ops;
		break;
	case DLM_PROTO_SCTP:
		dlm_proto_ops = &dlm_sctp_ops;
		break;
	default:
		log_print("Invalid protocol identifier %d set",
			  dlm_config.ci_protocol);
		error = -EINVAL;
		goto fail_proto_ops;
	}

	error = dlm_listen_for_all();
	if (error)
		goto fail_listen;

	return 0;

fail_listen:
	dlm_proto_ops = NULL;
fail_proto_ops:
	work_stop();
fail:
	return error;
}

void dlm_lowcomms_init(void)
{
	int i;

	for (i = 0; i < CONN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&connection_hash[i]);

	INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
}

void dlm_lowcomms_exit(void)
{
	struct connection *con;
	int i, idx;

	idx = srcu_read_lock(&connections_srcu);
	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
			spin_lock(&connections_lock);
			hlist_del_rcu(&con->list);
			spin_unlock(&connections_lock);

			if (con->othercon)
				call_srcu(&connections_srcu, &con->othercon->rcu,
					  connection_release);
			call_srcu(&connections_srcu, &con->rcu, connection_release);
		}
	}
	srcu_read_unlock(&connections_srcu, idx);
}