// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's responsibility to resolve
 * these into IP addresses or whatever it needs for inter-node
 * communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
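 * (With mainline DLM this is typically written via configfs, e.g.
 * /sys/kernel/config/dlm/cluster/protocol; the path is mentioned here
 * for illustration only.)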
 *
 */

#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <linux/slab.h>
#include <net/sctp/sctp.h>
#include <net/ipv6.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

#define NEEDED_RMEM (4*1024*1024)
#define CONN_HASH_SIZE 32

/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25
#define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(10000)

struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	struct mutex sock_mutex;
	unsigned long flags;
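/* The CF_* values below are bit numbers for use with set_bit(),
 * test_bit() and friends on the flags word above, not bit masks.
 */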
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
#define CF_CLOSE 6
#define CF_APP_LIMITED 7
#define CF_CLOSING 8
#define CF_SHUTDOWN 9
#define CF_CONNECTED 10
	struct list_head writequeue;  /* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;
	void (*connect_action) (struct connection *);	/* What to do to connect */
	void (*shutdown_action)(struct connection *con); /* What to do to shutdown */
	int retries;
#define MAX_CONNECT_RETRIES 3
	struct hlist_node list;
	struct connection *othercon;
	struct work_struct rwork; /* Receive workqueue */
	struct work_struct swork; /* Send workqueue */
	wait_queue_head_t shutdown_wait; /* wait for graceful shutdown */
	unsigned char *rx_buf;
	int rx_buflen;
	int rx_leftover;
	struct rcu_head rcu;
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

struct listen_connection {
	struct socket *sock;
	struct work_struct rwork;
};

/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;
	struct page *page;
	int offset;
	int len;
	int end;
	int users;
	struct connection *con;
};
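
/* Lifecycle of a writequeue_entry: dlm_lowcomms_get_buffer() reserves
 * [end, end + len) in the entry's page and increments "users";
 * dlm_lowcomms_commit_buffer() decrements "users" and, when it reaches
 * zero, publishes the data by setting "len". send_to_sock() then
 * transmits [offset, offset + len) and writequeue_entry_complete()
 * advances "offset" until the entry can be freed.
 */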

struct dlm_node_addr {
	struct list_head list;
	int nodeid;
	int addr_count;
	int curr_addr_index;
	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};

static struct listen_sock_callbacks {
	void (*sk_error_report)(struct sock *);
	void (*sk_data_ready)(struct sock *);
	void (*sk_state_change)(struct sock *);
	void (*sk_write_space)(struct sock *);
} listen_sock;

static LIST_HEAD(dlm_node_addrs);
static DEFINE_SPINLOCK(dlm_node_addrs_spin);

static struct listen_connection listen_con;
static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;
static int dlm_allow_conn;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_SPINLOCK(connections_lock);
DEFINE_STATIC_SRCU(connections_srcu);

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);

static void sctp_connect_to_sock(struct connection *con);
static void tcp_connect_to_sock(struct connection *con);
static void dlm_tcp_shutdown(struct connection *con);

/* This is deliberately very simple because most clusters have simple
   sequential nodeids, so we should be able to go straight to a connection
   struct in the array */
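/* e.g. with CONN_HASH_SIZE 32 only the low five bits of the nodeid
 * matter: nodeid 5 and nodeid 37 (37 & 31 == 5) share bucket 5.
 */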
static inline int nodeid_hash(int nodeid)
{
	return nodeid & (CONN_HASH_SIZE-1);
}

static struct connection *__find_con(int nodeid)
{
	int r, idx;
	struct connection *con;

	r = nodeid_hash(nodeid);

	idx = srcu_read_lock(&connections_srcu);
	hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
		if (con->nodeid == nodeid) {
			srcu_read_unlock(&connections_srcu, idx);
			return con;
		}
	}
	srcu_read_unlock(&connections_srcu, idx);

	return NULL;
}

static int dlm_con_init(struct connection *con, int nodeid)
{
	con->rx_buflen = dlm_config.ci_buffer_size;
	con->rx_buf = kmalloc(con->rx_buflen, GFP_NOFS);
	if (!con->rx_buf)
		return -ENOMEM;

	con->nodeid = nodeid;
	mutex_init(&con->sock_mutex);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);
	init_waitqueue_head(&con->shutdown_wait);

	if (dlm_config.ci_protocol == 0) {
		con->connect_action = tcp_connect_to_sock;
		con->shutdown_action = dlm_tcp_shutdown;
	} else {
		con->connect_action = sctp_connect_to_sock;
	}

	return 0;
}

/*
 * If 'alloc' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con, *tmp;
	int r, ret;

	con = __find_con(nodeid);
	if (con || !alloc)
		return con;

	con = kzalloc(sizeof(*con), alloc);
	if (!con)
		return NULL;

	ret = dlm_con_init(con, nodeid);
	if (ret) {
		kfree(con);
		return NULL;
	}

	r = nodeid_hash(nodeid);

	spin_lock(&connections_lock);
	/* Because multiple workqueues/threads call this function it can
	 * race on multiple CPUs. Instead of locking the hot path
	 * __find_con() we just recheck, in the rare case of recently
	 * added nodes, under the protection of connections_lock. If we
	 * find an existing connection we abort our creation and return it.
	 */
	tmp = __find_con(nodeid);
	if (tmp) {
		spin_unlock(&connections_lock);
		kfree(con->rx_buf);
		kfree(con);
		return tmp;
	}

	hlist_add_head_rcu(&con->list, &connection_hash[r]);
	spin_unlock(&connections_lock);

	return con;
}

/* Loop round all connections */
static void foreach_conn(void (*conn_func)(struct connection *c))
{
	int i, idx;
	struct connection *con;

	idx = srcu_read_lock(&connections_srcu);
	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_rcu(con, &connection_hash[i], list)
			conn_func(con);
	}
	srcu_read_unlock(&connections_srcu, idx);
}

static struct dlm_node_addr *find_node_addr(int nodeid)
{
	struct dlm_node_addr *na;

	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (na->nodeid == nodeid)
			return na;
	}
	return NULL;
}

static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
{
	switch (x->ss_family) {
	case AF_INET: {
		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
		struct sockaddr_in *siny = (struct sockaddr_in *)y;
		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
			return 0;
		if (sinx->sin_port != siny->sin_port)
			return 0;
		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
			return 0;
		if (sinx->sin6_port != siny->sin6_port)
			return 0;
		break;
	}
	default:
		return 0;
	}
	return 1;
}

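/* Resolve a nodeid to one of its configured addresses. With
 * try_new_addr the lookup round-robins through every address the node
 * has registered; sctp_connect_to_sock() uses this to try an alternate
 * peer address after a failed connect.
 */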
static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
			  struct sockaddr *sa_out, bool try_new_addr)
{
	struct sockaddr_storage sas;
	struct dlm_node_addr *na;

	if (!dlm_local_count)
		return -1;

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na && na->addr_count) {
		memcpy(&sas, na->addr[na->curr_addr_index],
		       sizeof(struct sockaddr_storage));

		if (try_new_addr) {
			na->curr_addr_index++;
			if (na->curr_addr_index == na->addr_count)
				na->curr_addr_index = 0;
		}
	}
	spin_unlock(&dlm_node_addrs_spin);

	if (!na)
		return -EEXIST;

	if (!na->addr_count)
		return -ENOENT;

	if (sas_out)
		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));

	if (!sa_out)
		return 0;

	if (dlm_local_addr[0]->ss_family == AF_INET) {
		struct sockaddr_in *in4  = (struct sockaddr_in *) &sas;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
	} else {
		struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &sas;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
		ret6->sin6_addr = in6->sin6_addr;
	}

	return 0;
}

static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
{
	struct dlm_node_addr *na;
	int rv = -EEXIST;
	int addr_i;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (!na->addr_count)
			continue;

		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
			if (addr_compare(na->addr[addr_i], addr)) {
				*nodeid = na->nodeid;
				rv = 0;
				goto unlock;
			}
		}
	}
unlock:
	spin_unlock(&dlm_node_addrs_spin);
	return rv;
}

int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
{
	struct sockaddr_storage *new_addr;
	struct dlm_node_addr *new_node, *na;

	new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
	if (!new_node)
		return -ENOMEM;

	new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
	if (!new_addr) {
		kfree(new_node);
		return -ENOMEM;
	}

	memcpy(new_addr, addr, len);

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (!na) {
		new_node->nodeid = nodeid;
		new_node->addr[0] = new_addr;
		new_node->addr_count = 1;
		list_add(&new_node->list, &dlm_node_addrs);
		spin_unlock(&dlm_node_addrs_spin);
		return 0;
	}

	if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
		spin_unlock(&dlm_node_addrs_spin);
		kfree(new_addr);
		kfree(new_node);
		return -ENOSPC;
	}

	na->addr[na->addr_count++] = new_addr;
	spin_unlock(&dlm_node_addrs_spin);
	kfree(new_node);
	return 0;
}
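
/* A minimal usage sketch (illustrative values only): registering one
 * IPv4 address for node 2 before lowcomms is started.
 *
 *	struct sockaddr_storage ss = { 0 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	dlm_lowcomms_addr(2, &ss, sizeof(ss));
 */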

/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void lowcomms_listen_data_ready(struct sock *sk)
{
	queue_work(recv_workqueue, &listen_con.rwork);
}

static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (!con)
		goto out;

	if (!test_and_set_bit(CF_CONNECTED, &con->flags)) {
		log_print("successfully connected to node %d", con->nodeid);
		queue_work(send_workqueue, &con->swork);
		goto out;
	}

	clear_bit(SOCK_NOSPACE, &con->sock->flags);

	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
		con->sock->sk->sk_write_pending--;
		clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
	}

	queue_work(send_workqueue, &con->swork);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
	if (test_bit(CF_CLOSE, &con->flags))
		return;
	queue_work(send_workqueue, &con->swork);
	cond_resched();
}

static void lowcomms_state_change(struct sock *sk)
{
	/* SCTP layer is not calling sk_data_ready when the connection
	 * is done, so we catch the signal through here. Also, it
	 * doesn't switch socket state when entering shutdown, so we
	 * skip the write in that case.
	 */
	if (sk->sk_shutdown) {
		if (sk->sk_shutdown == RCV_SHUTDOWN)
			lowcomms_data_ready(sk);
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		lowcomms_write_space(sk);
	}
}

int dlm_lowcomms_connect_node(int nodeid)
{
	struct connection *con;

	if (nodeid == dlm_our_nodeid())
		return 0;

	con = nodeid2con(nodeid, GFP_NOFS);
	if (!con)
		return -ENOMEM;
	lowcomms_connect_sock(con);
	return 0;
}

static void lowcomms_error_report(struct sock *sk)
{
	struct connection *con;
	struct sockaddr_storage saddr;
	void (*orig_report)(struct sock *) = NULL;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con == NULL)
		goto out;

	orig_report = listen_sock.sk_error_report;
	if (con->sock == NULL ||
	    kernel_getpeername(con->sock, (struct sockaddr *)&saddr) < 0) {
		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d, port %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, dlm_config.ci_tcp_port,
				   sk->sk_err, sk->sk_err_soft);
	} else if (saddr.ss_family == AF_INET) {
		struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %pI4, port %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, &sin4->sin_addr.s_addr,
				   dlm_config.ci_tcp_port, sk->sk_err,
				   sk->sk_err_soft);
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %pI6c, "
				   "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, &sin6->sin6_addr,
				   dlm_config.ci_tcp_port, sk->sk_err,
				   sk->sk_err_soft);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
	if (orig_report)
		orig_report(sk);
}

/* Note: sk_callback_lock must be locked before calling this function. */
static void save_listen_callbacks(struct socket *sock)
{
	struct sock *sk = sock->sk;

	listen_sock.sk_data_ready = sk->sk_data_ready;
	listen_sock.sk_state_change = sk->sk_state_change;
	listen_sock.sk_write_space = sk->sk_write_space;
	listen_sock.sk_error_report = sk->sk_error_report;
}

static void restore_callbacks(struct socket *sock)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	sk->sk_data_ready = listen_sock.sk_data_ready;
	sk->sk_state_change = listen_sock.sk_state_change;
	sk->sk_write_space = listen_sock.sk_write_space;
	sk->sk_error_report = listen_sock.sk_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void add_listen_sock(struct socket *sock, struct listen_connection *con)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	save_listen_callbacks(sock);
	con->sock = sock;

	sk->sk_user_data = con;
	sk->sk_allocation = GFP_NOFS;
	/* Install a data_ready callback */
	sk->sk_data_ready = lowcomms_listen_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Make a socket active */
static void add_sock(struct socket *sock, struct connection *con)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	con->sock = sock;

	sk->sk_user_data = con;
	/* Install a data_ready callback */
	sk->sk_data_ready = lowcomms_data_ready;
	sk->sk_write_space = lowcomms_write_space;
	sk->sk_state_change = lowcomms_state_change;
	sk->sk_allocation = GFP_NOFS;
	sk->sk_error_report = lowcomms_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Add the port number to an IPv6 or 4 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	saddr->ss_family = dlm_local_addr[0]->ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);
	}
	memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
}
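
/* e.g. make_sockaddr(&ss, 21064, &len) on an IPv4 cluster stores
 * htons(21064) in sin_port, sets len to sizeof(struct sockaddr_in) and
 * zeroes the rest of the storage (21064 is DLM's default port number).
 */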

static void dlm_close_sock(struct socket **sock)
{
	if (*sock) {
		restore_callbacks(*sock);
		sock_release(*sock);
		*sock = NULL;
	}
}

/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other,
			     bool tx, bool rx)
{
	bool closing = test_and_set_bit(CF_CLOSING, &con->flags);

	if (tx && !closing && cancel_work_sync(&con->swork)) {
		log_print("canceled swork for node %d", con->nodeid);
		clear_bit(CF_WRITE_PENDING, &con->flags);
	}
	if (rx && !closing && cancel_work_sync(&con->rwork)) {
		log_print("canceled rwork for node %d", con->nodeid);
		clear_bit(CF_READ_PENDING, &con->flags);
	}

	mutex_lock(&con->sock_mutex);
	dlm_close_sock(&con->sock);

	if (con->othercon && and_other) {
		/* Will only re-enter once. */
		close_connection(con->othercon, false, true, true);
	}

	con->rx_leftover = 0;
	con->retries = 0;
	clear_bit(CF_CONNECTED, &con->flags);
	mutex_unlock(&con->sock_mutex);
	clear_bit(CF_CLOSING, &con->flags);
}

static void shutdown_connection(struct connection *con)
{
	int ret;

	if (cancel_work_sync(&con->swork)) {
		log_print("canceled swork for node %d", con->nodeid);
		clear_bit(CF_WRITE_PENDING, &con->flags);
	}

	mutex_lock(&con->sock_mutex);
	/* nothing to shutdown */
	if (!con->sock) {
		mutex_unlock(&con->sock_mutex);
		return;
	}

	set_bit(CF_SHUTDOWN, &con->flags);
	ret = kernel_sock_shutdown(con->sock, SHUT_WR);
	mutex_unlock(&con->sock_mutex);
	if (ret) {
		log_print("Connection %p failed to shutdown: %d will force close",
			  con, ret);
		goto force_close;
	} else {
		ret = wait_event_timeout(con->shutdown_wait,
					 !test_bit(CF_SHUTDOWN, &con->flags),
					 DLM_SHUTDOWN_WAIT_TIMEOUT);
		if (ret == 0) {
			log_print("Connection %p shutdown timed out, will force close",
				  con);
			goto force_close;
		}
	}

	return;

force_close:
	clear_bit(CF_SHUTDOWN, &con->flags);
	close_connection(con, false, true, true);
}

static void dlm_tcp_shutdown(struct connection *con)
{
	if (con->othercon)
		shutdown_connection(con->othercon);
	shutdown_connection(con);
}

static int con_realloc_receive_buf(struct connection *con, int newlen)
{
	unsigned char *newbuf;

	newbuf = kmalloc(newlen, GFP_NOFS);
	if (!newbuf)
		return -ENOMEM;

	/* copy any leftover from last receive */
	if (con->rx_leftover)
		memmove(newbuf, con->rx_buf, con->rx_leftover);

	/* swap to new buffer space */
	kfree(con->rx_buf);
	con->rx_buflen = newlen;
	con->rx_buf = newbuf;

	return 0;
}

/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
	int call_again_soon = 0;
	struct msghdr msg;
	struct kvec iov;
	int ret, buflen;

	mutex_lock(&con->sock_mutex);

	if (con->sock == NULL) {
		ret = -EAGAIN;
		goto out_close;
	}

	/* reallocate if the configured receive buffer size has changed */
	buflen = dlm_config.ci_buffer_size;
	if (con->rx_buflen != buflen && con->rx_leftover <= buflen) {
		ret = con_realloc_receive_buf(con, buflen);
		if (ret < 0)
			goto out_resched;
	}

	/* calculate the new buffer parameters, accounting for any
	 * leftover bytes from the last receive
	 */
	iov.iov_base = con->rx_buf + con->rx_leftover;
	iov.iov_len = con->rx_buflen - con->rx_leftover;

	memset(&msg, 0, sizeof(msg));
	msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
			     msg.msg_flags);
	if (ret <= 0)
		goto out_close;
	else if (ret == iov.iov_len)
		call_again_soon = 1;

	/* new buflen according to the bytes read plus the leftover from
	 * the last receive
	 */
	buflen = ret + con->rx_leftover;
	ret = dlm_process_incoming_buffer(con->nodeid, con->rx_buf, buflen);
	if (ret < 0)
		goto out_close;

	/* calculate the leftover bytes from processing and move them to
	 * the beginning of the receive buffer, so the next receive sees
	 * the full message at the start address of the buffer.
	 */
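	/* Example (illustrative numbers): if buflen is 4096 and
	 * dlm_process_incoming_buffer() consumed 4000 bytes, the trailing
	 * 96 bytes are the start of a partial message and are moved to
	 * the front of rx_buf; the next kernel_recvmsg() appends to them.
	 */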
	con->rx_leftover = buflen - ret;
	if (con->rx_leftover) {
		memmove(con->rx_buf, con->rx_buf + ret,
			con->rx_leftover);
		call_again_soon = 1;
	}

	if (call_again_soon)
		goto out_resched;

	mutex_unlock(&con->sock_mutex);
	return 0;

out_resched:
	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	mutex_unlock(&con->sock_mutex);
	return -EAGAIN;

out_close:
	mutex_unlock(&con->sock_mutex);
	if (ret != -EAGAIN) {
		/* Reconnect when there is something to send */
		close_connection(con, false, true, false);
		if (ret == 0) {
			log_print("connection %p got EOF from %d",
				  con, con->nodeid);
			/* handling for tcp shutdown */
			clear_bit(CF_SHUTDOWN, &con->flags);
			wake_up(&con->shutdown_wait);
			/* signal the receive worker to break its loop */
			ret = -1;
		}
	}
	return ret;
}

/* Listening socket is busy, accept a connection */
static int accept_from_sock(struct listen_connection *con)
{
	int result;
	struct sockaddr_storage peeraddr;
	struct socket *newsock;
	int len;
	int nodeid;
	struct connection *newcon;
	struct connection *addcon;
	unsigned int mark;

	if (!dlm_allow_conn) {
		return -1;
	}

	if (!con->sock)
		return -ENOTCONN;

	result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
	if (result < 0)
		goto accept_err;

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
	if (len < 0) {
		result = -ECONNABORTED;
		goto accept_err;
	}

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (addr_to_nodeid(&peeraddr, &nodeid)) {
		unsigned char *b = (unsigned char *)&peeraddr;

		log_print("connect from non cluster node");
		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
				     b, sizeof(struct sockaddr_storage));
		sock_release(newsock);
		return -1;
	}

	dlm_comm_mark(nodeid, &mark);
	sock_set_mark(newsock->sk, mark);

	log_print("got connection from %d", nodeid);

	/*  Check to see if we already have a connection to this node. This
	 *  could happen if the two nodes initiate a connection at roughly
	 *  the same time and the connections cross on the wire.
	 *  In this case we store the incoming one in "othercon"
	 */
	newcon = nodeid2con(nodeid, GFP_NOFS);
	if (!newcon) {
		result = -ENOMEM;
		goto accept_err;
	}

	mutex_lock(&newcon->sock_mutex);
	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kzalloc(sizeof(*othercon), GFP_NOFS);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				mutex_unlock(&newcon->sock_mutex);
				result = -ENOMEM;
				goto accept_err;
			}

			result = dlm_con_init(othercon, nodeid);
			if (result < 0) {
				kfree(othercon);
				mutex_unlock(&newcon->sock_mutex);
				goto accept_err;
			}

			newcon->othercon = othercon;
		} else {
			/* close other sock con if we have something new */
			close_connection(othercon, false, true, false);
		}

		mutex_lock_nested(&othercon->sock_mutex, 1);
		add_sock(newsock, othercon);
		addcon = othercon;
		mutex_unlock(&othercon->sock_mutex);
	} else {
		/* accept copies the sk after we've saved the callbacks, so we
		   don't want to save them a second time or comm errors will
		   result in calling sk_error_report recursively. */
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data between
	 * processing the accept and adding the socket to the
	 * read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);

	return 0;

accept_err:
	if (newsock)
		sock_release(newsock);

	if (result != -EAGAIN)
		log_print("error accepting connection from node: %d", result);
	return result;
}

static void free_entry(struct writequeue_entry *e)
{
	__free_page(e->page);
	kfree(e);
}

/*
 * writequeue_entry_complete - try to delete and free write queue entry
 * @e: write queue entry to try to delete
 * @completed: bytes completed
 *
 * writequeue_lock must be held.
 */
static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
{
	e->offset += completed;
	e->len -= completed;

	if (e->len == 0 && e->users == 0) {
		list_del(&e->list);
		free_entry(e);
	}
}

/*
 * sctp_bind_addrs - bind a SCTP socket to all our addresses
 */
static int sctp_bind_addrs(struct socket *sock, uint16_t port)
{
	struct sockaddr_storage localaddr;
	struct sockaddr *addr = (struct sockaddr *)&localaddr;
	int i, addr_len, result = 0;

	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, port, &addr_len);

		if (!i)
			result = kernel_bind(sock, addr, addr_len);
		else
			result = sock_bind_add(sock->sk, addr, addr_len);

		if (result < 0) {
			log_print("Can't bind to %d addr number %d, %d.",
				  port, i + 1, result);
			break;
		}
	}
	return result;
}

/* Initiate an SCTP association.
   This is a special case of send_to_sock() in that we don't yet have a
   peeled-off socket for this association, so we use the listening socket
   and add the primary IP address of the remote node.
 */
static void sctp_connect_to_sock(struct connection *con)
{
	struct sockaddr_storage daddr;
	int result;
	int addr_len;
	struct socket *sock;
	unsigned int mark;

	dlm_comm_mark(con->nodeid, &mark);

	mutex_lock(&con->sock_mutex);

	/* Some odd races can cause double-connects, ignore them */
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	if (con->sock) {
		log_print("node %d already connected.", con->nodeid);
		goto out;
	}

	memset(&daddr, 0, sizeof(daddr));
	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		goto out;
	}

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_SCTP, &sock);
	if (result < 0)
		goto socket_err;

	sock_set_mark(sock->sk, mark);

	add_sock(sock, con);

	/* Bind to all addresses. */
	if (sctp_bind_addrs(con->sock, 0))
		goto bind_err;

	make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);

	/* Turn off Nagle's algorithm */
	sctp_sock_set_nodelay(sock->sk);

	/*
	 * Make sock->ops->connect() return within the specified time,
	 * since the O_NONBLOCK argument to connect() does not work here.
	 * Afterwards, restore the default value of this attribute.
	 */
	sock_set_sndtimeo(sock->sk, 5);
	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
				   0);
	sock_set_sndtimeo(sock->sk, 0);

	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0) {
		if (!test_and_set_bit(CF_CONNECTED, &con->flags))
			log_print("successfully connected to node %d", con->nodeid);
		goto out;
	}

bind_err:
	con->sock = NULL;
	sock_release(sock);

socket_err:
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN &&
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
		lowcomms_connect_sock(con);
		return;
	}

out:
	mutex_unlock(&con->sock_mutex);
}

/* Connect a new socket to its peer */
static void tcp_connect_to_sock(struct connection *con)
{
	struct sockaddr_storage saddr, src_addr;
	int addr_len;
	struct socket *sock = NULL;
	unsigned int mark;
	int result;

	dlm_comm_mark(con->nodeid, &mark);

	mutex_lock(&con->sock_mutex);
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	/* Some odd races can cause double-connects, ignore them */
	if (con->sock)
		goto out;

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0)
		goto out_err;

	sock_set_mark(sock->sk, mark);

	memset(&saddr, 0, sizeof(saddr));
	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		goto out_err;
	}

	add_sock(sock, con);

	/* Bind to our cluster-known address when connecting to avoid
	   routing problems */
	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
	make_sockaddr(&src_addr, 0, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
				 addr_len);
	if (result < 0) {
		log_print("could not bind for connect: %d", result);
		/* This *may* not indicate a critical error */
	}

	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);

	/* Turn off Nagle's algorithm */
	tcp_sock_set_nodelay(sock->sk);

	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
				   O_NONBLOCK);
	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

out_err:
	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	} else if (sock) {
		sock_release(sock);
	}
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN &&
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
		lowcomms_connect_sock(con);
		return;
	}
out:
	mutex_unlock(&con->sock_mutex);
	return;
}

/* On error caller must run dlm_close_sock() for the
 * listen connection socket.
 */
static int tcp_create_listen_sock(struct listen_connection *con,
				  struct sockaddr_storage *saddr)
{
	struct socket *sock = NULL;
	int result = 0;
	int addr_len;

	if (dlm_local_addr[0]->ss_family == AF_INET)
		addr_len = sizeof(struct sockaddr_in);
	else
		addr_len = sizeof(struct sockaddr_in6);

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0) {
		log_print("Can't create listening comms socket");
		goto create_out;
	}

	sock_set_mark(sock->sk, dlm_config.ci_mark);

	/* Turn off Nagle's algorithm */
	tcp_sock_set_nodelay(sock->sk);

	sock_set_reuseaddr(sock->sk);

	add_listen_sock(sock, con);

	/* Bind to our port */
	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
	if (result < 0) {
		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
		goto create_out;
	}
	sock_set_keepalive(sock->sk);

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
		goto create_out;
	}

	return 0;

create_out:
	return result;
}

/* Get local addresses */
static void init_local(void)
{
	struct sockaddr_storage sas, *addr;
	int i;

	dlm_local_count = 0;
	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
		if (dlm_our_addr(&sas, i))
			break;

		addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
		if (!addr)
			break;
		dlm_local_addr[dlm_local_count++] = addr;
	}
}

static void deinit_local(void)
{
	int i;

	for (i = 0; i < dlm_local_count; i++)
		kfree(dlm_local_addr[i]);
}

/* Initialise SCTP socket and bind to all interfaces
 * On error caller must run dlm_close_sock() for the
 * listen connection socket.
 */
static int sctp_listen_for_all(struct listen_connection *con)
{
	struct socket *sock = NULL;
	int result = -EINVAL;

	log_print("Using SCTP for communications");

	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_SCTP, &sock);
	if (result < 0) {
		log_print("Can't create comms socket, check SCTP is loaded");
		goto out;
	}

	sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
	sock_set_mark(sock->sk, dlm_config.ci_mark);
	sctp_sock_set_nodelay(sock->sk);

	add_listen_sock(sock, con);

	/* Bind to all addresses. */
	result = sctp_bind_addrs(con->sock, dlm_config.ci_tcp_port);
	if (result < 0)
		goto out;

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't set socket listening");
		goto out;
	}

	return 0;

out:
	return result;
}

static int tcp_listen_for_all(void)
{
	/* We don't support multi-homed hosts */
	if (dlm_local_addr[1] != NULL) {
		log_print("TCP protocol can't handle multi-homed hosts, "
			  "try SCTP");
		return -EINVAL;
	}

	log_print("Using TCP for communications");

	return tcp_create_listen_sock(&listen_con, dlm_local_addr[0]);
}

static struct writequeue_entry *new_writequeue_entry(struct connection *con,
						     gfp_t allocation)
{
	struct writequeue_entry *entry;

	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
	if (!entry)
		return NULL;

	entry->page = alloc_page(allocation);
	if (!entry->page) {
		kfree(entry);
		return NULL;
	}

	entry->offset = 0;
	entry->len = 0;
	entry->end = 0;
	entry->users = 0;
	entry->con = con;

	return entry;
}

void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
{
	struct connection *con;
	struct writequeue_entry *e;
	int offset = 0;

	if (len > LOWCOMMS_MAX_TX_BUFFER_LEN) {
		BUILD_BUG_ON(PAGE_SIZE < LOWCOMMS_MAX_TX_BUFFER_LEN);
		log_print("failed to allocate a buffer of size %d", len);
		return NULL;
	}

	con = nodeid2con(nodeid, allocation);
	if (!con)
		return NULL;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
	if ((&e->list == &con->writequeue) ||
	    (PAGE_SIZE - e->end < len)) {
		e = NULL;
	} else {
		offset = e->end;
		e->end += len;
		e->users++;
	}
	spin_unlock(&con->writequeue_lock);

	if (e) {
	got_one:
		*ppc = page_address(e->page) + offset;
		return e;
	}

	e = new_writequeue_entry(con, allocation);
	if (e) {
		spin_lock(&con->writequeue_lock);
		offset = e->end;
		e->end += len;
		e->users++;
		list_add_tail(&e->list, &con->writequeue);
		spin_unlock(&con->writequeue_lock);
		goto got_one;
	}
	return NULL;
}

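/* A minimal usage sketch of the two-phase buffer API (hypothetical
 * caller; the real callers build DLM messages around these calls):
 *
 *	char *ppc;
 *	void *mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &ppc);
 *
 *	if (!mh)
 *		return -ENOMEM;
 *	memcpy(ppc, msg, len);
 *	dlm_lowcomms_commit_buffer(mh);
 *
 * get_buffer reserves len bytes in a writequeue page; commit_buffer
 * publishes them and kicks the send worker.
 */
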
void dlm_lowcomms_commit_buffer(void *mh)
{
	struct writequeue_entry *e = (struct writequeue_entry *)mh;
	struct connection *con = e->con;
	int users;

	spin_lock(&con->writequeue_lock);
	users = --e->users;
	if (users)
		goto out;
	e->len = e->end - e->offset;
	spin_unlock(&con->writequeue_lock);

	queue_work(send_workqueue, &con->swork);
	return;

out:
	spin_unlock(&con->writequeue_lock);
	return;
}

/* Send a message */
static void send_to_sock(struct connection *con)
{
	int ret = 0;
	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
	int len, offset;
	int count = 0;

	mutex_lock(&con->sock_mutex);
	if (con->sock == NULL)
		goto out_connect;

	spin_lock(&con->writequeue_lock);
	for (;;) {
		e = list_entry(con->writequeue.next, struct writequeue_entry,
			       list);
		if ((struct list_head *) e == &con->writequeue)
			break;

		len = e->len;
		offset = e->offset;
		BUG_ON(len == 0 && e->users == 0);
		spin_unlock(&con->writequeue_lock);

		ret = 0;
		if (len) {
			ret = kernel_sendpage(con->sock, e->page, offset, len,
					      msg_flags);
			if (ret == -EAGAIN || ret == 0) {
				if (ret == -EAGAIN &&
				    test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
				    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
					/* Notify TCP that we're limited by the
					 * application window size.
					 */
					set_bit(SOCK_NOSPACE, &con->sock->flags);
					con->sock->sk->sk_write_pending++;
				}
				cond_resched();
				goto out;
			} else if (ret < 0)
				goto send_error;
		}

		/* Don't starve people filling buffers */
		if (++count >= MAX_SEND_MSG_COUNT) {
			cond_resched();
			count = 0;
		}

		spin_lock(&con->writequeue_lock);
		writequeue_entry_complete(e, ret);
	}
	spin_unlock(&con->writequeue_lock);
out:
	mutex_unlock(&con->sock_mutex);
	return;

send_error:
	mutex_unlock(&con->sock_mutex);
	close_connection(con, false, false, true);
	/* Requeue the send work. When the work daemon runs again, it will try
	   a new connection, then call this function again. */
	queue_work(send_workqueue, &con->swork);
	return;

out_connect:
	mutex_unlock(&con->sock_mutex);
	queue_work(send_workqueue, &con->swork);
	cond_resched();
}

static void clean_one_writequeue(struct connection *con)
{
	struct writequeue_entry *e, *safe;

	spin_lock(&con->writequeue_lock);
	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
		list_del(&e->list);
		free_entry(e);
	}
	spin_unlock(&con->writequeue_lock);
}

/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
	struct connection *con;
	struct dlm_node_addr *na;

	log_print("closing connection to node %d", nodeid);
	con = nodeid2con(nodeid, 0);
	if (con) {
		set_bit(CF_CLOSE, &con->flags);
		close_connection(con, true, true, true);
		clean_one_writequeue(con);
		if (con->othercon)
			clean_one_writequeue(con->othercon);
	}

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na) {
		list_del(&na->list);
		while (na->addr_count--)
			kfree(na->addr[na->addr_count]);
		kfree(na);
	}
	spin_unlock(&dlm_node_addrs_spin);

	return 0;
}

/* Receive workqueue function */
static void process_recv_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, rwork);
	int err;

	clear_bit(CF_READ_PENDING, &con->flags);
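	/* Keep receiving until receive_from_sock() reports an error:
	 * -EAGAIN means the socket ran dry or the work was requeued,
	 * anything else means the connection was closed.
	 */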
	do {
		err = receive_from_sock(con);
	} while (!err);
}

static void process_listen_recv_socket(struct work_struct *work)
{
	accept_from_sock(&listen_con);
}

/* Send workqueue function */
static void process_send_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, swork);

	clear_bit(CF_WRITE_PENDING, &con->flags);
	if (con->sock == NULL) /* not mutex protected so check it inside too */
		con->connect_action(con);
	if (!list_empty(&con->writequeue))
		send_to_sock(con);
}

static void work_stop(void)
{
	if (recv_workqueue)
		destroy_workqueue(recv_workqueue);
	if (send_workqueue)
		destroy_workqueue(send_workqueue);
}

static int work_start(void)
{
	recv_workqueue = alloc_workqueue("dlm_recv",
					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!recv_workqueue) {
		log_print("can't start dlm_recv");
		return -ENOMEM;
	}

	send_workqueue = alloc_workqueue("dlm_send",
					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!send_workqueue) {
		log_print("can't start dlm_send");
		destroy_workqueue(recv_workqueue);
		return -ENOMEM;
	}

	return 0;
}

static void _stop_conn(struct connection *con, bool and_other)
{
	mutex_lock(&con->sock_mutex);
	set_bit(CF_CLOSE, &con->flags);
	set_bit(CF_READ_PENDING, &con->flags);
	set_bit(CF_WRITE_PENDING, &con->flags);
	if (con->sock && con->sock->sk) {
		write_lock_bh(&con->sock->sk->sk_callback_lock);
		con->sock->sk->sk_user_data = NULL;
		write_unlock_bh(&con->sock->sk->sk_callback_lock);
	}
	if (con->othercon && and_other)
		_stop_conn(con->othercon, false);
	mutex_unlock(&con->sock_mutex);
}

static void stop_conn(struct connection *con)
{
	_stop_conn(con, true);
}

static void shutdown_conn(struct connection *con)
{
	if (con->shutdown_action)
		con->shutdown_action(con);
}

static void connection_release(struct rcu_head *rcu)
{
	struct connection *con = container_of(rcu, struct connection, rcu);

	kfree(con->rx_buf);
	kfree(con);
}

static void free_conn(struct connection *con)
{
	close_connection(con, true, true, true);
	spin_lock(&connections_lock);
	hlist_del_rcu(&con->list);
	spin_unlock(&connections_lock);
	if (con->othercon) {
		clean_one_writequeue(con->othercon);
		call_srcu(&connections_srcu, &con->othercon->rcu,
			  connection_release);
	}
	clean_one_writequeue(con);
	call_srcu(&connections_srcu, &con->rcu, connection_release);
}

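/* stop_conn() sets CF_READ_PENDING/CF_WRITE_PENDING on every connection
 * while the work handlers clear them when they run, so after flushing
 * the workqueues the bits are only still set if no work slipped in;
 * retry until that holds for all connections.
 */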
static void work_flush(void)
{
	int ok, idx;
	int i;
	struct connection *con;

	do {
		ok = 1;
		foreach_conn(stop_conn);
		if (recv_workqueue)
			flush_workqueue(recv_workqueue);
		if (send_workqueue)
			flush_workqueue(send_workqueue);
		idx = srcu_read_lock(&connections_srcu);
		for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
			hlist_for_each_entry_rcu(con, &connection_hash[i],
						 list) {
				ok &= test_bit(CF_READ_PENDING, &con->flags);
				ok &= test_bit(CF_WRITE_PENDING, &con->flags);
				if (con->othercon) {
					ok &= test_bit(CF_READ_PENDING,
						       &con->othercon->flags);
					ok &= test_bit(CF_WRITE_PENDING,
						       &con->othercon->flags);
				}
			}
		}
		srcu_read_unlock(&connections_srcu, idx);
	} while (!ok);
}

void dlm_lowcomms_stop(void)
{
	/* Set all the flags to prevent any
	   socket activity.
	*/
	dlm_allow_conn = 0;

	if (recv_workqueue)
		flush_workqueue(recv_workqueue);
	if (send_workqueue)
		flush_workqueue(send_workqueue);

	dlm_close_sock(&listen_con.sock);

	foreach_conn(shutdown_conn);
	work_flush();
	foreach_conn(free_conn);
	work_stop();
	deinit_local();
}

int dlm_lowcomms_start(void)
{
	int error = -EINVAL;
	int i;

	for (i = 0; i < CONN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&connection_hash[i]);

	init_local();
	if (!dlm_local_count) {
		error = -ENOTCONN;
		log_print("no local IP address has been set");
		goto fail;
	}

	INIT_WORK(&listen_con.rwork, process_listen_recv_socket);

	error = work_start();
	if (error)
		goto fail;

	dlm_allow_conn = 1;

	/* Start listening */
	if (dlm_config.ci_protocol == 0)
		error = tcp_listen_for_all();
	else
		error = sctp_listen_for_all(&listen_con);
	if (error)
		goto fail_unlisten;

	return 0;

fail_unlisten:
	dlm_allow_conn = 0;
	dlm_close_sock(&listen_con.sock);
fail:
	return error;
}

void dlm_lowcomms_exit(void)
{
	struct dlm_node_addr *na, *safe;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
		list_del(&na->list);
		while (na->addr_count--)
			kfree(na->addr[na->addr_count]);
		kfree(na);
	}
	spin_unlock(&dlm_node_addrs_spin);
}