xref: /openbmc/linux/fs/dlm/lowcomms.c (revision d10a0b88)
12522fe45SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
26ed7257bSPatrick Caulfield /******************************************************************************
36ed7257bSPatrick Caulfield *******************************************************************************
46ed7257bSPatrick Caulfield **
56ed7257bSPatrick Caulfield **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
65e9ccc37SChristine Caulfield **  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
76ed7257bSPatrick Caulfield **
86ed7257bSPatrick Caulfield **
96ed7257bSPatrick Caulfield *******************************************************************************
106ed7257bSPatrick Caulfield ******************************************************************************/
116ed7257bSPatrick Caulfield 
126ed7257bSPatrick Caulfield /*
136ed7257bSPatrick Caulfield  * lowcomms.c
146ed7257bSPatrick Caulfield  *
156ed7257bSPatrick Caulfield  * This is the "low-level" comms layer.
166ed7257bSPatrick Caulfield  *
176ed7257bSPatrick Caulfield  * It is responsible for sending/receiving messages
186ed7257bSPatrick Caulfield  * from other nodes in the cluster.
196ed7257bSPatrick Caulfield  *
206ed7257bSPatrick Caulfield  * Cluster nodes are referred to by their nodeids. nodeids are
216ed7257bSPatrick Caulfield  * simply 32 bit numbers to the locking module - if they need to
222cf12c0bSJoe Perches  * be expanded for the cluster infrastructure then that is its
236ed7257bSPatrick Caulfield  * responsibility. It is this layer's
246ed7257bSPatrick Caulfield  * responsibility to resolve these into IP addresses or
256ed7257bSPatrick Caulfield  * whatever it needs for inter-node communication.
266ed7257bSPatrick Caulfield  *
276ed7257bSPatrick Caulfield  * The comms level is two kernel threads that deal mainly with
286ed7257bSPatrick Caulfield  * the receiving of messages from other nodes and passing them
296ed7257bSPatrick Caulfield  * up to the mid-level comms layer (which understands the
306ed7257bSPatrick Caulfield  * message format) for execution by the locking core, and
316ed7257bSPatrick Caulfield  * a send thread which does all the setting up of connections
326ed7257bSPatrick Caulfield  * to remote nodes and the sending of data. Threads are not allowed
336ed7257bSPatrick Caulfield  * to send their own data because it may cause them to wait in times
346ed7257bSPatrick Caulfield  * of high load. Also, this way, the sending thread can collect together
356ed7257bSPatrick Caulfield  * messages bound for one node and send them in one block.
366ed7257bSPatrick Caulfield  *
372cf12c0bSJoe Perches  * lowcomms will choose to use either TCP or SCTP as its transport layer
386ed7257bSPatrick Caulfield  * depending on the configuration variable 'protocol'. This should be set
396ed7257bSPatrick Caulfield  * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
406ed7257bSPatrick Caulfield  * cluster-wide mechanism as it must be the same on all nodes of the cluster
416ed7257bSPatrick Caulfield  * for the DLM to function.
426ed7257bSPatrick Caulfield  *
436ed7257bSPatrick Caulfield  */
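/* Within this file the 'protocol' setting is consumed through
 * dlm_config.ci_protocol: dlm_con_init() picks tcp_connect_to_sock() or
 * sctp_connect_to_sock() (plus the TCP-only shutdown and EOF handlers)
 * based on DLM_PROTO_TCP vs DLM_PROTO_SCTP.
 */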
446ed7257bSPatrick Caulfield 
456ed7257bSPatrick Caulfield #include <asm/ioctls.h>
466ed7257bSPatrick Caulfield #include <net/sock.h>
476ed7257bSPatrick Caulfield #include <net/tcp.h>
486ed7257bSPatrick Caulfield #include <linux/pagemap.h>
496ed7257bSPatrick Caulfield #include <linux/file.h>
507a936ce7SMatthias Kaehlcke #include <linux/mutex.h>
516ed7257bSPatrick Caulfield #include <linux/sctp.h>
525a0e3ad6STejun Heo #include <linux/slab.h>
532f2d76ccSBenjamin Poirier #include <net/sctp/sctp.h>
5444ad532bSJoe Perches #include <net/ipv6.h>
556ed7257bSPatrick Caulfield 
566ed7257bSPatrick Caulfield #include "dlm_internal.h"
576ed7257bSPatrick Caulfield #include "lowcomms.h"
586ed7257bSPatrick Caulfield #include "midcomms.h"
596ed7257bSPatrick Caulfield #include "config.h"
606ed7257bSPatrick Caulfield 
616ed7257bSPatrick Caulfield #define NEEDED_RMEM (4*1024*1024)
626ed7257bSPatrick Caulfield 
63f92c8dd7SBob Peterson /* Number of messages to send before rescheduling */
64f92c8dd7SBob Peterson #define MAX_SEND_MSG_COUNT 25
65055923bfSAlexander Aring #define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(10000)
66f92c8dd7SBob Peterson 
676ed7257bSPatrick Caulfield struct connection {
686ed7257bSPatrick Caulfield 	struct socket *sock;	/* NULL if not connected */
696ed7257bSPatrick Caulfield 	uint32_t nodeid;	/* So we know who we are in the list */
706ed7257bSPatrick Caulfield 	struct mutex sock_mutex;
716ed7257bSPatrick Caulfield 	unsigned long flags;
726ed7257bSPatrick Caulfield #define CF_READ_PENDING 1
738a4abb08Stsutomu.owa@toshiba.co.jp #define CF_WRITE_PENDING 2
746ed7257bSPatrick Caulfield #define CF_INIT_PENDING 4
756ed7257bSPatrick Caulfield #define CF_IS_OTHERCON 5
76063c4c99SLars Marowsky-Bree #define CF_CLOSE 6
77b36930ddSDavid Miller #define CF_APP_LIMITED 7
78b2a66629Stsutomu.owa@toshiba.co.jp #define CF_CLOSING 8
79055923bfSAlexander Aring #define CF_SHUTDOWN 9
8019633c7eSAlexander Aring #define CF_CONNECTED 10
81ba868d9dSAlexander Aring #define CF_RECONNECT 11
82ba868d9dSAlexander Aring #define CF_DELAY_CONNECT 12
838aa31cbfSAlexander Aring #define CF_EOF 13
846ed7257bSPatrick Caulfield 	struct list_head writequeue;  /* List of outgoing writequeue_entries */
856ed7257bSPatrick Caulfield 	spinlock_t writequeue_lock;
868aa31cbfSAlexander Aring 	atomic_t writequeue_cnt;
876ed7257bSPatrick Caulfield 	void (*connect_action) (struct connection *);	/* What to do to connect */
88055923bfSAlexander Aring 	void (*shutdown_action)(struct connection *con); /* What to do to shutdown */
898aa31cbfSAlexander Aring 	bool (*eof_condition)(struct connection *con); /* How to check for EOF */
906ed7257bSPatrick Caulfield 	int retries;
916ed7257bSPatrick Caulfield #define MAX_CONNECT_RETRIES 3
925e9ccc37SChristine Caulfield 	struct hlist_node list;
936ed7257bSPatrick Caulfield 	struct connection *othercon;
94ba868d9dSAlexander Aring 	struct connection *sendcon;
956ed7257bSPatrick Caulfield 	struct work_struct rwork; /* Receive workqueue */
966ed7257bSPatrick Caulfield 	struct work_struct swork; /* Send workqueue */
97055923bfSAlexander Aring 	wait_queue_head_t shutdown_wait; /* wait for graceful shutdown */
984798cbbfSAlexander Aring 	unsigned char *rx_buf;
994798cbbfSAlexander Aring 	int rx_buflen;
1004798cbbfSAlexander Aring 	int rx_leftover;
101a47666ebSAlexander Aring 	struct rcu_head rcu;
1026ed7257bSPatrick Caulfield };
1036ed7257bSPatrick Caulfield #define sock2con(x) ((struct connection *)(x)->sk_user_data)
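/* sk_user_data is assigned in add_sock()/add_listen_sock() while holding
 * sk_callback_lock; the socket callbacks below take the same lock before
 * dereferencing it via sock2con().
 */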
1046ed7257bSPatrick Caulfield 
105d11ccd45SAlexander Aring struct listen_connection {
106d11ccd45SAlexander Aring 	struct socket *sock;
107d11ccd45SAlexander Aring 	struct work_struct rwork;
108d11ccd45SAlexander Aring };
109d11ccd45SAlexander Aring 
110f0747ebfSAlexander Aring #define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end)
111f0747ebfSAlexander Aring #define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset)
112f0747ebfSAlexander Aring 
1136ed7257bSPatrick Caulfield /* An entry waiting to be sent */
1146ed7257bSPatrick Caulfield struct writequeue_entry {
1156ed7257bSPatrick Caulfield 	struct list_head list;
1166ed7257bSPatrick Caulfield 	struct page *page;
1176ed7257bSPatrick Caulfield 	int offset;
1186ed7257bSPatrick Caulfield 	int len;
1196ed7257bSPatrick Caulfield 	int end;
1206ed7257bSPatrick Caulfield 	int users;
121706474fbSAlexander Aring 	bool dirty;
1226ed7257bSPatrick Caulfield 	struct connection *con;
1238f2dc78dSAlexander Aring 	struct list_head msgs;
1248f2dc78dSAlexander Aring 	struct kref ref;
1258f2dc78dSAlexander Aring };
1268f2dc78dSAlexander Aring 
1278f2dc78dSAlexander Aring struct dlm_msg {
1288f2dc78dSAlexander Aring 	struct writequeue_entry *entry;
1292874d1a6SAlexander Aring 	struct dlm_msg *orig_msg;
1302874d1a6SAlexander Aring 	bool retransmit;
1318f2dc78dSAlexander Aring 	void *ppc;
1328f2dc78dSAlexander Aring 	int len;
1338f2dc78dSAlexander Aring 	int idx; /* new()/commit() idx exchange */
1348f2dc78dSAlexander Aring 
1358f2dc78dSAlexander Aring 	struct list_head list;
1368f2dc78dSAlexander Aring 	struct kref ref;
1376ed7257bSPatrick Caulfield };
1386ed7257bSPatrick Caulfield 
13936b71a8bSDavid Teigland struct dlm_node_addr {
14036b71a8bSDavid Teigland 	struct list_head list;
14136b71a8bSDavid Teigland 	int nodeid;
142e125fbebSAlexander Aring 	int mark;
14336b71a8bSDavid Teigland 	int addr_count;
14498e1b60eSMike Christie 	int curr_addr_index;
14536b71a8bSDavid Teigland 	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
14636b71a8bSDavid Teigland };
14736b71a8bSDavid Teigland 
148cc661fc9SBob Peterson static struct listen_sock_callbacks {
149cc661fc9SBob Peterson 	void (*sk_error_report)(struct sock *);
150cc661fc9SBob Peterson 	void (*sk_data_ready)(struct sock *);
151cc661fc9SBob Peterson 	void (*sk_state_change)(struct sock *);
152cc661fc9SBob Peterson 	void (*sk_write_space)(struct sock *);
153cc661fc9SBob Peterson } listen_sock;
154cc661fc9SBob Peterson 
15536b71a8bSDavid Teigland static LIST_HEAD(dlm_node_addrs);
15636b71a8bSDavid Teigland static DEFINE_SPINLOCK(dlm_node_addrs_spin);
15736b71a8bSDavid Teigland 
158d11ccd45SAlexander Aring static struct listen_connection listen_con;
1596ed7257bSPatrick Caulfield static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
1606ed7257bSPatrick Caulfield static int dlm_local_count;
16151746163SAlexander Aring int dlm_allow_conn;
1626ed7257bSPatrick Caulfield 
1636ed7257bSPatrick Caulfield /* Work queues */
1646ed7257bSPatrick Caulfield static struct workqueue_struct *recv_workqueue;
1656ed7257bSPatrick Caulfield static struct workqueue_struct *send_workqueue;
1666ed7257bSPatrick Caulfield 
1675e9ccc37SChristine Caulfield static struct hlist_head connection_hash[CONN_HASH_SIZE];
168a47666ebSAlexander Aring static DEFINE_SPINLOCK(connections_lock);
169a47666ebSAlexander Aring DEFINE_STATIC_SRCU(connections_srcu);
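/* The connection hash is modified under connections_lock (see nodeid2con())
 * and walked locklessly by readers holding connections_srcu, as in
 * dlm_lowcomms_connect_node() and accept_from_sock().
 */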
1706ed7257bSPatrick Caulfield 
1716ed7257bSPatrick Caulfield static void process_recv_sockets(struct work_struct *work);
1726ed7257bSPatrick Caulfield static void process_send_sockets(struct work_struct *work);
1736ed7257bSPatrick Caulfield 
1740672c3c2SAlexander Aring static void sctp_connect_to_sock(struct connection *con);
1750672c3c2SAlexander Aring static void tcp_connect_to_sock(struct connection *con);
17642873c90SAlexander Aring static void dlm_tcp_shutdown(struct connection *con);
1775e9ccc37SChristine Caulfield 
178b38bc9c2SAlexander Aring static struct connection *__find_con(int nodeid, int r)
1795e9ccc37SChristine Caulfield {
1805e9ccc37SChristine Caulfield 	struct connection *con;
1815e9ccc37SChristine Caulfield 
182a47666ebSAlexander Aring 	hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
183b38bc9c2SAlexander Aring 		if (con->nodeid == nodeid)
1845e9ccc37SChristine Caulfield 			return con;
1855e9ccc37SChristine Caulfield 	}
186a47666ebSAlexander Aring 
1875e9ccc37SChristine Caulfield 	return NULL;
1885e9ccc37SChristine Caulfield }
1895e9ccc37SChristine Caulfield 
1908aa31cbfSAlexander Aring static bool tcp_eof_condition(struct connection *con)
1918aa31cbfSAlexander Aring {
1928aa31cbfSAlexander Aring 	return atomic_read(&con->writequeue_cnt);
1938aa31cbfSAlexander Aring }
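/* For TCP, an EOF from the peer is not acted on immediately while
 * writequeue entries are still pending: receive_from_sock() uses this
 * predicate to set CF_EOF and skip the immediate close so the queued data
 * can still be sent.
 */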
1948aa31cbfSAlexander Aring 
1956cde210aSAlexander Aring static int dlm_con_init(struct connection *con, int nodeid)
1966ed7257bSPatrick Caulfield {
1974798cbbfSAlexander Aring 	con->rx_buflen = dlm_config.ci_buffer_size;
1984798cbbfSAlexander Aring 	con->rx_buf = kmalloc(con->rx_buflen, GFP_NOFS);
1996cde210aSAlexander Aring 	if (!con->rx_buf)
2006cde210aSAlexander Aring 		return -ENOMEM;
2014798cbbfSAlexander Aring 
2026ed7257bSPatrick Caulfield 	con->nodeid = nodeid;
2036ed7257bSPatrick Caulfield 	mutex_init(&con->sock_mutex);
2046ed7257bSPatrick Caulfield 	INIT_LIST_HEAD(&con->writequeue);
2056ed7257bSPatrick Caulfield 	spin_lock_init(&con->writequeue_lock);
2068aa31cbfSAlexander Aring 	atomic_set(&con->writequeue_cnt, 0);
2076ed7257bSPatrick Caulfield 	INIT_WORK(&con->swork, process_send_sockets);
2086ed7257bSPatrick Caulfield 	INIT_WORK(&con->rwork, process_recv_sockets);
209055923bfSAlexander Aring 	init_waitqueue_head(&con->shutdown_wait);
2106ed7257bSPatrick Caulfield 
211ac7d5d03SAlexander Aring 	switch (dlm_config.ci_protocol) {
212ac7d5d03SAlexander Aring 	case DLM_PROTO_TCP:
2130672c3c2SAlexander Aring 		con->connect_action = tcp_connect_to_sock;
21442873c90SAlexander Aring 		con->shutdown_action = dlm_tcp_shutdown;
2158aa31cbfSAlexander Aring 		con->eof_condition = tcp_eof_condition;
216ac7d5d03SAlexander Aring 		break;
217ac7d5d03SAlexander Aring 	case DLM_PROTO_SCTP:
2180672c3c2SAlexander Aring 		con->connect_action = sctp_connect_to_sock;
219ac7d5d03SAlexander Aring 		break;
220ac7d5d03SAlexander Aring 	default:
221ac7d5d03SAlexander Aring 		kfree(con->rx_buf);
222ac7d5d03SAlexander Aring 		return -EINVAL;
22342873c90SAlexander Aring 	}
2246ed7257bSPatrick Caulfield 
2256cde210aSAlexander Aring 	return 0;
2266cde210aSAlexander Aring }
2276cde210aSAlexander Aring 
2286cde210aSAlexander Aring /*
2296cde210aSAlexander Aring  * If 'alloc' is zero then we don't attempt to create a new
2306cde210aSAlexander Aring  * connection structure for this node.
2316cde210aSAlexander Aring  */
2326cde210aSAlexander Aring static struct connection *nodeid2con(int nodeid, gfp_t alloc)
2336cde210aSAlexander Aring {
2346cde210aSAlexander Aring 	struct connection *con, *tmp;
2356cde210aSAlexander Aring 	int r, ret;
2366cde210aSAlexander Aring 
237b38bc9c2SAlexander Aring 	r = nodeid_hash(nodeid);
238b38bc9c2SAlexander Aring 	con = __find_con(nodeid, r);
2396cde210aSAlexander Aring 	if (con || !alloc)
2406cde210aSAlexander Aring 		return con;
2416cde210aSAlexander Aring 
2426cde210aSAlexander Aring 	con = kzalloc(sizeof(*con), alloc);
2436cde210aSAlexander Aring 	if (!con)
2446cde210aSAlexander Aring 		return NULL;
2456cde210aSAlexander Aring 
2466cde210aSAlexander Aring 	ret = dlm_con_init(con, nodeid);
2476cde210aSAlexander Aring 	if (ret) {
2486cde210aSAlexander Aring 		kfree(con);
2496cde210aSAlexander Aring 		return NULL;
2506cde210aSAlexander Aring 	}
2516cde210aSAlexander Aring 
252a47666ebSAlexander Aring 	spin_lock(&connections_lock);
2534f2b30fdSAlexander Aring 	/* Because multiple workqueues/threads call this function it can
2544f2b30fdSAlexander Aring 	 * race on multiple CPUs. Instead of locking the hot path __find_con()
2554f2b30fdSAlexander Aring 	 * we just check again, in the rare case of a recently added node,
2564f2b30fdSAlexander Aring 	 * under protection of connections_lock. If the node shows up we
2574f2b30fdSAlexander Aring 	 * abort our connection creation and return the existing connection.
2584f2b30fdSAlexander Aring 	 */
259b38bc9c2SAlexander Aring 	tmp = __find_con(nodeid, r);
2604f2b30fdSAlexander Aring 	if (tmp) {
2614f2b30fdSAlexander Aring 		spin_unlock(&connections_lock);
2624f2b30fdSAlexander Aring 		kfree(con->rx_buf);
2634f2b30fdSAlexander Aring 		kfree(con);
2644f2b30fdSAlexander Aring 		return tmp;
2654f2b30fdSAlexander Aring 	}
2664f2b30fdSAlexander Aring 
267a47666ebSAlexander Aring 	hlist_add_head_rcu(&con->list, &connection_hash[r]);
268a47666ebSAlexander Aring 	spin_unlock(&connections_lock);
269a47666ebSAlexander Aring 
2706ed7257bSPatrick Caulfield 	return con;
2716ed7257bSPatrick Caulfield }
2726ed7257bSPatrick Caulfield 
2735e9ccc37SChristine Caulfield /* Loop round all connections */
2745e9ccc37SChristine Caulfield static void foreach_conn(void (*conn_func)(struct connection *c))
2755e9ccc37SChristine Caulfield {
276b38bc9c2SAlexander Aring 	int i;
2775e9ccc37SChristine Caulfield 	struct connection *con;
2785e9ccc37SChristine Caulfield 
2795e9ccc37SChristine Caulfield 	for (i = 0; i < CONN_HASH_SIZE; i++) {
280a47666ebSAlexander Aring 		hlist_for_each_entry_rcu(con, &connection_hash[i], list)
2815e9ccc37SChristine Caulfield 			conn_func(con);
2825e9ccc37SChristine Caulfield 	}
2836ed7257bSPatrick Caulfield }
2846ed7257bSPatrick Caulfield 
28536b71a8bSDavid Teigland static struct dlm_node_addr *find_node_addr(int nodeid)
2866ed7257bSPatrick Caulfield {
28736b71a8bSDavid Teigland 	struct dlm_node_addr *na;
28836b71a8bSDavid Teigland 
28936b71a8bSDavid Teigland 	list_for_each_entry(na, &dlm_node_addrs, list) {
29036b71a8bSDavid Teigland 		if (na->nodeid == nodeid)
29136b71a8bSDavid Teigland 			return na;
29236b71a8bSDavid Teigland 	}
29336b71a8bSDavid Teigland 	return NULL;
29436b71a8bSDavid Teigland }
29536b71a8bSDavid Teigland 
29640c6b83eSAlexander Aring static int addr_compare(const struct sockaddr_storage *x,
29740c6b83eSAlexander Aring 			const struct sockaddr_storage *y)
29836b71a8bSDavid Teigland {
29936b71a8bSDavid Teigland 	switch (x->ss_family) {
30036b71a8bSDavid Teigland 	case AF_INET: {
30136b71a8bSDavid Teigland 		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
30236b71a8bSDavid Teigland 		struct sockaddr_in *siny = (struct sockaddr_in *)y;
30336b71a8bSDavid Teigland 		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
30436b71a8bSDavid Teigland 			return 0;
30536b71a8bSDavid Teigland 		if (sinx->sin_port != siny->sin_port)
30636b71a8bSDavid Teigland 			return 0;
30736b71a8bSDavid Teigland 		break;
30836b71a8bSDavid Teigland 	}
30936b71a8bSDavid Teigland 	case AF_INET6: {
31036b71a8bSDavid Teigland 		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
31136b71a8bSDavid Teigland 		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
31236b71a8bSDavid Teigland 		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
31336b71a8bSDavid Teigland 			return 0;
31436b71a8bSDavid Teigland 		if (sinx->sin6_port != siny->sin6_port)
31536b71a8bSDavid Teigland 			return 0;
31636b71a8bSDavid Teigland 		break;
31736b71a8bSDavid Teigland 	}
31836b71a8bSDavid Teigland 	default:
31936b71a8bSDavid Teigland 		return 0;
32036b71a8bSDavid Teigland 	}
32136b71a8bSDavid Teigland 	return 1;
32236b71a8bSDavid Teigland }
32336b71a8bSDavid Teigland 
32436b71a8bSDavid Teigland static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
325e125fbebSAlexander Aring 			  struct sockaddr *sa_out, bool try_new_addr,
326e125fbebSAlexander Aring 			  unsigned int *mark)
32736b71a8bSDavid Teigland {
32836b71a8bSDavid Teigland 	struct sockaddr_storage sas;
32936b71a8bSDavid Teigland 	struct dlm_node_addr *na;
3306ed7257bSPatrick Caulfield 
3316ed7257bSPatrick Caulfield 	if (!dlm_local_count)
3326ed7257bSPatrick Caulfield 		return -1;
3336ed7257bSPatrick Caulfield 
33436b71a8bSDavid Teigland 	spin_lock(&dlm_node_addrs_spin);
33536b71a8bSDavid Teigland 	na = find_node_addr(nodeid);
33698e1b60eSMike Christie 	if (na && na->addr_count) {
337ee44b4bcSMarcelo Ricardo Leitner 		memcpy(&sas, na->addr[na->curr_addr_index],
338ee44b4bcSMarcelo Ricardo Leitner 		       sizeof(struct sockaddr_storage));
339ee44b4bcSMarcelo Ricardo Leitner 
34098e1b60eSMike Christie 		if (try_new_addr) {
34198e1b60eSMike Christie 			na->curr_addr_index++;
34298e1b60eSMike Christie 			if (na->curr_addr_index == na->addr_count)
34398e1b60eSMike Christie 				na->curr_addr_index = 0;
34498e1b60eSMike Christie 		}
34598e1b60eSMike Christie 	}
34636b71a8bSDavid Teigland 	spin_unlock(&dlm_node_addrs_spin);
34736b71a8bSDavid Teigland 
34836b71a8bSDavid Teigland 	if (!na)
34936b71a8bSDavid Teigland 		return -EEXIST;
35036b71a8bSDavid Teigland 
35136b71a8bSDavid Teigland 	if (!na->addr_count)
35236b71a8bSDavid Teigland 		return -ENOENT;
35336b71a8bSDavid Teigland 
354e125fbebSAlexander Aring 	*mark = na->mark;
355e125fbebSAlexander Aring 
35636b71a8bSDavid Teigland 	if (sas_out)
35736b71a8bSDavid Teigland 		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
35836b71a8bSDavid Teigland 
35936b71a8bSDavid Teigland 	if (!sa_out)
36036b71a8bSDavid Teigland 		return 0;
3616ed7257bSPatrick Caulfield 
3626ed7257bSPatrick Caulfield 	if (dlm_local_addr[0]->ss_family == AF_INET) {
36336b71a8bSDavid Teigland 		struct sockaddr_in *in4  = (struct sockaddr_in *) &sas;
36436b71a8bSDavid Teigland 		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
3656ed7257bSPatrick Caulfield 		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
3666ed7257bSPatrick Caulfield 	} else {
36736b71a8bSDavid Teigland 		struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &sas;
36836b71a8bSDavid Teigland 		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
3694e3fd7a0SAlexey Dobriyan 		ret6->sin6_addr = in6->sin6_addr;
3706ed7257bSPatrick Caulfield 	}
3716ed7257bSPatrick Caulfield 
3726ed7257bSPatrick Caulfield 	return 0;
3736ed7257bSPatrick Caulfield }
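/* Illustrative sketch of how a connect path can resolve a peer before
 * creating a socket (names as used in this file):
 *
 *	struct sockaddr_storage addr;
 *	unsigned int mark;
 *
 *	if (nodeid_to_addr(con->nodeid, &addr, NULL, false, &mark))
 *		return;		 peer address not (yet) configured
 */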
3746ed7257bSPatrick Caulfield 
375e125fbebSAlexander Aring static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
376e125fbebSAlexander Aring 			  unsigned int *mark)
37736b71a8bSDavid Teigland {
37836b71a8bSDavid Teigland 	struct dlm_node_addr *na;
37936b71a8bSDavid Teigland 	int rv = -EEXIST;
38098e1b60eSMike Christie 	int addr_i;
38136b71a8bSDavid Teigland 
38236b71a8bSDavid Teigland 	spin_lock(&dlm_node_addrs_spin);
38336b71a8bSDavid Teigland 	list_for_each_entry(na, &dlm_node_addrs, list) {
38436b71a8bSDavid Teigland 		if (!na->addr_count)
38536b71a8bSDavid Teigland 			continue;
38636b71a8bSDavid Teigland 
38798e1b60eSMike Christie 		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
38898e1b60eSMike Christie 			if (addr_compare(na->addr[addr_i], addr)) {
38936b71a8bSDavid Teigland 				*nodeid = na->nodeid;
390e125fbebSAlexander Aring 				*mark = na->mark;
39136b71a8bSDavid Teigland 				rv = 0;
39298e1b60eSMike Christie 				goto unlock;
39336b71a8bSDavid Teigland 			}
39498e1b60eSMike Christie 		}
39598e1b60eSMike Christie 	}
39698e1b60eSMike Christie unlock:
39736b71a8bSDavid Teigland 	spin_unlock(&dlm_node_addrs_spin);
39836b71a8bSDavid Teigland 	return rv;
39936b71a8bSDavid Teigland }
40036b71a8bSDavid Teigland 
4014f19d071SAlexander Aring /* caller needs to hold the dlm_node_addrs_spin lock */
4024f19d071SAlexander Aring static bool dlm_lowcomms_na_has_addr(const struct dlm_node_addr *na,
4034f19d071SAlexander Aring 				     const struct sockaddr_storage *addr)
4044f19d071SAlexander Aring {
4054f19d071SAlexander Aring 	int i;
4064f19d071SAlexander Aring 
4074f19d071SAlexander Aring 	for (i = 0; i < na->addr_count; i++) {
4084f19d071SAlexander Aring 		if (addr_compare(na->addr[i], addr))
4094f19d071SAlexander Aring 			return true;
4104f19d071SAlexander Aring 	}
4114f19d071SAlexander Aring 
4124f19d071SAlexander Aring 	return false;
4134f19d071SAlexander Aring }
4144f19d071SAlexander Aring 
41536b71a8bSDavid Teigland int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
41636b71a8bSDavid Teigland {
41736b71a8bSDavid Teigland 	struct sockaddr_storage *new_addr;
41836b71a8bSDavid Teigland 	struct dlm_node_addr *new_node, *na;
4194f19d071SAlexander Aring 	bool ret;
42036b71a8bSDavid Teigland 
42136b71a8bSDavid Teigland 	new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
42236b71a8bSDavid Teigland 	if (!new_node)
42336b71a8bSDavid Teigland 		return -ENOMEM;
42436b71a8bSDavid Teigland 
42536b71a8bSDavid Teigland 	new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
42636b71a8bSDavid Teigland 	if (!new_addr) {
42736b71a8bSDavid Teigland 		kfree(new_node);
42836b71a8bSDavid Teigland 		return -ENOMEM;
42936b71a8bSDavid Teigland 	}
43036b71a8bSDavid Teigland 
43136b71a8bSDavid Teigland 	memcpy(new_addr, addr, len);
43236b71a8bSDavid Teigland 
43336b71a8bSDavid Teigland 	spin_lock(&dlm_node_addrs_spin);
43436b71a8bSDavid Teigland 	na = find_node_addr(nodeid);
43536b71a8bSDavid Teigland 	if (!na) {
43636b71a8bSDavid Teigland 		new_node->nodeid = nodeid;
43736b71a8bSDavid Teigland 		new_node->addr[0] = new_addr;
43836b71a8bSDavid Teigland 		new_node->addr_count = 1;
439e125fbebSAlexander Aring 		new_node->mark = dlm_config.ci_mark;
44036b71a8bSDavid Teigland 		list_add(&new_node->list, &dlm_node_addrs);
44136b71a8bSDavid Teigland 		spin_unlock(&dlm_node_addrs_spin);
44236b71a8bSDavid Teigland 		return 0;
44336b71a8bSDavid Teigland 	}
44436b71a8bSDavid Teigland 
4454f19d071SAlexander Aring 	ret = dlm_lowcomms_na_has_addr(na, addr);
4464f19d071SAlexander Aring 	if (ret) {
4474f19d071SAlexander Aring 		spin_unlock(&dlm_node_addrs_spin);
4484f19d071SAlexander Aring 		kfree(new_addr);
4494f19d071SAlexander Aring 		kfree(new_node);
4504f19d071SAlexander Aring 		return -EEXIST;
4514f19d071SAlexander Aring 	}
4524f19d071SAlexander Aring 
45336b71a8bSDavid Teigland 	if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
45436b71a8bSDavid Teigland 		spin_unlock(&dlm_node_addrs_spin);
45536b71a8bSDavid Teigland 		kfree(new_addr);
45636b71a8bSDavid Teigland 		kfree(new_node);
45736b71a8bSDavid Teigland 		return -ENOSPC;
45836b71a8bSDavid Teigland 	}
45936b71a8bSDavid Teigland 
46036b71a8bSDavid Teigland 	na->addr[na->addr_count++] = new_addr;
46136b71a8bSDavid Teigland 	spin_unlock(&dlm_node_addrs_spin);
46236b71a8bSDavid Teigland 	kfree(new_node);
46336b71a8bSDavid Teigland 	return 0;
46436b71a8bSDavid Teigland }
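/* dlm_lowcomms_addr() is called once per configured address of a node: the
 * first call creates the dlm_node_addr entry with the default mark, later
 * calls append further addresses (duplicates give -EEXIST, more than
 * DLM_MAX_ADDR_COUNT gives -ENOSPC).
 */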
46536b71a8bSDavid Teigland 
4666ed7257bSPatrick Caulfield /* Data available on socket, or the listening socket received a connect */
467676d2369SDavid S. Miller static void lowcomms_data_ready(struct sock *sk)
4686ed7257bSPatrick Caulfield {
46993eaadebStsutomu.owa@toshiba.co.jp 	struct connection *con;
47093eaadebStsutomu.owa@toshiba.co.jp 
47193eaadebStsutomu.owa@toshiba.co.jp 	read_lock_bh(&sk->sk_callback_lock);
47293eaadebStsutomu.owa@toshiba.co.jp 	con = sock2con(sk);
473afb853fbSPatrick Caulfield 	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
4746ed7257bSPatrick Caulfield 		queue_work(recv_workqueue, &con->rwork);
47593eaadebStsutomu.owa@toshiba.co.jp 	read_unlock_bh(&sk->sk_callback_lock);
4766ed7257bSPatrick Caulfield }
4776ed7257bSPatrick Caulfield 
478d11ccd45SAlexander Aring static void lowcomms_listen_data_ready(struct sock *sk)
479d11ccd45SAlexander Aring {
4809a4139a7SAlexander Aring 	if (!dlm_allow_conn)
4819a4139a7SAlexander Aring 		return;
4829a4139a7SAlexander Aring 
483d11ccd45SAlexander Aring 	queue_work(recv_workqueue, &listen_con.rwork);
484d11ccd45SAlexander Aring }
485d11ccd45SAlexander Aring 
4866ed7257bSPatrick Caulfield static void lowcomms_write_space(struct sock *sk)
4876ed7257bSPatrick Caulfield {
48893eaadebStsutomu.owa@toshiba.co.jp 	struct connection *con;
4896ed7257bSPatrick Caulfield 
49093eaadebStsutomu.owa@toshiba.co.jp 	read_lock_bh(&sk->sk_callback_lock);
49193eaadebStsutomu.owa@toshiba.co.jp 	con = sock2con(sk);
492b36930ddSDavid Miller 	if (!con)
49393eaadebStsutomu.owa@toshiba.co.jp 		goto out;
494b36930ddSDavid Miller 
49519633c7eSAlexander Aring 	if (!test_and_set_bit(CF_CONNECTED, &con->flags)) {
49619633c7eSAlexander Aring 		log_print("successfully connected to node %d", con->nodeid);
49719633c7eSAlexander Aring 		queue_work(send_workqueue, &con->swork);
49819633c7eSAlexander Aring 		goto out;
49919633c7eSAlexander Aring 	}
50019633c7eSAlexander Aring 
501b36930ddSDavid Miller 	clear_bit(SOCK_NOSPACE, &con->sock->flags);
502b36930ddSDavid Miller 
503b36930ddSDavid Miller 	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
504b36930ddSDavid Miller 		con->sock->sk->sk_write_pending--;
5059cd3e072SEric Dumazet 		clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
506b36930ddSDavid Miller 	}
507b36930ddSDavid Miller 
5086ed7257bSPatrick Caulfield 	queue_work(send_workqueue, &con->swork);
50993eaadebStsutomu.owa@toshiba.co.jp out:
51093eaadebStsutomu.owa@toshiba.co.jp 	read_unlock_bh(&sk->sk_callback_lock);
5116ed7257bSPatrick Caulfield }
5126ed7257bSPatrick Caulfield 
5136ed7257bSPatrick Caulfield static inline void lowcomms_connect_sock(struct connection *con)
5146ed7257bSPatrick Caulfield {
515063c4c99SLars Marowsky-Bree 	if (test_bit(CF_CLOSE, &con->flags))
516063c4c99SLars Marowsky-Bree 		return;
5176ed7257bSPatrick Caulfield 	queue_work(send_workqueue, &con->swork);
51861d9102bSBob Peterson 	cond_resched();
5196ed7257bSPatrick Caulfield }
5206ed7257bSPatrick Caulfield 
5216ed7257bSPatrick Caulfield static void lowcomms_state_change(struct sock *sk)
5226ed7257bSPatrick Caulfield {
523ee44b4bcSMarcelo Ricardo Leitner 	/* SCTP layer is not calling sk_data_ready when the connection
524ee44b4bcSMarcelo Ricardo Leitner 	 * is done, so we catch the signal through here. Also, it
525ee44b4bcSMarcelo Ricardo Leitner 	 * doesn't switch socket state when entering shutdown, so we
526ee44b4bcSMarcelo Ricardo Leitner 	 * skip the write in that case.
527ee44b4bcSMarcelo Ricardo Leitner 	 */
528ee44b4bcSMarcelo Ricardo Leitner 	if (sk->sk_shutdown) {
529ee44b4bcSMarcelo Ricardo Leitner 		if (sk->sk_shutdown == RCV_SHUTDOWN)
530ee44b4bcSMarcelo Ricardo Leitner 			lowcomms_data_ready(sk);
531ee44b4bcSMarcelo Ricardo Leitner 	} else if (sk->sk_state == TCP_ESTABLISHED) {
5326ed7257bSPatrick Caulfield 		lowcomms_write_space(sk);
5336ed7257bSPatrick Caulfield 	}
534ee44b4bcSMarcelo Ricardo Leitner }
5356ed7257bSPatrick Caulfield 
536391fbdc5SChristine Caulfield int dlm_lowcomms_connect_node(int nodeid)
537391fbdc5SChristine Caulfield {
538391fbdc5SChristine Caulfield 	struct connection *con;
539b38bc9c2SAlexander Aring 	int idx;
540391fbdc5SChristine Caulfield 
541391fbdc5SChristine Caulfield 	if (nodeid == dlm_our_nodeid())
542391fbdc5SChristine Caulfield 		return 0;
543391fbdc5SChristine Caulfield 
544b38bc9c2SAlexander Aring 	idx = srcu_read_lock(&connections_srcu);
545391fbdc5SChristine Caulfield 	con = nodeid2con(nodeid, GFP_NOFS);
546b38bc9c2SAlexander Aring 	if (!con) {
547b38bc9c2SAlexander Aring 		srcu_read_unlock(&connections_srcu, idx);
548391fbdc5SChristine Caulfield 		return -ENOMEM;
549b38bc9c2SAlexander Aring 	}
550b38bc9c2SAlexander Aring 
551391fbdc5SChristine Caulfield 	lowcomms_connect_sock(con);
552b38bc9c2SAlexander Aring 	srcu_read_unlock(&connections_srcu, idx);
553b38bc9c2SAlexander Aring 
554391fbdc5SChristine Caulfield 	return 0;
555391fbdc5SChristine Caulfield }
556391fbdc5SChristine Caulfield 
557e125fbebSAlexander Aring int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
558e125fbebSAlexander Aring {
559e125fbebSAlexander Aring 	struct dlm_node_addr *na;
560e125fbebSAlexander Aring 
561e125fbebSAlexander Aring 	spin_lock(&dlm_node_addrs_spin);
562e125fbebSAlexander Aring 	na = find_node_addr(nodeid);
563e125fbebSAlexander Aring 	if (!na) {
564e125fbebSAlexander Aring 		spin_unlock(&dlm_node_addrs_spin);
565e125fbebSAlexander Aring 		return -ENOENT;
566e125fbebSAlexander Aring 	}
567e125fbebSAlexander Aring 
568e125fbebSAlexander Aring 	na->mark = mark;
569e125fbebSAlexander Aring 	spin_unlock(&dlm_node_addrs_spin);
570e125fbebSAlexander Aring 
571e125fbebSAlexander Aring 	return 0;
572e125fbebSAlexander Aring }
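/* The per-node mark is kept in dlm_node_addr and applied to sockets with
 * sock_set_mark() when they are created or accepted, so updating it here
 * only affects connections established afterwards.
 */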
573e125fbebSAlexander Aring 
574b3a5bbfdSBob Peterson static void lowcomms_error_report(struct sock *sk)
575b3a5bbfdSBob Peterson {
576b81171cbSBob Peterson 	struct connection *con;
577b3a5bbfdSBob Peterson 	struct sockaddr_storage saddr;
578b81171cbSBob Peterson 	void (*orig_report)(struct sock *) = NULL;
579b3a5bbfdSBob Peterson 
580b81171cbSBob Peterson 	read_lock_bh(&sk->sk_callback_lock);
581b81171cbSBob Peterson 	con = sock2con(sk);
582b81171cbSBob Peterson 	if (con == NULL)
583b81171cbSBob Peterson 		goto out;
584b81171cbSBob Peterson 
585cc661fc9SBob Peterson 	orig_report = listen_sock.sk_error_report;
5861a31833dSBob Peterson 	if (con->sock == NULL ||
5879b2c45d4SDenys Vlasenko 	    kernel_getpeername(con->sock, (struct sockaddr *)&saddr) < 0) {
588b3a5bbfdSBob Peterson 		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
589b3a5bbfdSBob Peterson 				   "sending to node %d, port %d, "
590b3a5bbfdSBob Peterson 				   "sk_err=%d/%d\n", dlm_our_nodeid(),
591b3a5bbfdSBob Peterson 				   con->nodeid, dlm_config.ci_tcp_port,
592b3a5bbfdSBob Peterson 				   sk->sk_err, sk->sk_err_soft);
593b3a5bbfdSBob Peterson 	} else if (saddr.ss_family == AF_INET) {
594b3a5bbfdSBob Peterson 		struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;
595b3a5bbfdSBob Peterson 
596b3a5bbfdSBob Peterson 		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
597b3a5bbfdSBob Peterson 				   "sending to node %d at %pI4, port %d, "
598b3a5bbfdSBob Peterson 				   "sk_err=%d/%d\n", dlm_our_nodeid(),
599b3a5bbfdSBob Peterson 				   con->nodeid, &sin4->sin_addr.s_addr,
600b3a5bbfdSBob Peterson 				   dlm_config.ci_tcp_port, sk->sk_err,
601b3a5bbfdSBob Peterson 				   sk->sk_err_soft);
602b3a5bbfdSBob Peterson 	} else {
603b3a5bbfdSBob Peterson 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;
604b3a5bbfdSBob Peterson 
605b3a5bbfdSBob Peterson 		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
606b3a5bbfdSBob Peterson 				   "sending to node %d at %u.%u.%u.%u, "
607b3a5bbfdSBob Peterson 				   "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
608b3a5bbfdSBob Peterson 				   con->nodeid, sin6->sin6_addr.s6_addr32[0],
609b3a5bbfdSBob Peterson 				   sin6->sin6_addr.s6_addr32[1],
610b3a5bbfdSBob Peterson 				   sin6->sin6_addr.s6_addr32[2],
611b3a5bbfdSBob Peterson 				   sin6->sin6_addr.s6_addr32[3],
612b3a5bbfdSBob Peterson 				   dlm_config.ci_tcp_port, sk->sk_err,
613b3a5bbfdSBob Peterson 				   sk->sk_err_soft);
614b3a5bbfdSBob Peterson 	}
615ba868d9dSAlexander Aring 
616ba868d9dSAlexander Aring 	/* from here on, operate only on the sending connection (sendcon) */
617ba868d9dSAlexander Aring 	if (test_bit(CF_IS_OTHERCON, &con->flags))
618ba868d9dSAlexander Aring 		con = con->sendcon;
619ba868d9dSAlexander Aring 
620ba868d9dSAlexander Aring 	switch (sk->sk_err) {
621ba868d9dSAlexander Aring 	case ECONNREFUSED:
622ba868d9dSAlexander Aring 		set_bit(CF_DELAY_CONNECT, &con->flags);
623ba868d9dSAlexander Aring 		break;
624ba868d9dSAlexander Aring 	default:
625ba868d9dSAlexander Aring 		break;
626ba868d9dSAlexander Aring 	}
627ba868d9dSAlexander Aring 
628ba868d9dSAlexander Aring 	if (!test_and_set_bit(CF_RECONNECT, &con->flags))
629ba868d9dSAlexander Aring 		queue_work(send_workqueue, &con->swork);
630ba868d9dSAlexander Aring 
631b81171cbSBob Peterson out:
632b81171cbSBob Peterson 	read_unlock_bh(&sk->sk_callback_lock);
633b81171cbSBob Peterson 	if (orig_report)
634b81171cbSBob Peterson 		orig_report(sk);
635b81171cbSBob Peterson }
636b81171cbSBob Peterson 
637b81171cbSBob Peterson /* Note: sk_callback_lock must be locked before calling this function. */
638cc661fc9SBob Peterson static void save_listen_callbacks(struct socket *sock)
639b81171cbSBob Peterson {
640cc661fc9SBob Peterson 	struct sock *sk = sock->sk;
641cc661fc9SBob Peterson 
642cc661fc9SBob Peterson 	listen_sock.sk_data_ready = sk->sk_data_ready;
643cc661fc9SBob Peterson 	listen_sock.sk_state_change = sk->sk_state_change;
644cc661fc9SBob Peterson 	listen_sock.sk_write_space = sk->sk_write_space;
645cc661fc9SBob Peterson 	listen_sock.sk_error_report = sk->sk_error_report;
646b81171cbSBob Peterson }
647b81171cbSBob Peterson 
648cc661fc9SBob Peterson static void restore_callbacks(struct socket *sock)
649b81171cbSBob Peterson {
650cc661fc9SBob Peterson 	struct sock *sk = sock->sk;
651cc661fc9SBob Peterson 
652b81171cbSBob Peterson 	write_lock_bh(&sk->sk_callback_lock);
653b81171cbSBob Peterson 	sk->sk_user_data = NULL;
654cc661fc9SBob Peterson 	sk->sk_data_ready = listen_sock.sk_data_ready;
655cc661fc9SBob Peterson 	sk->sk_state_change = listen_sock.sk_state_change;
656cc661fc9SBob Peterson 	sk->sk_write_space = listen_sock.sk_write_space;
657cc661fc9SBob Peterson 	sk->sk_error_report = listen_sock.sk_error_report;
658b81171cbSBob Peterson 	write_unlock_bh(&sk->sk_callback_lock);
659b3a5bbfdSBob Peterson }
660b3a5bbfdSBob Peterson 
661d11ccd45SAlexander Aring static void add_listen_sock(struct socket *sock, struct listen_connection *con)
662d11ccd45SAlexander Aring {
663d11ccd45SAlexander Aring 	struct sock *sk = sock->sk;
664d11ccd45SAlexander Aring 
665d11ccd45SAlexander Aring 	write_lock_bh(&sk->sk_callback_lock);
666d11ccd45SAlexander Aring 	save_listen_callbacks(sock);
667d11ccd45SAlexander Aring 	con->sock = sock;
668d11ccd45SAlexander Aring 
669d11ccd45SAlexander Aring 	sk->sk_user_data = con;
670d11ccd45SAlexander Aring 	sk->sk_allocation = GFP_NOFS;
671d11ccd45SAlexander Aring 	/* Install a data_ready callback */
672d11ccd45SAlexander Aring 	sk->sk_data_ready = lowcomms_listen_data_ready;
673d11ccd45SAlexander Aring 	write_unlock_bh(&sk->sk_callback_lock);
674d11ccd45SAlexander Aring }
675d11ccd45SAlexander Aring 
6766ed7257bSPatrick Caulfield /* Make a socket active */
677988419a9Stsutomu.owa@toshiba.co.jp static void add_sock(struct socket *sock, struct connection *con)
6786ed7257bSPatrick Caulfield {
679b81171cbSBob Peterson 	struct sock *sk = sock->sk;
680b81171cbSBob Peterson 
681b81171cbSBob Peterson 	write_lock_bh(&sk->sk_callback_lock);
6826ed7257bSPatrick Caulfield 	con->sock = sock;
6836ed7257bSPatrick Caulfield 
684b81171cbSBob Peterson 	sk->sk_user_data = con;
6856ed7257bSPatrick Caulfield 	/* Install a data_ready callback */
686b81171cbSBob Peterson 	sk->sk_data_ready = lowcomms_data_ready;
687b81171cbSBob Peterson 	sk->sk_write_space = lowcomms_write_space;
688b81171cbSBob Peterson 	sk->sk_state_change = lowcomms_state_change;
689b81171cbSBob Peterson 	sk->sk_allocation = GFP_NOFS;
690b81171cbSBob Peterson 	sk->sk_error_report = lowcomms_error_report;
691b81171cbSBob Peterson 	write_unlock_bh(&sk->sk_callback_lock);
6926ed7257bSPatrick Caulfield }
6936ed7257bSPatrick Caulfield 
6946ed7257bSPatrick Caulfield /* Add the port number to an IPv6 or 4 sockaddr and return the address
6956ed7257bSPatrick Caulfield    length */
6966ed7257bSPatrick Caulfield static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
6976ed7257bSPatrick Caulfield 			  int *addr_len)
6986ed7257bSPatrick Caulfield {
6996ed7257bSPatrick Caulfield 	saddr->ss_family =  dlm_local_addr[0]->ss_family;
7006ed7257bSPatrick Caulfield 	if (saddr->ss_family == AF_INET) {
7016ed7257bSPatrick Caulfield 		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
7026ed7257bSPatrick Caulfield 		in4_addr->sin_port = cpu_to_be16(port);
7036ed7257bSPatrick Caulfield 		*addr_len = sizeof(struct sockaddr_in);
7046ed7257bSPatrick Caulfield 		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
7056ed7257bSPatrick Caulfield 	} else {
7066ed7257bSPatrick Caulfield 		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
7076ed7257bSPatrick Caulfield 		in6_addr->sin6_port = cpu_to_be16(port);
7086ed7257bSPatrick Caulfield 		*addr_len = sizeof(struct sockaddr_in6);
7096ed7257bSPatrick Caulfield 	}
71001c8cab2SPatrick Caulfield 	memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
7116ed7257bSPatrick Caulfield }
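/* Illustrative use, assuming the cluster TCP/SCTP port from dlm_config:
 *
 *	int addr_len;
 *
 *	make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len);
 */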
7126ed7257bSPatrick Caulfield 
713706474fbSAlexander Aring static void dlm_page_release(struct kref *kref)
714706474fbSAlexander Aring {
715706474fbSAlexander Aring 	struct writequeue_entry *e = container_of(kref, struct writequeue_entry,
716706474fbSAlexander Aring 						  ref);
717706474fbSAlexander Aring 
718706474fbSAlexander Aring 	__free_page(e->page);
719706474fbSAlexander Aring 	kfree(e);
720706474fbSAlexander Aring }
721706474fbSAlexander Aring 
722706474fbSAlexander Aring static void dlm_msg_release(struct kref *kref)
723706474fbSAlexander Aring {
724706474fbSAlexander Aring 	struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref);
725706474fbSAlexander Aring 
726706474fbSAlexander Aring 	kref_put(&msg->entry->ref, dlm_page_release);
727706474fbSAlexander Aring 	kfree(msg);
728706474fbSAlexander Aring }
729706474fbSAlexander Aring 
730706474fbSAlexander Aring static void free_entry(struct writequeue_entry *e)
731706474fbSAlexander Aring {
732706474fbSAlexander Aring 	struct dlm_msg *msg, *tmp;
733706474fbSAlexander Aring 
734706474fbSAlexander Aring 	list_for_each_entry_safe(msg, tmp, &e->msgs, list) {
735706474fbSAlexander Aring 		if (msg->orig_msg) {
736706474fbSAlexander Aring 			msg->orig_msg->retransmit = false;
737706474fbSAlexander Aring 			kref_put(&msg->orig_msg->ref, dlm_msg_release);
738706474fbSAlexander Aring 		}
739706474fbSAlexander Aring 
740706474fbSAlexander Aring 		list_del(&msg->list);
741706474fbSAlexander Aring 		kref_put(&msg->ref, dlm_msg_release);
742706474fbSAlexander Aring 	}
743706474fbSAlexander Aring 
744706474fbSAlexander Aring 	list_del(&e->list);
745706474fbSAlexander Aring 	atomic_dec(&e->con->writequeue_cnt);
746706474fbSAlexander Aring 	kref_put(&e->ref, dlm_page_release);
747706474fbSAlexander Aring }
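/* Lifetime note: every dlm_msg holds a reference on its writequeue_entry
 * (dropped in dlm_msg_release()), and free_entry() drops the queue's own
 * references, so the backing page is only released once all outstanding
 * messages are gone.
 */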
748706474fbSAlexander Aring 
749d11ccd45SAlexander Aring static void dlm_close_sock(struct socket **sock)
750d11ccd45SAlexander Aring {
751d11ccd45SAlexander Aring 	if (*sock) {
752d11ccd45SAlexander Aring 		restore_callbacks(*sock);
753d11ccd45SAlexander Aring 		sock_release(*sock);
754d11ccd45SAlexander Aring 		*sock = NULL;
755d11ccd45SAlexander Aring 	}
756d11ccd45SAlexander Aring }
757d11ccd45SAlexander Aring 
7586ed7257bSPatrick Caulfield /* Close a remote connection and tidy up */
7590d737a8cSMarcelo Ricardo Leitner static void close_connection(struct connection *con, bool and_other,
7600d737a8cSMarcelo Ricardo Leitner 			     bool tx, bool rx)
7616ed7257bSPatrick Caulfield {
762b2a66629Stsutomu.owa@toshiba.co.jp 	bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
763706474fbSAlexander Aring 	struct writequeue_entry *e;
764b2a66629Stsutomu.owa@toshiba.co.jp 
7650aa18464Stsutomu.owa@toshiba.co.jp 	if (tx && !closing && cancel_work_sync(&con->swork)) {
7660d737a8cSMarcelo Ricardo Leitner 		log_print("canceled swork for node %d", con->nodeid);
7670aa18464Stsutomu.owa@toshiba.co.jp 		clear_bit(CF_WRITE_PENDING, &con->flags);
7680aa18464Stsutomu.owa@toshiba.co.jp 	}
7690aa18464Stsutomu.owa@toshiba.co.jp 	if (rx && !closing && cancel_work_sync(&con->rwork)) {
7700d737a8cSMarcelo Ricardo Leitner 		log_print("canceled rwork for node %d", con->nodeid);
7710aa18464Stsutomu.owa@toshiba.co.jp 		clear_bit(CF_READ_PENDING, &con->flags);
7720aa18464Stsutomu.owa@toshiba.co.jp 	}
7736ed7257bSPatrick Caulfield 
7740d737a8cSMarcelo Ricardo Leitner 	mutex_lock(&con->sock_mutex);
775d11ccd45SAlexander Aring 	dlm_close_sock(&con->sock);
776d11ccd45SAlexander Aring 
7776ed7257bSPatrick Caulfield 	if (con->othercon && and_other) {
7786ed7257bSPatrick Caulfield 		/* Will only re-enter once. */
779c6aa00e3SAlexander Aring 		close_connection(con->othercon, false, tx, rx);
7806ed7257bSPatrick Caulfield 	}
7819e5f2825SPatrick Caulfield 
782706474fbSAlexander Aring 	/* If we sent a writequeue entry only part of the way, we drop the
783706474fbSAlexander Aring 	 * whole entry on reconnection so that we do not restart in the
784706474fbSAlexander Aring 	 * middle of a message, which would confuse the other end.
785706474fbSAlexander Aring 	 *
786706474fbSAlexander Aring 	 * We can always drop whole messages because they get retransmitted,
787706474fbSAlexander Aring 	 * but what we cannot allow is transmitting half a message that may
788706474fbSAlexander Aring 	 * already have been processed on the other side.
789706474fbSAlexander Aring 	 *
790706474fbSAlexander Aring 	 * Our policy is to start from a clean state after a disconnect; we don't
791706474fbSAlexander Aring 	 * know what has been sent/received on the transport layer in that case.
792706474fbSAlexander Aring 	 */
793706474fbSAlexander Aring 	spin_lock(&con->writequeue_lock);
794706474fbSAlexander Aring 	if (!list_empty(&con->writequeue)) {
795706474fbSAlexander Aring 		e = list_first_entry(&con->writequeue, struct writequeue_entry,
796706474fbSAlexander Aring 				     list);
797706474fbSAlexander Aring 		if (e->dirty)
798706474fbSAlexander Aring 			free_entry(e);
799706474fbSAlexander Aring 	}
800706474fbSAlexander Aring 	spin_unlock(&con->writequeue_lock);
801706474fbSAlexander Aring 
8024798cbbfSAlexander Aring 	con->rx_leftover = 0;
8036ed7257bSPatrick Caulfield 	con->retries = 0;
80419633c7eSAlexander Aring 	clear_bit(CF_CONNECTED, &con->flags);
805ba868d9dSAlexander Aring 	clear_bit(CF_DELAY_CONNECT, &con->flags);
806ba868d9dSAlexander Aring 	clear_bit(CF_RECONNECT, &con->flags);
8078aa31cbfSAlexander Aring 	clear_bit(CF_EOF, &con->flags);
8086ed7257bSPatrick Caulfield 	mutex_unlock(&con->sock_mutex);
809b2a66629Stsutomu.owa@toshiba.co.jp 	clear_bit(CF_CLOSING, &con->flags);
8106ed7257bSPatrick Caulfield }
8116ed7257bSPatrick Caulfield 
812055923bfSAlexander Aring static void shutdown_connection(struct connection *con)
813055923bfSAlexander Aring {
814055923bfSAlexander Aring 	int ret;
815055923bfSAlexander Aring 
816eec054b5SAlexander Aring 	flush_work(&con->swork);
817055923bfSAlexander Aring 
818055923bfSAlexander Aring 	mutex_lock(&con->sock_mutex);
819055923bfSAlexander Aring 	/* nothing to shutdown */
820055923bfSAlexander Aring 	if (!con->sock) {
821055923bfSAlexander Aring 		mutex_unlock(&con->sock_mutex);
822055923bfSAlexander Aring 		return;
823055923bfSAlexander Aring 	}
824055923bfSAlexander Aring 
825055923bfSAlexander Aring 	set_bit(CF_SHUTDOWN, &con->flags);
826055923bfSAlexander Aring 	ret = kernel_sock_shutdown(con->sock, SHUT_WR);
827055923bfSAlexander Aring 	mutex_unlock(&con->sock_mutex);
828055923bfSAlexander Aring 	if (ret) {
829055923bfSAlexander Aring 		log_print("Connection %p failed to shutdown: %d will force close",
830055923bfSAlexander Aring 			  con, ret);
831055923bfSAlexander Aring 		goto force_close;
832055923bfSAlexander Aring 	} else {
833055923bfSAlexander Aring 		ret = wait_event_timeout(con->shutdown_wait,
834055923bfSAlexander Aring 					 !test_bit(CF_SHUTDOWN, &con->flags),
835055923bfSAlexander Aring 					 DLM_SHUTDOWN_WAIT_TIMEOUT);
836055923bfSAlexander Aring 		if (ret == 0) {
837055923bfSAlexander Aring 			log_print("Connection %p shutdown timed out, will force close",
838055923bfSAlexander Aring 				  con);
839055923bfSAlexander Aring 			goto force_close;
840055923bfSAlexander Aring 		}
841055923bfSAlexander Aring 	}
842055923bfSAlexander Aring 
843055923bfSAlexander Aring 	return;
844055923bfSAlexander Aring 
845055923bfSAlexander Aring force_close:
846055923bfSAlexander Aring 	clear_bit(CF_SHUTDOWN, &con->flags);
847055923bfSAlexander Aring 	close_connection(con, false, true, true);
848055923bfSAlexander Aring }
849055923bfSAlexander Aring 
850055923bfSAlexander Aring static void dlm_tcp_shutdown(struct connection *con)
851055923bfSAlexander Aring {
852055923bfSAlexander Aring 	if (con->othercon)
853055923bfSAlexander Aring 		shutdown_connection(con->othercon);
854055923bfSAlexander Aring 	shutdown_connection(con);
855055923bfSAlexander Aring }
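/* Graceful TCP shutdown: shutdown_connection() flushes pending sends, issues
 * SHUT_WR and then waits on shutdown_wait until receive_from_sock() sees the
 * peer's EOF and clears CF_SHUTDOWN; if that does not happen within
 * DLM_SHUTDOWN_WAIT_TIMEOUT the connection is force closed.
 */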
856055923bfSAlexander Aring 
8574798cbbfSAlexander Aring static int con_realloc_receive_buf(struct connection *con, int newlen)
8584798cbbfSAlexander Aring {
8594798cbbfSAlexander Aring 	unsigned char *newbuf;
8604798cbbfSAlexander Aring 
8614798cbbfSAlexander Aring 	newbuf = kmalloc(newlen, GFP_NOFS);
8624798cbbfSAlexander Aring 	if (!newbuf)
8634798cbbfSAlexander Aring 		return -ENOMEM;
8644798cbbfSAlexander Aring 
8654798cbbfSAlexander Aring 	/* copy any leftover from last receive */
8664798cbbfSAlexander Aring 	if (con->rx_leftover)
8674798cbbfSAlexander Aring 		memmove(newbuf, con->rx_buf, con->rx_leftover);
8684798cbbfSAlexander Aring 
8694798cbbfSAlexander Aring 	/* swap to new buffer space */
8704798cbbfSAlexander Aring 	kfree(con->rx_buf);
8714798cbbfSAlexander Aring 	con->rx_buflen = newlen;
8724798cbbfSAlexander Aring 	con->rx_buf = newbuf;
8734798cbbfSAlexander Aring 
8744798cbbfSAlexander Aring 	return 0;
8754798cbbfSAlexander Aring }
8764798cbbfSAlexander Aring 
8776ed7257bSPatrick Caulfield /* Data received from remote end */
8786ed7257bSPatrick Caulfield static int receive_from_sock(struct connection *con)
8796ed7257bSPatrick Caulfield {
8806ed7257bSPatrick Caulfield 	int call_again_soon = 0;
8814798cbbfSAlexander Aring 	struct msghdr msg;
8824798cbbfSAlexander Aring 	struct kvec iov;
8834798cbbfSAlexander Aring 	int ret, buflen;
8846ed7257bSPatrick Caulfield 
8856ed7257bSPatrick Caulfield 	mutex_lock(&con->sock_mutex);
8866ed7257bSPatrick Caulfield 
8876ed7257bSPatrick Caulfield 	if (con->sock == NULL) {
8886ed7257bSPatrick Caulfield 		ret = -EAGAIN;
8896ed7257bSPatrick Caulfield 		goto out_close;
8906ed7257bSPatrick Caulfield 	}
8914798cbbfSAlexander Aring 
8924798cbbfSAlexander Aring 	/* realloc if the buffer size to read out has changed */
8934798cbbfSAlexander Aring 	buflen = dlm_config.ci_buffer_size;
8944798cbbfSAlexander Aring 	if (con->rx_buflen != buflen && con->rx_leftover <= buflen) {
8954798cbbfSAlexander Aring 		ret = con_realloc_receive_buf(con, buflen);
8964798cbbfSAlexander Aring 		if (ret < 0)
8976ed7257bSPatrick Caulfield 			goto out_resched;
8986ed7257bSPatrick Caulfield 	}
8996ed7257bSPatrick Caulfield 
9004798cbbfSAlexander Aring 	/* calculate the new buffer parameters, taking the last receive and
9014798cbbfSAlexander Aring 	 * any leftover bytes into account
9026ed7257bSPatrick Caulfield 	 */
9034798cbbfSAlexander Aring 	iov.iov_base = con->rx_buf + con->rx_leftover;
9044798cbbfSAlexander Aring 	iov.iov_len = con->rx_buflen - con->rx_leftover;
9056ed7257bSPatrick Caulfield 
9064798cbbfSAlexander Aring 	memset(&msg, 0, sizeof(msg));
9074798cbbfSAlexander Aring 	msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
9084798cbbfSAlexander Aring 	ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
9094798cbbfSAlexander Aring 			     msg.msg_flags);
9106ed7257bSPatrick Caulfield 	if (ret <= 0)
9116ed7257bSPatrick Caulfield 		goto out_close;
9124798cbbfSAlexander Aring 	else if (ret == iov.iov_len)
913ee44b4bcSMarcelo Ricardo Leitner 		call_again_soon = 1;
9146ed7257bSPatrick Caulfield 
9154798cbbfSAlexander Aring 	/* new buflen according to bytes read and leftover from last receive */
9164798cbbfSAlexander Aring 	buflen = ret + con->rx_leftover;
9174798cbbfSAlexander Aring 	ret = dlm_process_incoming_buffer(con->nodeid, con->rx_buf, buflen);
9184798cbbfSAlexander Aring 	if (ret < 0)
9194798cbbfSAlexander Aring 		goto out_close;
9206ed7257bSPatrick Caulfield 
9214798cbbfSAlexander Aring 	/* calculate the leftover bytes from processing and move them to the
9224798cbbfSAlexander Aring 	 * beginning of the receive buffer, so that on the next receive the
9234798cbbfSAlexander Aring 	 * full message starts at the beginning of the buffer.
9244798cbbfSAlexander Aring 	 */
9254798cbbfSAlexander Aring 	con->rx_leftover = buflen - ret;
9264798cbbfSAlexander Aring 	if (con->rx_leftover) {
9274798cbbfSAlexander Aring 		memmove(con->rx_buf, con->rx_buf + ret,
9284798cbbfSAlexander Aring 			con->rx_leftover);
9294798cbbfSAlexander Aring 		call_again_soon = true;
9306ed7257bSPatrick Caulfield 	}
9316ed7257bSPatrick Caulfield 
9326ed7257bSPatrick Caulfield 	if (call_again_soon)
9336ed7257bSPatrick Caulfield 		goto out_resched;
9344798cbbfSAlexander Aring 
9356ed7257bSPatrick Caulfield 	mutex_unlock(&con->sock_mutex);
9366ed7257bSPatrick Caulfield 	return 0;
9376ed7257bSPatrick Caulfield 
9386ed7257bSPatrick Caulfield out_resched:
9396ed7257bSPatrick Caulfield 	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
9406ed7257bSPatrick Caulfield 		queue_work(recv_workqueue, &con->rwork);
9416ed7257bSPatrick Caulfield 	mutex_unlock(&con->sock_mutex);
9426ed7257bSPatrick Caulfield 	return -EAGAIN;
9436ed7257bSPatrick Caulfield 
9446ed7257bSPatrick Caulfield out_close:
945055923bfSAlexander Aring 	if (ret == 0) {
946055923bfSAlexander Aring 		log_print("connection %p got EOF from %d",
947055923bfSAlexander Aring 			  con, con->nodeid);
9488aa31cbfSAlexander Aring 
9498aa31cbfSAlexander Aring 		if (con->eof_condition && con->eof_condition(con)) {
9508aa31cbfSAlexander Aring 			set_bit(CF_EOF, &con->flags);
9518aa31cbfSAlexander Aring 			mutex_unlock(&con->sock_mutex);
9528aa31cbfSAlexander Aring 		} else {
9538aa31cbfSAlexander Aring 			mutex_unlock(&con->sock_mutex);
9548aa31cbfSAlexander Aring 			close_connection(con, false, true, false);
9558aa31cbfSAlexander Aring 
956055923bfSAlexander Aring 			/* handling for tcp shutdown */
957055923bfSAlexander Aring 			clear_bit(CF_SHUTDOWN, &con->flags);
958055923bfSAlexander Aring 			wake_up(&con->shutdown_wait);
9598aa31cbfSAlexander Aring 		}
9608aa31cbfSAlexander Aring 
961055923bfSAlexander Aring 		/* signal the receive worker to break out */
962055923bfSAlexander Aring 		ret = -1;
9638aa31cbfSAlexander Aring 	} else {
9648aa31cbfSAlexander Aring 		mutex_unlock(&con->sock_mutex);
9656ed7257bSPatrick Caulfield 	}
9666ed7257bSPatrick Caulfield 	return ret;
9676ed7257bSPatrick Caulfield }
9686ed7257bSPatrick Caulfield 
9696ed7257bSPatrick Caulfield /* Listening socket is busy, accept a connection */
970d11ccd45SAlexander Aring static int accept_from_sock(struct listen_connection *con)
9716ed7257bSPatrick Caulfield {
9726ed7257bSPatrick Caulfield 	int result;
9736ed7257bSPatrick Caulfield 	struct sockaddr_storage peeraddr;
9746ed7257bSPatrick Caulfield 	struct socket *newsock;
975b38bc9c2SAlexander Aring 	int len, idx;
9766ed7257bSPatrick Caulfield 	int nodeid;
9776ed7257bSPatrick Caulfield 	struct connection *newcon;
9786ed7257bSPatrick Caulfield 	struct connection *addcon;
9793f78cd7dSAlexander Aring 	unsigned int mark;
9806ed7257bSPatrick Caulfield 
981d11ccd45SAlexander Aring 	if (!con->sock)
9823421fb15Stsutomu.owa@toshiba.co.jp 		return -ENOTCONN;
9836ed7257bSPatrick Caulfield 
9843421fb15Stsutomu.owa@toshiba.co.jp 	result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
9856ed7257bSPatrick Caulfield 	if (result < 0)
9866ed7257bSPatrick Caulfield 		goto accept_err;
9876ed7257bSPatrick Caulfield 
9886ed7257bSPatrick Caulfield 	/* Get the connected socket's peer */
9896ed7257bSPatrick Caulfield 	memset(&peeraddr, 0, sizeof(peeraddr));
9909b2c45d4SDenys Vlasenko 	len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
9919b2c45d4SDenys Vlasenko 	if (len < 0) {
9926ed7257bSPatrick Caulfield 		result = -ECONNABORTED;
9936ed7257bSPatrick Caulfield 		goto accept_err;
9946ed7257bSPatrick Caulfield 	}
9956ed7257bSPatrick Caulfield 
9966ed7257bSPatrick Caulfield 	/* Get the new node's NODEID */
9976ed7257bSPatrick Caulfield 	make_sockaddr(&peeraddr, 0, &len);
998e125fbebSAlexander Aring 	if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
999bcaadf5cSMasatake YAMATO 		unsigned char *b=(unsigned char *)&peeraddr;
1000617e82e1SDavid Teigland 		log_print("connect from non cluster node");
1001bcaadf5cSMasatake YAMATO 		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
1002bcaadf5cSMasatake YAMATO 				     b, sizeof(struct sockaddr_storage));
10036ed7257bSPatrick Caulfield 		sock_release(newsock);
10046ed7257bSPatrick Caulfield 		return -1;
10056ed7257bSPatrick Caulfield 	}
10066ed7257bSPatrick Caulfield 
10076ed7257bSPatrick Caulfield 	log_print("got connection from %d", nodeid);
10086ed7257bSPatrick Caulfield 
10096ed7257bSPatrick Caulfield 	/*  Check to see if we already have a connection to this node. This
10106ed7257bSPatrick Caulfield 	 *  could happen if the two nodes initiate a connection at roughly
10116ed7257bSPatrick Caulfield 	 *  the same time and the connections cross on the wire.
10126ed7257bSPatrick Caulfield 	 *  In this case we store the incoming one in "othercon"
10136ed7257bSPatrick Caulfield 	 */
1014b38bc9c2SAlexander Aring 	idx = srcu_read_lock(&connections_srcu);
1015748285ccSDavid Teigland 	newcon = nodeid2con(nodeid, GFP_NOFS);
10166ed7257bSPatrick Caulfield 	if (!newcon) {
1017b38bc9c2SAlexander Aring 		srcu_read_unlock(&connections_srcu, idx);
10186ed7257bSPatrick Caulfield 		result = -ENOMEM;
10196ed7257bSPatrick Caulfield 		goto accept_err;
10206ed7257bSPatrick Caulfield 	}
1021d11ccd45SAlexander Aring 
1022e125fbebSAlexander Aring 	sock_set_mark(newsock->sk, mark);
1023e125fbebSAlexander Aring 
1024d11ccd45SAlexander Aring 	mutex_lock(&newcon->sock_mutex);
10256ed7257bSPatrick Caulfield 	if (newcon->sock) {
10266ed7257bSPatrick Caulfield 		struct connection *othercon = newcon->othercon;
10276ed7257bSPatrick Caulfield 
10286ed7257bSPatrick Caulfield 		if (!othercon) {
1029a47666ebSAlexander Aring 			othercon = kzalloc(sizeof(*othercon), GFP_NOFS);
10306ed7257bSPatrick Caulfield 			if (!othercon) {
1031617e82e1SDavid Teigland 				log_print("failed to allocate incoming socket");
10326ed7257bSPatrick Caulfield 				mutex_unlock(&newcon->sock_mutex);
1033b38bc9c2SAlexander Aring 				srcu_read_unlock(&connections_srcu, idx);
10346ed7257bSPatrick Caulfield 				result = -ENOMEM;
10356ed7257bSPatrick Caulfield 				goto accept_err;
10366ed7257bSPatrick Caulfield 			}
10374798cbbfSAlexander Aring 
10386cde210aSAlexander Aring 			result = dlm_con_init(othercon, nodeid);
10396cde210aSAlexander Aring 			if (result < 0) {
10404798cbbfSAlexander Aring 				kfree(othercon);
10412fd8db2dSYang Yingliang 				mutex_unlock(&newcon->sock_mutex);
1042b38bc9c2SAlexander Aring 				srcu_read_unlock(&connections_srcu, idx);
10434798cbbfSAlexander Aring 				goto accept_err;
10444798cbbfSAlexander Aring 			}
10454798cbbfSAlexander Aring 
1046e9a470acSAlexander Aring 			lockdep_set_subclass(&othercon->sock_mutex, 1);
10477443bc96SAlexander Aring 			set_bit(CF_IS_OTHERCON, &othercon->flags);
10486cde210aSAlexander Aring 			newcon->othercon = othercon;
1049ba868d9dSAlexander Aring 			othercon->sendcon = newcon;
1050ba3ab3caSAlexander Aring 		} else {
1051ba3ab3caSAlexander Aring 			/* close other sock con if we have something new */
1052ba3ab3caSAlexander Aring 			close_connection(othercon, false, true, false);
105361d96be0SPatrick Caulfield 		}
1054ba3ab3caSAlexander Aring 
1055e9a470acSAlexander Aring 		mutex_lock(&othercon->sock_mutex);
1056988419a9Stsutomu.owa@toshiba.co.jp 		add_sock(newsock, othercon);
10576ed7257bSPatrick Caulfield 		addcon = othercon;
1058c7355827Stsutomu.owa@toshiba.co.jp 		mutex_unlock(&othercon->sock_mutex);
10596ed7257bSPatrick Caulfield 	}
10606ed7257bSPatrick Caulfield 	else {
10613735b4b9SBob Peterson 		/* accept copies the sk after we've saved the callbacks, so we
10623735b4b9SBob Peterson 		   don't want to save them a second time or comm errors will
10633735b4b9SBob Peterson 		   result in calling sk_error_report recursively. */
1064988419a9Stsutomu.owa@toshiba.co.jp 		add_sock(newsock, newcon);
10656ed7257bSPatrick Caulfield 		addcon = newcon;
10666ed7257bSPatrick Caulfield 	}
10676ed7257bSPatrick Caulfield 
1068b30a624fSAlexander Aring 	set_bit(CF_CONNECTED, &addcon->flags);
10696ed7257bSPatrick Caulfield 	mutex_unlock(&newcon->sock_mutex);
10706ed7257bSPatrick Caulfield 
10716ed7257bSPatrick Caulfield 	/*
10726ed7257bSPatrick Caulfield 	 * Add it to the active queue in case we got data
107425985edcSLucas De Marchi 	 * between processing the accept and adding the socket
10746ed7257bSPatrick Caulfield 	 * to the read_sockets list
10756ed7257bSPatrick Caulfield 	 */
10766ed7257bSPatrick Caulfield 	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
10776ed7257bSPatrick Caulfield 		queue_work(recv_workqueue, &addcon->rwork);
10786ed7257bSPatrick Caulfield 
1079b38bc9c2SAlexander Aring 	srcu_read_unlock(&connections_srcu, idx);
1080b38bc9c2SAlexander Aring 
10816ed7257bSPatrick Caulfield 	return 0;
10826ed7257bSPatrick Caulfield 
10836ed7257bSPatrick Caulfield accept_err:
10843421fb15Stsutomu.owa@toshiba.co.jp 	if (newsock)
10856ed7257bSPatrick Caulfield 		sock_release(newsock);
10866ed7257bSPatrick Caulfield 
10876ed7257bSPatrick Caulfield 	if (result != -EAGAIN)
1088617e82e1SDavid Teigland 		log_print("error accepting connection from node: %d", result);
10896ed7257bSPatrick Caulfield 	return result;
10906ed7257bSPatrick Caulfield }
10916ed7257bSPatrick Caulfield 
10925d689871SMike Christie /*
10935d689871SMike Christie  * writequeue_entry_complete - try to delete and free write queue entry
10945d689871SMike Christie  * @e: write queue entry to try to delete
10955d689871SMike Christie  * @completed: bytes completed
10965d689871SMike Christie  *
10975d689871SMike Christie  * writequeue_lock must be held.
10985d689871SMike Christie  */
10995d689871SMike Christie static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
11005d689871SMike Christie {
11015d689871SMike Christie 	e->offset += completed;
11025d689871SMike Christie 	e->len -= completed;
1103706474fbSAlexander Aring 	/* signal that page was halfway transmitted */
1104706474fbSAlexander Aring 	e->dirty = true;
11055d689871SMike Christie 
11068f2dc78dSAlexander Aring 	if (e->len == 0 && e->users == 0)
11075d689871SMike Christie 		free_entry(e);
11085d689871SMike Christie }
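
/* Usage sketch (mirrors the caller in send_to_sock() further down): the
 * entry is completed while writequeue_lock is held, e.g.
 *
 *	spin_lock(&con->writequeue_lock);
 *	writequeue_entry_complete(e, ret);
 *	spin_unlock(&con->writequeue_lock);
 *
 * This is only a minimal illustration of the locking contract, not new code.
 */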
11095d689871SMike Christie 
1110ee44b4bcSMarcelo Ricardo Leitner /*
1111ee44b4bcSMarcelo Ricardo Leitner  * sctp_bind_addrs - bind a SCTP socket to all our addresses
1112ee44b4bcSMarcelo Ricardo Leitner  */
111313004e8aSAlexander Aring static int sctp_bind_addrs(struct socket *sock, uint16_t port)
1114ee44b4bcSMarcelo Ricardo Leitner {
1115ee44b4bcSMarcelo Ricardo Leitner 	struct sockaddr_storage localaddr;
1116c0425a42SChristoph Hellwig 	struct sockaddr *addr = (struct sockaddr *)&localaddr;
1117ee44b4bcSMarcelo Ricardo Leitner 	int i, addr_len, result = 0;
1118ee44b4bcSMarcelo Ricardo Leitner 
1119ee44b4bcSMarcelo Ricardo Leitner 	for (i = 0; i < dlm_local_count; i++) {
1120ee44b4bcSMarcelo Ricardo Leitner 		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
1121ee44b4bcSMarcelo Ricardo Leitner 		make_sockaddr(&localaddr, port, &addr_len);
1122ee44b4bcSMarcelo Ricardo Leitner 
1123ee44b4bcSMarcelo Ricardo Leitner 		if (!i)
112413004e8aSAlexander Aring 			result = kernel_bind(sock, addr, addr_len);
1125ee44b4bcSMarcelo Ricardo Leitner 		else
112613004e8aSAlexander Aring 			result = sock_bind_add(sock->sk, addr, addr_len);
1127ee44b4bcSMarcelo Ricardo Leitner 
1128ee44b4bcSMarcelo Ricardo Leitner 		if (result < 0) {
1129ee44b4bcSMarcelo Ricardo Leitner 			log_print("Can't bind to %d addr number %d, %d.",
1130ee44b4bcSMarcelo Ricardo Leitner 				  port, i + 1, result);
1131ee44b4bcSMarcelo Ricardo Leitner 			break;
1132ee44b4bcSMarcelo Ricardo Leitner 		}
1133ee44b4bcSMarcelo Ricardo Leitner 	}
1134ee44b4bcSMarcelo Ricardo Leitner 	return result;
1135ee44b4bcSMarcelo Ricardo Leitner }
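
/* Call-site summary (illustrative, taken from this file): connecting
 * sockets bind with an ephemeral port, while the listening socket binds
 * to the configured DLM port, i.e.
 *
 *	sctp_bind_addrs(con->sock, 0);				(sctp_connect_to_sock)
 *	sctp_bind_addrs(con->sock, dlm_config.ci_tcp_port);	(sctp_listen_for_all)
 */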
1136ee44b4bcSMarcelo Ricardo Leitner 
11376ed7257bSPatrick Caulfield /* Initiate an SCTP association.
11386ed7257bSPatrick Caulfield    Unlike send_to_sock() we don't yet have a connected socket for this
11396ed7257bSPatrick Caulfield    association, so create a new per-node SCTP socket, bind it to all our
11406ed7257bSPatrick Caulfield    local addresses and connect it to the primary address of the remote node.
11416ed7257bSPatrick Caulfield  */
1142ee44b4bcSMarcelo Ricardo Leitner static void sctp_connect_to_sock(struct connection *con)
11436ed7257bSPatrick Caulfield {
1144ee44b4bcSMarcelo Ricardo Leitner 	struct sockaddr_storage daddr;
1145ee44b4bcSMarcelo Ricardo Leitner 	int result;
1146ee44b4bcSMarcelo Ricardo Leitner 	int addr_len;
1147ee44b4bcSMarcelo Ricardo Leitner 	struct socket *sock;
11489c9f168fSAlexander Aring 	unsigned int mark;
1149ee44b4bcSMarcelo Ricardo Leitner 
11505d689871SMike Christie 	mutex_lock(&con->sock_mutex);
11516ed7257bSPatrick Caulfield 
1152ee44b4bcSMarcelo Ricardo Leitner 	/* Some odd races can cause double-connects, ignore them */
1153ee44b4bcSMarcelo Ricardo Leitner 	if (con->retries++ > MAX_CONNECT_RETRIES)
1154ee44b4bcSMarcelo Ricardo Leitner 		goto out;
1155ee44b4bcSMarcelo Ricardo Leitner 
1156ee44b4bcSMarcelo Ricardo Leitner 	if (con->sock) {
1157ee44b4bcSMarcelo Ricardo Leitner 		log_print("node %d already connected.", con->nodeid);
1158ee44b4bcSMarcelo Ricardo Leitner 		goto out;
1159ee44b4bcSMarcelo Ricardo Leitner 	}
1160ee44b4bcSMarcelo Ricardo Leitner 
1161ee44b4bcSMarcelo Ricardo Leitner 	memset(&daddr, 0, sizeof(daddr));
1162e125fbebSAlexander Aring 	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true, &mark);
1163ee44b4bcSMarcelo Ricardo Leitner 	if (result < 0) {
11646ed7257bSPatrick Caulfield 		log_print("no address for nodeid %d", con->nodeid);
1165ee44b4bcSMarcelo Ricardo Leitner 		goto out;
116604bedd79SDavid Teigland 	}
11676ed7257bSPatrick Caulfield 
1168ee44b4bcSMarcelo Ricardo Leitner 	/* Create a socket to communicate with */
1169ee44b4bcSMarcelo Ricardo Leitner 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1170ee44b4bcSMarcelo Ricardo Leitner 				  SOCK_STREAM, IPPROTO_SCTP, &sock);
1171ee44b4bcSMarcelo Ricardo Leitner 	if (result < 0)
1172ee44b4bcSMarcelo Ricardo Leitner 		goto socket_err;
11736ed7257bSPatrick Caulfield 
11749c9f168fSAlexander Aring 	sock_set_mark(sock->sk, mark);
11759c9f168fSAlexander Aring 
1176988419a9Stsutomu.owa@toshiba.co.jp 	add_sock(sock, con);
11776ed7257bSPatrick Caulfield 
1178ee44b4bcSMarcelo Ricardo Leitner 	/* Bind to all addresses. */
117913004e8aSAlexander Aring 	if (sctp_bind_addrs(con->sock, 0))
1180ee44b4bcSMarcelo Ricardo Leitner 		goto bind_err;
118198e1b60eSMike Christie 
1182ee44b4bcSMarcelo Ricardo Leitner 	make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);
11836ed7257bSPatrick Caulfield 
11842df6b762SAlexander Aring 	log_print_ratelimited("connecting to %d", con->nodeid);
11856ed7257bSPatrick Caulfield 
1186ee44b4bcSMarcelo Ricardo Leitner 	/* Turn off Nagle's algorithm */
118740ef92c6SChristoph Hellwig 	sctp_sock_set_nodelay(sock->sk);
1188ee44b4bcSMarcelo Ricardo Leitner 
1189f706d830SGang He 	/*
1190f706d830SGang He 	 * Make the sock->ops->connect() call return within the specified time,
1191f706d830SGang He 	 * since the O_NONBLOCK argument to connect() does not work here.
1192f706d830SGang He 	 * Afterwards we restore the default value of this attribute.
1193f706d830SGang He 	 */
119476ee0785SChristoph Hellwig 	sock_set_sndtimeo(sock->sk, 5);
1195ee44b4bcSMarcelo Ricardo Leitner 	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
1196da3627c3SGang He 				   0);
119776ee0785SChristoph Hellwig 	sock_set_sndtimeo(sock->sk, 0);
1198f706d830SGang He 
1199ee44b4bcSMarcelo Ricardo Leitner 	if (result == -EINPROGRESS)
1200ee44b4bcSMarcelo Ricardo Leitner 		result = 0;
120119633c7eSAlexander Aring 	if (result == 0) {
120219633c7eSAlexander Aring 		if (!test_and_set_bit(CF_CONNECTED, &con->flags))
120319633c7eSAlexander Aring 			log_print("successfully connected to node %d", con->nodeid);
1204ee44b4bcSMarcelo Ricardo Leitner 		goto out;
120519633c7eSAlexander Aring 	}
1206ee44b4bcSMarcelo Ricardo Leitner 
1207ee44b4bcSMarcelo Ricardo Leitner bind_err:
1208ee44b4bcSMarcelo Ricardo Leitner 	con->sock = NULL;
1209ee44b4bcSMarcelo Ricardo Leitner 	sock_release(sock);
1210ee44b4bcSMarcelo Ricardo Leitner 
1211ee44b4bcSMarcelo Ricardo Leitner socket_err:
1212ee44b4bcSMarcelo Ricardo Leitner 	/*
1213ee44b4bcSMarcelo Ricardo Leitner 	 * Some errors are fatal and this list might need adjusting. For other
1214ee44b4bcSMarcelo Ricardo Leitner 	 * errors we try again until the max number of retries is reached.
1215ee44b4bcSMarcelo Ricardo Leitner 	 */
1216ee44b4bcSMarcelo Ricardo Leitner 	if (result != -EHOSTUNREACH &&
1217ee44b4bcSMarcelo Ricardo Leitner 	    result != -ENETUNREACH &&
1218ee44b4bcSMarcelo Ricardo Leitner 	    result != -ENETDOWN &&
1219ee44b4bcSMarcelo Ricardo Leitner 	    result != -EINVAL &&
1220ee44b4bcSMarcelo Ricardo Leitner 	    result != -EPROTONOSUPPORT) {
1221ee44b4bcSMarcelo Ricardo Leitner 		log_print("connect %d try %d error %d", con->nodeid,
1222ee44b4bcSMarcelo Ricardo Leitner 			  con->retries, result);
1223ee44b4bcSMarcelo Ricardo Leitner 		mutex_unlock(&con->sock_mutex);
1224ee44b4bcSMarcelo Ricardo Leitner 		msleep(1000);
1225ee44b4bcSMarcelo Ricardo Leitner 		lowcomms_connect_sock(con);
1226ee44b4bcSMarcelo Ricardo Leitner 		return;
12276ed7257bSPatrick Caulfield 	}
12285d689871SMike Christie 
1229ee44b4bcSMarcelo Ricardo Leitner out:
12305d689871SMike Christie 	mutex_unlock(&con->sock_mutex);
12316ed7257bSPatrick Caulfield }
12326ed7257bSPatrick Caulfield 
12336ed7257bSPatrick Caulfield /* Connect a new socket to its peer */
12346ed7257bSPatrick Caulfield static void tcp_connect_to_sock(struct connection *con)
12356ed7257bSPatrick Caulfield {
12366bd8fedaSLon Hohberger 	struct sockaddr_storage saddr, src_addr;
1237e125fbebSAlexander Aring 	unsigned int mark;
12386ed7257bSPatrick Caulfield 	int addr_len;
1239a89d63a1SCasey Dahlin 	struct socket *sock = NULL;
124036b71a8bSDavid Teigland 	int result;
12416ed7257bSPatrick Caulfield 
12426ed7257bSPatrick Caulfield 	mutex_lock(&con->sock_mutex);
12436ed7257bSPatrick Caulfield 	if (con->retries++ > MAX_CONNECT_RETRIES)
12446ed7257bSPatrick Caulfield 		goto out;
12456ed7257bSPatrick Caulfield 
12466ed7257bSPatrick Caulfield 	/* Some odd races can cause double-connects, ignore them */
124736b71a8bSDavid Teigland 	if (con->sock)
12486ed7257bSPatrick Caulfield 		goto out;
12496ed7257bSPatrick Caulfield 
12506ed7257bSPatrick Caulfield 	/* Create a socket to communicate with */
1251eeb1bd5cSEric W. Biederman 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1252eeb1bd5cSEric W. Biederman 				  SOCK_STREAM, IPPROTO_TCP, &sock);
12536ed7257bSPatrick Caulfield 	if (result < 0)
12546ed7257bSPatrick Caulfield 		goto out_err;
12556ed7257bSPatrick Caulfield 
12566ed7257bSPatrick Caulfield 	memset(&saddr, 0, sizeof(saddr));
1257e125fbebSAlexander Aring 	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false, &mark);
125836b71a8bSDavid Teigland 	if (result < 0) {
125936b71a8bSDavid Teigland 		log_print("no address for nodeid %d", con->nodeid);
12606ed7257bSPatrick Caulfield 		goto out_err;
126136b71a8bSDavid Teigland 	}
12626ed7257bSPatrick Caulfield 
1263e125fbebSAlexander Aring 	sock_set_mark(sock->sk, mark);
1264e125fbebSAlexander Aring 
1265988419a9Stsutomu.owa@toshiba.co.jp 	add_sock(sock, con);
12666ed7257bSPatrick Caulfield 
12676bd8fedaSLon Hohberger 	/* Bind to our cluster-known address when connecting to avoid
12686bd8fedaSLon Hohberger 	   routing problems */
12696bd8fedaSLon Hohberger 	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
12706bd8fedaSLon Hohberger 	make_sockaddr(&src_addr, 0, &addr_len);
12716bd8fedaSLon Hohberger 	result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
12726bd8fedaSLon Hohberger 				 addr_len);
12736bd8fedaSLon Hohberger 	if (result < 0) {
12746bd8fedaSLon Hohberger 		log_print("could not bind for connect: %d", result);
12756bd8fedaSLon Hohberger 		/* This *may* not indicate a critical error */
12766bd8fedaSLon Hohberger 	}
12776bd8fedaSLon Hohberger 
12786ed7257bSPatrick Caulfield 	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);
12796ed7257bSPatrick Caulfield 
12802df6b762SAlexander Aring 	log_print_ratelimited("connecting to %d", con->nodeid);
1281cb2d45daSDavid Teigland 
1282cb2d45daSDavid Teigland 	/* Turn off Nagle's algorithm */
128312abc5eeSChristoph Hellwig 	tcp_sock_set_nodelay(sock->sk);
1284cb2d45daSDavid Teigland 
128536b71a8bSDavid Teigland 	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
12866ed7257bSPatrick Caulfield 				   O_NONBLOCK);
12876ed7257bSPatrick Caulfield 	if (result == -EINPROGRESS)
12886ed7257bSPatrick Caulfield 		result = 0;
12896ed7257bSPatrick Caulfield 	if (result == 0)
12906ed7257bSPatrick Caulfield 		goto out;
12916ed7257bSPatrick Caulfield 
12926ed7257bSPatrick Caulfield out_err:
12936ed7257bSPatrick Caulfield 	if (con->sock) {
12946ed7257bSPatrick Caulfield 		sock_release(con->sock);
12956ed7257bSPatrick Caulfield 		con->sock = NULL;
1296a89d63a1SCasey Dahlin 	} else if (sock) {
1297a89d63a1SCasey Dahlin 		sock_release(sock);
12986ed7257bSPatrick Caulfield 	}
12996ed7257bSPatrick Caulfield 	/*
13006ed7257bSPatrick Caulfield 	 * Some errors are fatal and this list might need adjusting. For other
13016ed7257bSPatrick Caulfield 	 * errors we try again until the max number of retries is reached.
13026ed7257bSPatrick Caulfield 	 */
130336b71a8bSDavid Teigland 	if (result != -EHOSTUNREACH &&
130436b71a8bSDavid Teigland 	    result != -ENETUNREACH &&
130536b71a8bSDavid Teigland 	    result != -ENETDOWN &&
130636b71a8bSDavid Teigland 	    result != -EINVAL &&
130736b71a8bSDavid Teigland 	    result != -EPROTONOSUPPORT) {
130836b71a8bSDavid Teigland 		log_print("connect %d try %d error %d", con->nodeid,
130936b71a8bSDavid Teigland 			  con->retries, result);
131036b71a8bSDavid Teigland 		mutex_unlock(&con->sock_mutex);
131136b71a8bSDavid Teigland 		msleep(1000);
13126ed7257bSPatrick Caulfield 		lowcomms_connect_sock(con);
131336b71a8bSDavid Teigland 		return;
13146ed7257bSPatrick Caulfield 	}
13156ed7257bSPatrick Caulfield out:
13166ed7257bSPatrick Caulfield 	mutex_unlock(&con->sock_mutex);
13176ed7257bSPatrick Caulfield 	return;
13186ed7257bSPatrick Caulfield }
13196ed7257bSPatrick Caulfield 
1320d11ccd45SAlexander Aring /* On error caller must run dlm_close_sock() for the
1321d11ccd45SAlexander Aring  * listen connection socket.
1322d11ccd45SAlexander Aring  */
1323d11ccd45SAlexander Aring static int tcp_create_listen_sock(struct listen_connection *con,
13246ed7257bSPatrick Caulfield 				  struct sockaddr_storage *saddr)
13256ed7257bSPatrick Caulfield {
13266ed7257bSPatrick Caulfield 	struct socket *sock = NULL;
13276ed7257bSPatrick Caulfield 	int result = 0;
13286ed7257bSPatrick Caulfield 	int addr_len;
13296ed7257bSPatrick Caulfield 
13306ed7257bSPatrick Caulfield 	if (dlm_local_addr[0]->ss_family == AF_INET)
13316ed7257bSPatrick Caulfield 		addr_len = sizeof(struct sockaddr_in);
13326ed7257bSPatrick Caulfield 	else
13336ed7257bSPatrick Caulfield 		addr_len = sizeof(struct sockaddr_in6);
13346ed7257bSPatrick Caulfield 
13356ed7257bSPatrick Caulfield 	/* Create a socket to communicate with */
1336eeb1bd5cSEric W. Biederman 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1337eeb1bd5cSEric W. Biederman 				  SOCK_STREAM, IPPROTO_TCP, &sock);
13386ed7257bSPatrick Caulfield 	if (result < 0) {
1339617e82e1SDavid Teigland 		log_print("Can't create listening comms socket");
13406ed7257bSPatrick Caulfield 		goto create_out;
13416ed7257bSPatrick Caulfield 	}
13426ed7257bSPatrick Caulfield 
1343a5b7ab63SAlexander Aring 	sock_set_mark(sock->sk, dlm_config.ci_mark);
1344a5b7ab63SAlexander Aring 
1345cb2d45daSDavid Teigland 	/* Turn off Nagle's algorithm */
134612abc5eeSChristoph Hellwig 	tcp_sock_set_nodelay(sock->sk);
1347cb2d45daSDavid Teigland 
1348b58f0e8fSChristoph Hellwig 	sock_set_reuseaddr(sock->sk);
13496ed7257bSPatrick Caulfield 
1350d11ccd45SAlexander Aring 	add_listen_sock(sock, con);
13516ed7257bSPatrick Caulfield 
13526ed7257bSPatrick Caulfield 	/* Bind to our port */
13536ed7257bSPatrick Caulfield 	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
13546ed7257bSPatrick Caulfield 	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
13556ed7257bSPatrick Caulfield 	if (result < 0) {
1356617e82e1SDavid Teigland 		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
13576ed7257bSPatrick Caulfield 		goto create_out;
13586ed7257bSPatrick Caulfield 	}
1359ce3d9544SChristoph Hellwig 	sock_set_keepalive(sock->sk);
13606ed7257bSPatrick Caulfield 
13616ed7257bSPatrick Caulfield 	result = sock->ops->listen(sock, 5);
13626ed7257bSPatrick Caulfield 	if (result < 0) {
1363617e82e1SDavid Teigland 		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
13646ed7257bSPatrick Caulfield 		goto create_out;
13656ed7257bSPatrick Caulfield 	}
13666ed7257bSPatrick Caulfield 
1367d11ccd45SAlexander Aring 	return 0;
1368d11ccd45SAlexander Aring 
13696ed7257bSPatrick Caulfield create_out:
1370d11ccd45SAlexander Aring 	return result;
13716ed7257bSPatrick Caulfield }
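
/* As noted above, on error the caller must clean up the listen socket; the
 * existing caller dlm_lowcomms_start() does exactly that on its failure
 * path, e.g.
 *
 *	dlm_close_sock(&listen_con.sock);
 *
 * (a pointer to the existing call site, not additional behaviour)
 */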
13726ed7257bSPatrick Caulfield 
13736ed7257bSPatrick Caulfield /* Get local addresses */
13746ed7257bSPatrick Caulfield static void init_local(void)
13756ed7257bSPatrick Caulfield {
13766ed7257bSPatrick Caulfield 	struct sockaddr_storage sas, *addr;
13776ed7257bSPatrick Caulfield 	int i;
13786ed7257bSPatrick Caulfield 
137930d3a237SPatrick Caulfield 	dlm_local_count = 0;
13801b189b88SDavid Teigland 	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
13816ed7257bSPatrick Caulfield 		if (dlm_our_addr(&sas, i))
13826ed7257bSPatrick Caulfield 			break;
13836ed7257bSPatrick Caulfield 
13845c93f56fSAmitoj Kaur Chawla 		addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
13856ed7257bSPatrick Caulfield 		if (!addr)
13866ed7257bSPatrick Caulfield 			break;
13876ed7257bSPatrick Caulfield 		dlm_local_addr[dlm_local_count++] = addr;
13886ed7257bSPatrick Caulfield 	}
13896ed7257bSPatrick Caulfield }
13906ed7257bSPatrick Caulfield 
1391043697f0SAlexander Aring static void deinit_local(void)
1392043697f0SAlexander Aring {
1393043697f0SAlexander Aring 	int i;
1394043697f0SAlexander Aring 
1395043697f0SAlexander Aring 	for (i = 0; i < dlm_local_count; i++)
1396043697f0SAlexander Aring 		kfree(dlm_local_addr[i]);
1397043697f0SAlexander Aring }
1398043697f0SAlexander Aring 
1399d11ccd45SAlexander Aring /* Initialise SCTP socket and bind to all interfaces
1400d11ccd45SAlexander Aring  * On error caller must run dlm_close_sock() for the
1401d11ccd45SAlexander Aring  * listen connection socket.
1402d11ccd45SAlexander Aring  */
1403d11ccd45SAlexander Aring static int sctp_listen_for_all(struct listen_connection *con)
14046ed7257bSPatrick Caulfield {
14056ed7257bSPatrick Caulfield 	struct socket *sock = NULL;
1406ee44b4bcSMarcelo Ricardo Leitner 	int result = -EINVAL;
14076ed7257bSPatrick Caulfield 
14086ed7257bSPatrick Caulfield 	log_print("Using SCTP for communications");
14096ed7257bSPatrick Caulfield 
1410eeb1bd5cSEric W. Biederman 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
1411ee44b4bcSMarcelo Ricardo Leitner 				  SOCK_STREAM, IPPROTO_SCTP, &sock);
14126ed7257bSPatrick Caulfield 	if (result < 0) {
14136ed7257bSPatrick Caulfield 		log_print("Can't create comms socket, check SCTP is loaded");
14146ed7257bSPatrick Caulfield 		goto out;
14156ed7257bSPatrick Caulfield 	}
14166ed7257bSPatrick Caulfield 
141726cfabf9SChristoph Hellwig 	sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
1418a5b7ab63SAlexander Aring 	sock_set_mark(sock->sk, dlm_config.ci_mark);
141940ef92c6SChristoph Hellwig 	sctp_sock_set_nodelay(sock->sk);
142086e92ad2SMike Christie 
1421d11ccd45SAlexander Aring 	add_listen_sock(sock, con);
1422b81171cbSBob Peterson 
1423ee44b4bcSMarcelo Ricardo Leitner 	/* Bind to all addresses. */
1424d11ccd45SAlexander Aring 	result = sctp_bind_addrs(con->sock, dlm_config.ci_tcp_port);
1425d11ccd45SAlexander Aring 	if (result < 0)
1426d11ccd45SAlexander Aring 		goto out;
14276ed7257bSPatrick Caulfield 
14286ed7257bSPatrick Caulfield 	result = sock->ops->listen(sock, 5);
14296ed7257bSPatrick Caulfield 	if (result < 0) {
14306ed7257bSPatrick Caulfield 		log_print("Can't set socket listening");
1431d11ccd45SAlexander Aring 		goto out;
14326ed7257bSPatrick Caulfield 	}
14336ed7257bSPatrick Caulfield 
14346ed7257bSPatrick Caulfield 	return 0;
14356ed7257bSPatrick Caulfield 
14366ed7257bSPatrick Caulfield out:
14376ed7257bSPatrick Caulfield 	return result;
14386ed7257bSPatrick Caulfield }
14396ed7257bSPatrick Caulfield 
14406ed7257bSPatrick Caulfield static int tcp_listen_for_all(void)
14416ed7257bSPatrick Caulfield {
14426ed7257bSPatrick Caulfield 	/* We don't support multi-homed hosts */
14431a26bfafSAlexander Aring 	if (dlm_local_count > 1) {
1444617e82e1SDavid Teigland 		log_print("TCP protocol can't handle multi-homed hosts, "
1445617e82e1SDavid Teigland 			  "try SCTP");
14466ed7257bSPatrick Caulfield 		return -EINVAL;
14476ed7257bSPatrick Caulfield 	}
14486ed7257bSPatrick Caulfield 
14496ed7257bSPatrick Caulfield 	log_print("Using TCP for communications");
14506ed7257bSPatrick Caulfield 
1451d11ccd45SAlexander Aring 	return tcp_create_listen_sock(&listen_con, dlm_local_addr[0]);
14526ed7257bSPatrick Caulfield }
14536ed7257bSPatrick Caulfield 
14546ed7257bSPatrick Caulfield 
14556ed7257bSPatrick Caulfield 
14566ed7257bSPatrick Caulfield static struct writequeue_entry *new_writequeue_entry(struct connection *con,
14576ed7257bSPatrick Caulfield 						     gfp_t allocation)
14586ed7257bSPatrick Caulfield {
14596ed7257bSPatrick Caulfield 	struct writequeue_entry *entry;
14606ed7257bSPatrick Caulfield 
1461f0747ebfSAlexander Aring 	entry = kzalloc(sizeof(*entry), allocation);
14626ed7257bSPatrick Caulfield 	if (!entry)
14636ed7257bSPatrick Caulfield 		return NULL;
14646ed7257bSPatrick Caulfield 
1465e1a7cbceSAlexander Aring 	entry->page = alloc_page(allocation | __GFP_ZERO);
14666ed7257bSPatrick Caulfield 	if (!entry->page) {
14676ed7257bSPatrick Caulfield 		kfree(entry);
14686ed7257bSPatrick Caulfield 		return NULL;
14696ed7257bSPatrick Caulfield 	}
14706ed7257bSPatrick Caulfield 
14716ed7257bSPatrick Caulfield 	entry->con = con;
1472f0747ebfSAlexander Aring 	entry->users = 1;
14738f2dc78dSAlexander Aring 	kref_init(&entry->ref);
14748f2dc78dSAlexander Aring 	INIT_LIST_HEAD(&entry->msgs);
14756ed7257bSPatrick Caulfield 
14766ed7257bSPatrick Caulfield 	return entry;
14776ed7257bSPatrick Caulfield }
14786ed7257bSPatrick Caulfield 
1479f0747ebfSAlexander Aring static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
14808f2dc78dSAlexander Aring 					     gfp_t allocation, char **ppc,
14818f2dc78dSAlexander Aring 					     void (*cb)(struct dlm_mhandle *mh),
14828f2dc78dSAlexander Aring 					     struct dlm_mhandle *mh)
1483f0747ebfSAlexander Aring {
1484f0747ebfSAlexander Aring 	struct writequeue_entry *e;
1485f0747ebfSAlexander Aring 
1486f0747ebfSAlexander Aring 	spin_lock(&con->writequeue_lock);
1487f0747ebfSAlexander Aring 	if (!list_empty(&con->writequeue)) {
1488f0747ebfSAlexander Aring 		e = list_last_entry(&con->writequeue, struct writequeue_entry, list);
1489f0747ebfSAlexander Aring 		if (DLM_WQ_REMAIN_BYTES(e) >= len) {
14908f2dc78dSAlexander Aring 			kref_get(&e->ref);
14918f2dc78dSAlexander Aring 
1492f0747ebfSAlexander Aring 			*ppc = page_address(e->page) + e->end;
14938f2dc78dSAlexander Aring 			if (cb)
14948f2dc78dSAlexander Aring 				cb(mh);
14958f2dc78dSAlexander Aring 
1496f0747ebfSAlexander Aring 			e->end += len;
1497f0747ebfSAlexander Aring 			e->users++;
1498f0747ebfSAlexander Aring 			spin_unlock(&con->writequeue_lock);
1499f0747ebfSAlexander Aring 
1500f0747ebfSAlexander Aring 			return e;
1501f0747ebfSAlexander Aring 		}
1502f0747ebfSAlexander Aring 	}
1503f0747ebfSAlexander Aring 	spin_unlock(&con->writequeue_lock);
1504f0747ebfSAlexander Aring 
1505f0747ebfSAlexander Aring 	e = new_writequeue_entry(con, allocation);
1506f0747ebfSAlexander Aring 	if (!e)
1507f0747ebfSAlexander Aring 		return NULL;
1508f0747ebfSAlexander Aring 
15098f2dc78dSAlexander Aring 	kref_get(&e->ref);
1510f0747ebfSAlexander Aring 	*ppc = page_address(e->page);
1511f0747ebfSAlexander Aring 	e->end += len;
15128aa31cbfSAlexander Aring 	atomic_inc(&con->writequeue_cnt);
1513f0747ebfSAlexander Aring 
1514f0747ebfSAlexander Aring 	spin_lock(&con->writequeue_lock);
15158f2dc78dSAlexander Aring 	if (cb)
15168f2dc78dSAlexander Aring 		cb(mh);
15178f2dc78dSAlexander Aring 
1518f0747ebfSAlexander Aring 	list_add_tail(&e->list, &con->writequeue);
1519f0747ebfSAlexander Aring 	spin_unlock(&con->writequeue_lock);
1520f0747ebfSAlexander Aring 
1521f0747ebfSAlexander Aring 	return e;
1522f0747ebfSAlexander Aring }
1523f0747ebfSAlexander Aring 
15242874d1a6SAlexander Aring static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
15252874d1a6SAlexander Aring 						gfp_t allocation, char **ppc,
15262874d1a6SAlexander Aring 						void (*cb)(struct dlm_mhandle *mh),
15272874d1a6SAlexander Aring 						struct dlm_mhandle *mh)
15282874d1a6SAlexander Aring {
15292874d1a6SAlexander Aring 	struct writequeue_entry *e;
15302874d1a6SAlexander Aring 	struct dlm_msg *msg;
15312874d1a6SAlexander Aring 
15322874d1a6SAlexander Aring 	msg = kzalloc(sizeof(*msg), allocation);
15332874d1a6SAlexander Aring 	if (!msg)
15342874d1a6SAlexander Aring 		return NULL;
15352874d1a6SAlexander Aring 
15362874d1a6SAlexander Aring 	kref_init(&msg->ref);
15372874d1a6SAlexander Aring 
15382874d1a6SAlexander Aring 	e = new_wq_entry(con, len, allocation, ppc, cb, mh);
15392874d1a6SAlexander Aring 	if (!e) {
15402874d1a6SAlexander Aring 		kfree(msg);
15412874d1a6SAlexander Aring 		return NULL;
15422874d1a6SAlexander Aring 	}
15432874d1a6SAlexander Aring 
15442874d1a6SAlexander Aring 	msg->ppc = *ppc;
15452874d1a6SAlexander Aring 	msg->len = len;
15462874d1a6SAlexander Aring 	msg->entry = e;
15472874d1a6SAlexander Aring 
15482874d1a6SAlexander Aring 	return msg;
15492874d1a6SAlexander Aring }
15502874d1a6SAlexander Aring 
15518f2dc78dSAlexander Aring struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
15528f2dc78dSAlexander Aring 				     char **ppc, void (*cb)(struct dlm_mhandle *mh),
15538f2dc78dSAlexander Aring 				     struct dlm_mhandle *mh)
15546ed7257bSPatrick Caulfield {
15556ed7257bSPatrick Caulfield 	struct connection *con;
15568f2dc78dSAlexander Aring 	struct dlm_msg *msg;
1557b38bc9c2SAlexander Aring 	int idx;
15586ed7257bSPatrick Caulfield 
1559*d10a0b88SAlexander Aring 	if (len > DLM_MAX_SOCKET_BUFSIZE ||
1560c45674fbSAlexander Aring 	    len < sizeof(struct dlm_header)) {
1561*d10a0b88SAlexander Aring 		BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE);
1562692f51c8SAlexander Aring 		log_print("failed to allocate a buffer of size %d", len);
1563c45674fbSAlexander Aring 		WARN_ON(1);
1564692f51c8SAlexander Aring 		return NULL;
1565692f51c8SAlexander Aring 	}
1566692f51c8SAlexander Aring 
1567b38bc9c2SAlexander Aring 	idx = srcu_read_lock(&connections_srcu);
15686ed7257bSPatrick Caulfield 	con = nodeid2con(nodeid, allocation);
1569b38bc9c2SAlexander Aring 	if (!con) {
1570b38bc9c2SAlexander Aring 		srcu_read_unlock(&connections_srcu, idx);
15716ed7257bSPatrick Caulfield 		return NULL;
1572b38bc9c2SAlexander Aring 	}
15736ed7257bSPatrick Caulfield 
15742874d1a6SAlexander Aring 	msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, mh);
15758f2dc78dSAlexander Aring 	if (!msg) {
1576b38bc9c2SAlexander Aring 		srcu_read_unlock(&connections_srcu, idx);
1577b38bc9c2SAlexander Aring 		return NULL;
1578b38bc9c2SAlexander Aring 	}
1579b38bc9c2SAlexander Aring 
15808f2dc78dSAlexander Aring 	/* on success the caller must call dlm_lowcomms_commit_msg() */
15818f2dc78dSAlexander Aring 	msg->idx = idx;
15828f2dc78dSAlexander Aring 	return msg;
15838f2dc78dSAlexander Aring }
15848f2dc78dSAlexander Aring 
15852874d1a6SAlexander Aring static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
15866ed7257bSPatrick Caulfield {
15878f2dc78dSAlexander Aring 	struct writequeue_entry *e = msg->entry;
15886ed7257bSPatrick Caulfield 	struct connection *con = e->con;
15896ed7257bSPatrick Caulfield 	int users;
15906ed7257bSPatrick Caulfield 
15916ed7257bSPatrick Caulfield 	spin_lock(&con->writequeue_lock);
15928f2dc78dSAlexander Aring 	kref_get(&msg->ref);
15938f2dc78dSAlexander Aring 	list_add(&msg->list, &e->msgs);
15948f2dc78dSAlexander Aring 
15956ed7257bSPatrick Caulfield 	users = --e->users;
15966ed7257bSPatrick Caulfield 	if (users)
15976ed7257bSPatrick Caulfield 		goto out;
1598f0747ebfSAlexander Aring 
1599f0747ebfSAlexander Aring 	e->len = DLM_WQ_LENGTH_BYTES(e);
16006ed7257bSPatrick Caulfield 	spin_unlock(&con->writequeue_lock);
16016ed7257bSPatrick Caulfield 
16026ed7257bSPatrick Caulfield 	queue_work(send_workqueue, &con->swork);
16036ed7257bSPatrick Caulfield 	return;
16046ed7257bSPatrick Caulfield 
16056ed7257bSPatrick Caulfield out:
16066ed7257bSPatrick Caulfield 	spin_unlock(&con->writequeue_lock);
16076ed7257bSPatrick Caulfield 	return;
16086ed7257bSPatrick Caulfield }
16096ed7257bSPatrick Caulfield 
16102874d1a6SAlexander Aring void dlm_lowcomms_commit_msg(struct dlm_msg *msg)
16112874d1a6SAlexander Aring {
16122874d1a6SAlexander Aring 	_dlm_lowcomms_commit_msg(msg);
16132874d1a6SAlexander Aring 	srcu_read_unlock(&connections_srcu, msg->idx);
16142874d1a6SAlexander Aring }
16152874d1a6SAlexander Aring 
16168f2dc78dSAlexander Aring void dlm_lowcomms_put_msg(struct dlm_msg *msg)
16178f2dc78dSAlexander Aring {
16188f2dc78dSAlexander Aring 	kref_put(&msg->ref, dlm_msg_release);
16198f2dc78dSAlexander Aring }
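
/* Typical lifecycle for users of this message API (a hedged sketch based on
 * how dlm_lowcomms_resend_msg() below drives the same helpers; "data" and
 * the allocation flags are placeholders chosen by the caller):
 *
 *	msg = dlm_lowcomms_new_msg(nodeid, len, GFP_NOFS, &ppc, NULL, NULL);
 *	if (!msg)
 *		return -ENOMEM;
 *	memcpy(ppc, data, len);		(fill the reserved writequeue space)
 *	dlm_lowcomms_commit_msg(msg);	(queue it for the send worker)
 *	dlm_lowcomms_put_msg(msg);	(drop the caller's reference)
 */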
16208f2dc78dSAlexander Aring 
16212874d1a6SAlexander Aring /* connections_srcu is not held here; only used from the workqueue */
16222874d1a6SAlexander Aring int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
16232874d1a6SAlexander Aring {
16242874d1a6SAlexander Aring 	struct dlm_msg *msg_resend;
16252874d1a6SAlexander Aring 	char *ppc;
16262874d1a6SAlexander Aring 
16272874d1a6SAlexander Aring 	if (msg->retransmit)
16282874d1a6SAlexander Aring 		return 1;
16292874d1a6SAlexander Aring 
16302874d1a6SAlexander Aring 	msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len,
16312874d1a6SAlexander Aring 					      GFP_ATOMIC, &ppc, NULL, NULL);
16322874d1a6SAlexander Aring 	if (!msg_resend)
16332874d1a6SAlexander Aring 		return -ENOMEM;
16342874d1a6SAlexander Aring 
16352874d1a6SAlexander Aring 	msg->retransmit = true;
16362874d1a6SAlexander Aring 	kref_get(&msg->ref);
16372874d1a6SAlexander Aring 	msg_resend->orig_msg = msg;
16382874d1a6SAlexander Aring 
16392874d1a6SAlexander Aring 	memcpy(ppc, msg->ppc, msg->len);
16402874d1a6SAlexander Aring 	_dlm_lowcomms_commit_msg(msg_resend);
16412874d1a6SAlexander Aring 	dlm_lowcomms_put_msg(msg_resend);
16422874d1a6SAlexander Aring 
16432874d1a6SAlexander Aring 	return 0;
16442874d1a6SAlexander Aring }
16452874d1a6SAlexander Aring 
16466ed7257bSPatrick Caulfield /* Send a message */
16476ed7257bSPatrick Caulfield static void send_to_sock(struct connection *con)
16486ed7257bSPatrick Caulfield {
16496ed7257bSPatrick Caulfield 	int ret = 0;
16506ed7257bSPatrick Caulfield 	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
16516ed7257bSPatrick Caulfield 	struct writequeue_entry *e;
16526ed7257bSPatrick Caulfield 	int len, offset;
1653f92c8dd7SBob Peterson 	int count = 0;
16546ed7257bSPatrick Caulfield 
16556ed7257bSPatrick Caulfield 	mutex_lock(&con->sock_mutex);
16566ed7257bSPatrick Caulfield 	if (con->sock == NULL)
16576ed7257bSPatrick Caulfield 		goto out_connect;
16586ed7257bSPatrick Caulfield 
16596ed7257bSPatrick Caulfield 	spin_lock(&con->writequeue_lock);
16606ed7257bSPatrick Caulfield 	for (;;) {
1661f0747ebfSAlexander Aring 		if (list_empty(&con->writequeue))
16626ed7257bSPatrick Caulfield 			break;
16636ed7257bSPatrick Caulfield 
1664f0747ebfSAlexander Aring 		e = list_first_entry(&con->writequeue, struct writequeue_entry, list);
16656ed7257bSPatrick Caulfield 		len = e->len;
16666ed7257bSPatrick Caulfield 		offset = e->offset;
16676ed7257bSPatrick Caulfield 		BUG_ON(len == 0 && e->users == 0);
16686ed7257bSPatrick Caulfield 		spin_unlock(&con->writequeue_lock);
16696ed7257bSPatrick Caulfield 
16706ed7257bSPatrick Caulfield 		ret = 0;
16716ed7257bSPatrick Caulfield 		if (len) {
16721329e3f2SPaolo Bonzini 			ret = kernel_sendpage(con->sock, e->page, offset, len,
16736ed7257bSPatrick Caulfield 					      msg_flags);
1674d66f8277SPatrick Caulfield 			if (ret == -EAGAIN || ret == 0) {
1675b36930ddSDavid Miller 				if (ret == -EAGAIN &&
16769cd3e072SEric Dumazet 				    test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
1677b36930ddSDavid Miller 				    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
1678b36930ddSDavid Miller 					/* Notify TCP that we're limited by the
1679b36930ddSDavid Miller 					 * application window size.
1680b36930ddSDavid Miller 					 */
1681b36930ddSDavid Miller 					set_bit(SOCK_NOSPACE, &con->sock->flags);
1682b36930ddSDavid Miller 					con->sock->sk->sk_write_pending++;
1683b36930ddSDavid Miller 				}
1684d66f8277SPatrick Caulfield 				cond_resched();
16856ed7257bSPatrick Caulfield 				goto out;
16869c5bef58SYing Xue 			} else if (ret < 0)
1687ba868d9dSAlexander Aring 				goto out;
1688d66f8277SPatrick Caulfield 		}
1689f92c8dd7SBob Peterson 
16906ed7257bSPatrick Caulfield 		/* Don't starve people filling buffers */
1691f92c8dd7SBob Peterson 		if (++count >= MAX_SEND_MSG_COUNT) {
16926ed7257bSPatrick Caulfield 			cond_resched();
1693f92c8dd7SBob Peterson 			count = 0;
1694f92c8dd7SBob Peterson 		}
16956ed7257bSPatrick Caulfield 
16966ed7257bSPatrick Caulfield 		spin_lock(&con->writequeue_lock);
16975d689871SMike Christie 		writequeue_entry_complete(e, ret);
16986ed7257bSPatrick Caulfield 	}
16996ed7257bSPatrick Caulfield 	spin_unlock(&con->writequeue_lock);
17008aa31cbfSAlexander Aring 
17018aa31cbfSAlexander Aring 	/* close if we got EOF */
17028aa31cbfSAlexander Aring 	if (test_and_clear_bit(CF_EOF, &con->flags)) {
17038aa31cbfSAlexander Aring 		mutex_unlock(&con->sock_mutex);
17048aa31cbfSAlexander Aring 		close_connection(con, false, false, true);
17058aa31cbfSAlexander Aring 
17068aa31cbfSAlexander Aring 		/* handling for tcp shutdown */
17078aa31cbfSAlexander Aring 		clear_bit(CF_SHUTDOWN, &con->flags);
17088aa31cbfSAlexander Aring 		wake_up(&con->shutdown_wait);
17098aa31cbfSAlexander Aring 	} else {
17108aa31cbfSAlexander Aring 		mutex_unlock(&con->sock_mutex);
17118aa31cbfSAlexander Aring 	}
17128aa31cbfSAlexander Aring 
17138aa31cbfSAlexander Aring 	return;
17148aa31cbfSAlexander Aring 
17156ed7257bSPatrick Caulfield out:
17166ed7257bSPatrick Caulfield 	mutex_unlock(&con->sock_mutex);
17176ed7257bSPatrick Caulfield 	return;
17186ed7257bSPatrick Caulfield 
17196ed7257bSPatrick Caulfield out_connect:
17206ed7257bSPatrick Caulfield 	mutex_unlock(&con->sock_mutex);
172101da24d3SBob Peterson 	queue_work(send_workqueue, &con->swork);
172201da24d3SBob Peterson 	cond_resched();
17236ed7257bSPatrick Caulfield }
17246ed7257bSPatrick Caulfield 
17256ed7257bSPatrick Caulfield static void clean_one_writequeue(struct connection *con)
17266ed7257bSPatrick Caulfield {
17275e9ccc37SChristine Caulfield 	struct writequeue_entry *e, *safe;
17286ed7257bSPatrick Caulfield 
17296ed7257bSPatrick Caulfield 	spin_lock(&con->writequeue_lock);
17305e9ccc37SChristine Caulfield 	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
17316ed7257bSPatrick Caulfield 		free_entry(e);
17326ed7257bSPatrick Caulfield 	}
17336ed7257bSPatrick Caulfield 	spin_unlock(&con->writequeue_lock);
17346ed7257bSPatrick Caulfield }
17356ed7257bSPatrick Caulfield 
17366ed7257bSPatrick Caulfield /* Called from recovery when it knows that a node has
17376ed7257bSPatrick Caulfield    left the cluster */
17386ed7257bSPatrick Caulfield int dlm_lowcomms_close(int nodeid)
17396ed7257bSPatrick Caulfield {
17406ed7257bSPatrick Caulfield 	struct connection *con;
174136b71a8bSDavid Teigland 	struct dlm_node_addr *na;
1742b38bc9c2SAlexander Aring 	int idx;
17436ed7257bSPatrick Caulfield 
17446ed7257bSPatrick Caulfield 	log_print("closing connection to node %d", nodeid);
1745b38bc9c2SAlexander Aring 	idx = srcu_read_lock(&connections_srcu);
17466ed7257bSPatrick Caulfield 	con = nodeid2con(nodeid, 0);
17476ed7257bSPatrick Caulfield 	if (con) {
1748063c4c99SLars Marowsky-Bree 		set_bit(CF_CLOSE, &con->flags);
17490d737a8cSMarcelo Ricardo Leitner 		close_connection(con, true, true, true);
17506ed7257bSPatrick Caulfield 		clean_one_writequeue(con);
175153a5edaaSAlexander Aring 		if (con->othercon)
175253a5edaaSAlexander Aring 			clean_one_writequeue(con->othercon);
17536ed7257bSPatrick Caulfield 	}
1754b38bc9c2SAlexander Aring 	srcu_read_unlock(&connections_srcu, idx);
175536b71a8bSDavid Teigland 
175636b71a8bSDavid Teigland 	spin_lock(&dlm_node_addrs_spin);
175736b71a8bSDavid Teigland 	na = find_node_addr(nodeid);
175836b71a8bSDavid Teigland 	if (na) {
175936b71a8bSDavid Teigland 		list_del(&na->list);
176036b71a8bSDavid Teigland 		while (na->addr_count--)
176136b71a8bSDavid Teigland 			kfree(na->addr[na->addr_count]);
176236b71a8bSDavid Teigland 		kfree(na);
176336b71a8bSDavid Teigland 	}
176436b71a8bSDavid Teigland 	spin_unlock(&dlm_node_addrs_spin);
176536b71a8bSDavid Teigland 
17666ed7257bSPatrick Caulfield 	return 0;
17676ed7257bSPatrick Caulfield }
17686ed7257bSPatrick Caulfield 
17696ed7257bSPatrick Caulfield /* Receive workqueue function */
17706ed7257bSPatrick Caulfield static void process_recv_sockets(struct work_struct *work)
17716ed7257bSPatrick Caulfield {
17726ed7257bSPatrick Caulfield 	struct connection *con = container_of(work, struct connection, rwork);
17736ed7257bSPatrick Caulfield 	int err;
17746ed7257bSPatrick Caulfield 
17756ed7257bSPatrick Caulfield 	clear_bit(CF_READ_PENDING, &con->flags);
17766ed7257bSPatrick Caulfield 	do {
1777d11ccd45SAlexander Aring 		err = receive_from_sock(con);
17786ed7257bSPatrick Caulfield 	} while (!err);
17796ed7257bSPatrick Caulfield }
17806ed7257bSPatrick Caulfield 
1781d11ccd45SAlexander Aring static void process_listen_recv_socket(struct work_struct *work)
1782d11ccd45SAlexander Aring {
1783d11ccd45SAlexander Aring 	accept_from_sock(&listen_con);
1784d11ccd45SAlexander Aring }
1785d11ccd45SAlexander Aring 
17866ed7257bSPatrick Caulfield /* Send workqueue function */
17876ed7257bSPatrick Caulfield static void process_send_sockets(struct work_struct *work)
17886ed7257bSPatrick Caulfield {
17896ed7257bSPatrick Caulfield 	struct connection *con = container_of(work, struct connection, swork);
17906ed7257bSPatrick Caulfield 
17917443bc96SAlexander Aring 	WARN_ON(test_bit(CF_IS_OTHERCON, &con->flags));
17927443bc96SAlexander Aring 
17938a4abb08Stsutomu.owa@toshiba.co.jp 	clear_bit(CF_WRITE_PENDING, &con->flags);
1794ba868d9dSAlexander Aring 
1795489d8e55SAlexander Aring 	if (test_and_clear_bit(CF_RECONNECT, &con->flags)) {
1796ba868d9dSAlexander Aring 		close_connection(con, false, false, true);
1797489d8e55SAlexander Aring 		dlm_midcomms_unack_msg_resend(con->nodeid);
1798489d8e55SAlexander Aring 	}
1799ba868d9dSAlexander Aring 
1800ba868d9dSAlexander Aring 	if (con->sock == NULL) { /* not mutex protected so check it inside too */
1801ba868d9dSAlexander Aring 		if (test_and_clear_bit(CF_DELAY_CONNECT, &con->flags))
1802ba868d9dSAlexander Aring 			msleep(1000);
18036ed7257bSPatrick Caulfield 		con->connect_action(con);
1804ba868d9dSAlexander Aring 	}
180501da24d3SBob Peterson 	if (!list_empty(&con->writequeue))
18066ed7257bSPatrick Caulfield 		send_to_sock(con);
18076ed7257bSPatrick Caulfield }
18086ed7257bSPatrick Caulfield 
18096ed7257bSPatrick Caulfield static void work_stop(void)
18106ed7257bSPatrick Caulfield {
1811fcef0e6cSAlexander Aring 	if (recv_workqueue) {
18126ed7257bSPatrick Caulfield 		destroy_workqueue(recv_workqueue);
1813fcef0e6cSAlexander Aring 		recv_workqueue = NULL;
1814fcef0e6cSAlexander Aring 	}
1815fcef0e6cSAlexander Aring 
1816fcef0e6cSAlexander Aring 	if (send_workqueue) {
18176ed7257bSPatrick Caulfield 		destroy_workqueue(send_workqueue);
1818fcef0e6cSAlexander Aring 		send_workqueue = NULL;
1819fcef0e6cSAlexander Aring 	}
18206ed7257bSPatrick Caulfield }
18216ed7257bSPatrick Caulfield 
18226ed7257bSPatrick Caulfield static int work_start(void)
18236ed7257bSPatrick Caulfield {
18246c6a1cc6SAlexander Aring 	recv_workqueue = alloc_ordered_workqueue("dlm_recv", WQ_MEM_RECLAIM);
1825b9d41052SNamhyung Kim 	if (!recv_workqueue) {
1826b9d41052SNamhyung Kim 		log_print("can't start dlm_recv");
1827b9d41052SNamhyung Kim 		return -ENOMEM;
18286ed7257bSPatrick Caulfield 	}
18296ed7257bSPatrick Caulfield 
18306c6a1cc6SAlexander Aring 	send_workqueue = alloc_ordered_workqueue("dlm_send", WQ_MEM_RECLAIM);
1831b9d41052SNamhyung Kim 	if (!send_workqueue) {
1832b9d41052SNamhyung Kim 		log_print("can't start dlm_send");
18336ed7257bSPatrick Caulfield 		destroy_workqueue(recv_workqueue);
1834fcef0e6cSAlexander Aring 		recv_workqueue = NULL;
1835b9d41052SNamhyung Kim 		return -ENOMEM;
18366ed7257bSPatrick Caulfield 	}
18376ed7257bSPatrick Caulfield 
18386ed7257bSPatrick Caulfield 	return 0;
18396ed7257bSPatrick Caulfield }
18406ed7257bSPatrick Caulfield 
18419d232469SAlexander Aring static void shutdown_conn(struct connection *con)
18429d232469SAlexander Aring {
18439d232469SAlexander Aring 	if (con->shutdown_action)
18449d232469SAlexander Aring 		con->shutdown_action(con);
18459d232469SAlexander Aring }
18469d232469SAlexander Aring 
18479d232469SAlexander Aring void dlm_lowcomms_shutdown(void)
18489d232469SAlexander Aring {
1849b38bc9c2SAlexander Aring 	int idx;
1850b38bc9c2SAlexander Aring 
18519d232469SAlexander Aring 	/* Set all the flags to prevent any
18529d232469SAlexander Aring 	 * socket activity.
18539d232469SAlexander Aring 	 */
18549d232469SAlexander Aring 	dlm_allow_conn = 0;
18559d232469SAlexander Aring 
18569d232469SAlexander Aring 	if (recv_workqueue)
18579d232469SAlexander Aring 		flush_workqueue(recv_workqueue);
18589d232469SAlexander Aring 	if (send_workqueue)
18599d232469SAlexander Aring 		flush_workqueue(send_workqueue);
18609d232469SAlexander Aring 
18619d232469SAlexander Aring 	dlm_close_sock(&listen_con.sock);
18629d232469SAlexander Aring 
1863b38bc9c2SAlexander Aring 	idx = srcu_read_lock(&connections_srcu);
18649d232469SAlexander Aring 	foreach_conn(shutdown_conn);
1865b38bc9c2SAlexander Aring 	srcu_read_unlock(&connections_srcu, idx);
18669d232469SAlexander Aring }
18679d232469SAlexander Aring 
1868f0fb83cbStsutomu.owa@toshiba.co.jp static void _stop_conn(struct connection *con, bool and_other)
18696ed7257bSPatrick Caulfield {
1870f0fb83cbStsutomu.owa@toshiba.co.jp 	mutex_lock(&con->sock_mutex);
1871173a31feStsutomu.owa@toshiba.co.jp 	set_bit(CF_CLOSE, &con->flags);
1872f0fb83cbStsutomu.owa@toshiba.co.jp 	set_bit(CF_READ_PENDING, &con->flags);
18738a4abb08Stsutomu.owa@toshiba.co.jp 	set_bit(CF_WRITE_PENDING, &con->flags);
187493eaadebStsutomu.owa@toshiba.co.jp 	if (con->sock && con->sock->sk) {
187593eaadebStsutomu.owa@toshiba.co.jp 		write_lock_bh(&con->sock->sk->sk_callback_lock);
1876afb853fbSPatrick Caulfield 		con->sock->sk->sk_user_data = NULL;
187793eaadebStsutomu.owa@toshiba.co.jp 		write_unlock_bh(&con->sock->sk->sk_callback_lock);
187893eaadebStsutomu.owa@toshiba.co.jp 	}
1879f0fb83cbStsutomu.owa@toshiba.co.jp 	if (con->othercon && and_other)
1880f0fb83cbStsutomu.owa@toshiba.co.jp 		_stop_conn(con->othercon, false);
1881f0fb83cbStsutomu.owa@toshiba.co.jp 	mutex_unlock(&con->sock_mutex);
1882f0fb83cbStsutomu.owa@toshiba.co.jp }
1883f0fb83cbStsutomu.owa@toshiba.co.jp 
1884f0fb83cbStsutomu.owa@toshiba.co.jp static void stop_conn(struct connection *con)
1885f0fb83cbStsutomu.owa@toshiba.co.jp {
1886f0fb83cbStsutomu.owa@toshiba.co.jp 	_stop_conn(con, true);
1887afb853fbSPatrick Caulfield }
18885e9ccc37SChristine Caulfield 
18894798cbbfSAlexander Aring static void connection_release(struct rcu_head *rcu)
18904798cbbfSAlexander Aring {
18914798cbbfSAlexander Aring 	struct connection *con = container_of(rcu, struct connection, rcu);
18924798cbbfSAlexander Aring 
18934798cbbfSAlexander Aring 	kfree(con->rx_buf);
18944798cbbfSAlexander Aring 	kfree(con);
18954798cbbfSAlexander Aring }
18964798cbbfSAlexander Aring 
18975e9ccc37SChristine Caulfield static void free_conn(struct connection *con)
18985e9ccc37SChristine Caulfield {
18990d737a8cSMarcelo Ricardo Leitner 	close_connection(con, true, true, true);
1900a47666ebSAlexander Aring 	spin_lock(&connections_lock);
1901a47666ebSAlexander Aring 	hlist_del_rcu(&con->list);
1902a47666ebSAlexander Aring 	spin_unlock(&connections_lock);
1903948c47e9SAlexander Aring 	if (con->othercon) {
1904948c47e9SAlexander Aring 		clean_one_writequeue(con->othercon);
19055cbec208SAlexander Aring 		call_srcu(&connections_srcu, &con->othercon->rcu,
19065cbec208SAlexander Aring 			  connection_release);
1907948c47e9SAlexander Aring 	}
19080de98432SAlexander Aring 	clean_one_writequeue(con);
19095cbec208SAlexander Aring 	call_srcu(&connections_srcu, &con->rcu, connection_release);
19106ed7257bSPatrick Caulfield }
19115e9ccc37SChristine Caulfield 
1912f0fb83cbStsutomu.owa@toshiba.co.jp static void work_flush(void)
1913f0fb83cbStsutomu.owa@toshiba.co.jp {
1914b38bc9c2SAlexander Aring 	int ok;
1915f0fb83cbStsutomu.owa@toshiba.co.jp 	int i;
1916f0fb83cbStsutomu.owa@toshiba.co.jp 	struct connection *con;
1917f0fb83cbStsutomu.owa@toshiba.co.jp 
1918f0fb83cbStsutomu.owa@toshiba.co.jp 	do {
1919f0fb83cbStsutomu.owa@toshiba.co.jp 		ok = 1;
1920f0fb83cbStsutomu.owa@toshiba.co.jp 		foreach_conn(stop_conn);
1921b355516fSDavid Windsor 		if (recv_workqueue)
1922f0fb83cbStsutomu.owa@toshiba.co.jp 			flush_workqueue(recv_workqueue);
1923b355516fSDavid Windsor 		if (send_workqueue)
1924f0fb83cbStsutomu.owa@toshiba.co.jp 			flush_workqueue(send_workqueue);
1925f0fb83cbStsutomu.owa@toshiba.co.jp 		for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
1926a47666ebSAlexander Aring 			hlist_for_each_entry_rcu(con, &connection_hash[i],
1927a47666ebSAlexander Aring 						 list) {
1928f0fb83cbStsutomu.owa@toshiba.co.jp 				ok &= test_bit(CF_READ_PENDING, &con->flags);
19298a4abb08Stsutomu.owa@toshiba.co.jp 				ok &= test_bit(CF_WRITE_PENDING, &con->flags);
19308a4abb08Stsutomu.owa@toshiba.co.jp 				if (con->othercon) {
1931f0fb83cbStsutomu.owa@toshiba.co.jp 					ok &= test_bit(CF_READ_PENDING,
1932f0fb83cbStsutomu.owa@toshiba.co.jp 						       &con->othercon->flags);
19338a4abb08Stsutomu.owa@toshiba.co.jp 					ok &= test_bit(CF_WRITE_PENDING,
19348a4abb08Stsutomu.owa@toshiba.co.jp 						       &con->othercon->flags);
19358a4abb08Stsutomu.owa@toshiba.co.jp 				}
1936f0fb83cbStsutomu.owa@toshiba.co.jp 			}
1937f0fb83cbStsutomu.owa@toshiba.co.jp 		}
1938f0fb83cbStsutomu.owa@toshiba.co.jp 	} while (!ok);
1939f0fb83cbStsutomu.owa@toshiba.co.jp }
1940f0fb83cbStsutomu.owa@toshiba.co.jp 
19415e9ccc37SChristine Caulfield void dlm_lowcomms_stop(void)
19425e9ccc37SChristine Caulfield {
1943b38bc9c2SAlexander Aring 	int idx;
1944b38bc9c2SAlexander Aring 
1945b38bc9c2SAlexander Aring 	idx = srcu_read_lock(&connections_srcu);
1946f0fb83cbStsutomu.owa@toshiba.co.jp 	work_flush();
19473a8db798SMarcelo Ricardo Leitner 	foreach_conn(free_conn);
1948b38bc9c2SAlexander Aring 	srcu_read_unlock(&connections_srcu, idx);
19496ed7257bSPatrick Caulfield 	work_stop();
1950043697f0SAlexander Aring 	deinit_local();
19516ed7257bSPatrick Caulfield }
19526ed7257bSPatrick Caulfield 
19536ed7257bSPatrick Caulfield int dlm_lowcomms_start(void)
19546ed7257bSPatrick Caulfield {
19556ed7257bSPatrick Caulfield 	int error = -EINVAL;
19565e9ccc37SChristine Caulfield 	int i;
19575e9ccc37SChristine Caulfield 
19585e9ccc37SChristine Caulfield 	for (i = 0; i < CONN_HASH_SIZE; i++)
19595e9ccc37SChristine Caulfield 		INIT_HLIST_HEAD(&connection_hash[i]);
19606ed7257bSPatrick Caulfield 
19616ed7257bSPatrick Caulfield 	init_local();
19626ed7257bSPatrick Caulfield 	if (!dlm_local_count) {
1963617e82e1SDavid Teigland 		error = -ENOTCONN;
19646ed7257bSPatrick Caulfield 		log_print("no local IP address has been set");
1965513ef596SDavid Teigland 		goto fail;
19666ed7257bSPatrick Caulfield 	}
19676ed7257bSPatrick Caulfield 
1968d11ccd45SAlexander Aring 	INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
1969d11ccd45SAlexander Aring 
1970513ef596SDavid Teigland 	error = work_start();
1971513ef596SDavid Teigland 	if (error)
1972fcef0e6cSAlexander Aring 		goto fail_local;
1973513ef596SDavid Teigland 
1974513ef596SDavid Teigland 	dlm_allow_conn = 1;
19756ed7257bSPatrick Caulfield 
19766ed7257bSPatrick Caulfield 	/* Start listening */
1977ac7d5d03SAlexander Aring 	switch (dlm_config.ci_protocol) {
1978ac7d5d03SAlexander Aring 	case DLM_PROTO_TCP:
19796ed7257bSPatrick Caulfield 		error = tcp_listen_for_all();
1980ac7d5d03SAlexander Aring 		break;
1981ac7d5d03SAlexander Aring 	case DLM_PROTO_SCTP:
1982d11ccd45SAlexander Aring 		error = sctp_listen_for_all(&listen_con);
1983ac7d5d03SAlexander Aring 		break;
1984ac7d5d03SAlexander Aring 	default:
1985ac7d5d03SAlexander Aring 		log_print("Invalid protocol identifier %d set",
1986ac7d5d03SAlexander Aring 			  dlm_config.ci_protocol);
1987ac7d5d03SAlexander Aring 		error = -EINVAL;
1988ac7d5d03SAlexander Aring 		break;
1989ac7d5d03SAlexander Aring 	}
19906ed7257bSPatrick Caulfield 	if (error)
19916ed7257bSPatrick Caulfield 		goto fail_unlisten;
19926ed7257bSPatrick Caulfield 
19936ed7257bSPatrick Caulfield 	return 0;
19946ed7257bSPatrick Caulfield 
19956ed7257bSPatrick Caulfield fail_unlisten:
1996513ef596SDavid Teigland 	dlm_allow_conn = 0;
1997d11ccd45SAlexander Aring 	dlm_close_sock(&listen_con.sock);
1998fcef0e6cSAlexander Aring 	work_stop();
1999fcef0e6cSAlexander Aring fail_local:
2000fcef0e6cSAlexander Aring 	deinit_local();
2001513ef596SDavid Teigland fail:
20026ed7257bSPatrick Caulfield 	return error;
20036ed7257bSPatrick Caulfield }
200436b71a8bSDavid Teigland 
200536b71a8bSDavid Teigland void dlm_lowcomms_exit(void)
200636b71a8bSDavid Teigland {
200736b71a8bSDavid Teigland 	struct dlm_node_addr *na, *safe;
200836b71a8bSDavid Teigland 
200936b71a8bSDavid Teigland 	spin_lock(&dlm_node_addrs_spin);
201036b71a8bSDavid Teigland 	list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
201136b71a8bSDavid Teigland 		list_del(&na->list);
201236b71a8bSDavid Teigland 		while (na->addr_count--)
201336b71a8bSDavid Teigland 			kfree(na->addr[na->addr_count]);
201436b71a8bSDavid Teigland 		kfree(na);
201536b71a8bSDavid Teigland 	}
201636b71a8bSDavid Teigland 	spin_unlock(&dlm_node_addrs_spin);
201736b71a8bSDavid Teigland }
2018