xref: /openbmc/linux/fs/dlm/lowcomms.c (revision 4c3d9057)
12522fe45SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
26ed7257bSPatrick Caulfield /******************************************************************************
36ed7257bSPatrick Caulfield *******************************************************************************
46ed7257bSPatrick Caulfield **
56ed7257bSPatrick Caulfield **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
65e9ccc37SChristine Caulfield **  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
76ed7257bSPatrick Caulfield **
86ed7257bSPatrick Caulfield **
96ed7257bSPatrick Caulfield *******************************************************************************
106ed7257bSPatrick Caulfield ******************************************************************************/
116ed7257bSPatrick Caulfield 
126ed7257bSPatrick Caulfield /*
136ed7257bSPatrick Caulfield  * lowcomms.c
146ed7257bSPatrick Caulfield  *
156ed7257bSPatrick Caulfield  * This is the "low-level" comms layer.
166ed7257bSPatrick Caulfield  *
176ed7257bSPatrick Caulfield  * It is responsible for sending/receiving messages
186ed7257bSPatrick Caulfield  * from other nodes in the cluster.
196ed7257bSPatrick Caulfield  *
206ed7257bSPatrick Caulfield  * Cluster nodes are referred to by their nodeids. nodeids are
216ed7257bSPatrick Caulfield  * simply 32 bit numbers to the locking module - if they need to
222cf12c0bSJoe Perches  * be expanded for the cluster infrastructure then that is its
236ed7257bSPatrick Caulfield  * responsibility. It is this layer's
246ed7257bSPatrick Caulfield  * responsibility to resolve these into IP addresses or
256ed7257bSPatrick Caulfield  * whatever it needs for inter-node communication.
266ed7257bSPatrick Caulfield  *
276ed7257bSPatrick Caulfield  * The comms level is two kernel threads that deal mainly with
286ed7257bSPatrick Caulfield  * the receiving of messages from other nodes and passing them
296ed7257bSPatrick Caulfield  * up to the mid-level comms layer (which understands the
306ed7257bSPatrick Caulfield  * message format) for execution by the locking core, and
316ed7257bSPatrick Caulfield  * a send thread which does all the setting up of connections
326ed7257bSPatrick Caulfield  * to remote nodes and the sending of data. Threads are not allowed
336ed7257bSPatrick Caulfield  * to send their own data because it may cause them to wait in times
346ed7257bSPatrick Caulfield  * of high load. Also, this way, the sending thread can collect together
356ed7257bSPatrick Caulfield  * messages bound for one node and send them in one block.
366ed7257bSPatrick Caulfield  *
372cf12c0bSJoe Perches  * lowcomms will choose to use either TCP or SCTP as its transport layer
386ed7257bSPatrick Caulfield  * depending on the configuration variable 'protocol'. This should be set
396ed7257bSPatrick Caulfield  * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
406ed7257bSPatrick Caulfield  * cluster-wide mechanism as it must be the same on all nodes of the cluster
416ed7257bSPatrick Caulfield  * for the DLM to function.
426ed7257bSPatrick Caulfield  *
436ed7257bSPatrick Caulfield  */
446ed7257bSPatrick Caulfield 
456ed7257bSPatrick Caulfield #include <asm/ioctls.h>
466ed7257bSPatrick Caulfield #include <net/sock.h>
476ed7257bSPatrick Caulfield #include <net/tcp.h>
486ed7257bSPatrick Caulfield #include <linux/pagemap.h>
496ed7257bSPatrick Caulfield #include <linux/file.h>
507a936ce7SMatthias Kaehlcke #include <linux/mutex.h>
516ed7257bSPatrick Caulfield #include <linux/sctp.h>
525a0e3ad6STejun Heo #include <linux/slab.h>
532f2d76ccSBenjamin Poirier #include <net/sctp/sctp.h>
5444ad532bSJoe Perches #include <net/ipv6.h>
556ed7257bSPatrick Caulfield 
5692732376SAlexander Aring #include <trace/events/dlm.h>
5792732376SAlexander Aring 
586ed7257bSPatrick Caulfield #include "dlm_internal.h"
596ed7257bSPatrick Caulfield #include "lowcomms.h"
606ed7257bSPatrick Caulfield #include "midcomms.h"
616ed7257bSPatrick Caulfield #include "config.h"
626ed7257bSPatrick Caulfield 
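/* socket receive buffer memory wanted for dlm sockets (4 MiB) */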
636ed7257bSPatrick Caulfield #define NEEDED_RMEM (4*1024*1024)
646ed7257bSPatrick Caulfield 
65f92c8dd7SBob Peterson /* Number of messages to send before rescheduling */
66f92c8dd7SBob Peterson #define MAX_SEND_MSG_COUNT 25
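/* how long to wait for a graceful socket shutdown before forcing the close */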
67055923bfSAlexander Aring #define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(10000)
68f92c8dd7SBob Peterson 
696ed7257bSPatrick Caulfield struct connection {
706ed7257bSPatrick Caulfield 	struct socket *sock;	/* NULL if not connected */
716ed7257bSPatrick Caulfield 	uint32_t nodeid;	/* So we know who we are in the list */
726ed7257bSPatrick Caulfield 	struct mutex sock_mutex;
736ed7257bSPatrick Caulfield 	unsigned long flags;
746ed7257bSPatrick Caulfield #define CF_READ_PENDING 1
758a4abb08Stsutomu.owa@toshiba.co.jp #define CF_WRITE_PENDING 2
766ed7257bSPatrick Caulfield #define CF_INIT_PENDING 4
776ed7257bSPatrick Caulfield #define CF_IS_OTHERCON 5
78063c4c99SLars Marowsky-Bree #define CF_CLOSE 6
79b36930ddSDavid Miller #define CF_APP_LIMITED 7
80b2a66629Stsutomu.owa@toshiba.co.jp #define CF_CLOSING 8
81055923bfSAlexander Aring #define CF_SHUTDOWN 9
8219633c7eSAlexander Aring #define CF_CONNECTED 10
83ba868d9dSAlexander Aring #define CF_RECONNECT 11
84ba868d9dSAlexander Aring #define CF_DELAY_CONNECT 12
858aa31cbfSAlexander Aring #define CF_EOF 13
866ed7257bSPatrick Caulfield 	struct list_head writequeue;  /* List of outgoing writequeue_entries */
876ed7257bSPatrick Caulfield 	spinlock_t writequeue_lock;
888aa31cbfSAlexander Aring 	atomic_t writequeue_cnt;
89c51b0221SAlexander Aring 	struct mutex wq_alloc;
906ed7257bSPatrick Caulfield 	int retries;
916ed7257bSPatrick Caulfield #define MAX_CONNECT_RETRIES 3
925e9ccc37SChristine Caulfield 	struct hlist_node list;
936ed7257bSPatrick Caulfield 	struct connection *othercon;
94ba868d9dSAlexander Aring 	struct connection *sendcon;
956ed7257bSPatrick Caulfield 	struct work_struct rwork; /* Receive workqueue */
966ed7257bSPatrick Caulfield 	struct work_struct swork; /* Send workqueue */
97055923bfSAlexander Aring 	wait_queue_head_t shutdown_wait; /* wait for graceful shutdown */
984798cbbfSAlexander Aring 	unsigned char *rx_buf;
994798cbbfSAlexander Aring 	int rx_buflen;
1004798cbbfSAlexander Aring 	int rx_leftover;
101a47666ebSAlexander Aring 	struct rcu_head rcu;
1026ed7257bSPatrick Caulfield };
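/* map a struct sock back to its dlm connection via sk_user_data (set in add_sock()) */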
1036ed7257bSPatrick Caulfield #define sock2con(x) ((struct connection *)(x)->sk_user_data)
1046ed7257bSPatrick Caulfield 
105d11ccd45SAlexander Aring struct listen_connection {
106d11ccd45SAlexander Aring 	struct socket *sock;
107d11ccd45SAlexander Aring 	struct work_struct rwork;
108d11ccd45SAlexander Aring };
109d11ccd45SAlexander Aring 
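/* bytes still free in a writequeue entry's page / bytes currently queued in it */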
110f0747ebfSAlexander Aring #define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end)
111f0747ebfSAlexander Aring #define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset)
112f0747ebfSAlexander Aring 
1136ed7257bSPatrick Caulfield /* An entry waiting to be sent */
1146ed7257bSPatrick Caulfield struct writequeue_entry {
1156ed7257bSPatrick Caulfield 	struct list_head list;
1166ed7257bSPatrick Caulfield 	struct page *page;
1176ed7257bSPatrick Caulfield 	int offset;
1186ed7257bSPatrick Caulfield 	int len;
1196ed7257bSPatrick Caulfield 	int end;
1206ed7257bSPatrick Caulfield 	int users;
121706474fbSAlexander Aring 	bool dirty;
1226ed7257bSPatrick Caulfield 	struct connection *con;
1238f2dc78dSAlexander Aring 	struct list_head msgs;
1248f2dc78dSAlexander Aring 	struct kref ref;
1258f2dc78dSAlexander Aring };
1268f2dc78dSAlexander Aring 
1278f2dc78dSAlexander Aring struct dlm_msg {
1288f2dc78dSAlexander Aring 	struct writequeue_entry *entry;
1292874d1a6SAlexander Aring 	struct dlm_msg *orig_msg;
1302874d1a6SAlexander Aring 	bool retransmit;
1318f2dc78dSAlexander Aring 	void *ppc;
1328f2dc78dSAlexander Aring 	int len;
1338f2dc78dSAlexander Aring 	int idx; /* srcu index exchanged between new() and commit() */
1348f2dc78dSAlexander Aring 
1358f2dc78dSAlexander Aring 	struct list_head list;
1368f2dc78dSAlexander Aring 	struct kref ref;
1376ed7257bSPatrick Caulfield };
1386ed7257bSPatrick Caulfield 
13936b71a8bSDavid Teigland struct dlm_node_addr {
14036b71a8bSDavid Teigland 	struct list_head list;
14136b71a8bSDavid Teigland 	int nodeid;
142e125fbebSAlexander Aring 	int mark;
14336b71a8bSDavid Teigland 	int addr_count;
14498e1b60eSMike Christie 	int curr_addr_index;
14536b71a8bSDavid Teigland 	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
14636b71a8bSDavid Teigland };
14736b71a8bSDavid Teigland 
148a66c008cSAlexander Aring struct dlm_proto_ops {
1498728a455SAlexander Aring 	bool try_new_addr;
1502dc6b115SAlexander Aring 	const char *name;
1512dc6b115SAlexander Aring 	int proto;
1522dc6b115SAlexander Aring 
1538728a455SAlexander Aring 	int (*connect)(struct connection *con, struct socket *sock,
1548728a455SAlexander Aring 		       struct sockaddr *addr, int addr_len);
1558728a455SAlexander Aring 	void (*sockopts)(struct socket *sock);
1568728a455SAlexander Aring 	int (*bind)(struct socket *sock);
1572dc6b115SAlexander Aring 	int (*listen_validate)(void);
1582dc6b115SAlexander Aring 	void (*listen_sockopts)(struct socket *sock);
1592dc6b115SAlexander Aring 	int (*listen_bind)(struct socket *sock);
160a66c008cSAlexander Aring 	/* how to shut the connection down */
161a66c008cSAlexander Aring 	void (*shutdown_action)(struct connection *con);
162a66c008cSAlexander Aring 	/* how to check for a pending EOF condition */
163a66c008cSAlexander Aring 	bool (*eof_condition)(struct connection *con);
164a66c008cSAlexander Aring };
165a66c008cSAlexander Aring 
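/* callbacks of the listening socket, saved here so the originals can be
 * restored when a socket is closed
 */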
166cc661fc9SBob Peterson static struct listen_sock_callbacks {
167cc661fc9SBob Peterson 	void (*sk_error_report)(struct sock *);
168cc661fc9SBob Peterson 	void (*sk_data_ready)(struct sock *);
169cc661fc9SBob Peterson 	void (*sk_state_change)(struct sock *);
170cc661fc9SBob Peterson 	void (*sk_write_space)(struct sock *);
171cc661fc9SBob Peterson } listen_sock;
172cc661fc9SBob Peterson 
17336b71a8bSDavid Teigland static LIST_HEAD(dlm_node_addrs);
17436b71a8bSDavid Teigland static DEFINE_SPINLOCK(dlm_node_addrs_spin);
17536b71a8bSDavid Teigland 
176d11ccd45SAlexander Aring static struct listen_connection listen_con;
1776ed7257bSPatrick Caulfield static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
1786ed7257bSPatrick Caulfield static int dlm_local_count;
17951746163SAlexander Aring int dlm_allow_conn;
1806ed7257bSPatrick Caulfield 
1816ed7257bSPatrick Caulfield /* Work queues */
1826ed7257bSPatrick Caulfield static struct workqueue_struct *recv_workqueue;
1836ed7257bSPatrick Caulfield static struct workqueue_struct *send_workqueue;
1846ed7257bSPatrick Caulfield 
1855e9ccc37SChristine Caulfield static struct hlist_head connection_hash[CONN_HASH_SIZE];
186a47666ebSAlexander Aring static DEFINE_SPINLOCK(connections_lock);
187a47666ebSAlexander Aring DEFINE_STATIC_SRCU(connections_srcu);
1886ed7257bSPatrick Caulfield 
189a66c008cSAlexander Aring static const struct dlm_proto_ops *dlm_proto_ops;
190a66c008cSAlexander Aring 
1916ed7257bSPatrick Caulfield static void process_recv_sockets(struct work_struct *work);
1926ed7257bSPatrick Caulfield static void process_send_sockets(struct work_struct *work);
1936ed7257bSPatrick Caulfield 
19466d5955aSAlexander Aring /* must be called with writequeue_lock held */
19566d5955aSAlexander Aring static struct writequeue_entry *con_next_wq(struct connection *con)
19666d5955aSAlexander Aring {
19766d5955aSAlexander Aring 	struct writequeue_entry *e;
19866d5955aSAlexander Aring 
19966d5955aSAlexander Aring 	if (list_empty(&con->writequeue))
20066d5955aSAlexander Aring 		return NULL;
20166d5955aSAlexander Aring 
20266d5955aSAlexander Aring 	e = list_first_entry(&con->writequeue, struct writequeue_entry,
20366d5955aSAlexander Aring 			     list);
20466d5955aSAlexander Aring 	if (e->len == 0)
20566d5955aSAlexander Aring 		return NULL;
20666d5955aSAlexander Aring 
20766d5955aSAlexander Aring 	return e;
20866d5955aSAlexander Aring }
20966d5955aSAlexander Aring 
210b38bc9c2SAlexander Aring static struct connection *__find_con(int nodeid, int r)
2115e9ccc37SChristine Caulfield {
2125e9ccc37SChristine Caulfield 	struct connection *con;
2135e9ccc37SChristine Caulfield 
214a47666ebSAlexander Aring 	hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
215b38bc9c2SAlexander Aring 		if (con->nodeid == nodeid)
2165e9ccc37SChristine Caulfield 			return con;
2175e9ccc37SChristine Caulfield 	}
218a47666ebSAlexander Aring 
2195e9ccc37SChristine Caulfield 	return NULL;
2205e9ccc37SChristine Caulfield }
2215e9ccc37SChristine Caulfield 
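/* defer acting on an EOF from the peer while data is still queued to send
 * (see the CF_EOF handling in receive_from_sock())
 */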
2228aa31cbfSAlexander Aring static bool tcp_eof_condition(struct connection *con)
2238aa31cbfSAlexander Aring {
2248aa31cbfSAlexander Aring 	return atomic_read(&con->writequeue_cnt);
2258aa31cbfSAlexander Aring }
2268aa31cbfSAlexander Aring 
2276cde210aSAlexander Aring static int dlm_con_init(struct connection *con, int nodeid)
2286ed7257bSPatrick Caulfield {
2294798cbbfSAlexander Aring 	con->rx_buflen = dlm_config.ci_buffer_size;
2304798cbbfSAlexander Aring 	con->rx_buf = kmalloc(con->rx_buflen, GFP_NOFS);
2316cde210aSAlexander Aring 	if (!con->rx_buf)
2326cde210aSAlexander Aring 		return -ENOMEM;
2334798cbbfSAlexander Aring 
2346ed7257bSPatrick Caulfield 	con->nodeid = nodeid;
2356ed7257bSPatrick Caulfield 	mutex_init(&con->sock_mutex);
2366ed7257bSPatrick Caulfield 	INIT_LIST_HEAD(&con->writequeue);
2376ed7257bSPatrick Caulfield 	spin_lock_init(&con->writequeue_lock);
2388aa31cbfSAlexander Aring 	atomic_set(&con->writequeue_cnt, 0);
2396ed7257bSPatrick Caulfield 	INIT_WORK(&con->swork, process_send_sockets);
2406ed7257bSPatrick Caulfield 	INIT_WORK(&con->rwork, process_recv_sockets);
241055923bfSAlexander Aring 	init_waitqueue_head(&con->shutdown_wait);
2426ed7257bSPatrick Caulfield 
2436cde210aSAlexander Aring 	return 0;
2446cde210aSAlexander Aring }
2456cde210aSAlexander Aring 
2466cde210aSAlexander Aring /*
2476cde210aSAlexander Aring  * If 'alloc' is zero then we don't attempt to create a new
2486cde210aSAlexander Aring  * connection structure for this node.
2496cde210aSAlexander Aring  */
2506cde210aSAlexander Aring static struct connection *nodeid2con(int nodeid, gfp_t alloc)
2516cde210aSAlexander Aring {
2526cde210aSAlexander Aring 	struct connection *con, *tmp;
2536cde210aSAlexander Aring 	int r, ret;
2546cde210aSAlexander Aring 
255b38bc9c2SAlexander Aring 	r = nodeid_hash(nodeid);
256b38bc9c2SAlexander Aring 	con = __find_con(nodeid, r);
2576cde210aSAlexander Aring 	if (con || !alloc)
2586cde210aSAlexander Aring 		return con;
2596cde210aSAlexander Aring 
2606cde210aSAlexander Aring 	con = kzalloc(sizeof(*con), alloc);
2616cde210aSAlexander Aring 	if (!con)
2626cde210aSAlexander Aring 		return NULL;
2636cde210aSAlexander Aring 
2646cde210aSAlexander Aring 	ret = dlm_con_init(con, nodeid);
2656cde210aSAlexander Aring 	if (ret) {
2666cde210aSAlexander Aring 		kfree(con);
2676cde210aSAlexander Aring 		return NULL;
2686cde210aSAlexander Aring 	}
2696cde210aSAlexander Aring 
270c51b0221SAlexander Aring 	mutex_init(&con->wq_alloc);
271c51b0221SAlexander Aring 
272a47666ebSAlexander Aring 	spin_lock(&connections_lock);
2734f2b30fdSAlexander Aring 	/* Because multiple workqueues/threads call this function it can
2744f2b30fdSAlexander Aring 	 * race on multiple CPUs. Instead of locking the hot path __find_con()
2754f2b30fdSAlexander Aring 	 * we just check again, in the rare case of a recently added node,
2764f2b30fdSAlexander Aring 	 * under protection of connections_lock. If such a race is detected we
2774f2b30fdSAlexander Aring 	 * abort our connection creation and return the existing connection.
2784f2b30fdSAlexander Aring 	 */
279b38bc9c2SAlexander Aring 	tmp = __find_con(nodeid, r);
2804f2b30fdSAlexander Aring 	if (tmp) {
2814f2b30fdSAlexander Aring 		spin_unlock(&connections_lock);
2824f2b30fdSAlexander Aring 		kfree(con->rx_buf);
2834f2b30fdSAlexander Aring 		kfree(con);
2844f2b30fdSAlexander Aring 		return tmp;
2854f2b30fdSAlexander Aring 	}
2864f2b30fdSAlexander Aring 
287a47666ebSAlexander Aring 	hlist_add_head_rcu(&con->list, &connection_hash[r]);
288a47666ebSAlexander Aring 	spin_unlock(&connections_lock);
289a47666ebSAlexander Aring 
2906ed7257bSPatrick Caulfield 	return con;
2916ed7257bSPatrick Caulfield }
2926ed7257bSPatrick Caulfield 
2935e9ccc37SChristine Caulfield /* Loop round all connections */
2945e9ccc37SChristine Caulfield static void foreach_conn(void (*conn_func)(struct connection *c))
2955e9ccc37SChristine Caulfield {
296b38bc9c2SAlexander Aring 	int i;
2975e9ccc37SChristine Caulfield 	struct connection *con;
2985e9ccc37SChristine Caulfield 
2995e9ccc37SChristine Caulfield 	for (i = 0; i < CONN_HASH_SIZE; i++) {
300a47666ebSAlexander Aring 		hlist_for_each_entry_rcu(con, &connection_hash[i], list)
3015e9ccc37SChristine Caulfield 			conn_func(con);
3025e9ccc37SChristine Caulfield 	}
3036ed7257bSPatrick Caulfield }
3046ed7257bSPatrick Caulfield 
30536b71a8bSDavid Teigland static struct dlm_node_addr *find_node_addr(int nodeid)
3066ed7257bSPatrick Caulfield {
30736b71a8bSDavid Teigland 	struct dlm_node_addr *na;
30836b71a8bSDavid Teigland 
30936b71a8bSDavid Teigland 	list_for_each_entry(na, &dlm_node_addrs, list) {
31036b71a8bSDavid Teigland 		if (na->nodeid == nodeid)
31136b71a8bSDavid Teigland 			return na;
31236b71a8bSDavid Teigland 	}
31336b71a8bSDavid Teigland 	return NULL;
31436b71a8bSDavid Teigland }
31536b71a8bSDavid Teigland 
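/* return 1 if both sockaddrs carry the same family, address and port, else 0 */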
31640c6b83eSAlexander Aring static int addr_compare(const struct sockaddr_storage *x,
31740c6b83eSAlexander Aring 			const struct sockaddr_storage *y)
31836b71a8bSDavid Teigland {
31936b71a8bSDavid Teigland 	switch (x->ss_family) {
32036b71a8bSDavid Teigland 	case AF_INET: {
32136b71a8bSDavid Teigland 		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
32236b71a8bSDavid Teigland 		struct sockaddr_in *siny = (struct sockaddr_in *)y;
32336b71a8bSDavid Teigland 		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
32436b71a8bSDavid Teigland 			return 0;
32536b71a8bSDavid Teigland 		if (sinx->sin_port != siny->sin_port)
32636b71a8bSDavid Teigland 			return 0;
32736b71a8bSDavid Teigland 		break;
32836b71a8bSDavid Teigland 	}
32936b71a8bSDavid Teigland 	case AF_INET6: {
33036b71a8bSDavid Teigland 		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
33136b71a8bSDavid Teigland 		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
33236b71a8bSDavid Teigland 		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
33336b71a8bSDavid Teigland 			return 0;
33436b71a8bSDavid Teigland 		if (sinx->sin6_port != siny->sin6_port)
33536b71a8bSDavid Teigland 			return 0;
33636b71a8bSDavid Teigland 		break;
33736b71a8bSDavid Teigland 	}
33836b71a8bSDavid Teigland 	default:
33936b71a8bSDavid Teigland 		return 0;
34036b71a8bSDavid Teigland 	}
34136b71a8bSDavid Teigland 	return 1;
34236b71a8bSDavid Teigland }
34336b71a8bSDavid Teigland 
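/* look up the address to use for a node (rotating to the node's next address
 * when try_new_addr is set), returning it via sas_out/sa_out plus the
 * configured socket mark
 */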
34436b71a8bSDavid Teigland static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
345e125fbebSAlexander Aring 			  struct sockaddr *sa_out, bool try_new_addr,
346e125fbebSAlexander Aring 			  unsigned int *mark)
34736b71a8bSDavid Teigland {
34836b71a8bSDavid Teigland 	struct sockaddr_storage sas;
34936b71a8bSDavid Teigland 	struct dlm_node_addr *na;
3506ed7257bSPatrick Caulfield 
3516ed7257bSPatrick Caulfield 	if (!dlm_local_count)
3526ed7257bSPatrick Caulfield 		return -1;
3536ed7257bSPatrick Caulfield 
35436b71a8bSDavid Teigland 	spin_lock(&dlm_node_addrs_spin);
35536b71a8bSDavid Teigland 	na = find_node_addr(nodeid);
35698e1b60eSMike Christie 	if (na && na->addr_count) {
357ee44b4bcSMarcelo Ricardo Leitner 		memcpy(&sas, na->addr[na->curr_addr_index],
358ee44b4bcSMarcelo Ricardo Leitner 		       sizeof(struct sockaddr_storage));
359ee44b4bcSMarcelo Ricardo Leitner 
36098e1b60eSMike Christie 		if (try_new_addr) {
36198e1b60eSMike Christie 			na->curr_addr_index++;
36298e1b60eSMike Christie 			if (na->curr_addr_index == na->addr_count)
36398e1b60eSMike Christie 				na->curr_addr_index = 0;
36498e1b60eSMike Christie 		}
36598e1b60eSMike Christie 	}
36636b71a8bSDavid Teigland 	spin_unlock(&dlm_node_addrs_spin);
36736b71a8bSDavid Teigland 
36836b71a8bSDavid Teigland 	if (!na)
36936b71a8bSDavid Teigland 		return -EEXIST;
37036b71a8bSDavid Teigland 
37136b71a8bSDavid Teigland 	if (!na->addr_count)
37236b71a8bSDavid Teigland 		return -ENOENT;
37336b71a8bSDavid Teigland 
374e125fbebSAlexander Aring 	*mark = na->mark;
375e125fbebSAlexander Aring 
37636b71a8bSDavid Teigland 	if (sas_out)
37736b71a8bSDavid Teigland 		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
37836b71a8bSDavid Teigland 
37936b71a8bSDavid Teigland 	if (!sa_out)
38036b71a8bSDavid Teigland 		return 0;
3816ed7257bSPatrick Caulfield 
3826ed7257bSPatrick Caulfield 	if (dlm_local_addr[0]->ss_family == AF_INET) {
38336b71a8bSDavid Teigland 		struct sockaddr_in *in4  = (struct sockaddr_in *) &sas;
38436b71a8bSDavid Teigland 		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
3856ed7257bSPatrick Caulfield 		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
3866ed7257bSPatrick Caulfield 	} else {
38736b71a8bSDavid Teigland 		struct sockaddr_in6 *in6  = (struct sockaddr_in6 *) &sas;
38836b71a8bSDavid Teigland 		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
3894e3fd7a0SAlexey Dobriyan 		ret6->sin6_addr = in6->sin6_addr;
3906ed7257bSPatrick Caulfield 	}
3916ed7257bSPatrick Caulfield 
3926ed7257bSPatrick Caulfield 	return 0;
3936ed7257bSPatrick Caulfield }
3946ed7257bSPatrick Caulfield 
395e125fbebSAlexander Aring static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
396e125fbebSAlexander Aring 			  unsigned int *mark)
39736b71a8bSDavid Teigland {
39836b71a8bSDavid Teigland 	struct dlm_node_addr *na;
39936b71a8bSDavid Teigland 	int rv = -EEXIST;
40098e1b60eSMike Christie 	int addr_i;
40136b71a8bSDavid Teigland 
40236b71a8bSDavid Teigland 	spin_lock(&dlm_node_addrs_spin);
40336b71a8bSDavid Teigland 	list_for_each_entry(na, &dlm_node_addrs, list) {
40436b71a8bSDavid Teigland 		if (!na->addr_count)
40536b71a8bSDavid Teigland 			continue;
40636b71a8bSDavid Teigland 
40798e1b60eSMike Christie 		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
40898e1b60eSMike Christie 			if (addr_compare(na->addr[addr_i], addr)) {
40936b71a8bSDavid Teigland 				*nodeid = na->nodeid;
410e125fbebSAlexander Aring 				*mark = na->mark;
41136b71a8bSDavid Teigland 				rv = 0;
41298e1b60eSMike Christie 				goto unlock;
41336b71a8bSDavid Teigland 			}
41498e1b60eSMike Christie 		}
41598e1b60eSMike Christie 	}
41698e1b60eSMike Christie unlock:
41736b71a8bSDavid Teigland 	spin_unlock(&dlm_node_addrs_spin);
41836b71a8bSDavid Teigland 	return rv;
41936b71a8bSDavid Teigland }
42036b71a8bSDavid Teigland 
4214f19d071SAlexander Aring /* caller must hold the dlm_node_addrs_spin lock */
4224f19d071SAlexander Aring static bool dlm_lowcomms_na_has_addr(const struct dlm_node_addr *na,
4234f19d071SAlexander Aring 				     const struct sockaddr_storage *addr)
4244f19d071SAlexander Aring {
4254f19d071SAlexander Aring 	int i;
4264f19d071SAlexander Aring 
4274f19d071SAlexander Aring 	for (i = 0; i < na->addr_count; i++) {
4284f19d071SAlexander Aring 		if (addr_compare(na->addr[i], addr))
4294f19d071SAlexander Aring 			return true;
4304f19d071SAlexander Aring 	}
4314f19d071SAlexander Aring 
4324f19d071SAlexander Aring 	return false;
4334f19d071SAlexander Aring }
4344f19d071SAlexander Aring 
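/* register an additional address for a node; the first call for a nodeid
 * creates its dlm_node_addr entry
 */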
43536b71a8bSDavid Teigland int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
43636b71a8bSDavid Teigland {
43736b71a8bSDavid Teigland 	struct sockaddr_storage *new_addr;
43836b71a8bSDavid Teigland 	struct dlm_node_addr *new_node, *na;
4394f19d071SAlexander Aring 	bool ret;
44036b71a8bSDavid Teigland 
44136b71a8bSDavid Teigland 	new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
44236b71a8bSDavid Teigland 	if (!new_node)
44336b71a8bSDavid Teigland 		return -ENOMEM;
44436b71a8bSDavid Teigland 
44536b71a8bSDavid Teigland 	new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
44636b71a8bSDavid Teigland 	if (!new_addr) {
44736b71a8bSDavid Teigland 		kfree(new_node);
44836b71a8bSDavid Teigland 		return -ENOMEM;
44936b71a8bSDavid Teigland 	}
45036b71a8bSDavid Teigland 
45136b71a8bSDavid Teigland 	memcpy(new_addr, addr, len);
45236b71a8bSDavid Teigland 
45336b71a8bSDavid Teigland 	spin_lock(&dlm_node_addrs_spin);
45436b71a8bSDavid Teigland 	na = find_node_addr(nodeid);
45536b71a8bSDavid Teigland 	if (!na) {
45636b71a8bSDavid Teigland 		new_node->nodeid = nodeid;
45736b71a8bSDavid Teigland 		new_node->addr[0] = new_addr;
45836b71a8bSDavid Teigland 		new_node->addr_count = 1;
459e125fbebSAlexander Aring 		new_node->mark = dlm_config.ci_mark;
46036b71a8bSDavid Teigland 		list_add(&new_node->list, &dlm_node_addrs);
46136b71a8bSDavid Teigland 		spin_unlock(&dlm_node_addrs_spin);
46236b71a8bSDavid Teigland 		return 0;
46336b71a8bSDavid Teigland 	}
46436b71a8bSDavid Teigland 
4654f19d071SAlexander Aring 	ret = dlm_lowcomms_na_has_addr(na, addr);
4664f19d071SAlexander Aring 	if (ret) {
4674f19d071SAlexander Aring 		spin_unlock(&dlm_node_addrs_spin);
4684f19d071SAlexander Aring 		kfree(new_addr);
4694f19d071SAlexander Aring 		kfree(new_node);
4704f19d071SAlexander Aring 		return -EEXIST;
4714f19d071SAlexander Aring 	}
4724f19d071SAlexander Aring 
47336b71a8bSDavid Teigland 	if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
47436b71a8bSDavid Teigland 		spin_unlock(&dlm_node_addrs_spin);
47536b71a8bSDavid Teigland 		kfree(new_addr);
47636b71a8bSDavid Teigland 		kfree(new_node);
47736b71a8bSDavid Teigland 		return -ENOSPC;
47836b71a8bSDavid Teigland 	}
47936b71a8bSDavid Teigland 
48036b71a8bSDavid Teigland 	na->addr[na->addr_count++] = new_addr;
48136b71a8bSDavid Teigland 	spin_unlock(&dlm_node_addrs_spin);
48236b71a8bSDavid Teigland 	kfree(new_node);
48336b71a8bSDavid Teigland 	return 0;
48436b71a8bSDavid Teigland }
48536b71a8bSDavid Teigland 
4866ed7257bSPatrick Caulfield /* Data available on a socket, or the listening socket received a connection */
487676d2369SDavid S. Miller static void lowcomms_data_ready(struct sock *sk)
4886ed7257bSPatrick Caulfield {
48993eaadebStsutomu.owa@toshiba.co.jp 	struct connection *con;
49093eaadebStsutomu.owa@toshiba.co.jp 
49193eaadebStsutomu.owa@toshiba.co.jp 	read_lock_bh(&sk->sk_callback_lock);
49293eaadebStsutomu.owa@toshiba.co.jp 	con = sock2con(sk);
493afb853fbSPatrick Caulfield 	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
4946ed7257bSPatrick Caulfield 		queue_work(recv_workqueue, &con->rwork);
49593eaadebStsutomu.owa@toshiba.co.jp 	read_unlock_bh(&sk->sk_callback_lock);
4966ed7257bSPatrick Caulfield }
4976ed7257bSPatrick Caulfield 
498d11ccd45SAlexander Aring static void lowcomms_listen_data_ready(struct sock *sk)
499d11ccd45SAlexander Aring {
5009a4139a7SAlexander Aring 	if (!dlm_allow_conn)
5019a4139a7SAlexander Aring 		return;
5029a4139a7SAlexander Aring 
503d11ccd45SAlexander Aring 	queue_work(recv_workqueue, &listen_con.rwork);
504d11ccd45SAlexander Aring }
505d11ccd45SAlexander Aring 
5066ed7257bSPatrick Caulfield static void lowcomms_write_space(struct sock *sk)
5076ed7257bSPatrick Caulfield {
50893eaadebStsutomu.owa@toshiba.co.jp 	struct connection *con;
5096ed7257bSPatrick Caulfield 
51093eaadebStsutomu.owa@toshiba.co.jp 	read_lock_bh(&sk->sk_callback_lock);
51193eaadebStsutomu.owa@toshiba.co.jp 	con = sock2con(sk);
512b36930ddSDavid Miller 	if (!con)
51393eaadebStsutomu.owa@toshiba.co.jp 		goto out;
514b36930ddSDavid Miller 
51519633c7eSAlexander Aring 	if (!test_and_set_bit(CF_CONNECTED, &con->flags)) {
51619633c7eSAlexander Aring 		log_print("successfully connected to node %d", con->nodeid);
51719633c7eSAlexander Aring 		queue_work(send_workqueue, &con->swork);
51819633c7eSAlexander Aring 		goto out;
51919633c7eSAlexander Aring 	}
52019633c7eSAlexander Aring 
521b36930ddSDavid Miller 	clear_bit(SOCK_NOSPACE, &con->sock->flags);
522b36930ddSDavid Miller 
523b36930ddSDavid Miller 	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
524b36930ddSDavid Miller 		con->sock->sk->sk_write_pending--;
5259cd3e072SEric Dumazet 		clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
526b36930ddSDavid Miller 	}
527b36930ddSDavid Miller 
5286ed7257bSPatrick Caulfield 	queue_work(send_workqueue, &con->swork);
52993eaadebStsutomu.owa@toshiba.co.jp out:
53093eaadebStsutomu.owa@toshiba.co.jp 	read_unlock_bh(&sk->sk_callback_lock);
5316ed7257bSPatrick Caulfield }
5326ed7257bSPatrick Caulfield 
5336ed7257bSPatrick Caulfield static inline void lowcomms_connect_sock(struct connection *con)
5346ed7257bSPatrick Caulfield {
535063c4c99SLars Marowsky-Bree 	if (test_bit(CF_CLOSE, &con->flags))
536063c4c99SLars Marowsky-Bree 		return;
5376ed7257bSPatrick Caulfield 	queue_work(send_workqueue, &con->swork);
53861d9102bSBob Peterson 	cond_resched();
5396ed7257bSPatrick Caulfield }
5406ed7257bSPatrick Caulfield 
5416ed7257bSPatrick Caulfield static void lowcomms_state_change(struct sock *sk)
5426ed7257bSPatrick Caulfield {
543ee44b4bcSMarcelo Ricardo Leitner 	/* The SCTP layer does not call sk_data_ready when the connection
544ee44b4bcSMarcelo Ricardo Leitner 	 * is done, so we catch the signal here. Also, it
545ee44b4bcSMarcelo Ricardo Leitner 	 * doesn't switch socket state when entering shutdown, so we
546ee44b4bcSMarcelo Ricardo Leitner 	 * skip the write in that case.
547ee44b4bcSMarcelo Ricardo Leitner 	 */
548ee44b4bcSMarcelo Ricardo Leitner 	if (sk->sk_shutdown) {
549ee44b4bcSMarcelo Ricardo Leitner 		if (sk->sk_shutdown == RCV_SHUTDOWN)
550ee44b4bcSMarcelo Ricardo Leitner 			lowcomms_data_ready(sk);
551ee44b4bcSMarcelo Ricardo Leitner 	} else if (sk->sk_state == TCP_ESTABLISHED) {
5526ed7257bSPatrick Caulfield 		lowcomms_write_space(sk);
5536ed7257bSPatrick Caulfield 	}
554ee44b4bcSMarcelo Ricardo Leitner }
5556ed7257bSPatrick Caulfield 
556391fbdc5SChristine Caulfield int dlm_lowcomms_connect_node(int nodeid)
557391fbdc5SChristine Caulfield {
558391fbdc5SChristine Caulfield 	struct connection *con;
559b38bc9c2SAlexander Aring 	int idx;
560391fbdc5SChristine Caulfield 
561391fbdc5SChristine Caulfield 	if (nodeid == dlm_our_nodeid())
562391fbdc5SChristine Caulfield 		return 0;
563391fbdc5SChristine Caulfield 
564b38bc9c2SAlexander Aring 	idx = srcu_read_lock(&connections_srcu);
565391fbdc5SChristine Caulfield 	con = nodeid2con(nodeid, GFP_NOFS);
566b38bc9c2SAlexander Aring 	if (!con) {
567b38bc9c2SAlexander Aring 		srcu_read_unlock(&connections_srcu, idx);
568391fbdc5SChristine Caulfield 		return -ENOMEM;
569b38bc9c2SAlexander Aring 	}
570b38bc9c2SAlexander Aring 
571391fbdc5SChristine Caulfield 	lowcomms_connect_sock(con);
572b38bc9c2SAlexander Aring 	srcu_read_unlock(&connections_srcu, idx);
573b38bc9c2SAlexander Aring 
574391fbdc5SChristine Caulfield 	return 0;
575391fbdc5SChristine Caulfield }
576391fbdc5SChristine Caulfield 
577e125fbebSAlexander Aring int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
578e125fbebSAlexander Aring {
579e125fbebSAlexander Aring 	struct dlm_node_addr *na;
580e125fbebSAlexander Aring 
581e125fbebSAlexander Aring 	spin_lock(&dlm_node_addrs_spin);
582e125fbebSAlexander Aring 	na = find_node_addr(nodeid);
583e125fbebSAlexander Aring 	if (!na) {
584e125fbebSAlexander Aring 		spin_unlock(&dlm_node_addrs_spin);
585e125fbebSAlexander Aring 		return -ENOENT;
586e125fbebSAlexander Aring 	}
587e125fbebSAlexander Aring 
588e125fbebSAlexander Aring 	na->mark = mark;
589e125fbebSAlexander Aring 	spin_unlock(&dlm_node_addrs_spin);
590e125fbebSAlexander Aring 
591e125fbebSAlexander Aring 	return 0;
592e125fbebSAlexander Aring }
593e125fbebSAlexander Aring 
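/* sk_error_report callback: log the socket error and schedule a
 * reconnect on the sending connection
 */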
594b3a5bbfdSBob Peterson static void lowcomms_error_report(struct sock *sk)
595b3a5bbfdSBob Peterson {
596b81171cbSBob Peterson 	struct connection *con;
597b81171cbSBob Peterson 	void (*orig_report)(struct sock *) = NULL;
598*4c3d9057SAlexander Aring 	struct inet_sock *inet;
599b3a5bbfdSBob Peterson 
600b81171cbSBob Peterson 	read_lock_bh(&sk->sk_callback_lock);
601b81171cbSBob Peterson 	con = sock2con(sk);
602b81171cbSBob Peterson 	if (con == NULL)
603b81171cbSBob Peterson 		goto out;
604b81171cbSBob Peterson 
605cc661fc9SBob Peterson 	orig_report = listen_sock.sk_error_report;
606b3a5bbfdSBob Peterson 
607*4c3d9057SAlexander Aring 	inet = inet_sk(sk);
608*4c3d9057SAlexander Aring 	switch (sk->sk_family) {
609*4c3d9057SAlexander Aring 	case AF_INET:
610b3a5bbfdSBob Peterson 		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
611*4c3d9057SAlexander Aring 				   "sending to node %d at %pI4, dport %d, "
612b3a5bbfdSBob Peterson 				   "sk_err=%d/%d\n", dlm_our_nodeid(),
613*4c3d9057SAlexander Aring 				   con->nodeid, &inet->inet_daddr,
614*4c3d9057SAlexander Aring 				   ntohs(inet->inet_dport), sk->sk_err,
615b3a5bbfdSBob Peterson 				   sk->sk_err_soft);
616*4c3d9057SAlexander Aring 		break;
617*4c3d9057SAlexander Aring 	case AF_INET6:
618b3a5bbfdSBob Peterson 		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
619*4c3d9057SAlexander Aring 				   "sending to node %d at %pI6c, "
620*4c3d9057SAlexander Aring 				   "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(),
621*4c3d9057SAlexander Aring 				   con->nodeid, &sk->sk_v6_daddr,
622*4c3d9057SAlexander Aring 				   ntohs(inet->inet_dport), sk->sk_err,
623b3a5bbfdSBob Peterson 				   sk->sk_err_soft);
624*4c3d9057SAlexander Aring 		break;
625*4c3d9057SAlexander Aring 	default:
626*4c3d9057SAlexander Aring 		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
627*4c3d9057SAlexander Aring 				   "invalid socket family %d set, "
628*4c3d9057SAlexander Aring 				   "sk_err=%d/%d\n", dlm_our_nodeid(),
629*4c3d9057SAlexander Aring 				   sk->sk_family, sk->sk_err, sk->sk_err_soft);
630*4c3d9057SAlexander Aring 		goto out;
631b3a5bbfdSBob Peterson 	}
632ba868d9dSAlexander Aring 
633ba868d9dSAlexander Aring 	/* from here on, operate only on the sending connection */
634ba868d9dSAlexander Aring 	if (test_bit(CF_IS_OTHERCON, &con->flags))
635ba868d9dSAlexander Aring 		con = con->sendcon;
636ba868d9dSAlexander Aring 
637ba868d9dSAlexander Aring 	switch (sk->sk_err) {
638ba868d9dSAlexander Aring 	case ECONNREFUSED:
639ba868d9dSAlexander Aring 		set_bit(CF_DELAY_CONNECT, &con->flags);
640ba868d9dSAlexander Aring 		break;
641ba868d9dSAlexander Aring 	default:
642ba868d9dSAlexander Aring 		break;
643ba868d9dSAlexander Aring 	}
644ba868d9dSAlexander Aring 
645ba868d9dSAlexander Aring 	if (!test_and_set_bit(CF_RECONNECT, &con->flags))
646ba868d9dSAlexander Aring 		queue_work(send_workqueue, &con->swork);
647ba868d9dSAlexander Aring 
648b81171cbSBob Peterson out:
649b81171cbSBob Peterson 	read_unlock_bh(&sk->sk_callback_lock);
650b81171cbSBob Peterson 	if (orig_report)
651b81171cbSBob Peterson 		orig_report(sk);
652b81171cbSBob Peterson }
653b81171cbSBob Peterson 
654b81171cbSBob Peterson /* Note: sk_callback_lock must be locked before calling this function. */
655cc661fc9SBob Peterson static void save_listen_callbacks(struct socket *sock)
656b81171cbSBob Peterson {
657cc661fc9SBob Peterson 	struct sock *sk = sock->sk;
658cc661fc9SBob Peterson 
659cc661fc9SBob Peterson 	listen_sock.sk_data_ready = sk->sk_data_ready;
660cc661fc9SBob Peterson 	listen_sock.sk_state_change = sk->sk_state_change;
661cc661fc9SBob Peterson 	listen_sock.sk_write_space = sk->sk_write_space;
662cc661fc9SBob Peterson 	listen_sock.sk_error_report = sk->sk_error_report;
663b81171cbSBob Peterson }
664b81171cbSBob Peterson 
665cc661fc9SBob Peterson static void restore_callbacks(struct socket *sock)
666b81171cbSBob Peterson {
667cc661fc9SBob Peterson 	struct sock *sk = sock->sk;
668cc661fc9SBob Peterson 
669b81171cbSBob Peterson 	write_lock_bh(&sk->sk_callback_lock);
670b81171cbSBob Peterson 	sk->sk_user_data = NULL;
671cc661fc9SBob Peterson 	sk->sk_data_ready = listen_sock.sk_data_ready;
672cc661fc9SBob Peterson 	sk->sk_state_change = listen_sock.sk_state_change;
673cc661fc9SBob Peterson 	sk->sk_write_space = listen_sock.sk_write_space;
674cc661fc9SBob Peterson 	sk->sk_error_report = listen_sock.sk_error_report;
675b81171cbSBob Peterson 	write_unlock_bh(&sk->sk_callback_lock);
676b3a5bbfdSBob Peterson }
677b3a5bbfdSBob Peterson 
678d11ccd45SAlexander Aring static void add_listen_sock(struct socket *sock, struct listen_connection *con)
679d11ccd45SAlexander Aring {
680d11ccd45SAlexander Aring 	struct sock *sk = sock->sk;
681d11ccd45SAlexander Aring 
682d11ccd45SAlexander Aring 	write_lock_bh(&sk->sk_callback_lock);
683d11ccd45SAlexander Aring 	save_listen_callbacks(sock);
684d11ccd45SAlexander Aring 	con->sock = sock;
685d11ccd45SAlexander Aring 
686d11ccd45SAlexander Aring 	sk->sk_user_data = con;
687d11ccd45SAlexander Aring 	sk->sk_allocation = GFP_NOFS;
688d11ccd45SAlexander Aring 	/* Install a data_ready callback */
689d11ccd45SAlexander Aring 	sk->sk_data_ready = lowcomms_listen_data_ready;
690d11ccd45SAlexander Aring 	write_unlock_bh(&sk->sk_callback_lock);
691d11ccd45SAlexander Aring }
692d11ccd45SAlexander Aring 
6936ed7257bSPatrick Caulfield /* Make a socket active */
694988419a9Stsutomu.owa@toshiba.co.jp static void add_sock(struct socket *sock, struct connection *con)
6956ed7257bSPatrick Caulfield {
696b81171cbSBob Peterson 	struct sock *sk = sock->sk;
697b81171cbSBob Peterson 
698b81171cbSBob Peterson 	write_lock_bh(&sk->sk_callback_lock);
6996ed7257bSPatrick Caulfield 	con->sock = sock;
7006ed7257bSPatrick Caulfield 
701b81171cbSBob Peterson 	sk->sk_user_data = con;
7026ed7257bSPatrick Caulfield 	/* Install a data_ready callback */
703b81171cbSBob Peterson 	sk->sk_data_ready = lowcomms_data_ready;
704b81171cbSBob Peterson 	sk->sk_write_space = lowcomms_write_space;
705b81171cbSBob Peterson 	sk->sk_state_change = lowcomms_state_change;
706b81171cbSBob Peterson 	sk->sk_allocation = GFP_NOFS;
707b81171cbSBob Peterson 	sk->sk_error_report = lowcomms_error_report;
708b81171cbSBob Peterson 	write_unlock_bh(&sk->sk_callback_lock);
7096ed7257bSPatrick Caulfield }
7106ed7257bSPatrick Caulfield 
7116ed7257bSPatrick Caulfield /* Add the port number to an IPv4 or IPv6 sockaddr and return the address
7126ed7257bSPatrick Caulfield    length */
7136ed7257bSPatrick Caulfield static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
7146ed7257bSPatrick Caulfield 			  int *addr_len)
7156ed7257bSPatrick Caulfield {
7166ed7257bSPatrick Caulfield 	saddr->ss_family =  dlm_local_addr[0]->ss_family;
7176ed7257bSPatrick Caulfield 	if (saddr->ss_family == AF_INET) {
7186ed7257bSPatrick Caulfield 		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
7196ed7257bSPatrick Caulfield 		in4_addr->sin_port = cpu_to_be16(port);
7206ed7257bSPatrick Caulfield 		*addr_len = sizeof(struct sockaddr_in);
7216ed7257bSPatrick Caulfield 		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
7226ed7257bSPatrick Caulfield 	} else {
7236ed7257bSPatrick Caulfield 		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
7246ed7257bSPatrick Caulfield 		in6_addr->sin6_port = cpu_to_be16(port);
7256ed7257bSPatrick Caulfield 		*addr_len = sizeof(struct sockaddr_in6);
7266ed7257bSPatrick Caulfield 	}
72701c8cab2SPatrick Caulfield 	memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
7286ed7257bSPatrick Caulfield }
7296ed7257bSPatrick Caulfield 
730706474fbSAlexander Aring static void dlm_page_release(struct kref *kref)
731706474fbSAlexander Aring {
732706474fbSAlexander Aring 	struct writequeue_entry *e = container_of(kref, struct writequeue_entry,
733706474fbSAlexander Aring 						  ref);
734706474fbSAlexander Aring 
735706474fbSAlexander Aring 	__free_page(e->page);
736706474fbSAlexander Aring 	kfree(e);
737706474fbSAlexander Aring }
738706474fbSAlexander Aring 
739706474fbSAlexander Aring static void dlm_msg_release(struct kref *kref)
740706474fbSAlexander Aring {
741706474fbSAlexander Aring 	struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref);
742706474fbSAlexander Aring 
743706474fbSAlexander Aring 	kref_put(&msg->entry->ref, dlm_page_release);
744706474fbSAlexander Aring 	kfree(msg);
745706474fbSAlexander Aring }
746706474fbSAlexander Aring 
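/* drop every message attached to a writequeue entry, unlink the entry and
 * release its page
 */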
747706474fbSAlexander Aring static void free_entry(struct writequeue_entry *e)
748706474fbSAlexander Aring {
749706474fbSAlexander Aring 	struct dlm_msg *msg, *tmp;
750706474fbSAlexander Aring 
751706474fbSAlexander Aring 	list_for_each_entry_safe(msg, tmp, &e->msgs, list) {
752706474fbSAlexander Aring 		if (msg->orig_msg) {
753706474fbSAlexander Aring 			msg->orig_msg->retransmit = false;
754706474fbSAlexander Aring 			kref_put(&msg->orig_msg->ref, dlm_msg_release);
755706474fbSAlexander Aring 		}
756706474fbSAlexander Aring 
757706474fbSAlexander Aring 		list_del(&msg->list);
758706474fbSAlexander Aring 		kref_put(&msg->ref, dlm_msg_release);
759706474fbSAlexander Aring 	}
760706474fbSAlexander Aring 
761706474fbSAlexander Aring 	list_del(&e->list);
762706474fbSAlexander Aring 	atomic_dec(&e->con->writequeue_cnt);
763706474fbSAlexander Aring 	kref_put(&e->ref, dlm_page_release);
764706474fbSAlexander Aring }
765706474fbSAlexander Aring 
766d11ccd45SAlexander Aring static void dlm_close_sock(struct socket **sock)
767d11ccd45SAlexander Aring {
768d11ccd45SAlexander Aring 	if (*sock) {
769d11ccd45SAlexander Aring 		restore_callbacks(*sock);
770d11ccd45SAlexander Aring 		sock_release(*sock);
771d11ccd45SAlexander Aring 		*sock = NULL;
772d11ccd45SAlexander Aring 	}
773d11ccd45SAlexander Aring }
774d11ccd45SAlexander Aring 
7756ed7257bSPatrick Caulfield /* Close a remote connection and tidy up */
7760d737a8cSMarcelo Ricardo Leitner static void close_connection(struct connection *con, bool and_other,
7770d737a8cSMarcelo Ricardo Leitner 			     bool tx, bool rx)
7786ed7257bSPatrick Caulfield {
779b2a66629Stsutomu.owa@toshiba.co.jp 	bool closing = test_and_set_bit(CF_CLOSING, &con->flags);
780706474fbSAlexander Aring 	struct writequeue_entry *e;
781b2a66629Stsutomu.owa@toshiba.co.jp 
7820aa18464Stsutomu.owa@toshiba.co.jp 	if (tx && !closing && cancel_work_sync(&con->swork)) {
7830d737a8cSMarcelo Ricardo Leitner 		log_print("canceled swork for node %d", con->nodeid);
7840aa18464Stsutomu.owa@toshiba.co.jp 		clear_bit(CF_WRITE_PENDING, &con->flags);
7850aa18464Stsutomu.owa@toshiba.co.jp 	}
7860aa18464Stsutomu.owa@toshiba.co.jp 	if (rx && !closing && cancel_work_sync(&con->rwork)) {
7870d737a8cSMarcelo Ricardo Leitner 		log_print("canceled rwork for node %d", con->nodeid);
7880aa18464Stsutomu.owa@toshiba.co.jp 		clear_bit(CF_READ_PENDING, &con->flags);
7890aa18464Stsutomu.owa@toshiba.co.jp 	}
7906ed7257bSPatrick Caulfield 
7910d737a8cSMarcelo Ricardo Leitner 	mutex_lock(&con->sock_mutex);
792d11ccd45SAlexander Aring 	dlm_close_sock(&con->sock);
793d11ccd45SAlexander Aring 
7946ed7257bSPatrick Caulfield 	if (con->othercon && and_other) {
7956ed7257bSPatrick Caulfield 		/* Will only re-enter once. */
796c6aa00e3SAlexander Aring 		close_connection(con->othercon, false, tx, rx);
7976ed7257bSPatrick Caulfield 	}
7989e5f2825SPatrick Caulfield 
799706474fbSAlexander Aring 	/* If we sent a writequeue entry only half way, we drop the
800706474fbSAlexander Aring 	 * whole entry on reconnection so that we never resume in the
801706474fbSAlexander Aring 	 * middle of a msg, which would confuse the other end.
802706474fbSAlexander Aring 	 *
803706474fbSAlexander Aring 	 * We can always drop messages because of retransmits, but what we
804706474fbSAlexander Aring 	 * cannot allow is transmitting half messages which may be processed
805706474fbSAlexander Aring 	 * at the other side.
806706474fbSAlexander Aring 	 *
807706474fbSAlexander Aring 	 * Our policy is to start from a clean state on disconnect; we don't
808706474fbSAlexander Aring 	 * know what has been sent/received on the transport layer in this case.
809706474fbSAlexander Aring 	 */
810706474fbSAlexander Aring 	spin_lock(&con->writequeue_lock);
811706474fbSAlexander Aring 	if (!list_empty(&con->writequeue)) {
812706474fbSAlexander Aring 		e = list_first_entry(&con->writequeue, struct writequeue_entry,
813706474fbSAlexander Aring 				     list);
814706474fbSAlexander Aring 		if (e->dirty)
815706474fbSAlexander Aring 			free_entry(e);
816706474fbSAlexander Aring 	}
817706474fbSAlexander Aring 	spin_unlock(&con->writequeue_lock);
818706474fbSAlexander Aring 
8194798cbbfSAlexander Aring 	con->rx_leftover = 0;
8206ed7257bSPatrick Caulfield 	con->retries = 0;
821052849beSAlexander Aring 	clear_bit(CF_APP_LIMITED, &con->flags);
82219633c7eSAlexander Aring 	clear_bit(CF_CONNECTED, &con->flags);
823ba868d9dSAlexander Aring 	clear_bit(CF_DELAY_CONNECT, &con->flags);
824ba868d9dSAlexander Aring 	clear_bit(CF_RECONNECT, &con->flags);
8258aa31cbfSAlexander Aring 	clear_bit(CF_EOF, &con->flags);
8266ed7257bSPatrick Caulfield 	mutex_unlock(&con->sock_mutex);
827b2a66629Stsutomu.owa@toshiba.co.jp 	clear_bit(CF_CLOSING, &con->flags);
8286ed7257bSPatrick Caulfield }
8296ed7257bSPatrick Caulfield 
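/* attempt a graceful shutdown: send SHUT_WR and wait for the peer to
 * close, forcing the connection closed on error or timeout
 */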
830055923bfSAlexander Aring static void shutdown_connection(struct connection *con)
831055923bfSAlexander Aring {
832055923bfSAlexander Aring 	int ret;
833055923bfSAlexander Aring 
834eec054b5SAlexander Aring 	flush_work(&con->swork);
835055923bfSAlexander Aring 
836055923bfSAlexander Aring 	mutex_lock(&con->sock_mutex);
837055923bfSAlexander Aring 	/* nothing to shutdown */
838055923bfSAlexander Aring 	if (!con->sock) {
839055923bfSAlexander Aring 		mutex_unlock(&con->sock_mutex);
840055923bfSAlexander Aring 		return;
841055923bfSAlexander Aring 	}
842055923bfSAlexander Aring 
843055923bfSAlexander Aring 	set_bit(CF_SHUTDOWN, &con->flags);
844055923bfSAlexander Aring 	ret = kernel_sock_shutdown(con->sock, SHUT_WR);
845055923bfSAlexander Aring 	mutex_unlock(&con->sock_mutex);
846055923bfSAlexander Aring 	if (ret) {
847055923bfSAlexander Aring 		log_print("Connection %p failed to shutdown: %d will force close",
848055923bfSAlexander Aring 			  con, ret);
849055923bfSAlexander Aring 		goto force_close;
850055923bfSAlexander Aring 	} else {
851055923bfSAlexander Aring 		ret = wait_event_timeout(con->shutdown_wait,
852055923bfSAlexander Aring 					 !test_bit(CF_SHUTDOWN, &con->flags),
853055923bfSAlexander Aring 					 DLM_SHUTDOWN_WAIT_TIMEOUT);
854055923bfSAlexander Aring 		if (ret == 0) {
855055923bfSAlexander Aring 			log_print("Connection %p shutdown timed out, will force close",
856055923bfSAlexander Aring 				  con);
857055923bfSAlexander Aring 			goto force_close;
858055923bfSAlexander Aring 		}
859055923bfSAlexander Aring 	}
860055923bfSAlexander Aring 
861055923bfSAlexander Aring 	return;
862055923bfSAlexander Aring 
863055923bfSAlexander Aring force_close:
864055923bfSAlexander Aring 	clear_bit(CF_SHUTDOWN, &con->flags);
865055923bfSAlexander Aring 	close_connection(con, false, true, true);
866055923bfSAlexander Aring }
867055923bfSAlexander Aring 
868055923bfSAlexander Aring static void dlm_tcp_shutdown(struct connection *con)
869055923bfSAlexander Aring {
870055923bfSAlexander Aring 	if (con->othercon)
871055923bfSAlexander Aring 		shutdown_connection(con->othercon);
872055923bfSAlexander Aring 	shutdown_connection(con);
873055923bfSAlexander Aring }
874055923bfSAlexander Aring 
8754798cbbfSAlexander Aring static int con_realloc_receive_buf(struct connection *con, int newlen)
8764798cbbfSAlexander Aring {
8774798cbbfSAlexander Aring 	unsigned char *newbuf;
8784798cbbfSAlexander Aring 
8794798cbbfSAlexander Aring 	newbuf = kmalloc(newlen, GFP_NOFS);
8804798cbbfSAlexander Aring 	if (!newbuf)
8814798cbbfSAlexander Aring 		return -ENOMEM;
8824798cbbfSAlexander Aring 
8834798cbbfSAlexander Aring 	/* copy any leftover from last receive */
8844798cbbfSAlexander Aring 	if (con->rx_leftover)
8854798cbbfSAlexander Aring 		memmove(newbuf, con->rx_buf, con->rx_leftover);
8864798cbbfSAlexander Aring 
8874798cbbfSAlexander Aring 	/* swap to new buffer space */
8884798cbbfSAlexander Aring 	kfree(con->rx_buf);
8894798cbbfSAlexander Aring 	con->rx_buflen = newlen;
8904798cbbfSAlexander Aring 	con->rx_buf = newbuf;
8914798cbbfSAlexander Aring 
8924798cbbfSAlexander Aring 	return 0;
8934798cbbfSAlexander Aring }
8944798cbbfSAlexander Aring 
8956ed7257bSPatrick Caulfield /* Data received from remote end */
8966ed7257bSPatrick Caulfield static int receive_from_sock(struct connection *con)
8976ed7257bSPatrick Caulfield {
8984798cbbfSAlexander Aring 	struct msghdr msg;
8994798cbbfSAlexander Aring 	struct kvec iov;
9004798cbbfSAlexander Aring 	int ret, buflen;
9016ed7257bSPatrick Caulfield 
9026ed7257bSPatrick Caulfield 	mutex_lock(&con->sock_mutex);
9036ed7257bSPatrick Caulfield 
9046ed7257bSPatrick Caulfield 	if (con->sock == NULL) {
9056ed7257bSPatrick Caulfield 		ret = -EAGAIN;
9066ed7257bSPatrick Caulfield 		goto out_close;
9076ed7257bSPatrick Caulfield 	}
9084798cbbfSAlexander Aring 
9094798cbbfSAlexander Aring 	/* realloc if the configured receive buffer size has changed */
9104798cbbfSAlexander Aring 	buflen = dlm_config.ci_buffer_size;
9114798cbbfSAlexander Aring 	if (con->rx_buflen != buflen && con->rx_leftover <= buflen) {
9124798cbbfSAlexander Aring 		ret = con_realloc_receive_buf(con, buflen);
9134798cbbfSAlexander Aring 		if (ret < 0)
9146ed7257bSPatrick Caulfield 			goto out_resched;
9156ed7257bSPatrick Caulfield 	}
9166ed7257bSPatrick Caulfield 
91762699b3fSAlexander Aring 	for (;;) {
9184798cbbfSAlexander Aring 		/* calculate the receive buffer position and length, accounting
9194798cbbfSAlexander Aring 		 * for leftover bytes from the last receive
9206ed7257bSPatrick Caulfield 		 */
9214798cbbfSAlexander Aring 		iov.iov_base = con->rx_buf + con->rx_leftover;
9224798cbbfSAlexander Aring 		iov.iov_len = con->rx_buflen - con->rx_leftover;
9236ed7257bSPatrick Caulfield 
9244798cbbfSAlexander Aring 		memset(&msg, 0, sizeof(msg));
9254798cbbfSAlexander Aring 		msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
9264798cbbfSAlexander Aring 		ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
9274798cbbfSAlexander Aring 				     msg.msg_flags);
92892732376SAlexander Aring 		trace_dlm_recv(con->nodeid, ret);
92962699b3fSAlexander Aring 		if (ret == -EAGAIN)
93062699b3fSAlexander Aring 			break;
93162699b3fSAlexander Aring 		else if (ret <= 0)
9326ed7257bSPatrick Caulfield 			goto out_close;
9336ed7257bSPatrick Caulfield 
9344798cbbfSAlexander Aring 		/* new buflen according to received bytes and leftover from last receive */
9354798cbbfSAlexander Aring 		buflen = ret + con->rx_leftover;
9364798cbbfSAlexander Aring 		ret = dlm_process_incoming_buffer(con->nodeid, con->rx_buf, buflen);
9374798cbbfSAlexander Aring 		if (ret < 0)
9384798cbbfSAlexander Aring 			goto out_close;
9396ed7257bSPatrick Caulfield 
9404798cbbfSAlexander Aring 		/* move any bytes that were not processed to the beginning of
9414798cbbfSAlexander Aring 		 * the receive buffer, so that on the next receive the complete
9424798cbbfSAlexander Aring 		 * message starts at the beginning of the buffer.
9434798cbbfSAlexander Aring 		 */
9444798cbbfSAlexander Aring 		con->rx_leftover = buflen - ret;
9454798cbbfSAlexander Aring 		if (con->rx_leftover) {
9464798cbbfSAlexander Aring 			memmove(con->rx_buf, con->rx_buf + ret,
9474798cbbfSAlexander Aring 				con->rx_leftover);
9486ed7257bSPatrick Caulfield 		}
94962699b3fSAlexander Aring 	}
9504798cbbfSAlexander Aring 
951b97f8525SAlexander Aring 	dlm_midcomms_receive_done(con->nodeid);
9526ed7257bSPatrick Caulfield 	mutex_unlock(&con->sock_mutex);
9536ed7257bSPatrick Caulfield 	return 0;
9546ed7257bSPatrick Caulfield 
9556ed7257bSPatrick Caulfield out_resched:
9566ed7257bSPatrick Caulfield 	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
9576ed7257bSPatrick Caulfield 		queue_work(recv_workqueue, &con->rwork);
9586ed7257bSPatrick Caulfield 	mutex_unlock(&con->sock_mutex);
9596ed7257bSPatrick Caulfield 	return -EAGAIN;
9606ed7257bSPatrick Caulfield 
9616ed7257bSPatrick Caulfield out_close:
962055923bfSAlexander Aring 	if (ret == 0) {
963055923bfSAlexander Aring 		log_print("connection %p got EOF from %d",
964055923bfSAlexander Aring 			  con, con->nodeid);
9658aa31cbfSAlexander Aring 
966a66c008cSAlexander Aring 		if (dlm_proto_ops->eof_condition &&
967a66c008cSAlexander Aring 		    dlm_proto_ops->eof_condition(con)) {
9688aa31cbfSAlexander Aring 			set_bit(CF_EOF, &con->flags);
9698aa31cbfSAlexander Aring 			mutex_unlock(&con->sock_mutex);
9708aa31cbfSAlexander Aring 		} else {
9718aa31cbfSAlexander Aring 			mutex_unlock(&con->sock_mutex);
9728aa31cbfSAlexander Aring 			close_connection(con, false, true, false);
9738aa31cbfSAlexander Aring 
974055923bfSAlexander Aring 			/* handling for tcp shutdown */
975055923bfSAlexander Aring 			clear_bit(CF_SHUTDOWN, &con->flags);
976055923bfSAlexander Aring 			wake_up(&con->shutdown_wait);
9778aa31cbfSAlexander Aring 		}
9788aa31cbfSAlexander Aring 
979055923bfSAlexander Aring 		/* signal the receive worker to stop */
980055923bfSAlexander Aring 		ret = -1;
9818aa31cbfSAlexander Aring 	} else {
9828aa31cbfSAlexander Aring 		mutex_unlock(&con->sock_mutex);
9836ed7257bSPatrick Caulfield 	}
9846ed7257bSPatrick Caulfield 	return ret;
9856ed7257bSPatrick Caulfield }
9866ed7257bSPatrick Caulfield 
9876ed7257bSPatrick Caulfield /* Listening socket is busy, accept a connection */
988d11ccd45SAlexander Aring static int accept_from_sock(struct listen_connection *con)
9896ed7257bSPatrick Caulfield {
9906ed7257bSPatrick Caulfield 	int result;
9916ed7257bSPatrick Caulfield 	struct sockaddr_storage peeraddr;
9926ed7257bSPatrick Caulfield 	struct socket *newsock;
993b38bc9c2SAlexander Aring 	int len, idx;
9946ed7257bSPatrick Caulfield 	int nodeid;
9956ed7257bSPatrick Caulfield 	struct connection *newcon;
9966ed7257bSPatrick Caulfield 	struct connection *addcon;
9973f78cd7dSAlexander Aring 	unsigned int mark;
9986ed7257bSPatrick Caulfield 
999d11ccd45SAlexander Aring 	if (!con->sock)
10003421fb15Stsutomu.owa@toshiba.co.jp 		return -ENOTCONN;
10016ed7257bSPatrick Caulfield 
10023421fb15Stsutomu.owa@toshiba.co.jp 	result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
10036ed7257bSPatrick Caulfield 	if (result < 0)
10046ed7257bSPatrick Caulfield 		goto accept_err;
10056ed7257bSPatrick Caulfield 
10066ed7257bSPatrick Caulfield 	/* Get the connected socket's peer */
10076ed7257bSPatrick Caulfield 	memset(&peeraddr, 0, sizeof(peeraddr));
10089b2c45d4SDenys Vlasenko 	len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
10099b2c45d4SDenys Vlasenko 	if (len < 0) {
10106ed7257bSPatrick Caulfield 		result = -ECONNABORTED;
10116ed7257bSPatrick Caulfield 		goto accept_err;
10126ed7257bSPatrick Caulfield 	}
10136ed7257bSPatrick Caulfield 
10146ed7257bSPatrick Caulfield 	/* Get the new node's NODEID */
10156ed7257bSPatrick Caulfield 	make_sockaddr(&peeraddr, 0, &len);
1016e125fbebSAlexander Aring 	if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
1017bcaadf5cSMasatake YAMATO 		unsigned char *b=(unsigned char *)&peeraddr;
1018617e82e1SDavid Teigland 		log_print("connect from non cluster node");
1019bcaadf5cSMasatake YAMATO 		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
1020bcaadf5cSMasatake YAMATO 				     b, sizeof(struct sockaddr_storage));
10216ed7257bSPatrick Caulfield 		sock_release(newsock);
10226ed7257bSPatrick Caulfield 		return -1;
10236ed7257bSPatrick Caulfield 	}
10246ed7257bSPatrick Caulfield 
10256ed7257bSPatrick Caulfield 	log_print("got connection from %d", nodeid);
10266ed7257bSPatrick Caulfield 
10276ed7257bSPatrick Caulfield 	/*  Check to see if we already have a connection to this node. This
10286ed7257bSPatrick Caulfield 	 *  could happen if the two nodes initiate a connection at roughly
10296ed7257bSPatrick Caulfield 	 *  the same time and the connections cross on the wire.
10306ed7257bSPatrick Caulfield 	 *  In this case we store the incoming one in "othercon"
10316ed7257bSPatrick Caulfield 	 */
1032b38bc9c2SAlexander Aring 	idx = srcu_read_lock(&connections_srcu);
1033748285ccSDavid Teigland 	newcon = nodeid2con(nodeid, GFP_NOFS);
10346ed7257bSPatrick Caulfield 	if (!newcon) {
1035b38bc9c2SAlexander Aring 		srcu_read_unlock(&connections_srcu, idx);
10366ed7257bSPatrick Caulfield 		result = -ENOMEM;
10376ed7257bSPatrick Caulfield 		goto accept_err;
10386ed7257bSPatrick Caulfield 	}
1039d11ccd45SAlexander Aring 
1040e125fbebSAlexander Aring 	sock_set_mark(newsock->sk, mark);
1041e125fbebSAlexander Aring 
1042d11ccd45SAlexander Aring 	mutex_lock(&newcon->sock_mutex);
10436ed7257bSPatrick Caulfield 	if (newcon->sock) {
10446ed7257bSPatrick Caulfield 		struct connection *othercon = newcon->othercon;
10456ed7257bSPatrick Caulfield 
10466ed7257bSPatrick Caulfield 		if (!othercon) {
1047a47666ebSAlexander Aring 			othercon = kzalloc(sizeof(*othercon), GFP_NOFS);
10486ed7257bSPatrick Caulfield 			if (!othercon) {
1049617e82e1SDavid Teigland 				log_print("failed to allocate incoming socket");
10506ed7257bSPatrick Caulfield 				mutex_unlock(&newcon->sock_mutex);
1051b38bc9c2SAlexander Aring 				srcu_read_unlock(&connections_srcu, idx);
10526ed7257bSPatrick Caulfield 				result = -ENOMEM;
10536ed7257bSPatrick Caulfield 				goto accept_err;
10546ed7257bSPatrick Caulfield 			}
10554798cbbfSAlexander Aring 
10566cde210aSAlexander Aring 			result = dlm_con_init(othercon, nodeid);
10576cde210aSAlexander Aring 			if (result < 0) {
10584798cbbfSAlexander Aring 				kfree(othercon);
10592fd8db2dSYang Yingliang 				mutex_unlock(&newcon->sock_mutex);
1060b38bc9c2SAlexander Aring 				srcu_read_unlock(&connections_srcu, idx);
10614798cbbfSAlexander Aring 				goto accept_err;
10624798cbbfSAlexander Aring 			}
10634798cbbfSAlexander Aring 
1064e9a470acSAlexander Aring 			lockdep_set_subclass(&othercon->sock_mutex, 1);
10657443bc96SAlexander Aring 			set_bit(CF_IS_OTHERCON, &othercon->flags);
10666cde210aSAlexander Aring 			newcon->othercon = othercon;
1067ba868d9dSAlexander Aring 			othercon->sendcon = newcon;
1068ba3ab3caSAlexander Aring 		} else {
1069ba3ab3caSAlexander Aring 			/* close other sock con if we have something new */
1070ba3ab3caSAlexander Aring 			close_connection(othercon, false, true, false);
107161d96be0SPatrick Caulfield 		}
1072ba3ab3caSAlexander Aring 
1073e9a470acSAlexander Aring 		mutex_lock(&othercon->sock_mutex);
1074988419a9Stsutomu.owa@toshiba.co.jp 		add_sock(newsock, othercon);
10756ed7257bSPatrick Caulfield 		addcon = othercon;
1076c7355827Stsutomu.owa@toshiba.co.jp 		mutex_unlock(&othercon->sock_mutex);
10776ed7257bSPatrick Caulfield 	}
10786ed7257bSPatrick Caulfield 	else {
10793735b4b9SBob Peterson 		/* accept copies the sk after we've saved the callbacks, so we
10803735b4b9SBob Peterson 		   don't want to save them a second time or comm errors will
10813735b4b9SBob Peterson 		   result in calling sk_error_report recursively. */
1082988419a9Stsutomu.owa@toshiba.co.jp 		add_sock(newsock, newcon);
10836ed7257bSPatrick Caulfield 		addcon = newcon;
10846ed7257bSPatrick Caulfield 	}
10856ed7257bSPatrick Caulfield 
1086b30a624fSAlexander Aring 	set_bit(CF_CONNECTED, &addcon->flags);
10876ed7257bSPatrick Caulfield 	mutex_unlock(&newcon->sock_mutex);
10886ed7257bSPatrick Caulfield 
10896ed7257bSPatrick Caulfield 	/*
10906ed7257bSPatrick Caulfield 	 * Add it to the active queue in case we got data
109125985edcSLucas De Marchi 	 * between processing the accept and adding the socket
10926ed7257bSPatrick Caulfield 	 * to the read_sockets list
10936ed7257bSPatrick Caulfield 	 */
10946ed7257bSPatrick Caulfield 	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
10956ed7257bSPatrick Caulfield 		queue_work(recv_workqueue, &addcon->rwork);
10966ed7257bSPatrick Caulfield 
1097b38bc9c2SAlexander Aring 	srcu_read_unlock(&connections_srcu, idx);
1098b38bc9c2SAlexander Aring 
10996ed7257bSPatrick Caulfield 	return 0;
11006ed7257bSPatrick Caulfield 
11016ed7257bSPatrick Caulfield accept_err:
11023421fb15Stsutomu.owa@toshiba.co.jp 	if (newsock)
11036ed7257bSPatrick Caulfield 		sock_release(newsock);
11046ed7257bSPatrick Caulfield 
11056ed7257bSPatrick Caulfield 	if (result != -EAGAIN)
1106617e82e1SDavid Teigland 		log_print("error accepting connection from node: %d", result);
11076ed7257bSPatrick Caulfield 	return result;
11086ed7257bSPatrick Caulfield }
11096ed7257bSPatrick Caulfield 
11105d689871SMike Christie /*
11115d689871SMike Christie  * writequeue_entry_complete - try to delete and free write queue entry
11125d689871SMike Christie  * @e: write queue entry to try to delete
11135d689871SMike Christie  * @completed: bytes completed
11145d689871SMike Christie  *
11155d689871SMike Christie  * writequeue_lock must be held.
11165d689871SMike Christie  */
11175d689871SMike Christie static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
11185d689871SMike Christie {
11195d689871SMike Christie 	e->offset += completed;
11205d689871SMike Christie 	e->len -= completed;
1121706474fbSAlexander Aring 	/* signal that the page was partially transmitted */
1122706474fbSAlexander Aring 	e->dirty = true;
11235d689871SMike Christie 
11248f2dc78dSAlexander Aring 	if (e->len == 0 && e->users == 0)
11255d689871SMike Christie 		free_entry(e);
11265d689871SMike Christie }
11275d689871SMike Christie 
1128ee44b4bcSMarcelo Ricardo Leitner /*
1129ee44b4bcSMarcelo Ricardo Leitner  * sctp_bind_addrs - bind a SCTP socket to all our addresses
1130ee44b4bcSMarcelo Ricardo Leitner  */
113113004e8aSAlexander Aring static int sctp_bind_addrs(struct socket *sock, uint16_t port)
1132ee44b4bcSMarcelo Ricardo Leitner {
1133ee44b4bcSMarcelo Ricardo Leitner 	struct sockaddr_storage localaddr;
1134c0425a42SChristoph Hellwig 	struct sockaddr *addr = (struct sockaddr *)&localaddr;
1135ee44b4bcSMarcelo Ricardo Leitner 	int i, addr_len, result = 0;
1136ee44b4bcSMarcelo Ricardo Leitner 
1137ee44b4bcSMarcelo Ricardo Leitner 	for (i = 0; i < dlm_local_count; i++) {
1138ee44b4bcSMarcelo Ricardo Leitner 		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
1139ee44b4bcSMarcelo Ricardo Leitner 		make_sockaddr(&localaddr, port, &addr_len);
1140ee44b4bcSMarcelo Ricardo Leitner 
1141ee44b4bcSMarcelo Ricardo Leitner 		if (!i)
114213004e8aSAlexander Aring 			result = kernel_bind(sock, addr, addr_len);
1143ee44b4bcSMarcelo Ricardo Leitner 		else
114413004e8aSAlexander Aring 			result = sock_bind_add(sock->sk, addr, addr_len);
1145ee44b4bcSMarcelo Ricardo Leitner 
1146ee44b4bcSMarcelo Ricardo Leitner 		if (result < 0) {
1147ee44b4bcSMarcelo Ricardo Leitner 			log_print("Can't bind to %d addr number %d, %d.\n",
1148ee44b4bcSMarcelo Ricardo Leitner 				  port, i + 1, result);
1149ee44b4bcSMarcelo Ricardo Leitner 			break;
1150ee44b4bcSMarcelo Ricardo Leitner 		}
1151ee44b4bcSMarcelo Ricardo Leitner 	}
1152ee44b4bcSMarcelo Ricardo Leitner 	return result;
1153ee44b4bcSMarcelo Ricardo Leitner }
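
/* Note on how sctp_bind_addrs() is used further down: the listening
 * socket binds all local addresses to dlm_config.ci_tcp_port via
 * dlm_sctp_bind_listen(), while outgoing connection sockets bind with
 * port 0 via dlm_sctp_bind() and let the stack choose the source port.
 */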
1154ee44b4bcSMarcelo Ricardo Leitner 
11556ed7257bSPatrick Caulfield /* Get local addresses */
11566ed7257bSPatrick Caulfield static void init_local(void)
11576ed7257bSPatrick Caulfield {
11586ed7257bSPatrick Caulfield 	struct sockaddr_storage sas, *addr;
11596ed7257bSPatrick Caulfield 	int i;
11606ed7257bSPatrick Caulfield 
116130d3a237SPatrick Caulfield 	dlm_local_count = 0;
11621b189b88SDavid Teigland 	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
11636ed7257bSPatrick Caulfield 		if (dlm_our_addr(&sas, i))
11646ed7257bSPatrick Caulfield 			break;
11656ed7257bSPatrick Caulfield 
11665c93f56fSAmitoj Kaur Chawla 		addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
11676ed7257bSPatrick Caulfield 		if (!addr)
11686ed7257bSPatrick Caulfield 			break;
11696ed7257bSPatrick Caulfield 		dlm_local_addr[dlm_local_count++] = addr;
11706ed7257bSPatrick Caulfield 	}
11716ed7257bSPatrick Caulfield }
11726ed7257bSPatrick Caulfield 
1173043697f0SAlexander Aring static void deinit_local(void)
1174043697f0SAlexander Aring {
1175043697f0SAlexander Aring 	int i;
1176043697f0SAlexander Aring 
1177043697f0SAlexander Aring 	for (i = 0; i < dlm_local_count; i++)
1178043697f0SAlexander Aring 		kfree(dlm_local_addr[i]);
1179043697f0SAlexander Aring }
1180043697f0SAlexander Aring 
11816ed7257bSPatrick Caulfield static struct writequeue_entry *new_writequeue_entry(struct connection *con,
11826ed7257bSPatrick Caulfield 						     gfp_t allocation)
11836ed7257bSPatrick Caulfield {
11846ed7257bSPatrick Caulfield 	struct writequeue_entry *entry;
11856ed7257bSPatrick Caulfield 
1186f0747ebfSAlexander Aring 	entry = kzalloc(sizeof(*entry), allocation);
11876ed7257bSPatrick Caulfield 	if (!entry)
11886ed7257bSPatrick Caulfield 		return NULL;
11896ed7257bSPatrick Caulfield 
1190e1a7cbceSAlexander Aring 	entry->page = alloc_page(allocation | __GFP_ZERO);
11916ed7257bSPatrick Caulfield 	if (!entry->page) {
11926ed7257bSPatrick Caulfield 		kfree(entry);
11936ed7257bSPatrick Caulfield 		return NULL;
11946ed7257bSPatrick Caulfield 	}
11956ed7257bSPatrick Caulfield 
11966ed7257bSPatrick Caulfield 	entry->con = con;
1197f0747ebfSAlexander Aring 	entry->users = 1;
11988f2dc78dSAlexander Aring 	kref_init(&entry->ref);
11998f2dc78dSAlexander Aring 	INIT_LIST_HEAD(&entry->msgs);
12006ed7257bSPatrick Caulfield 
12016ed7257bSPatrick Caulfield 	return entry;
12026ed7257bSPatrick Caulfield }
12036ed7257bSPatrick Caulfield 
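/*
 * new_wq_entry - reserve @len bytes of space on con's write queue. If the
 * last queued entry still has room, it is reused; otherwise a new
 * page-backed entry is allocated. In both cases a reference on the entry
 * is taken, *ppc is set to the reserved space and the optional @cb
 * callback is invoked with @data while writequeue_lock is held.
 */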
1204f0747ebfSAlexander Aring static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
12058f2dc78dSAlexander Aring 					     gfp_t allocation, char **ppc,
12065c16febbSAlexander Aring 					     void (*cb)(void *data), void *data)
1207f0747ebfSAlexander Aring {
1208f0747ebfSAlexander Aring 	struct writequeue_entry *e;
1209f0747ebfSAlexander Aring 
1210f0747ebfSAlexander Aring 	spin_lock(&con->writequeue_lock);
1211f0747ebfSAlexander Aring 	if (!list_empty(&con->writequeue)) {
1212f0747ebfSAlexander Aring 		e = list_last_entry(&con->writequeue, struct writequeue_entry, list);
1213f0747ebfSAlexander Aring 		if (DLM_WQ_REMAIN_BYTES(e) >= len) {
12148f2dc78dSAlexander Aring 			kref_get(&e->ref);
12158f2dc78dSAlexander Aring 
1216f0747ebfSAlexander Aring 			*ppc = page_address(e->page) + e->end;
12178f2dc78dSAlexander Aring 			if (cb)
12185c16febbSAlexander Aring 				cb(data);
12198f2dc78dSAlexander Aring 
1220f0747ebfSAlexander Aring 			e->end += len;
1221f0747ebfSAlexander Aring 			e->users++;
1222f0747ebfSAlexander Aring 			spin_unlock(&con->writequeue_lock);
1223f0747ebfSAlexander Aring 
1224f0747ebfSAlexander Aring 			return e;
1225f0747ebfSAlexander Aring 		}
1226f0747ebfSAlexander Aring 	}
1227f0747ebfSAlexander Aring 	spin_unlock(&con->writequeue_lock);
1228f0747ebfSAlexander Aring 
1229f0747ebfSAlexander Aring 	e = new_writequeue_entry(con, allocation);
1230f0747ebfSAlexander Aring 	if (!e)
1231f0747ebfSAlexander Aring 		return NULL;
1232f0747ebfSAlexander Aring 
12338f2dc78dSAlexander Aring 	kref_get(&e->ref);
1234f0747ebfSAlexander Aring 	*ppc = page_address(e->page);
1235f0747ebfSAlexander Aring 	e->end += len;
12368aa31cbfSAlexander Aring 	atomic_inc(&con->writequeue_cnt);
1237f0747ebfSAlexander Aring 
1238f0747ebfSAlexander Aring 	spin_lock(&con->writequeue_lock);
12398f2dc78dSAlexander Aring 	if (cb)
12405c16febbSAlexander Aring 		cb(data);
12418f2dc78dSAlexander Aring 
1242f0747ebfSAlexander Aring 	list_add_tail(&e->list, &con->writequeue);
1243f0747ebfSAlexander Aring 	spin_unlock(&con->writequeue_lock);
1244f0747ebfSAlexander Aring 
1245f0747ebfSAlexander Aring 	return e;
1246f0747ebfSAlexander Aring };
1247f0747ebfSAlexander Aring 
12482874d1a6SAlexander Aring static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
12492874d1a6SAlexander Aring 						gfp_t allocation, char **ppc,
12505c16febbSAlexander Aring 						void (*cb)(void *data),
12515c16febbSAlexander Aring 						void *data)
12522874d1a6SAlexander Aring {
12532874d1a6SAlexander Aring 	struct writequeue_entry *e;
12542874d1a6SAlexander Aring 	struct dlm_msg *msg;
1255c51b0221SAlexander Aring 	bool sleepable;
12562874d1a6SAlexander Aring 
12572874d1a6SAlexander Aring 	msg = kzalloc(sizeof(*msg), allocation);
12582874d1a6SAlexander Aring 	if (!msg)
12592874d1a6SAlexander Aring 		return NULL;
12602874d1a6SAlexander Aring 
1261c51b0221SAlexander Aring 	/* this mutex is used as a wait to avoid multiple "fast"
1262c51b0221SAlexander Aring 	 * writequeue page list entry allocations in new_wq_entry() during
1263c51b0221SAlexander Aring 	 * normal operation, which is a sleepable context. Without it we
1264c51b0221SAlexander Aring 	 * could end up with multiple writequeue entries holding only one
1265c51b0221SAlexander Aring 	 * dlm message each, because multiple callers were waiting at the
1266c51b0221SAlexander Aring 	 * writequeue_lock in new_wq_entry().
1267c51b0221SAlexander Aring 	 */
1268c51b0221SAlexander Aring 	sleepable = gfpflags_normal_context(allocation);
1269c51b0221SAlexander Aring 	if (sleepable)
1270c51b0221SAlexander Aring 		mutex_lock(&con->wq_alloc);
1271c51b0221SAlexander Aring 
12722874d1a6SAlexander Aring 	kref_init(&msg->ref);
12732874d1a6SAlexander Aring 
12745c16febbSAlexander Aring 	e = new_wq_entry(con, len, allocation, ppc, cb, data);
12752874d1a6SAlexander Aring 	if (!e) {
1276c51b0221SAlexander Aring 		if (sleepable)
1277c51b0221SAlexander Aring 			mutex_unlock(&con->wq_alloc);
1278c51b0221SAlexander Aring 
12792874d1a6SAlexander Aring 		kfree(msg);
12802874d1a6SAlexander Aring 		return NULL;
12812874d1a6SAlexander Aring 	}
12822874d1a6SAlexander Aring 
1283c51b0221SAlexander Aring 	if (sleepable)
1284c51b0221SAlexander Aring 		mutex_unlock(&con->wq_alloc);
1285c51b0221SAlexander Aring 
12862874d1a6SAlexander Aring 	msg->ppc = *ppc;
12872874d1a6SAlexander Aring 	msg->len = len;
12882874d1a6SAlexander Aring 	msg->entry = e;
12892874d1a6SAlexander Aring 
12902874d1a6SAlexander Aring 	return msg;
12912874d1a6SAlexander Aring }
12922874d1a6SAlexander Aring 
12938f2dc78dSAlexander Aring struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
12945c16febbSAlexander Aring 				     char **ppc, void (*cb)(void *data),
12955c16febbSAlexander Aring 				     void *data)
12966ed7257bSPatrick Caulfield {
12976ed7257bSPatrick Caulfield 	struct connection *con;
12988f2dc78dSAlexander Aring 	struct dlm_msg *msg;
1299b38bc9c2SAlexander Aring 	int idx;
13006ed7257bSPatrick Caulfield 
1301d10a0b88SAlexander Aring 	if (len > DLM_MAX_SOCKET_BUFSIZE ||
1302c45674fbSAlexander Aring 	    len < sizeof(struct dlm_header)) {
1303d10a0b88SAlexander Aring 		BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE);
1304692f51c8SAlexander Aring 		log_print("failed to allocate a buffer of size %d", len);
1305c45674fbSAlexander Aring 		WARN_ON(1);
1306692f51c8SAlexander Aring 		return NULL;
1307692f51c8SAlexander Aring 	}
1308692f51c8SAlexander Aring 
1309b38bc9c2SAlexander Aring 	idx = srcu_read_lock(&connections_srcu);
13106ed7257bSPatrick Caulfield 	con = nodeid2con(nodeid, allocation);
1311b38bc9c2SAlexander Aring 	if (!con) {
1312b38bc9c2SAlexander Aring 		srcu_read_unlock(&connections_srcu, idx);
13136ed7257bSPatrick Caulfield 		return NULL;
1314b38bc9c2SAlexander Aring 	}
13156ed7257bSPatrick Caulfield 
13165c16febbSAlexander Aring 	msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, data);
13178f2dc78dSAlexander Aring 	if (!msg) {
1318b38bc9c2SAlexander Aring 		srcu_read_unlock(&connections_srcu, idx);
1319b38bc9c2SAlexander Aring 		return NULL;
1320b38bc9c2SAlexander Aring 	}
1321b38bc9c2SAlexander Aring 
13228f2dc78dSAlexander Aring 	/* we assume if this is successful, commit must be called */
13238f2dc78dSAlexander Aring 	msg->idx = idx;
13248f2dc78dSAlexander Aring 	return msg;
13258f2dc78dSAlexander Aring }
13268f2dc78dSAlexander Aring 
13272874d1a6SAlexander Aring static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
13286ed7257bSPatrick Caulfield {
13298f2dc78dSAlexander Aring 	struct writequeue_entry *e = msg->entry;
13306ed7257bSPatrick Caulfield 	struct connection *con = e->con;
13316ed7257bSPatrick Caulfield 	int users;
13326ed7257bSPatrick Caulfield 
13336ed7257bSPatrick Caulfield 	spin_lock(&con->writequeue_lock);
13348f2dc78dSAlexander Aring 	kref_get(&msg->ref);
13358f2dc78dSAlexander Aring 	list_add(&msg->list, &e->msgs);
13368f2dc78dSAlexander Aring 
13376ed7257bSPatrick Caulfield 	users = --e->users;
13386ed7257bSPatrick Caulfield 	if (users)
13396ed7257bSPatrick Caulfield 		goto out;
1340f0747ebfSAlexander Aring 
1341f0747ebfSAlexander Aring 	e->len = DLM_WQ_LENGTH_BYTES(e);
13426ed7257bSPatrick Caulfield 	spin_unlock(&con->writequeue_lock);
13436ed7257bSPatrick Caulfield 
13446ed7257bSPatrick Caulfield 	queue_work(send_workqueue, &con->swork);
13456ed7257bSPatrick Caulfield 	return;
13466ed7257bSPatrick Caulfield 
13476ed7257bSPatrick Caulfield out:
13486ed7257bSPatrick Caulfield 	spin_unlock(&con->writequeue_lock);
13496ed7257bSPatrick Caulfield 	return;
13506ed7257bSPatrick Caulfield }
13516ed7257bSPatrick Caulfield 
13522874d1a6SAlexander Aring void dlm_lowcomms_commit_msg(struct dlm_msg *msg)
13532874d1a6SAlexander Aring {
13542874d1a6SAlexander Aring 	_dlm_lowcomms_commit_msg(msg);
13552874d1a6SAlexander Aring 	srcu_read_unlock(&connections_srcu, msg->idx);
13562874d1a6SAlexander Aring }
13572874d1a6SAlexander Aring 
13588f2dc78dSAlexander Aring void dlm_lowcomms_put_msg(struct dlm_msg *msg)
13598f2dc78dSAlexander Aring {
13608f2dc78dSAlexander Aring 	kref_put(&msg->ref, dlm_msg_release);
13618f2dc78dSAlexander Aring }
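
/* A sketch of the expected caller pattern for the message API above,
 * inferred from the srcu and reference handling (the buffer "buf" and
 * its length "len" are illustrative only):
 *
 *	msg = dlm_lowcomms_new_msg(nodeid, len, GFP_NOFS, &ppc, NULL, NULL);
 *	if (!msg)
 *		return -ENOMEM;
 *	memcpy(ppc, buf, len);
 *	dlm_lowcomms_commit_msg(msg);	(queues the data, releases the srcu idx)
 *	dlm_lowcomms_put_msg(msg);	(drops the caller's reference)
 */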
13628f2dc78dSAlexander Aring 
13632874d1a6SAlexander Aring /* does not hold connections_srcu, for use from the workqueue only */
13642874d1a6SAlexander Aring int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
13652874d1a6SAlexander Aring {
13662874d1a6SAlexander Aring 	struct dlm_msg *msg_resend;
13672874d1a6SAlexander Aring 	char *ppc;
13682874d1a6SAlexander Aring 
13692874d1a6SAlexander Aring 	if (msg->retransmit)
13702874d1a6SAlexander Aring 		return 1;
13712874d1a6SAlexander Aring 
13722874d1a6SAlexander Aring 	msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len,
13732874d1a6SAlexander Aring 					      GFP_ATOMIC, &ppc, NULL, NULL);
13742874d1a6SAlexander Aring 	if (!msg_resend)
13752874d1a6SAlexander Aring 		return -ENOMEM;
13762874d1a6SAlexander Aring 
13772874d1a6SAlexander Aring 	msg->retransmit = true;
13782874d1a6SAlexander Aring 	kref_get(&msg->ref);
13792874d1a6SAlexander Aring 	msg_resend->orig_msg = msg;
13802874d1a6SAlexander Aring 
13812874d1a6SAlexander Aring 	memcpy(ppc, msg->ppc, msg->len);
13822874d1a6SAlexander Aring 	_dlm_lowcomms_commit_msg(msg_resend);
13832874d1a6SAlexander Aring 	dlm_lowcomms_put_msg(msg_resend);
13842874d1a6SAlexander Aring 
13852874d1a6SAlexander Aring 	return 0;
13862874d1a6SAlexander Aring }
13872874d1a6SAlexander Aring 
13886ed7257bSPatrick Caulfield /* Send a message */
13896ed7257bSPatrick Caulfield static void send_to_sock(struct connection *con)
13906ed7257bSPatrick Caulfield {
13916ed7257bSPatrick Caulfield 	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
13926ed7257bSPatrick Caulfield 	struct writequeue_entry *e;
139366d5955aSAlexander Aring 	int len, offset, ret;
1394f92c8dd7SBob Peterson 	int count = 0;
13956ed7257bSPatrick Caulfield 
13966ed7257bSPatrick Caulfield 	mutex_lock(&con->sock_mutex);
13976ed7257bSPatrick Caulfield 	if (con->sock == NULL)
13986ed7257bSPatrick Caulfield 		goto out_connect;
13996ed7257bSPatrick Caulfield 
14006ed7257bSPatrick Caulfield 	spin_lock(&con->writequeue_lock);
14016ed7257bSPatrick Caulfield 	for (;;) {
140266d5955aSAlexander Aring 		e = con_next_wq(con);
140366d5955aSAlexander Aring 		if (!e)
14046ed7257bSPatrick Caulfield 			break;
14056ed7257bSPatrick Caulfield 
14066ed7257bSPatrick Caulfield 		len = e->len;
14076ed7257bSPatrick Caulfield 		offset = e->offset;
14086ed7257bSPatrick Caulfield 		BUG_ON(len == 0 && e->users == 0);
14096ed7257bSPatrick Caulfield 		spin_unlock(&con->writequeue_lock);
14106ed7257bSPatrick Caulfield 
14111329e3f2SPaolo Bonzini 		ret = kernel_sendpage(con->sock, e->page, offset, len,
14126ed7257bSPatrick Caulfield 				      msg_flags);
141392732376SAlexander Aring 		trace_dlm_send(con->nodeid, ret);
1414d66f8277SPatrick Caulfield 		if (ret == -EAGAIN || ret == 0) {
1415b36930ddSDavid Miller 			if (ret == -EAGAIN &&
14169cd3e072SEric Dumazet 			    test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
1417b36930ddSDavid Miller 			    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
1418b36930ddSDavid Miller 				/* Notify TCP that we're limited by the
1419b36930ddSDavid Miller 				 * application window size.
1420b36930ddSDavid Miller 				 */
1421b36930ddSDavid Miller 				set_bit(SOCK_NOSPACE, &con->sock->flags);
1422b36930ddSDavid Miller 				con->sock->sk->sk_write_pending++;
1423b36930ddSDavid Miller 			}
1424d66f8277SPatrick Caulfield 			cond_resched();
14256ed7257bSPatrick Caulfield 			goto out;
14269c5bef58SYing Xue 		} else if (ret < 0)
1427ba868d9dSAlexander Aring 			goto out;
1428f92c8dd7SBob Peterson 
14296ed7257bSPatrick Caulfield 		/* Don't starve people filling buffers */
1430f92c8dd7SBob Peterson 		if (++count >= MAX_SEND_MSG_COUNT) {
14316ed7257bSPatrick Caulfield 			cond_resched();
1432f92c8dd7SBob Peterson 			count = 0;
1433f92c8dd7SBob Peterson 		}
14346ed7257bSPatrick Caulfield 
14356ed7257bSPatrick Caulfield 		spin_lock(&con->writequeue_lock);
14365d689871SMike Christie 		writequeue_entry_complete(e, ret);
14376ed7257bSPatrick Caulfield 	}
14386ed7257bSPatrick Caulfield 	spin_unlock(&con->writequeue_lock);
14398aa31cbfSAlexander Aring 
14408aa31cbfSAlexander Aring 	/* close if we got EOF */
14418aa31cbfSAlexander Aring 	if (test_and_clear_bit(CF_EOF, &con->flags)) {
14428aa31cbfSAlexander Aring 		mutex_unlock(&con->sock_mutex);
14438aa31cbfSAlexander Aring 		close_connection(con, false, false, true);
14448aa31cbfSAlexander Aring 
14458aa31cbfSAlexander Aring 		/* handling for tcp shutdown */
14468aa31cbfSAlexander Aring 		clear_bit(CF_SHUTDOWN, &con->flags);
14478aa31cbfSAlexander Aring 		wake_up(&con->shutdown_wait);
14488aa31cbfSAlexander Aring 	} else {
14498aa31cbfSAlexander Aring 		mutex_unlock(&con->sock_mutex);
14508aa31cbfSAlexander Aring 	}
14518aa31cbfSAlexander Aring 
14528aa31cbfSAlexander Aring 	return;
14538aa31cbfSAlexander Aring 
14546ed7257bSPatrick Caulfield out:
14556ed7257bSPatrick Caulfield 	mutex_unlock(&con->sock_mutex);
14566ed7257bSPatrick Caulfield 	return;
14576ed7257bSPatrick Caulfield 
14586ed7257bSPatrick Caulfield out_connect:
14596ed7257bSPatrick Caulfield 	mutex_unlock(&con->sock_mutex);
146001da24d3SBob Peterson 	queue_work(send_workqueue, &con->swork);
146101da24d3SBob Peterson 	cond_resched();
14626ed7257bSPatrick Caulfield }
14636ed7257bSPatrick Caulfield 
14646ed7257bSPatrick Caulfield static void clean_one_writequeue(struct connection *con)
14656ed7257bSPatrick Caulfield {
14665e9ccc37SChristine Caulfield 	struct writequeue_entry *e, *safe;
14676ed7257bSPatrick Caulfield 
14686ed7257bSPatrick Caulfield 	spin_lock(&con->writequeue_lock);
14695e9ccc37SChristine Caulfield 	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
14706ed7257bSPatrick Caulfield 		free_entry(e);
14716ed7257bSPatrick Caulfield 	}
14726ed7257bSPatrick Caulfield 	spin_unlock(&con->writequeue_lock);
14736ed7257bSPatrick Caulfield }
14746ed7257bSPatrick Caulfield 
14756ed7257bSPatrick Caulfield /* Called from recovery when it knows that a node has
14766ed7257bSPatrick Caulfield    left the cluster */
14776ed7257bSPatrick Caulfield int dlm_lowcomms_close(int nodeid)
14786ed7257bSPatrick Caulfield {
14796ed7257bSPatrick Caulfield 	struct connection *con;
148036b71a8bSDavid Teigland 	struct dlm_node_addr *na;
1481b38bc9c2SAlexander Aring 	int idx;
14826ed7257bSPatrick Caulfield 
14836ed7257bSPatrick Caulfield 	log_print("closing connection to node %d", nodeid);
1484b38bc9c2SAlexander Aring 	idx = srcu_read_lock(&connections_srcu);
14856ed7257bSPatrick Caulfield 	con = nodeid2con(nodeid, 0);
14866ed7257bSPatrick Caulfield 	if (con) {
1487063c4c99SLars Marowsky-Bree 		set_bit(CF_CLOSE, &con->flags);
14880d737a8cSMarcelo Ricardo Leitner 		close_connection(con, true, true, true);
14896ed7257bSPatrick Caulfield 		clean_one_writequeue(con);
149053a5edaaSAlexander Aring 		if (con->othercon)
149153a5edaaSAlexander Aring 			clean_one_writequeue(con->othercon);
14926ed7257bSPatrick Caulfield 	}
1493b38bc9c2SAlexander Aring 	srcu_read_unlock(&connections_srcu, idx);
149436b71a8bSDavid Teigland 
149536b71a8bSDavid Teigland 	spin_lock(&dlm_node_addrs_spin);
149636b71a8bSDavid Teigland 	na = find_node_addr(nodeid);
149736b71a8bSDavid Teigland 	if (na) {
149836b71a8bSDavid Teigland 		list_del(&na->list);
149936b71a8bSDavid Teigland 		while (na->addr_count--)
150036b71a8bSDavid Teigland 			kfree(na->addr[na->addr_count]);
150136b71a8bSDavid Teigland 		kfree(na);
150236b71a8bSDavid Teigland 	}
150336b71a8bSDavid Teigland 	spin_unlock(&dlm_node_addrs_spin);
150436b71a8bSDavid Teigland 
15056ed7257bSPatrick Caulfield 	return 0;
15066ed7257bSPatrick Caulfield }
15076ed7257bSPatrick Caulfield 
15086ed7257bSPatrick Caulfield /* Receive workqueue function */
15096ed7257bSPatrick Caulfield static void process_recv_sockets(struct work_struct *work)
15106ed7257bSPatrick Caulfield {
15116ed7257bSPatrick Caulfield 	struct connection *con = container_of(work, struct connection, rwork);
15126ed7257bSPatrick Caulfield 
15136ed7257bSPatrick Caulfield 	clear_bit(CF_READ_PENDING, &con->flags);
151462699b3fSAlexander Aring 	receive_from_sock(con);
15156ed7257bSPatrick Caulfield }
15166ed7257bSPatrick Caulfield 
1517d11ccd45SAlexander Aring static void process_listen_recv_socket(struct work_struct *work)
1518d11ccd45SAlexander Aring {
1519d11ccd45SAlexander Aring 	accept_from_sock(&listen_con);
1520d11ccd45SAlexander Aring }
1521d11ccd45SAlexander Aring 
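/*
 * dlm_connect - resolve the address for con->nodeid and initiate an
 * outgoing connection using the protocol-specific ops. Called from
 * process_send_sockets() with con->sock_mutex held; non-fatal errors are
 * retried via lowcomms_connect_sock() up to MAX_CONNECT_RETRIES times.
 */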
15228728a455SAlexander Aring static void dlm_connect(struct connection *con)
15238728a455SAlexander Aring {
15248728a455SAlexander Aring 	struct sockaddr_storage addr;
15258728a455SAlexander Aring 	int result, addr_len;
15268728a455SAlexander Aring 	struct socket *sock;
15278728a455SAlexander Aring 	unsigned int mark;
15288728a455SAlexander Aring 
15298728a455SAlexander Aring 	/* Some odd races can cause double-connects; ignore them */
15308728a455SAlexander Aring 	if (con->retries++ > MAX_CONNECT_RETRIES)
15318728a455SAlexander Aring 		return;
15328728a455SAlexander Aring 
15338728a455SAlexander Aring 	if (con->sock) {
15348728a455SAlexander Aring 		log_print("node %d already connected.", con->nodeid);
15358728a455SAlexander Aring 		return;
15368728a455SAlexander Aring 	}
15378728a455SAlexander Aring 
15388728a455SAlexander Aring 	memset(&addr, 0, sizeof(addr));
15398728a455SAlexander Aring 	result = nodeid_to_addr(con->nodeid, &addr, NULL,
15408728a455SAlexander Aring 				dlm_proto_ops->try_new_addr, &mark);
15418728a455SAlexander Aring 	if (result < 0) {
15428728a455SAlexander Aring 		log_print("no address for nodeid %d", con->nodeid);
15438728a455SAlexander Aring 		return;
15448728a455SAlexander Aring 	}
15458728a455SAlexander Aring 
15468728a455SAlexander Aring 	/* Create a socket to communicate with */
15478728a455SAlexander Aring 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
15488728a455SAlexander Aring 				  SOCK_STREAM, dlm_proto_ops->proto, &sock);
15498728a455SAlexander Aring 	if (result < 0)
15508728a455SAlexander Aring 		goto socket_err;
15518728a455SAlexander Aring 
15528728a455SAlexander Aring 	sock_set_mark(sock->sk, mark);
15538728a455SAlexander Aring 	dlm_proto_ops->sockopts(sock);
15548728a455SAlexander Aring 
15558728a455SAlexander Aring 	add_sock(sock, con);
15568728a455SAlexander Aring 
15578728a455SAlexander Aring 	result = dlm_proto_ops->bind(sock);
15588728a455SAlexander Aring 	if (result < 0)
15598728a455SAlexander Aring 		goto add_sock_err;
15608728a455SAlexander Aring 
15618728a455SAlexander Aring 	log_print_ratelimited("connecting to %d", con->nodeid);
15628728a455SAlexander Aring 	make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len);
15638728a455SAlexander Aring 	result = dlm_proto_ops->connect(con, sock, (struct sockaddr *)&addr,
15648728a455SAlexander Aring 					addr_len);
15658728a455SAlexander Aring 	if (result < 0)
15668728a455SAlexander Aring 		goto add_sock_err;
15678728a455SAlexander Aring 
15688728a455SAlexander Aring 	return;
15698728a455SAlexander Aring 
15708728a455SAlexander Aring add_sock_err:
15718728a455SAlexander Aring 	dlm_close_sock(&con->sock);
15728728a455SAlexander Aring 
15738728a455SAlexander Aring socket_err:
15748728a455SAlexander Aring 	/*
15758728a455SAlexander Aring 	 * Some errors are fatal and this list might need adjusting. For other
15768728a455SAlexander Aring 	 * errors we try again until the max number of retries is reached.
15778728a455SAlexander Aring 	 */
15788728a455SAlexander Aring 	if (result != -EHOSTUNREACH &&
15798728a455SAlexander Aring 	    result != -ENETUNREACH &&
15808728a455SAlexander Aring 	    result != -ENETDOWN &&
15818728a455SAlexander Aring 	    result != -EINVAL &&
15828728a455SAlexander Aring 	    result != -EPROTONOSUPPORT) {
15838728a455SAlexander Aring 		log_print("connect %d try %d error %d", con->nodeid,
15848728a455SAlexander Aring 			  con->retries, result);
15858728a455SAlexander Aring 		msleep(1000);
15868728a455SAlexander Aring 		lowcomms_connect_sock(con);
15878728a455SAlexander Aring 	}
15888728a455SAlexander Aring }
15898728a455SAlexander Aring 
15906ed7257bSPatrick Caulfield /* Send workqueue function */
15916ed7257bSPatrick Caulfield static void process_send_sockets(struct work_struct *work)
15926ed7257bSPatrick Caulfield {
15936ed7257bSPatrick Caulfield 	struct connection *con = container_of(work, struct connection, swork);
15946ed7257bSPatrick Caulfield 
15957443bc96SAlexander Aring 	WARN_ON(test_bit(CF_IS_OTHERCON, &con->flags));
15967443bc96SAlexander Aring 
15978a4abb08Stsutomu.owa@toshiba.co.jp 	clear_bit(CF_WRITE_PENDING, &con->flags);
1598ba868d9dSAlexander Aring 
1599489d8e55SAlexander Aring 	if (test_and_clear_bit(CF_RECONNECT, &con->flags)) {
1600ba868d9dSAlexander Aring 		close_connection(con, false, false, true);
1601489d8e55SAlexander Aring 		dlm_midcomms_unack_msg_resend(con->nodeid);
1602489d8e55SAlexander Aring 	}
1603ba868d9dSAlexander Aring 
16048728a455SAlexander Aring 	if (con->sock == NULL) {
1605ba868d9dSAlexander Aring 		if (test_and_clear_bit(CF_DELAY_CONNECT, &con->flags))
1606ba868d9dSAlexander Aring 			msleep(1000);
16078728a455SAlexander Aring 
16088728a455SAlexander Aring 		mutex_lock(&con->sock_mutex);
16098728a455SAlexander Aring 		dlm_connect(con);
16108728a455SAlexander Aring 		mutex_unlock(&con->sock_mutex);
1611ba868d9dSAlexander Aring 	}
16128728a455SAlexander Aring 
161301da24d3SBob Peterson 	if (!list_empty(&con->writequeue))
16146ed7257bSPatrick Caulfield 		send_to_sock(con);
16156ed7257bSPatrick Caulfield }
16166ed7257bSPatrick Caulfield 
16176ed7257bSPatrick Caulfield static void work_stop(void)
16186ed7257bSPatrick Caulfield {
1619fcef0e6cSAlexander Aring 	if (recv_workqueue) {
16206ed7257bSPatrick Caulfield 		destroy_workqueue(recv_workqueue);
1621fcef0e6cSAlexander Aring 		recv_workqueue = NULL;
1622fcef0e6cSAlexander Aring 	}
1623fcef0e6cSAlexander Aring 
1624fcef0e6cSAlexander Aring 	if (send_workqueue) {
16256ed7257bSPatrick Caulfield 		destroy_workqueue(send_workqueue);
1626fcef0e6cSAlexander Aring 		send_workqueue = NULL;
1627fcef0e6cSAlexander Aring 	}
16286ed7257bSPatrick Caulfield }
16296ed7257bSPatrick Caulfield 
16306ed7257bSPatrick Caulfield static int work_start(void)
16316ed7257bSPatrick Caulfield {
16326c6a1cc6SAlexander Aring 	recv_workqueue = alloc_ordered_workqueue("dlm_recv", WQ_MEM_RECLAIM);
1633b9d41052SNamhyung Kim 	if (!recv_workqueue) {
1634b9d41052SNamhyung Kim 		log_print("can't start dlm_recv");
1635b9d41052SNamhyung Kim 		return -ENOMEM;
16366ed7257bSPatrick Caulfield 	}
16376ed7257bSPatrick Caulfield 
16386c6a1cc6SAlexander Aring 	send_workqueue = alloc_ordered_workqueue("dlm_send", WQ_MEM_RECLAIM);
1639b9d41052SNamhyung Kim 	if (!send_workqueue) {
1640b9d41052SNamhyung Kim 		log_print("can't start dlm_send");
16416ed7257bSPatrick Caulfield 		destroy_workqueue(recv_workqueue);
1642fcef0e6cSAlexander Aring 		recv_workqueue = NULL;
1643b9d41052SNamhyung Kim 		return -ENOMEM;
16446ed7257bSPatrick Caulfield 	}
16456ed7257bSPatrick Caulfield 
16466ed7257bSPatrick Caulfield 	return 0;
16476ed7257bSPatrick Caulfield }
16486ed7257bSPatrick Caulfield 
16499d232469SAlexander Aring static void shutdown_conn(struct connection *con)
16509d232469SAlexander Aring {
1651a66c008cSAlexander Aring 	if (dlm_proto_ops->shutdown_action)
1652a66c008cSAlexander Aring 		dlm_proto_ops->shutdown_action(con);
16539d232469SAlexander Aring }
16549d232469SAlexander Aring 
16559d232469SAlexander Aring void dlm_lowcomms_shutdown(void)
16569d232469SAlexander Aring {
1657b38bc9c2SAlexander Aring 	int idx;
1658b38bc9c2SAlexander Aring 
16599d232469SAlexander Aring 	/* Set all the flags to prevent any
16609d232469SAlexander Aring 	 * socket activity.
16619d232469SAlexander Aring 	 */
16629d232469SAlexander Aring 	dlm_allow_conn = 0;
16639d232469SAlexander Aring 
16649d232469SAlexander Aring 	if (recv_workqueue)
16659d232469SAlexander Aring 		flush_workqueue(recv_workqueue);
16669d232469SAlexander Aring 	if (send_workqueue)
16679d232469SAlexander Aring 		flush_workqueue(send_workqueue);
16689d232469SAlexander Aring 
16699d232469SAlexander Aring 	dlm_close_sock(&listen_con.sock);
16709d232469SAlexander Aring 
1671b38bc9c2SAlexander Aring 	idx = srcu_read_lock(&connections_srcu);
16729d232469SAlexander Aring 	foreach_conn(shutdown_conn);
1673b38bc9c2SAlexander Aring 	srcu_read_unlock(&connections_srcu, idx);
16749d232469SAlexander Aring }
16759d232469SAlexander Aring 
1676f0fb83cbStsutomu.owa@toshiba.co.jp static void _stop_conn(struct connection *con, bool and_other)
16776ed7257bSPatrick Caulfield {
1678f0fb83cbStsutomu.owa@toshiba.co.jp 	mutex_lock(&con->sock_mutex);
1679173a31feStsutomu.owa@toshiba.co.jp 	set_bit(CF_CLOSE, &con->flags);
1680f0fb83cbStsutomu.owa@toshiba.co.jp 	set_bit(CF_READ_PENDING, &con->flags);
16818a4abb08Stsutomu.owa@toshiba.co.jp 	set_bit(CF_WRITE_PENDING, &con->flags);
168293eaadebStsutomu.owa@toshiba.co.jp 	if (con->sock && con->sock->sk) {
168393eaadebStsutomu.owa@toshiba.co.jp 		write_lock_bh(&con->sock->sk->sk_callback_lock);
1684afb853fbSPatrick Caulfield 		con->sock->sk->sk_user_data = NULL;
168593eaadebStsutomu.owa@toshiba.co.jp 		write_unlock_bh(&con->sock->sk->sk_callback_lock);
168693eaadebStsutomu.owa@toshiba.co.jp 	}
1687f0fb83cbStsutomu.owa@toshiba.co.jp 	if (con->othercon && and_other)
1688f0fb83cbStsutomu.owa@toshiba.co.jp 		_stop_conn(con->othercon, false);
1689f0fb83cbStsutomu.owa@toshiba.co.jp 	mutex_unlock(&con->sock_mutex);
1690f0fb83cbStsutomu.owa@toshiba.co.jp }
1691f0fb83cbStsutomu.owa@toshiba.co.jp 
1692f0fb83cbStsutomu.owa@toshiba.co.jp static void stop_conn(struct connection *con)
1693f0fb83cbStsutomu.owa@toshiba.co.jp {
1694f0fb83cbStsutomu.owa@toshiba.co.jp 	_stop_conn(con, true);
1695afb853fbSPatrick Caulfield }
16965e9ccc37SChristine Caulfield 
16974798cbbfSAlexander Aring static void connection_release(struct rcu_head *rcu)
16984798cbbfSAlexander Aring {
16994798cbbfSAlexander Aring 	struct connection *con = container_of(rcu, struct connection, rcu);
17004798cbbfSAlexander Aring 
17014798cbbfSAlexander Aring 	kfree(con->rx_buf);
17024798cbbfSAlexander Aring 	kfree(con);
17034798cbbfSAlexander Aring }
17044798cbbfSAlexander Aring 
17055e9ccc37SChristine Caulfield static void free_conn(struct connection *con)
17065e9ccc37SChristine Caulfield {
17070d737a8cSMarcelo Ricardo Leitner 	close_connection(con, true, true, true);
1708a47666ebSAlexander Aring 	spin_lock(&connections_lock);
1709a47666ebSAlexander Aring 	hlist_del_rcu(&con->list);
1710a47666ebSAlexander Aring 	spin_unlock(&connections_lock);
1711948c47e9SAlexander Aring 	if (con->othercon) {
1712948c47e9SAlexander Aring 		clean_one_writequeue(con->othercon);
17135cbec208SAlexander Aring 		call_srcu(&connections_srcu, &con->othercon->rcu,
17145cbec208SAlexander Aring 			  connection_release);
1715948c47e9SAlexander Aring 	}
17160de98432SAlexander Aring 	clean_one_writequeue(con);
17175cbec208SAlexander Aring 	call_srcu(&connections_srcu, &con->rcu, connection_release);
17186ed7257bSPatrick Caulfield }
17195e9ccc37SChristine Caulfield 
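/*
 * work_flush - quiesce all connection work. stop_conn() sets the
 * READ/WRITE pending bits on every connection, the workqueues are
 * flushed, and the loop repeats until no work item has cleared those
 * bits in between, i.e. until no receive/send work is left running.
 */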
1720f0fb83cbStsutomu.owa@toshiba.co.jp static void work_flush(void)
1721f0fb83cbStsutomu.owa@toshiba.co.jp {
1722b38bc9c2SAlexander Aring 	int ok;
1723f0fb83cbStsutomu.owa@toshiba.co.jp 	int i;
1724f0fb83cbStsutomu.owa@toshiba.co.jp 	struct connection *con;
1725f0fb83cbStsutomu.owa@toshiba.co.jp 
1726f0fb83cbStsutomu.owa@toshiba.co.jp 	do {
1727f0fb83cbStsutomu.owa@toshiba.co.jp 		ok = 1;
1728f0fb83cbStsutomu.owa@toshiba.co.jp 		foreach_conn(stop_conn);
1729b355516fSDavid Windsor 		if (recv_workqueue)
1730f0fb83cbStsutomu.owa@toshiba.co.jp 			flush_workqueue(recv_workqueue);
1731b355516fSDavid Windsor 		if (send_workqueue)
1732f0fb83cbStsutomu.owa@toshiba.co.jp 			flush_workqueue(send_workqueue);
1733f0fb83cbStsutomu.owa@toshiba.co.jp 		for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
1734a47666ebSAlexander Aring 			hlist_for_each_entry_rcu(con, &connection_hash[i],
1735a47666ebSAlexander Aring 						 list) {
1736f0fb83cbStsutomu.owa@toshiba.co.jp 				ok &= test_bit(CF_READ_PENDING, &con->flags);
17378a4abb08Stsutomu.owa@toshiba.co.jp 				ok &= test_bit(CF_WRITE_PENDING, &con->flags);
17388a4abb08Stsutomu.owa@toshiba.co.jp 				if (con->othercon) {
1739f0fb83cbStsutomu.owa@toshiba.co.jp 					ok &= test_bit(CF_READ_PENDING,
1740f0fb83cbStsutomu.owa@toshiba.co.jp 						       &con->othercon->flags);
17418a4abb08Stsutomu.owa@toshiba.co.jp 					ok &= test_bit(CF_WRITE_PENDING,
17428a4abb08Stsutomu.owa@toshiba.co.jp 						       &con->othercon->flags);
17438a4abb08Stsutomu.owa@toshiba.co.jp 				}
1744f0fb83cbStsutomu.owa@toshiba.co.jp 			}
1745f0fb83cbStsutomu.owa@toshiba.co.jp 		}
1746f0fb83cbStsutomu.owa@toshiba.co.jp 	} while (!ok);
1747f0fb83cbStsutomu.owa@toshiba.co.jp }
1748f0fb83cbStsutomu.owa@toshiba.co.jp 
17495e9ccc37SChristine Caulfield void dlm_lowcomms_stop(void)
17505e9ccc37SChristine Caulfield {
1751b38bc9c2SAlexander Aring 	int idx;
1752b38bc9c2SAlexander Aring 
1753b38bc9c2SAlexander Aring 	idx = srcu_read_lock(&connections_srcu);
1754f0fb83cbStsutomu.owa@toshiba.co.jp 	work_flush();
17553a8db798SMarcelo Ricardo Leitner 	foreach_conn(free_conn);
1756b38bc9c2SAlexander Aring 	srcu_read_unlock(&connections_srcu, idx);
17576ed7257bSPatrick Caulfield 	work_stop();
1758043697f0SAlexander Aring 	deinit_local();
1759a66c008cSAlexander Aring 
1760a66c008cSAlexander Aring 	dlm_proto_ops = NULL;
17616ed7257bSPatrick Caulfield }
17626ed7257bSPatrick Caulfield 
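/*
 * dlm_listen_for_all - create the single listening socket for incoming
 * cluster connections using the protocol-specific listen_validate,
 * listen_sockopts and listen_bind hooks, and wire it up to
 * process_listen_recv_socket() for accepting new peers.
 */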
17632dc6b115SAlexander Aring static int dlm_listen_for_all(void)
17642dc6b115SAlexander Aring {
17652dc6b115SAlexander Aring 	struct socket *sock;
17662dc6b115SAlexander Aring 	int result;
17672dc6b115SAlexander Aring 
17682dc6b115SAlexander Aring 	log_print("Using %s for communications",
17692dc6b115SAlexander Aring 		  dlm_proto_ops->name);
17702dc6b115SAlexander Aring 
17712dc6b115SAlexander Aring 	result = dlm_proto_ops->listen_validate();
17722dc6b115SAlexander Aring 	if (result < 0)
17732dc6b115SAlexander Aring 		return result;
17742dc6b115SAlexander Aring 
17752dc6b115SAlexander Aring 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
17762dc6b115SAlexander Aring 				  SOCK_STREAM, dlm_proto_ops->proto, &sock);
17772dc6b115SAlexander Aring 	if (result < 0) {
1778fe933675SAlexander Aring 		log_print("Can't create comms socket: %d", result);
17792dc6b115SAlexander Aring 		goto out;
17802dc6b115SAlexander Aring 	}
17812dc6b115SAlexander Aring 
17822dc6b115SAlexander Aring 	sock_set_mark(sock->sk, dlm_config.ci_mark);
17832dc6b115SAlexander Aring 	dlm_proto_ops->listen_sockopts(sock);
17842dc6b115SAlexander Aring 
17852dc6b115SAlexander Aring 	result = dlm_proto_ops->listen_bind(sock);
17862dc6b115SAlexander Aring 	if (result < 0)
17872dc6b115SAlexander Aring 		goto out;
17882dc6b115SAlexander Aring 
17892dc6b115SAlexander Aring 	save_listen_callbacks(sock);
17902dc6b115SAlexander Aring 	add_listen_sock(sock, &listen_con);
17912dc6b115SAlexander Aring 
17922dc6b115SAlexander Aring 	INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
17932dc6b115SAlexander Aring 	result = sock->ops->listen(sock, 5);
17942dc6b115SAlexander Aring 	if (result < 0) {
17952dc6b115SAlexander Aring 		dlm_close_sock(&listen_con.sock);
17962dc6b115SAlexander Aring 		goto out;
17972dc6b115SAlexander Aring 	}
17982dc6b115SAlexander Aring 
17992dc6b115SAlexander Aring 	return 0;
18002dc6b115SAlexander Aring 
18012dc6b115SAlexander Aring out:
18022dc6b115SAlexander Aring 	sock_release(sock);
18032dc6b115SAlexander Aring 	return result;
18042dc6b115SAlexander Aring }
18052dc6b115SAlexander Aring 
18068728a455SAlexander Aring static int dlm_tcp_bind(struct socket *sock)
18078728a455SAlexander Aring {
18088728a455SAlexander Aring 	struct sockaddr_storage src_addr;
18098728a455SAlexander Aring 	int result, addr_len;
18108728a455SAlexander Aring 
18118728a455SAlexander Aring 	/* Bind to our cluster-known address before connecting to avoid
18128728a455SAlexander Aring 	 * routing problems.
18138728a455SAlexander Aring 	 */
18148728a455SAlexander Aring 	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
18158728a455SAlexander Aring 	make_sockaddr(&src_addr, 0, &addr_len);
18168728a455SAlexander Aring 
18178728a455SAlexander Aring 	result = sock->ops->bind(sock, (struct sockaddr *)&src_addr,
18188728a455SAlexander Aring 				 addr_len);
18198728a455SAlexander Aring 	if (result < 0) {
18208728a455SAlexander Aring 		/* This *may* not indicate a critical error */
18218728a455SAlexander Aring 		log_print("could not bind for connect: %d", result);
18228728a455SAlexander Aring 	}
18238728a455SAlexander Aring 
18248728a455SAlexander Aring 	return 0;
18258728a455SAlexander Aring }
18268728a455SAlexander Aring 
18278728a455SAlexander Aring static int dlm_tcp_connect(struct connection *con, struct socket *sock,
18288728a455SAlexander Aring 			   struct sockaddr *addr, int addr_len)
18298728a455SAlexander Aring {
18308728a455SAlexander Aring 	int ret;
18318728a455SAlexander Aring 
18328728a455SAlexander Aring 	ret = sock->ops->connect(sock, addr, addr_len, O_NONBLOCK);
18338728a455SAlexander Aring 	switch (ret) {
18348728a455SAlexander Aring 	case -EINPROGRESS:
18358728a455SAlexander Aring 		fallthrough;
18368728a455SAlexander Aring 	case 0:
18378728a455SAlexander Aring 		return 0;
18388728a455SAlexander Aring 	}
18398728a455SAlexander Aring 
18408728a455SAlexander Aring 	return ret;
18418728a455SAlexander Aring }
18428728a455SAlexander Aring 
18432dc6b115SAlexander Aring static int dlm_tcp_listen_validate(void)
18442dc6b115SAlexander Aring {
18452dc6b115SAlexander Aring 	/* We don't support multi-homed hosts */
18462dc6b115SAlexander Aring 	if (dlm_local_count > 1) {
18472dc6b115SAlexander Aring 		log_print("TCP protocol can't handle multi-homed hosts, try SCTP");
18482dc6b115SAlexander Aring 		return -EINVAL;
18492dc6b115SAlexander Aring 	}
18502dc6b115SAlexander Aring 
18512dc6b115SAlexander Aring 	return 0;
18522dc6b115SAlexander Aring }
18532dc6b115SAlexander Aring 
18542dc6b115SAlexander Aring static void dlm_tcp_sockopts(struct socket *sock)
18552dc6b115SAlexander Aring {
18562dc6b115SAlexander Aring 	/* Turn off Nagle's algorithm */
18572dc6b115SAlexander Aring 	tcp_sock_set_nodelay(sock->sk);
18582dc6b115SAlexander Aring }
18592dc6b115SAlexander Aring 
18602dc6b115SAlexander Aring static void dlm_tcp_listen_sockopts(struct socket *sock)
18612dc6b115SAlexander Aring {
18622dc6b115SAlexander Aring 	dlm_tcp_sockopts(sock);
18632dc6b115SAlexander Aring 	sock_set_reuseaddr(sock->sk);
18642dc6b115SAlexander Aring }
18652dc6b115SAlexander Aring 
18662dc6b115SAlexander Aring static int dlm_tcp_listen_bind(struct socket *sock)
18672dc6b115SAlexander Aring {
18682dc6b115SAlexander Aring 	int addr_len;
18692dc6b115SAlexander Aring 
18702dc6b115SAlexander Aring 	/* Bind to our port */
18712dc6b115SAlexander Aring 	make_sockaddr(dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
18722dc6b115SAlexander Aring 	return sock->ops->bind(sock, (struct sockaddr *)dlm_local_addr[0],
18732dc6b115SAlexander Aring 			       addr_len);
18742dc6b115SAlexander Aring }
18752dc6b115SAlexander Aring 
1876a66c008cSAlexander Aring static const struct dlm_proto_ops dlm_tcp_ops = {
18772dc6b115SAlexander Aring 	.name = "TCP",
18782dc6b115SAlexander Aring 	.proto = IPPROTO_TCP,
18798728a455SAlexander Aring 	.connect = dlm_tcp_connect,
18808728a455SAlexander Aring 	.sockopts = dlm_tcp_sockopts,
18818728a455SAlexander Aring 	.bind = dlm_tcp_bind,
18822dc6b115SAlexander Aring 	.listen_validate = dlm_tcp_listen_validate,
18832dc6b115SAlexander Aring 	.listen_sockopts = dlm_tcp_listen_sockopts,
18842dc6b115SAlexander Aring 	.listen_bind = dlm_tcp_listen_bind,
1885a66c008cSAlexander Aring 	.shutdown_action = dlm_tcp_shutdown,
1886a66c008cSAlexander Aring 	.eof_condition = tcp_eof_condition,
1887a66c008cSAlexander Aring };
1888a66c008cSAlexander Aring 
18898728a455SAlexander Aring static int dlm_sctp_bind(struct socket *sock)
18908728a455SAlexander Aring {
18918728a455SAlexander Aring 	return sctp_bind_addrs(sock, 0);
18928728a455SAlexander Aring }
18938728a455SAlexander Aring 
18948728a455SAlexander Aring static int dlm_sctp_connect(struct connection *con, struct socket *sock,
18958728a455SAlexander Aring 			    struct sockaddr *addr, int addr_len)
18968728a455SAlexander Aring {
18978728a455SAlexander Aring 	int ret;
18988728a455SAlexander Aring 
18998728a455SAlexander Aring 	/*
19008728a455SAlexander Aring 	 * Make the sock->ops->connect() call return within the specified time,
19018728a455SAlexander Aring 	 * since the O_NONBLOCK argument to connect() does not work here;
19028728a455SAlexander Aring 	 * afterwards, restore the default value of this attribute.
19038728a455SAlexander Aring 	 */
19048728a455SAlexander Aring 	sock_set_sndtimeo(sock->sk, 5);
19058728a455SAlexander Aring 	ret = sock->ops->connect(sock, addr, addr_len, 0);
19068728a455SAlexander Aring 	sock_set_sndtimeo(sock->sk, 0);
19078728a455SAlexander Aring 	if (ret < 0)
19088728a455SAlexander Aring 		return ret;
19098728a455SAlexander Aring 
19108728a455SAlexander Aring 	if (!test_and_set_bit(CF_CONNECTED, &con->flags))
19118728a455SAlexander Aring 		log_print("successfully connected to node %d", con->nodeid);
19128728a455SAlexander Aring 
19138728a455SAlexander Aring 	return 0;
19148728a455SAlexander Aring }
19158728a455SAlexander Aring 
191690d21fc0SAlexander Aring static int dlm_sctp_listen_validate(void)
191790d21fc0SAlexander Aring {
191890d21fc0SAlexander Aring 	if (!IS_ENABLED(CONFIG_IP_SCTP)) {
191990d21fc0SAlexander Aring 		log_print("SCTP is not enabled by this kernel");
192090d21fc0SAlexander Aring 		return -EOPNOTSUPP;
192190d21fc0SAlexander Aring 	}
192290d21fc0SAlexander Aring 
192390d21fc0SAlexander Aring 	request_module("sctp");
192490d21fc0SAlexander Aring 	return 0;
192590d21fc0SAlexander Aring }
192690d21fc0SAlexander Aring 
19272dc6b115SAlexander Aring static int dlm_sctp_bind_listen(struct socket *sock)
19282dc6b115SAlexander Aring {
19292dc6b115SAlexander Aring 	return sctp_bind_addrs(sock, dlm_config.ci_tcp_port);
19302dc6b115SAlexander Aring }
19312dc6b115SAlexander Aring 
19322dc6b115SAlexander Aring static void dlm_sctp_sockopts(struct socket *sock)
19332dc6b115SAlexander Aring {
19342dc6b115SAlexander Aring 	/* Turn off Nagle's algorithm */
19352dc6b115SAlexander Aring 	sctp_sock_set_nodelay(sock->sk);
19362dc6b115SAlexander Aring 	sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
19372dc6b115SAlexander Aring }
19382dc6b115SAlexander Aring 
1939a66c008cSAlexander Aring static const struct dlm_proto_ops dlm_sctp_ops = {
19402dc6b115SAlexander Aring 	.name = "SCTP",
19412dc6b115SAlexander Aring 	.proto = IPPROTO_SCTP,
19428728a455SAlexander Aring 	.try_new_addr = true,
19438728a455SAlexander Aring 	.connect = dlm_sctp_connect,
19448728a455SAlexander Aring 	.sockopts = dlm_sctp_sockopts,
19458728a455SAlexander Aring 	.bind = dlm_sctp_bind,
194690d21fc0SAlexander Aring 	.listen_validate = dlm_sctp_listen_validate,
19472dc6b115SAlexander Aring 	.listen_sockopts = dlm_sctp_sockopts,
19482dc6b115SAlexander Aring 	.listen_bind = dlm_sctp_bind_listen,
1949a66c008cSAlexander Aring };
1950a66c008cSAlexander Aring 
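/*
 * dlm_lowcomms_start - bring up the comms layer: collect the local
 * addresses, start the send/receive workqueues, select dlm_tcp_ops or
 * dlm_sctp_ops based on dlm_config.ci_protocol and create the listening
 * socket.
 */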
19516ed7257bSPatrick Caulfield int dlm_lowcomms_start(void)
19526ed7257bSPatrick Caulfield {
19536ed7257bSPatrick Caulfield 	int error = -EINVAL;
19545e9ccc37SChristine Caulfield 	int i;
19555e9ccc37SChristine Caulfield 
19565e9ccc37SChristine Caulfield 	for (i = 0; i < CONN_HASH_SIZE; i++)
19575e9ccc37SChristine Caulfield 		INIT_HLIST_HEAD(&connection_hash[i]);
19586ed7257bSPatrick Caulfield 
19596ed7257bSPatrick Caulfield 	init_local();
19606ed7257bSPatrick Caulfield 	if (!dlm_local_count) {
1961617e82e1SDavid Teigland 		error = -ENOTCONN;
19626ed7257bSPatrick Caulfield 		log_print("no local IP address has been set");
1963513ef596SDavid Teigland 		goto fail;
19646ed7257bSPatrick Caulfield 	}
19656ed7257bSPatrick Caulfield 
1966d11ccd45SAlexander Aring 	INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
1967d11ccd45SAlexander Aring 
1968513ef596SDavid Teigland 	error = work_start();
1969513ef596SDavid Teigland 	if (error)
1970fcef0e6cSAlexander Aring 		goto fail_local;
1971513ef596SDavid Teigland 
1972513ef596SDavid Teigland 	dlm_allow_conn = 1;
19736ed7257bSPatrick Caulfield 
19746ed7257bSPatrick Caulfield 	/* Start listening */
1975ac7d5d03SAlexander Aring 	switch (dlm_config.ci_protocol) {
1976ac7d5d03SAlexander Aring 	case DLM_PROTO_TCP:
1977a66c008cSAlexander Aring 		dlm_proto_ops = &dlm_tcp_ops;
1978ac7d5d03SAlexander Aring 		break;
1979ac7d5d03SAlexander Aring 	case DLM_PROTO_SCTP:
1980a66c008cSAlexander Aring 		dlm_proto_ops = &dlm_sctp_ops;
1981ac7d5d03SAlexander Aring 		break;
1982ac7d5d03SAlexander Aring 	default:
1983ac7d5d03SAlexander Aring 		log_print("Invalid protocol identifier %d set",
1984ac7d5d03SAlexander Aring 			  dlm_config.ci_protocol);
1985ac7d5d03SAlexander Aring 		error = -EINVAL;
19862dc6b115SAlexander Aring 		goto fail_proto_ops;
1987ac7d5d03SAlexander Aring 	}
19882dc6b115SAlexander Aring 
19892dc6b115SAlexander Aring 	error = dlm_listen_for_all();
19906ed7257bSPatrick Caulfield 	if (error)
19912dc6b115SAlexander Aring 		goto fail_listen;
19926ed7257bSPatrick Caulfield 
19936ed7257bSPatrick Caulfield 	return 0;
19946ed7257bSPatrick Caulfield 
19952dc6b115SAlexander Aring fail_listen:
19962dc6b115SAlexander Aring 	dlm_proto_ops = NULL;
19972dc6b115SAlexander Aring fail_proto_ops:
1998513ef596SDavid Teigland 	dlm_allow_conn = 0;
1999d11ccd45SAlexander Aring 	dlm_close_sock(&listen_con.sock);
2000fcef0e6cSAlexander Aring 	work_stop();
2001fcef0e6cSAlexander Aring fail_local:
2002fcef0e6cSAlexander Aring 	deinit_local();
2003513ef596SDavid Teigland fail:
20046ed7257bSPatrick Caulfield 	return error;
20056ed7257bSPatrick Caulfield }
200636b71a8bSDavid Teigland 
200736b71a8bSDavid Teigland void dlm_lowcomms_exit(void)
200836b71a8bSDavid Teigland {
200936b71a8bSDavid Teigland 	struct dlm_node_addr *na, *safe;
201036b71a8bSDavid Teigland 
201136b71a8bSDavid Teigland 	spin_lock(&dlm_node_addrs_spin);
201236b71a8bSDavid Teigland 	list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
201336b71a8bSDavid Teigland 		list_del(&na->list);
201436b71a8bSDavid Teigland 		while (na->addr_count--)
201536b71a8bSDavid Teigland 			kfree(na->addr[na->addr_count]);
201636b71a8bSDavid Teigland 		kfree(na);
201736b71a8bSDavid Teigland 	}
201836b71a8bSDavid Teigland 	spin_unlock(&dlm_node_addrs_spin);
201936b71a8bSDavid Teigland }
2020