// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/igmp.h>
#include <linux/xarray.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/netevent.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip_fib.h>
#include <net/ip6_route.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"
#include "cma_priv.h"
#include "cma_trace.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 16
#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP

static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED]	 = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR]	 = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED]	 = "route resolved ",
	[RDMA_CM_EVENT_ROUTE_ERROR]	 = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST]	 = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR]	 = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE]	 = "unreachable",
	[RDMA_CM_EVENT_REJECTED]	 = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED]	 = "established",
	[RDMA_CM_EVENT_DISCONNECTED]	 = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL]	 = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN]	 = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR]	 = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE]	 = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT]	 = "timewait exit",
};

static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
			      enum ib_gid_type gid_type);

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
			cma_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(rdma_event_msg);
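
/*
 * Illustrative use only (not part of the original file): a ULP's event
 * handler might log events by name, e.g.
 *
 *	static int my_cm_handler(struct rdma_cm_id *id,
 *				 struct rdma_cm_event *event)
 *	{
 *		pr_debug("cm event %s, status %d\n",
 *			 rdma_event_msg(event->event), event->status);
 *		return 0;
 *	}
 *
 * "my_cm_handler" is a hypothetical consumer, shown only to sketch how
 * rdma_event_msg() is typically consumed.
 */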

const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
						int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return ibcm_reject_msg(reason);

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return iwcm_reject_msg(reason);

	WARN_ON_ONCE(1);
	return "unrecognized transport";
}
EXPORT_SYMBOL(rdma_reject_msg);

/**
 * rdma_is_consumer_reject - return true if the consumer rejected the connect
 *                           request.
 * @id: Communication identifier that received the REJECT event.
 * @reason: Value returned in the REJECT event status field.
 */
static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
{
	if (rdma_ib_or_roce(id->device, id->port_num))
		return reason == IB_CM_REJ_CONSUMER_DEFINED;

	if (rdma_protocol_iwarp(id->device, id->port_num))
		return reason == -ECONNREFUSED;

	WARN_ON_ONCE(1);
	return false;
}

const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
				      struct rdma_cm_event *ev, u8 *data_len)
{
	const void *p;

	if (rdma_is_consumer_reject(id, ev->status)) {
		*data_len = ev->param.conn.private_data_len;
		p = ev->param.conn.private_data;
	} else {
		*data_len = 0;
		p = NULL;
	}
	return p;
}
EXPORT_SYMBOL(rdma_consumer_reject_data);
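
/*
 * Illustrative use only (not part of the original file): on
 * RDMA_CM_EVENT_REJECTED a consumer can retrieve any peer-supplied reject
 * payload, e.g.
 *
 *	u8 len;
 *	const void *data = rdma_consumer_reject_data(id, event, &len);
 *
 * "data" then points at "len" bytes of private data when the reject was
 * consumer-defined, and is NULL otherwise. The variable names are
 * hypothetical; only the call signature comes from the function above.
 */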

/**
 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
 * @id: Communication Identifier
 */
struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device->node_type == RDMA_NODE_RNIC)
		return id_priv->cm_id.iw;
	return NULL;
}
EXPORT_SYMBOL(rdma_iw_cm_id);

/**
 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
 * @res: rdma resource tracking entry pointer
 */
struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
{
	struct rdma_id_private *id_priv =
		container_of(res, struct rdma_id_private, res);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_res_to_id);

static int cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};
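
/*
 * Added commentary: cma_client is registered with the ib_core client
 * framework, so cma_add_one()/cma_remove_one() run for every RDMA device as
 * it appears or goes away. The registration itself happens later in this
 * file, in the module init path.
 */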

static struct ib_sa_client sa_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct rb_root id_table = RB_ROOT;
/* Serialize operations of id_table tree */
static DEFINE_SPINLOCK(id_table_lock);
static struct workqueue_struct *cma_wq;
static unsigned int cma_pernet_id;

struct cma_pernet {
	struct xarray tcp_ps;
	struct xarray udp_ps;
	struct xarray ipoib_ps;
	struct xarray ib_ps;
};

static struct cma_pernet *cma_pernet(struct net *net)
{
	return net_generic(net, cma_pernet_id);
}

static
struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
{
	struct cma_pernet *pernet = cma_pernet(net);

	switch (ps) {
	case RDMA_PS_TCP:
		return &pernet->tcp_ps;
	case RDMA_PS_UDP:
		return &pernet->udp_ps;
	case RDMA_PS_IPOIB:
		return &pernet->ipoib_ps;
	case RDMA_PS_IB:
		return &pernet->ib_ps;
	default:
		return NULL;
	}
}

struct id_table_entry {
	struct list_head id_list;
	struct rb_node rb_node;
};

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	struct completion	comp;
	refcount_t refcount;
	struct list_head	id_list;
	enum ib_gid_type	*default_gid_type;
	u8			*default_roce_tos;
};

struct rdma_bind_list {
	enum rdma_ucm_port_space ps;
	struct hlist_head	owners;
	unsigned short		port;
};

static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
			struct rdma_bind_list *bind_list, int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	return xa_insert(xa, snum, bind_list, GFP_KERNEL);
}

static struct rdma_bind_list *cma_ps_find(struct net *net,
					  enum rdma_ucm_port_space ps, int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	return xa_load(xa, snum);
}

static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
			  int snum)
{
	struct xarray *xa = cma_pernet_xa(net, ps);

	xa_erase(xa, snum);
}
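
/*
 * Illustrative sketch (not part of the original file) of how the three
 * port-space helpers above cooperate: a bind reserves a port number in the
 * per-net xarray for its port space, lookups resolve a port number back to
 * its bind list, and the entry is erased when the last owner goes away, e.g.
 *
 *	err = cma_ps_alloc(net, RDMA_PS_TCP, bind_list, 1234);
 *	...
 *	bind_list = cma_ps_find(net, RDMA_PS_TCP, 1234);
 *	...
 *	cma_ps_remove(net, RDMA_PS_TCP, 1234);
 *
 * Port 1234 is an arbitrary example value.
 */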

enum {
	CMA_OPTION_AFONLY,
};

void cma_dev_get(struct cma_device *cma_dev)
{
	refcount_inc(&cma_dev->refcount);
}

void cma_dev_put(struct cma_device *cma_dev)
{
	if (refcount_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter	filter,
					     void		*cookie)
{
	struct cma_device *cma_dev;
	struct cma_device *found_cma_dev = NULL;

	mutex_lock(&lock);

	list_for_each_entry(cma_dev, &dev_list, list)
		if (filter(cma_dev->device, cookie)) {
			found_cma_dev = cma_dev;
			break;
		}

	if (found_cma_dev)
		cma_dev_get(found_cma_dev);
	mutex_unlock(&lock);
	return found_cma_dev;
}

int cma_get_default_gid_type(struct cma_device *cma_dev,
			     u32 port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_gid_type(struct cma_device *cma_dev,
			     u32 port,
			     enum ib_gid_type default_gid_type)
{
	unsigned long supported_gids;

	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	if (default_gid_type == IB_GID_TYPE_IB &&
	    rdma_protocol_roce_eth_encap(cma_dev->device, port))
		default_gid_type = IB_GID_TYPE_ROCE;

	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

	if (!(supported_gids & 1 << default_gid_type))
		return -EINVAL;

	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
		default_gid_type;

	return 0;
}

int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
}

int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
			     u8 default_roce_tos)
{
	if (!rdma_is_port_valid(cma_dev->device, port))
		return -EINVAL;

	cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
		 default_roce_tos;

	return 0;
}
struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
{
	return cma_dev->device;
}

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in
 * process, and reporting it after the callback completes.
 */

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *sa_mc;
		struct {
			struct work_struct work;
			struct rdma_cm_event event;
		} iboe_join;
	};
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
	u8			join_state;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum rdma_cm_state	old_state;
	enum rdma_cm_state	new_state;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__be32 pad[3];
		__be32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;	/* IP version: 7:4 */
	__be16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

#define CMA_VERSION 0x00

struct cma_req_info {
	struct sockaddr_storage listen_addr_storage;
	struct sockaddr_storage src_addr_storage;
	struct ib_device *device;
	union ib_gid local_gid;
	__be64 service_id;
	int port;
	bool has_gid;
	u16 pkey;
};

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
{
	unsigned long flags;
	int ret;

	/*
	 * The FSM uses a funny double locking where state is protected by both
	 * the handler_mutex and the spinlock. State is not allowed to change
	 * to/from a handler_mutex protected value without also holding
	 * handler_mutex.
	 */
	if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
		lockdep_assert_held(&id_priv->handler_mutex);

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
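
/*
 * Illustrative use only (not part of the original file): callers elsewhere in
 * cma.c use cma_comp_exch() to make a state transition conditional on the
 * current state, e.g. a disconnect path might do
 *
 *	if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_DISCONNECT))
 *		return -EINVAL;
 *
 * so the transition happens only if the id was still in RDMA_CM_CONNECT.
 * The exact call sites and error codes are a sketch, not a quote.
 */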

static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *)&id_priv->id.route.addr.src_addr;
}

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
{
	return (struct sockaddr *)&id_priv->id.route.addr.dst_addr;
}

static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
{
	struct in_device *in_dev = NULL;

	if (ndev) {
		rtnl_lock();
		in_dev = __in_dev_get_rtnl(ndev);
		if (in_dev) {
			if (join)
				ip_mc_inc_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
			else
				ip_mc_dec_group(in_dev,
						*(__be32 *)(mgid->raw + 12));
		}
		rtnl_unlock();
	}
	return (in_dev) ? 0 : -ENODEV;
}

static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
				 struct id_table_entry *entry_b)
{
	struct rdma_id_private *id_priv = list_first_entry(
		&entry_b->id_list, struct rdma_id_private, id_list_entry);
	int ifindex_b = id_priv->id.route.addr.dev_addr.bound_dev_if;
	struct sockaddr *sb = cma_dst_addr(id_priv);

	if (ifindex_a != ifindex_b)
		return (ifindex_a > ifindex_b) ? 1 : -1;

	if (sa->sa_family != sb->sa_family)
		return sa->sa_family - sb->sa_family;

	if (sa->sa_family == AF_INET &&
	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) {
		return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
			      &((struct sockaddr_in *)sb)->sin_addr,
			      sizeof(((struct sockaddr_in *)sa)->sin_addr));
	}

	if (sa->sa_family == AF_INET6 &&
	    __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) {
		return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
				     &((struct sockaddr_in6 *)sb)->sin6_addr);
	}

	return -1;
}

static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
{
	struct rb_node **new, *parent = NULL;
	struct id_table_entry *this, *node;
	unsigned long flags;
	int result;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	spin_lock_irqsave(&id_table_lock, flags);
	new = &id_table.rb_node;
	while (*new) {
		this = container_of(*new, struct id_table_entry, rb_node);
		result = compare_netdev_and_ip(
			node_id_priv->id.route.addr.dev_addr.bound_dev_if,
			cma_dst_addr(node_id_priv), this);

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else if (result > 0)
			new = &((*new)->rb_right);
		else {
			list_add_tail(&node_id_priv->id_list_entry,
				      &this->id_list);
			kfree(node);
			goto unlock;
		}
	}

	INIT_LIST_HEAD(&node->id_list);
	list_add_tail(&node_id_priv->id_list_entry, &node->id_list);

	rb_link_node(&node->rb_node, parent, new);
	rb_insert_color(&node->rb_node, &id_table);

unlock:
	spin_unlock_irqrestore(&id_table_lock, flags);
	return 0;
}
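
/*
 * Added commentary: the id_table rbtree is keyed by (bound ifindex,
 * destination address). IDs that share the same key are chained on a single
 * id_table_entry's id_list rather than getting their own tree node, which is
 * why an equal comparison above appends to this->id_list and frees the
 * freshly allocated node.
 */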

static struct id_table_entry *
node_from_ndev_ip(struct rb_root *root, int ifindex, struct sockaddr *sa)
{
	struct rb_node *node = root->rb_node;
	struct id_table_entry *data;
	int result;

	while (node) {
		data = container_of(node, struct id_table_entry, rb_node);
		result = compare_netdev_and_ip(ifindex, sa, data);
		if (result < 0)
			node = node->rb_left;
		else if (result > 0)
			node = node->rb_right;
		else
			return data;
	}

	return NULL;
}

static void cma_remove_id_from_tree(struct rdma_id_private *id_priv)
{
	struct id_table_entry *data;
	unsigned long flags;

	spin_lock_irqsave(&id_table_lock, flags);
	if (list_empty(&id_priv->id_list_entry))
		goto out;

	data = node_from_ndev_ip(&id_table,
				 id_priv->id.route.addr.dev_addr.bound_dev_if,
				 cma_dst_addr(id_priv));
	if (!data)
		goto out;

	list_del_init(&id_priv->id_list_entry);
	if (list_empty(&data->id_list)) {
		rb_erase(&data->rb_node, &id_table);
		kfree(data);
	}
out:
	spin_unlock_irqrestore(&id_table_lock, flags);
}

static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
			       struct cma_device *cma_dev)
{
	cma_dev_get(cma_dev);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->device_item, &cma_dev->id_list);

	trace_cm_id_attach(id_priv, cma_dev->device);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	_cma_attach_to_dev(id_priv, cma_dev);
	id_priv->gid_type =
		cma_dev->default_gid_type[id_priv->id.port_num -
					  rdma_start_port(cma_dev->device)];
}

static void cma_release_dev(struct rdma_id_private *id_priv)
{
	mutex_lock(&lock);
	list_del_init(&id_priv->device_item);
	cma_dev_put(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
	id_priv->id.device = NULL;
	if (id_priv->id.route.addr.dev_addr.sgid_attr) {
		rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
		id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
	}
	mutex_unlock(&lock);
}

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
{
	return id_priv->id.route.addr.src_addr.ss_family;
}

static int cma_set_default_qkey(struct rdma_id_private *id_priv)
{
	struct ib_sa_mcmember_rec rec;
	int ret = 0;

	switch (id_priv->id.ps) {
	case RDMA_PS_UDP:
	case RDMA_PS_IB:
		id_priv->qkey = RDMA_UDP_QKEY;
		break;
	case RDMA_PS_IPOIB:
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
					     &rec);
		if (!ret)
			id_priv->qkey = be32_to_cpu(rec.qkey);
		break;
	default:
		break;
	}
	return ret;
}

static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
{
	if (!qkey ||
	    (id_priv->qkey && (id_priv->qkey != qkey)))
		return -EINVAL;

	id_priv->qkey = qkey;
	return 0;
}

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
{
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
}

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	int ret;

	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr);
	} else {
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
		ret = 0;
	}

	return ret;
}

static const struct ib_gid_attr *
cma_validate_port(struct ib_device *device, u32 port,
		  enum ib_gid_type gid_type,
		  union ib_gid *gid,
		  struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr = ERR_PTR(-ENODEV);
	int bound_if_index = dev_addr->bound_dev_if;
	int dev_type = dev_addr->dev_type;
	struct net_device *ndev = NULL;

	if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
		goto out;

	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
		goto out;

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
		goto out;

	/*
	 * For drivers that do not associate more than one net device with
	 * their gid tables, such as iWARP drivers, it is sufficient to
	 * return the first table entry.
	 *
	 * Other driver classes might be included in the future.
	 */
	if (rdma_protocol_iwarp(device, port)) {
		sgid_attr = rdma_get_gid_attr(device, port, 0);
		if (IS_ERR(sgid_attr))
			goto out;

		rcu_read_lock();
		ndev = rcu_dereference(sgid_attr->ndev);
		if (!net_eq(dev_net(ndev), dev_addr->net) ||
		    ndev->ifindex != bound_if_index)
			sgid_attr = ERR_PTR(-ENODEV);
		rcu_read_unlock();
		goto out;
	}

	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
		ndev = dev_get_by_index(dev_addr->net, bound_if_index);
		if (!ndev)
			goto out;
	} else {
		gid_type = IB_GID_TYPE_IB;
	}

	sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
	dev_put(ndev);
out:
	return sgid_attr;
}

static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
			       const struct ib_gid_attr *sgid_attr)
{
	WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
	id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
}

/**
 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
 * based on source ip address.
 * @id_priv:	cm_id which should be bound to cma device
 *
 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
 * based on source IP address. It returns 0 on success or error code otherwise.
 * It is applicable to active and passive side cm_id.
 */
static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	union ib_gid gid, iboe_gid, *gidp;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	u32 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &iboe_gid);

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	mutex_lock(&lock);
	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			       &iboe_gid : &gid;
			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, gidp, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				cma_attach_to_dev(id_priv, cma_dev);
				ret = 0;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&lock);
	return ret;
}

/**
 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
 * @id_priv:		cm id to bind to cma device
 * @listen_id_priv:	listener cm id to match against
 * @req:		Pointer to req structure containing incoming
 *			request information
 * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when
 * the rdma device matches for listen_id and incoming request. It also
 * verifies that a GID table entry is present for the source address.
 * Returns 0 on success, or returns error code otherwise.
 */
static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv,
			      struct cma_req_info *req)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	enum ib_gid_type gid_type;
	union ib_gid gid;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	if (rdma_protocol_roce(req->device, req->port))
		rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
			    &gid);
	else
		memcpy(&gid, dev_addr->src_dev_addr +
		       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
	sgid_attr = cma_validate_port(req->device, req->port,
				      gid_type, &gid, id_priv);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	id_priv->id.port_num = req->port;
	cma_bind_sgid_attr(id_priv, sgid_attr);
	/* Need to acquire lock to protect against reader
	 * of cma_dev->id_list such as cma_netdev_callback() and
	 * cma_process_remove().
	 */
	mutex_lock(&lock);
	cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
	mutex_unlock(&lock);
	rdma_restrack_add(&id_priv->res);
	return 0;
}

static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
			      const struct rdma_id_private *listen_id_priv)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	const struct ib_gid_attr *sgid_attr;
	struct cma_device *cma_dev;
	enum ib_gid_type gid_type;
	int ret = -ENODEV;
	union ib_gid gid;
	u32 port;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)
		return -EINVAL;

	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof(gid));

	mutex_lock(&lock);

	cma_dev = listen_id_priv->cma_dev;
	port = listen_id_priv->id.port_num;
	gid_type = listen_id_priv->gid_type;
	sgid_attr = cma_validate_port(cma_dev->device, port,
				      gid_type, &gid, id_priv);
	if (!IS_ERR(sgid_attr)) {
		id_priv->id.port_num = port;
		cma_bind_sgid_attr(id_priv, sgid_attr);
		ret = 0;
		goto out;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		rdma_for_each_port (cma_dev->device, port) {
			if (listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;

			gid_type = cma_dev->default_gid_type[port - 1];
			sgid_attr = cma_validate_port(cma_dev->device, port,
						      gid_type, &gid, id_priv);
			if (!IS_ERR(sgid_attr)) {
				id_priv->id.port_num = port;
				cma_bind_sgid_attr(id_priv, sgid_attr);
				ret = 0;
				goto out;
			}
		}
	}

out:
	if (!ret) {
		cma_attach_to_dev(id_priv, cma_dev);
		rdma_restrack_add(&id_priv->res);
	}

	mutex_unlock(&lock);
	return ret;
}

/*
 * Select the source IB device and address to reach the destination IB address.
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	unsigned int p;
	u16 pkey, index;
	enum ib_port_state port_state;
	int ret;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		rdma_for_each_port (cur_dev->device, p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))
				continue;

			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
				continue;

			for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
			     ++i) {
				ret = rdma_query_gid(cur_dev->device, p, i,
						     &gid);
				if (ret)
					continue;

				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				if (!cma_dev && (gid.global.subnet_prefix ==
				    dgid->global.subnet_prefix) &&
				    port_state == IB_PORT_ACTIVE) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}
			}
		}
	}
	mutex_unlock(&lock);
	return -ENODEV;

found:
	cma_attach_to_dev(id_priv, cma_dev);
	rdma_restrack_add(&id_priv->res);
	mutex_unlock(&lock);
	addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}

static void cma_id_get(struct rdma_id_private *id_priv)
{
	refcount_inc(&id_priv->refcount);
}

static void cma_id_put(struct rdma_id_private *id_priv)
{
	if (refcount_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static struct rdma_id_private *
__rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
		 void *context, enum rdma_ucm_port_space ps,
		 enum ib_qp_type qp_type, const struct rdma_id_private *parent)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	id_priv->tos_set = false;
	id_priv->timeout_set = false;
	id_priv->min_rnr_timer_set = false;
	id_priv->gid_type = IB_GID_TYPE_IB;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	refcount_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->device_item);
	INIT_LIST_HEAD(&id_priv->id_list_entry);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->id.route.addr.dev_addr.net = get_net(net);
	id_priv->seq_num &= 0x00ffffff;

	rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
	if (parent)
		rdma_restrack_parent_name(&id_priv->res, &parent->res);

	return id_priv;
}

struct rdma_cm_id *
__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
			void *context, enum rdma_ucm_port_space ps,
			enum ib_qp_type qp_type, const char *caller)
{
	struct rdma_id_private *ret;

	ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL);
	if (IS_ERR(ret))
		return ERR_CAST(ret);

	rdma_restrack_set_name(&ret->res, caller);
	return &ret->id;
}
EXPORT_SYMBOL(__rdma_create_kernel_id);

struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
				       void *context,
				       enum rdma_ucm_port_space ps,
				       enum ib_qp_type qp_type)
{
	struct rdma_id_private *ret;

	ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
			       ps, qp_type, NULL);
	if (IS_ERR(ret))
		return ERR_CAST(ret);

	rdma_restrack_set_name(&ret->res, NULL);
	return &ret->id;
}
EXPORT_SYMBOL(rdma_create_user_id);
1058e51060f0SSean Hefty 
1059c8f6a362SSean Hefty static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
1060e51060f0SSean Hefty {
1061e51060f0SSean Hefty 	struct ib_qp_attr qp_attr;
1062c8f6a362SSean Hefty 	int qp_attr_mask, ret;
1063e51060f0SSean Hefty 
1064c8f6a362SSean Hefty 	qp_attr.qp_state = IB_QPS_INIT;
1065c8f6a362SSean Hefty 	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1066e51060f0SSean Hefty 	if (ret)
1067e51060f0SSean Hefty 		return ret;
1068e51060f0SSean Hefty 
1069c8f6a362SSean Hefty 	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1070c8f6a362SSean Hefty 	if (ret)
1071c8f6a362SSean Hefty 		return ret;
1072c8f6a362SSean Hefty 
1073c8f6a362SSean Hefty 	qp_attr.qp_state = IB_QPS_RTR;
1074c8f6a362SSean Hefty 	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
1075c8f6a362SSean Hefty 	if (ret)
1076c8f6a362SSean Hefty 		return ret;
1077c8f6a362SSean Hefty 
1078c8f6a362SSean Hefty 	qp_attr.qp_state = IB_QPS_RTS;
1079c8f6a362SSean Hefty 	qp_attr.sq_psn = 0;
1080c8f6a362SSean Hefty 	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
1081c8f6a362SSean Hefty 
1082c8f6a362SSean Hefty 	return ret;
1083e51060f0SSean Hefty }
1084e51060f0SSean Hefty 
1085db4657afSMike Marciniszyn static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
1086db4657afSMike Marciniszyn {
1087db4657afSMike Marciniszyn 	struct ib_qp_attr qp_attr;
1088db4657afSMike Marciniszyn 	int qp_attr_mask, ret;
1089db4657afSMike Marciniszyn 
1090db4657afSMike Marciniszyn 	qp_attr.qp_state = IB_QPS_INIT;
1091db4657afSMike Marciniszyn 	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1092db4657afSMike Marciniszyn 	if (ret)
1093db4657afSMike Marciniszyn 		return ret;
1094db4657afSMike Marciniszyn 
1095db4657afSMike Marciniszyn 	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1096db4657afSMike Marciniszyn }
1097db4657afSMike Marciniszyn 
1098e51060f0SSean Hefty int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
1099e51060f0SSean Hefty 		   struct ib_qp_init_attr *qp_init_attr)
1100e51060f0SSean Hefty {
1101e51060f0SSean Hefty 	struct rdma_id_private *id_priv;
1102e51060f0SSean Hefty 	struct ib_qp *qp;
1103db4657afSMike Marciniszyn 	int ret;
1104e51060f0SSean Hefty 
1105e51060f0SSean Hefty 	id_priv = container_of(id, struct rdma_id_private, id);
1106ed999f82SChuck Lever 	if (id->device != pd->device) {
1107ed999f82SChuck Lever 		ret = -EINVAL;
1108ed999f82SChuck Lever 		goto out_err;
1109ed999f82SChuck Lever 	}
1110e51060f0SSean Hefty 
11110691a286SChristoph Hellwig 	qp_init_attr->port_num = id->port_num;
1112e51060f0SSean Hefty 	qp = ib_create_qp(pd, qp_init_attr);
1113ed999f82SChuck Lever 	if (IS_ERR(qp)) {
1114ed999f82SChuck Lever 		ret = PTR_ERR(qp);
1115ed999f82SChuck Lever 		goto out_err;
1116ed999f82SChuck Lever 	}
1117e51060f0SSean Hefty 
1118b26f9b99SSean Hefty 	if (id->qp_type == IB_QPT_UD)
1119c8f6a362SSean Hefty 		ret = cma_init_ud_qp(id_priv, qp);
1120db4657afSMike Marciniszyn 	else
1121db4657afSMike Marciniszyn 		ret = cma_init_conn_qp(id_priv, qp);
1122e51060f0SSean Hefty 	if (ret)
1123ed999f82SChuck Lever 		goto out_destroy;
1124e51060f0SSean Hefty 
1125e51060f0SSean Hefty 	id->qp = qp;
1126e51060f0SSean Hefty 	id_priv->qp_num = qp->qp_num;
1127e51060f0SSean Hefty 	id_priv->srq = (qp->srq != NULL);
1128ed999f82SChuck Lever 	trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
1129e51060f0SSean Hefty 	return 0;
1130ed999f82SChuck Lever out_destroy:
1131e51060f0SSean Hefty 	ib_destroy_qp(qp);
1132ed999f82SChuck Lever out_err:
1133ed999f82SChuck Lever 	trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
1134e51060f0SSean Hefty 	return ret;
1135e51060f0SSean Hefty }
1136e51060f0SSean Hefty EXPORT_SYMBOL(rdma_create_qp);
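/*
 * Usage sketch (illustrative, not taken from this file): after
 * RDMA_CM_EVENT_ROUTE_RESOLVED a ULP typically allocates its PD and CQs
 * and lets the CM create and transition the QP.  The queue sizes below
 * are arbitrary example values:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.qp_type	= IB_QPT_RC,
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.cap = {
 *			.max_send_wr	= 16,
 *			.max_recv_wr	= 16,
 *			.max_send_sge	= 1,
 *			.max_recv_sge	= 1,
 *		},
 *	};
 *	int ret;
 *
 *	ret = rdma_create_qp(id, pd, &init_attr);
 *	if (ret)
 *		goto out_free_cq;
 *
 * rdma_create_qp() fills in init_attr.port_num itself and, on success,
 * leaves id->qp ready for rdma_connect() or rdma_accept().
 */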
1137e51060f0SSean Hefty 
1138e51060f0SSean Hefty void rdma_destroy_qp(struct rdma_cm_id *id)
1139e51060f0SSean Hefty {
1140c5483388SSean Hefty 	struct rdma_id_private *id_priv;
1141c5483388SSean Hefty 
1142c5483388SSean Hefty 	id_priv = container_of(id, struct rdma_id_private, id);
1143ed999f82SChuck Lever 	trace_cm_qp_destroy(id_priv);
1144c5483388SSean Hefty 	mutex_lock(&id_priv->qp_mutex);
1145c5483388SSean Hefty 	ib_destroy_qp(id_priv->id.qp);
1146c5483388SSean Hefty 	id_priv->id.qp = NULL;
1147c5483388SSean Hefty 	mutex_unlock(&id_priv->qp_mutex);
1148e51060f0SSean Hefty }
1149e51060f0SSean Hefty EXPORT_SYMBOL(rdma_destroy_qp);
1150e51060f0SSean Hefty 
11515851bb89SSean Hefty static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
11525851bb89SSean Hefty 			     struct rdma_conn_param *conn_param)
1153e51060f0SSean Hefty {
1154e51060f0SSean Hefty 	struct ib_qp_attr qp_attr;
1155e51060f0SSean Hefty 	int qp_attr_mask, ret;
1156e51060f0SSean Hefty 
1157c5483388SSean Hefty 	mutex_lock(&id_priv->qp_mutex);
1158c5483388SSean Hefty 	if (!id_priv->id.qp) {
1159c5483388SSean Hefty 		ret = 0;
1160c5483388SSean Hefty 		goto out;
1161c5483388SSean Hefty 	}
1162e51060f0SSean Hefty 
1163e51060f0SSean Hefty 	/* Need to update QP attributes from default values. */
1164e51060f0SSean Hefty 	qp_attr.qp_state = IB_QPS_INIT;
1165c5483388SSean Hefty 	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1166e51060f0SSean Hefty 	if (ret)
1167c5483388SSean Hefty 		goto out;
1168e51060f0SSean Hefty 
1169c5483388SSean Hefty 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1170e51060f0SSean Hefty 	if (ret)
1171c5483388SSean Hefty 		goto out;
1172e51060f0SSean Hefty 
1173e51060f0SSean Hefty 	qp_attr.qp_state = IB_QPS_RTR;
1174c5483388SSean Hefty 	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1175e51060f0SSean Hefty 	if (ret)
1176c5483388SSean Hefty 		goto out;
1177e51060f0SSean Hefty 
1178fef60902SMichael Wang 	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
1179fef60902SMichael Wang 
11805851bb89SSean Hefty 	if (conn_param)
11815851bb89SSean Hefty 		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
1182c5483388SSean Hefty 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1183c5483388SSean Hefty out:
1184c5483388SSean Hefty 	mutex_unlock(&id_priv->qp_mutex);
1185c5483388SSean Hefty 	return ret;
1186e51060f0SSean Hefty }
1187e51060f0SSean Hefty 
11885851bb89SSean Hefty static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
11895851bb89SSean Hefty 			     struct rdma_conn_param *conn_param)
1190e51060f0SSean Hefty {
1191e51060f0SSean Hefty 	struct ib_qp_attr qp_attr;
1192e51060f0SSean Hefty 	int qp_attr_mask, ret;
1193e51060f0SSean Hefty 
1194c5483388SSean Hefty 	mutex_lock(&id_priv->qp_mutex);
1195c5483388SSean Hefty 	if (!id_priv->id.qp) {
1196c5483388SSean Hefty 		ret = 0;
1197c5483388SSean Hefty 		goto out;
1198e51060f0SSean Hefty 	}
1199e51060f0SSean Hefty 
1200c5483388SSean Hefty 	qp_attr.qp_state = IB_QPS_RTS;
1201c5483388SSean Hefty 	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1202c5483388SSean Hefty 	if (ret)
1203c5483388SSean Hefty 		goto out;
1204c5483388SSean Hefty 
12055851bb89SSean Hefty 	if (conn_param)
12065851bb89SSean Hefty 		qp_attr.max_rd_atomic = conn_param->initiator_depth;
1207c5483388SSean Hefty 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1208c5483388SSean Hefty out:
1209c5483388SSean Hefty 	mutex_unlock(&id_priv->qp_mutex);
1210c5483388SSean Hefty 	return ret;
1211c5483388SSean Hefty }
1212c5483388SSean Hefty 
1213c5483388SSean Hefty static int cma_modify_qp_err(struct rdma_id_private *id_priv)
1214e51060f0SSean Hefty {
1215e51060f0SSean Hefty 	struct ib_qp_attr qp_attr;
1216c5483388SSean Hefty 	int ret;
1217e51060f0SSean Hefty 
1218c5483388SSean Hefty 	mutex_lock(&id_priv->qp_mutex);
1219c5483388SSean Hefty 	if (!id_priv->id.qp) {
1220c5483388SSean Hefty 		ret = 0;
1221c5483388SSean Hefty 		goto out;
1222c5483388SSean Hefty 	}
1223e51060f0SSean Hefty 
1224e51060f0SSean Hefty 	qp_attr.qp_state = IB_QPS_ERR;
1225c5483388SSean Hefty 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
1226c5483388SSean Hefty out:
1227c5483388SSean Hefty 	mutex_unlock(&id_priv->qp_mutex);
1228c5483388SSean Hefty 	return ret;
1229e51060f0SSean Hefty }
1230e51060f0SSean Hefty 
1231c8f6a362SSean Hefty static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
1232c8f6a362SSean Hefty 			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
1233c8f6a362SSean Hefty {
1234c8f6a362SSean Hefty 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
1235c8f6a362SSean Hefty 	int ret;
12363c86aa70SEli Cohen 	u16 pkey;
12373c86aa70SEli Cohen 
1238227128fcSMichael Wang 	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
12393c86aa70SEli Cohen 		pkey = 0xffff;
1240fef60902SMichael Wang 	else
1241fef60902SMichael Wang 		pkey = ib_addr_get_pkey(dev_addr);
1242c8f6a362SSean Hefty 
1243c8f6a362SSean Hefty 	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
12443c86aa70SEli Cohen 				  pkey, &qp_attr->pkey_index);
1245c8f6a362SSean Hefty 	if (ret)
1246c8f6a362SSean Hefty 		return ret;
1247c8f6a362SSean Hefty 
1248c8f6a362SSean Hefty 	qp_attr->port_num = id_priv->id.port_num;
1249c8f6a362SSean Hefty 	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
1250c8f6a362SSean Hefty 
1251b26f9b99SSean Hefty 	if (id_priv->id.qp_type == IB_QPT_UD) {
125258e84f6bSMark Zhang 		ret = cma_set_default_qkey(id_priv);
1253d2ca39f2SYossi Etigin 		if (ret)
1254d2ca39f2SYossi Etigin 			return ret;
1255d2ca39f2SYossi Etigin 
1256c8f6a362SSean Hefty 		qp_attr->qkey = id_priv->qkey;
1257c8f6a362SSean Hefty 		*qp_attr_mask |= IB_QP_QKEY;
1258c8f6a362SSean Hefty 	} else {
1259c8f6a362SSean Hefty 		qp_attr->qp_access_flags = 0;
1260c8f6a362SSean Hefty 		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
1261c8f6a362SSean Hefty 	}
1262c8f6a362SSean Hefty 	return 0;
1263c8f6a362SSean Hefty }
1264c8f6a362SSean Hefty 
1265e51060f0SSean Hefty int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
1266e51060f0SSean Hefty 		       int *qp_attr_mask)
1267e51060f0SSean Hefty {
1268e51060f0SSean Hefty 	struct rdma_id_private *id_priv;
1269c8f6a362SSean Hefty 	int ret = 0;
1270e51060f0SSean Hefty 
1271e51060f0SSean Hefty 	id_priv = container_of(id, struct rdma_id_private, id);
127272219ceaSMichael Wang 	if (rdma_cap_ib_cm(id->device, id->port_num)) {
1273b26f9b99SSean Hefty 		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
1274c8f6a362SSean Hefty 			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
1275c8f6a362SSean Hefty 		else
1276e51060f0SSean Hefty 			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
1277e51060f0SSean Hefty 						 qp_attr_mask);
1278dd5f03beSMatan Barak 
1279e51060f0SSean Hefty 		if (qp_attr->qp_state == IB_QPS_RTR)
1280e51060f0SSean Hefty 			qp_attr->rq_psn = id_priv->seq_num;
128104215330SMichael Wang 	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
1282c8f6a362SSean Hefty 		if (!id_priv->cm_id.iw) {
12838f076531SDotan Barak 			qp_attr->qp_access_flags = 0;
1284c8f6a362SSean Hefty 			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
1285c8f6a362SSean Hefty 		} else
128607ebafbaSTom Tucker 			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
128707ebafbaSTom Tucker 						 qp_attr_mask);
1288a62ab66bSIsmail, Mustafa 		qp_attr->port_num = id_priv->id.port_num;
1289a62ab66bSIsmail, Mustafa 		*qp_attr_mask |= IB_QP_PORT;
1290b6eb7011SWenpeng Liang 	} else {
1291e51060f0SSean Hefty 		ret = -ENOSYS;
1292b6eb7011SWenpeng Liang 	}
1293e51060f0SSean Hefty 
12942c1619edSDanit Goldberg 	if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
12952c1619edSDanit Goldberg 		qp_attr->timeout = id_priv->timeout;
12962c1619edSDanit Goldberg 
12973aeffc46SHåkon Bugge 	if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
12983aeffc46SHåkon Bugge 		qp_attr->min_rnr_timer = id_priv->min_rnr_timer;
12993aeffc46SHåkon Bugge 
1300e51060f0SSean Hefty 	return ret;
1301e51060f0SSean Hefty }
1302e51060f0SSean Hefty EXPORT_SYMBOL(rdma_init_qp_attr);
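/*
 * Usage sketch (illustrative, not taken from this file): a ULP that owns
 * its QP instead of calling rdma_create_qp() can still let the CM compute
 * the attributes for each state transition and apply them itself:
 *
 *	struct ib_qp_attr attr;
 *	int attr_mask, ret;
 *
 *	attr.qp_state = IB_QPS_RTR;
 *	ret = rdma_init_qp_attr(id, &attr, &attr_mask);
 *	if (ret)
 *		return ret;
 *	ret = ib_modify_qp(qp, &attr, attr_mask);
 *	if (ret)
 *		return ret;
 *
 * The same pattern is repeated for IB_QPS_INIT and IB_QPS_RTS.  The
 * timeout and min_rnr_timer overrides applied above only take effect when
 * the caller has set them via rdma_set_ack_timeout() or
 * rdma_set_min_rnr_timer().
 */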
1303e51060f0SSean Hefty 
1304ca3a8aceSParav Pandit static inline bool cma_zero_addr(const struct sockaddr *addr)
1305e51060f0SSean Hefty {
13062e2d190cSSean Hefty 	switch (addr->sa_family) {
13072e2d190cSSean Hefty 	case AF_INET:
13082e2d190cSSean Hefty 		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
13092e2d190cSSean Hefty 	case AF_INET6:
13102e2d190cSSean Hefty 		return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
13112e2d190cSSean Hefty 	case AF_IB:
13122e2d190cSSean Hefty 		return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
13132e2d190cSSean Hefty 	default:
1314ca3a8aceSParav Pandit 		return false;
1315e51060f0SSean Hefty 	}
1316e51060f0SSean Hefty }
1317e51060f0SSean Hefty 
1318ca3a8aceSParav Pandit static inline bool cma_loopback_addr(const struct sockaddr *addr)
1319e51060f0SSean Hefty {
13202e2d190cSSean Hefty 	switch (addr->sa_family) {
13212e2d190cSSean Hefty 	case AF_INET:
1322ca3a8aceSParav Pandit 		return ipv4_is_loopback(
1323ca3a8aceSParav Pandit 			((struct sockaddr_in *)addr)->sin_addr.s_addr);
13242e2d190cSSean Hefty 	case AF_INET6:
1325ca3a8aceSParav Pandit 		return ipv6_addr_loopback(
1326ca3a8aceSParav Pandit 			&((struct sockaddr_in6 *)addr)->sin6_addr);
13272e2d190cSSean Hefty 	case AF_IB:
1328ca3a8aceSParav Pandit 		return ib_addr_loopback(
1329ca3a8aceSParav Pandit 			&((struct sockaddr_ib *)addr)->sib_addr);
13302e2d190cSSean Hefty 	default:
1331ca3a8aceSParav Pandit 		return false;
13322e2d190cSSean Hefty 	}
1333e51060f0SSean Hefty }
1334e51060f0SSean Hefty 
1335ca3a8aceSParav Pandit static inline bool cma_any_addr(const struct sockaddr *addr)
1336e51060f0SSean Hefty {
1337e51060f0SSean Hefty 	return cma_zero_addr(addr) || cma_loopback_addr(addr);
1338e51060f0SSean Hefty }
1339e51060f0SSean Hefty 
13405d7ed2f2SParav Pandit static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
134143b752daSHefty, Sean {
134243b752daSHefty, Sean 	if (src->sa_family != dst->sa_family)
134343b752daSHefty, Sean 		return -1;
134443b752daSHefty, Sean 
134543b752daSHefty, Sean 	switch (src->sa_family) {
134643b752daSHefty, Sean 	case AF_INET:
134743b752daSHefty, Sean 		return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
134843b752daSHefty, Sean 		       ((struct sockaddr_in *)dst)->sin_addr.s_addr;
13495d7ed2f2SParav Pandit 	case AF_INET6: {
13505d7ed2f2SParav Pandit 		struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
13515d7ed2f2SParav Pandit 		struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
13525d7ed2f2SParav Pandit 		bool link_local;
13535d7ed2f2SParav Pandit 
13545d7ed2f2SParav Pandit 		if (ipv6_addr_cmp(&src_addr6->sin6_addr,
13555d7ed2f2SParav Pandit 					  &dst_addr6->sin6_addr))
13565d7ed2f2SParav Pandit 			return 1;
13575d7ed2f2SParav Pandit 		link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
13585d7ed2f2SParav Pandit 			     IPV6_ADDR_LINKLOCAL;
13595d7ed2f2SParav Pandit 		/* Link-local addresses must have matching scope_ids */
13605d7ed2f2SParav Pandit 		return link_local ? (src_addr6->sin6_scope_id !=
13615d7ed2f2SParav Pandit 				     dst_addr6->sin6_scope_id) :
13625d7ed2f2SParav Pandit 				    0;
13635d7ed2f2SParav Pandit 	}
13645d7ed2f2SParav Pandit 
13652e2d190cSSean Hefty 	default:
13662e2d190cSSean Hefty 		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
13672e2d190cSSean Hefty 				   &((struct sockaddr_ib *) dst)->sib_addr);
136843b752daSHefty, Sean 	}
136943b752daSHefty, Sean }
137043b752daSHefty, Sean 
13712df7dba8SParav Pandit static __be16 cma_port(const struct sockaddr *addr)
1372628e5f6dSSean Hefty {
137358afdcb7SSean Hefty 	struct sockaddr_ib *sib;
137458afdcb7SSean Hefty 
137558afdcb7SSean Hefty 	switch (addr->sa_family) {
137658afdcb7SSean Hefty 	case AF_INET:
1377628e5f6dSSean Hefty 		return ((struct sockaddr_in *) addr)->sin_port;
137858afdcb7SSean Hefty 	case AF_INET6:
1379628e5f6dSSean Hefty 		return ((struct sockaddr_in6 *) addr)->sin6_port;
138058afdcb7SSean Hefty 	case AF_IB:
138158afdcb7SSean Hefty 		sib = (struct sockaddr_ib *) addr;
138258afdcb7SSean Hefty 		return htons((u16) (be64_to_cpu(sib->sib_sid) &
138358afdcb7SSean Hefty 				    be64_to_cpu(sib->sib_sid_mask)));
138458afdcb7SSean Hefty 	default:
138558afdcb7SSean Hefty 		return 0;
138658afdcb7SSean Hefty 	}
1387628e5f6dSSean Hefty }
1388628e5f6dSSean Hefty 
13892df7dba8SParav Pandit static inline int cma_any_port(const struct sockaddr *addr)
1390e51060f0SSean Hefty {
1391628e5f6dSSean Hefty 	return !cma_port(addr);
1392e51060f0SSean Hefty }
1393e51060f0SSean Hefty 
13940c505f70SHaggai Eran static void cma_save_ib_info(struct sockaddr *src_addr,
13950c505f70SHaggai Eran 			     struct sockaddr *dst_addr,
1396e7ff98aeSParav Pandit 			     const struct rdma_cm_id *listen_id,
1397e7ff98aeSParav Pandit 			     const struct sa_path_rec *path)
1398e51060f0SSean Hefty {
1399fbaa1a6dSSean Hefty 	struct sockaddr_ib *listen_ib, *ib;
1400e51060f0SSean Hefty 
1401fbaa1a6dSSean Hefty 	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
14020c505f70SHaggai Eran 	if (src_addr) {
14030c505f70SHaggai Eran 		ib = (struct sockaddr_ib *)src_addr;
14040c505f70SHaggai Eran 		ib->sib_family = AF_IB;
1405c07678bbSMatthew Finlay 		if (path) {
1406fbaa1a6dSSean Hefty 			ib->sib_pkey = path->pkey;
1407fbaa1a6dSSean Hefty 			ib->sib_flowinfo = path->flow_label;
1408fbaa1a6dSSean Hefty 			memcpy(&ib->sib_addr, &path->sgid, 16);
1409d3957b86SMajd Dibbiny 			ib->sib_sid = path->service_id;
14100c505f70SHaggai Eran 			ib->sib_scope_id = 0;
1411c07678bbSMatthew Finlay 		} else {
1412c07678bbSMatthew Finlay 			ib->sib_pkey = listen_ib->sib_pkey;
1413c07678bbSMatthew Finlay 			ib->sib_flowinfo = listen_ib->sib_flowinfo;
1414c07678bbSMatthew Finlay 			ib->sib_addr = listen_ib->sib_addr;
1415fbaa1a6dSSean Hefty 			ib->sib_sid = listen_ib->sib_sid;
1416fbaa1a6dSSean Hefty 			ib->sib_scope_id = listen_ib->sib_scope_id;
14170c505f70SHaggai Eran 		}
14180c505f70SHaggai Eran 		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
14190c505f70SHaggai Eran 	}
14200c505f70SHaggai Eran 	if (dst_addr) {
14210c505f70SHaggai Eran 		ib = (struct sockaddr_ib *)dst_addr;
14220c505f70SHaggai Eran 		ib->sib_family = AF_IB;
1423c07678bbSMatthew Finlay 		if (path) {
1424fbaa1a6dSSean Hefty 			ib->sib_pkey = path->pkey;
1425fbaa1a6dSSean Hefty 			ib->sib_flowinfo = path->flow_label;
1426fbaa1a6dSSean Hefty 			memcpy(&ib->sib_addr, &path->dgid, 16);
1427fbaa1a6dSSean Hefty 		}
1428c07678bbSMatthew Finlay 	}
142928521440SJason Gunthorpe }
143028521440SJason Gunthorpe 
1431c50e90d0SArnd Bergmann static void cma_save_ip4_info(struct sockaddr_in *src_addr,
1432c50e90d0SArnd Bergmann 			      struct sockaddr_in *dst_addr,
14330c505f70SHaggai Eran 			      struct cma_hdr *hdr,
14340c505f70SHaggai Eran 			      __be16 local_port)
1435fbaa1a6dSSean Hefty {
14360c505f70SHaggai Eran 	if (src_addr) {
1437c50e90d0SArnd Bergmann 		*src_addr = (struct sockaddr_in) {
1438c50e90d0SArnd Bergmann 			.sin_family = AF_INET,
1439c50e90d0SArnd Bergmann 			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
1440c50e90d0SArnd Bergmann 			.sin_port = local_port,
1441c50e90d0SArnd Bergmann 		};
14420c505f70SHaggai Eran 	}
1443fbaa1a6dSSean Hefty 
14440c505f70SHaggai Eran 	if (dst_addr) {
1445c50e90d0SArnd Bergmann 		*dst_addr = (struct sockaddr_in) {
1446c50e90d0SArnd Bergmann 			.sin_family = AF_INET,
1447c50e90d0SArnd Bergmann 			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
1448c50e90d0SArnd Bergmann 			.sin_port = hdr->port,
1449c50e90d0SArnd Bergmann 		};
1450fbaa1a6dSSean Hefty 	}
14510c505f70SHaggai Eran }
1452fbaa1a6dSSean Hefty 
1453c50e90d0SArnd Bergmann static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
1454c50e90d0SArnd Bergmann 			      struct sockaddr_in6 *dst_addr,
14550c505f70SHaggai Eran 			      struct cma_hdr *hdr,
14560c505f70SHaggai Eran 			      __be16 local_port)
1457fbaa1a6dSSean Hefty {
14580c505f70SHaggai Eran 	if (src_addr) {
1459c50e90d0SArnd Bergmann 		*src_addr = (struct sockaddr_in6) {
1460c50e90d0SArnd Bergmann 			.sin6_family = AF_INET6,
1461c50e90d0SArnd Bergmann 			.sin6_addr = hdr->dst_addr.ip6,
1462c50e90d0SArnd Bergmann 			.sin6_port = local_port,
1463c50e90d0SArnd Bergmann 		};
14640c505f70SHaggai Eran 	}
1465fbaa1a6dSSean Hefty 
14660c505f70SHaggai Eran 	if (dst_addr) {
1467c50e90d0SArnd Bergmann 		*dst_addr = (struct sockaddr_in6) {
1468c50e90d0SArnd Bergmann 			.sin6_family = AF_INET6,
1469c50e90d0SArnd Bergmann 			.sin6_addr = hdr->src_addr.ip6,
1470c50e90d0SArnd Bergmann 			.sin6_port = hdr->port,
1471c50e90d0SArnd Bergmann 		};
1472fbaa1a6dSSean Hefty 	}
14730c505f70SHaggai Eran }
1474fbaa1a6dSSean Hefty 
14750c505f70SHaggai Eran static u16 cma_port_from_service_id(__be64 service_id)
14760c505f70SHaggai Eran {
14770c505f70SHaggai Eran 	return (u16)be64_to_cpu(service_id);
14780c505f70SHaggai Eran }
14790c505f70SHaggai Eran 
14800c505f70SHaggai Eran static int cma_save_ip_info(struct sockaddr *src_addr,
14810c505f70SHaggai Eran 			    struct sockaddr *dst_addr,
1482e7ff98aeSParav Pandit 			    const struct ib_cm_event *ib_event,
14830c505f70SHaggai Eran 			    __be64 service_id)
1484fbaa1a6dSSean Hefty {
1485fbaa1a6dSSean Hefty 	struct cma_hdr *hdr;
14860c505f70SHaggai Eran 	__be16 port;
1487e51060f0SSean Hefty 
1488fbaa1a6dSSean Hefty 	hdr = ib_event->private_data;
1489fbaa1a6dSSean Hefty 	if (hdr->cma_version != CMA_VERSION)
1490fbaa1a6dSSean Hefty 		return -EINVAL;
1491e51060f0SSean Hefty 
14920c505f70SHaggai Eran 	port = htons(cma_port_from_service_id(service_id));
14930c505f70SHaggai Eran 
1494fbaa1a6dSSean Hefty 	switch (cma_get_ip_ver(hdr)) {
1495e51060f0SSean Hefty 	case 4:
1496c50e90d0SArnd Bergmann 		cma_save_ip4_info((struct sockaddr_in *)src_addr,
1497c50e90d0SArnd Bergmann 				  (struct sockaddr_in *)dst_addr, hdr, port);
1498e51060f0SSean Hefty 		break;
1499e51060f0SSean Hefty 	case 6:
1500c50e90d0SArnd Bergmann 		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
1501c50e90d0SArnd Bergmann 				  (struct sockaddr_in6 *)dst_addr, hdr, port);
1502e51060f0SSean Hefty 		break;
1503e51060f0SSean Hefty 	default:
15044c21b5bcSHaggai Eran 		return -EAFNOSUPPORT;
1505e51060f0SSean Hefty 	}
15060c505f70SHaggai Eran 
1507fbaa1a6dSSean Hefty 	return 0;
1508e51060f0SSean Hefty }
1509e51060f0SSean Hefty 
15100c505f70SHaggai Eran static int cma_save_net_info(struct sockaddr *src_addr,
15110c505f70SHaggai Eran 			     struct sockaddr *dst_addr,
1512e7ff98aeSParav Pandit 			     const struct rdma_cm_id *listen_id,
1513e7ff98aeSParav Pandit 			     const struct ib_cm_event *ib_event,
15140c505f70SHaggai Eran 			     sa_family_t sa_family, __be64 service_id)
15150c505f70SHaggai Eran {
15160c505f70SHaggai Eran 	if (sa_family == AF_IB) {
15170c505f70SHaggai Eran 		if (ib_event->event == IB_CM_REQ_RECEIVED)
15180c505f70SHaggai Eran 			cma_save_ib_info(src_addr, dst_addr, listen_id,
15190c505f70SHaggai Eran 					 ib_event->param.req_rcvd.primary_path);
15200c505f70SHaggai Eran 		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
15210c505f70SHaggai Eran 			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
15220c505f70SHaggai Eran 		return 0;
15230c505f70SHaggai Eran 	}
15240c505f70SHaggai Eran 
15250c505f70SHaggai Eran 	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
15260c505f70SHaggai Eran }
15270c505f70SHaggai Eran 
15284c21b5bcSHaggai Eran static int cma_save_req_info(const struct ib_cm_event *ib_event,
15294c21b5bcSHaggai Eran 			     struct cma_req_info *req)
15304c21b5bcSHaggai Eran {
15314c21b5bcSHaggai Eran 	const struct ib_cm_req_event_param *req_param =
15324c21b5bcSHaggai Eran 		&ib_event->param.req_rcvd;
15334c21b5bcSHaggai Eran 	const struct ib_cm_sidr_req_event_param *sidr_param =
15344c21b5bcSHaggai Eran 		&ib_event->param.sidr_req_rcvd;
15354c21b5bcSHaggai Eran 
15364c21b5bcSHaggai Eran 	switch (ib_event->event) {
15374c21b5bcSHaggai Eran 	case IB_CM_REQ_RECEIVED:
15384c21b5bcSHaggai Eran 		req->device	= req_param->listen_id->device;
15394c21b5bcSHaggai Eran 		req->port	= req_param->port;
15404c21b5bcSHaggai Eran 		memcpy(&req->local_gid, &req_param->primary_path->sgid,
15414c21b5bcSHaggai Eran 		       sizeof(req->local_gid));
15424c21b5bcSHaggai Eran 		req->has_gid	= true;
1543d3957b86SMajd Dibbiny 		req->service_id = req_param->primary_path->service_id;
1544ab3964adSHaggai Eran 		req->pkey	= be16_to_cpu(req_param->primary_path->pkey);
154584424a7fSHaggai Eran 		if (req->pkey != req_param->bth_pkey)
154684424a7fSHaggai Eran 			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
154784424a7fSHaggai Eran 					    "RDMA CMA: in the future this may cause the request to be dropped\n",
154884424a7fSHaggai Eran 					    req_param->bth_pkey, req->pkey);
15494c21b5bcSHaggai Eran 		break;
15504c21b5bcSHaggai Eran 	case IB_CM_SIDR_REQ_RECEIVED:
15514c21b5bcSHaggai Eran 		req->device	= sidr_param->listen_id->device;
15524c21b5bcSHaggai Eran 		req->port	= sidr_param->port;
15534c21b5bcSHaggai Eran 		req->has_gid	= false;
15544c21b5bcSHaggai Eran 		req->service_id	= sidr_param->service_id;
1555ab3964adSHaggai Eran 		req->pkey	= sidr_param->pkey;
155684424a7fSHaggai Eran 		if (req->pkey != sidr_param->bth_pkey)
155784424a7fSHaggai Eran 			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
155884424a7fSHaggai Eran 					    "RDMA CMA: in the future this may cause the request to be dropped\n",
155984424a7fSHaggai Eran 					    sidr_param->bth_pkey, req->pkey);
15604c21b5bcSHaggai Eran 		break;
15614c21b5bcSHaggai Eran 	default:
15624c21b5bcSHaggai Eran 		return -EINVAL;
15634c21b5bcSHaggai Eran 	}
15644c21b5bcSHaggai Eran 
15654c21b5bcSHaggai Eran 	return 0;
15664c21b5bcSHaggai Eran }
15674c21b5bcSHaggai Eran 
1568f887f2acSHaggai Eran static bool validate_ipv4_net_dev(struct net_device *net_dev,
1569f887f2acSHaggai Eran 				  const struct sockaddr_in *dst_addr,
1570f887f2acSHaggai Eran 				  const struct sockaddr_in *src_addr)
1571f887f2acSHaggai Eran {
1572f887f2acSHaggai Eran 	__be32 daddr = dst_addr->sin_addr.s_addr,
1573f887f2acSHaggai Eran 	       saddr = src_addr->sin_addr.s_addr;
1574f887f2acSHaggai Eran 	struct fib_result res;
1575f887f2acSHaggai Eran 	struct flowi4 fl4;
1576f887f2acSHaggai Eran 	int err;
1577f887f2acSHaggai Eran 	bool ret;
1578f887f2acSHaggai Eran 
1579f887f2acSHaggai Eran 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1580f887f2acSHaggai Eran 	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
1581f887f2acSHaggai Eran 	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
1582f887f2acSHaggai Eran 	    ipv4_is_loopback(saddr))
1583f887f2acSHaggai Eran 		return false;
1584f887f2acSHaggai Eran 
1585f887f2acSHaggai Eran 	memset(&fl4, 0, sizeof(fl4));
1586eb83f502SHåkon Bugge 	fl4.flowi4_oif = net_dev->ifindex;
1587f887f2acSHaggai Eran 	fl4.daddr = daddr;
1588f887f2acSHaggai Eran 	fl4.saddr = saddr;
1589f887f2acSHaggai Eran 
1590f887f2acSHaggai Eran 	rcu_read_lock();
1591f887f2acSHaggai Eran 	err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
1592d3632493SBart Van Assche 	ret = err == 0 && FIB_RES_DEV(res) == net_dev;
1593f887f2acSHaggai Eran 	rcu_read_unlock();
1594f887f2acSHaggai Eran 
1595f887f2acSHaggai Eran 	return ret;
1596f887f2acSHaggai Eran }
1597f887f2acSHaggai Eran 
1598f887f2acSHaggai Eran static bool validate_ipv6_net_dev(struct net_device *net_dev,
1599f887f2acSHaggai Eran 				  const struct sockaddr_in6 *dst_addr,
1600f887f2acSHaggai Eran 				  const struct sockaddr_in6 *src_addr)
1601f887f2acSHaggai Eran {
1602f887f2acSHaggai Eran #if IS_ENABLED(CONFIG_IPV6)
1603f887f2acSHaggai Eran 	const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
1604f887f2acSHaggai Eran 			   IPV6_ADDR_LINKLOCAL;
1605f887f2acSHaggai Eran 	struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
1606f887f2acSHaggai Eran 					 &src_addr->sin6_addr, net_dev->ifindex,
1607b75cc8f9SDavid Ahern 					 NULL, strict);
1608f887f2acSHaggai Eran 	bool ret;
1609f887f2acSHaggai Eran 
1610f887f2acSHaggai Eran 	if (!rt)
1611f887f2acSHaggai Eran 		return false;
1612f887f2acSHaggai Eran 
1613f887f2acSHaggai Eran 	ret = rt->rt6i_idev->dev == net_dev;
1614f887f2acSHaggai Eran 	ip6_rt_put(rt);
1615f887f2acSHaggai Eran 
1616f887f2acSHaggai Eran 	return ret;
1617f887f2acSHaggai Eran #else
1618f887f2acSHaggai Eran 	return false;
1619f887f2acSHaggai Eran #endif
1620f887f2acSHaggai Eran }
1621f887f2acSHaggai Eran 
1622f887f2acSHaggai Eran static bool validate_net_dev(struct net_device *net_dev,
1623f887f2acSHaggai Eran 			     const struct sockaddr *daddr,
1624f887f2acSHaggai Eran 			     const struct sockaddr *saddr)
1625f887f2acSHaggai Eran {
1626f887f2acSHaggai Eran 	const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
1627f887f2acSHaggai Eran 	const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
1628f887f2acSHaggai Eran 	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
1629f887f2acSHaggai Eran 	const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;
1630f887f2acSHaggai Eran 
1631f887f2acSHaggai Eran 	switch (daddr->sa_family) {
1632f887f2acSHaggai Eran 	case AF_INET:
1633f887f2acSHaggai Eran 		return saddr->sa_family == AF_INET &&
1634f887f2acSHaggai Eran 		       validate_ipv4_net_dev(net_dev, daddr4, saddr4);
1635f887f2acSHaggai Eran 
1636f887f2acSHaggai Eran 	case AF_INET6:
1637f887f2acSHaggai Eran 		return saddr->sa_family == AF_INET6 &&
1638f887f2acSHaggai Eran 		       validate_ipv6_net_dev(net_dev, daddr6, saddr6);
1639f887f2acSHaggai Eran 
1640f887f2acSHaggai Eran 	default:
1641f887f2acSHaggai Eran 		return false;
1642f887f2acSHaggai Eran 	}
1643f887f2acSHaggai Eran }
1644f887f2acSHaggai Eran 
1645cee10433SParav Pandit static struct net_device *
1646cee10433SParav Pandit roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
1647cee10433SParav Pandit {
1648cee10433SParav Pandit 	const struct ib_gid_attr *sgid_attr = NULL;
1649adb4a57aSParav Pandit 	struct net_device *ndev;
1650cee10433SParav Pandit 
1651cee10433SParav Pandit 	if (ib_event->event == IB_CM_REQ_RECEIVED)
1652cee10433SParav Pandit 		sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
1653cee10433SParav Pandit 	else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
1654cee10433SParav Pandit 		sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;
1655cee10433SParav Pandit 
1656cee10433SParav Pandit 	if (!sgid_attr)
1657cee10433SParav Pandit 		return NULL;
1658adb4a57aSParav Pandit 
1659adb4a57aSParav Pandit 	rcu_read_lock();
1660adb4a57aSParav Pandit 	ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
1661adb4a57aSParav Pandit 	if (IS_ERR(ndev))
1662adb4a57aSParav Pandit 		ndev = NULL;
1663adb4a57aSParav Pandit 	else
1664adb4a57aSParav Pandit 		dev_hold(ndev);
1665adb4a57aSParav Pandit 	rcu_read_unlock();
1666adb4a57aSParav Pandit 	return ndev;
1667cee10433SParav Pandit }
1668cee10433SParav Pandit 
1669e7ff98aeSParav Pandit static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
16702918c1a9SParav Pandit 					  struct cma_req_info *req)
16714c21b5bcSHaggai Eran {
16722918c1a9SParav Pandit 	struct sockaddr *listen_addr =
16732918c1a9SParav Pandit 			(struct sockaddr *)&req->listen_addr_storage;
16742918c1a9SParav Pandit 	struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
16754c21b5bcSHaggai Eran 	struct net_device *net_dev;
16764c21b5bcSHaggai Eran 	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
16774c21b5bcSHaggai Eran 	int err;
16784c21b5bcSHaggai Eran 
1679f887f2acSHaggai Eran 	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
1680f887f2acSHaggai Eran 			       req->service_id);
16814c21b5bcSHaggai Eran 	if (err)
16824c21b5bcSHaggai Eran 		return ERR_PTR(err);
16834c21b5bcSHaggai Eran 
1684cee10433SParav Pandit 	if (rdma_protocol_roce(req->device, req->port))
1685cee10433SParav Pandit 		net_dev = roce_get_net_dev_by_cm_event(ib_event);
1686cee10433SParav Pandit 	else
1687cee10433SParav Pandit 		net_dev = ib_get_net_dev_by_params(req->device, req->port,
1688cee10433SParav Pandit 						   req->pkey,
16894c21b5bcSHaggai Eran 						   gid, listen_addr);
16904c21b5bcSHaggai Eran 	if (!net_dev)
16914c21b5bcSHaggai Eran 		return ERR_PTR(-ENODEV);
16924c21b5bcSHaggai Eran 
16934c21b5bcSHaggai Eran 	return net_dev;
16944c21b5bcSHaggai Eran }
16954c21b5bcSHaggai Eran 
16962253fc0cSSteve Wise static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
16974c21b5bcSHaggai Eran {
16984c21b5bcSHaggai Eran 	return (be64_to_cpu(service_id) >> 16) & 0xffff;
16994c21b5bcSHaggai Eran }
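/*
 * Worked example (illustrative, not taken from this file): the port space
 * and port are packed into the low bits of the 64-bit service ID, so with
 * service_id == cpu_to_be64(0x0000000001061389ULL) the two helpers
 * cma_port_from_service_id() and rdma_ps_from_service_id() above yield
 * port 0x1389 (5001) and port space 0x0106 (RDMA_PS_TCP) respectively.
 */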
17004c21b5bcSHaggai Eran 
17014c21b5bcSHaggai Eran static bool cma_match_private_data(struct rdma_id_private *id_priv,
17024c21b5bcSHaggai Eran 				   const struct cma_hdr *hdr)
17034c21b5bcSHaggai Eran {
17044c21b5bcSHaggai Eran 	struct sockaddr *addr = cma_src_addr(id_priv);
17054c21b5bcSHaggai Eran 	__be32 ip4_addr;
17064c21b5bcSHaggai Eran 	struct in6_addr ip6_addr;
17074c21b5bcSHaggai Eran 
17084c21b5bcSHaggai Eran 	if (cma_any_addr(addr) && !id_priv->afonly)
17094c21b5bcSHaggai Eran 		return true;
17104c21b5bcSHaggai Eran 
17114c21b5bcSHaggai Eran 	switch (addr->sa_family) {
17124c21b5bcSHaggai Eran 	case AF_INET:
17134c21b5bcSHaggai Eran 		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
17144c21b5bcSHaggai Eran 		if (cma_get_ip_ver(hdr) != 4)
17154c21b5bcSHaggai Eran 			return false;
17164c21b5bcSHaggai Eran 		if (!cma_any_addr(addr) &&
17174c21b5bcSHaggai Eran 		    hdr->dst_addr.ip4.addr != ip4_addr)
17184c21b5bcSHaggai Eran 			return false;
17194c21b5bcSHaggai Eran 		break;
17204c21b5bcSHaggai Eran 	case AF_INET6:
17214c21b5bcSHaggai Eran 		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
17224c21b5bcSHaggai Eran 		if (cma_get_ip_ver(hdr) != 6)
17234c21b5bcSHaggai Eran 			return false;
17244c21b5bcSHaggai Eran 		if (!cma_any_addr(addr) &&
17254c21b5bcSHaggai Eran 		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
17264c21b5bcSHaggai Eran 			return false;
17274c21b5bcSHaggai Eran 		break;
17284c21b5bcSHaggai Eran 	case AF_IB:
17294c21b5bcSHaggai Eran 		return true;
17304c21b5bcSHaggai Eran 	default:
17314c21b5bcSHaggai Eran 		return false;
17324c21b5bcSHaggai Eran 	}
17334c21b5bcSHaggai Eran 
17344c21b5bcSHaggai Eran 	return true;
17354c21b5bcSHaggai Eran }
17364c21b5bcSHaggai Eran 
1737b8cab5daSHaggai Eran static bool cma_protocol_roce(const struct rdma_cm_id *id)
1738b8cab5daSHaggai Eran {
1739b8cab5daSHaggai Eran 	struct ib_device *device = id->device;
17401fb7f897SMark Bloch 	const u32 port_num = id->port_num ?: rdma_start_port(device);
1741b8cab5daSHaggai Eran 
17425ac08a34SParav Pandit 	return rdma_protocol_roce(device, port_num);
1743b8cab5daSHaggai Eran }
1744b8cab5daSHaggai Eran 
174578fb282bSParav Pandit static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
174678fb282bSParav Pandit {
174778fb282bSParav Pandit 	const struct sockaddr *daddr =
174878fb282bSParav Pandit 			(const struct sockaddr *)&req->listen_addr_storage;
174978fb282bSParav Pandit 	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
175078fb282bSParav Pandit 
175178fb282bSParav Pandit 	/* Returns true if the req is for IPv6 link local */
175278fb282bSParav Pandit 	return (daddr->sa_family == AF_INET6 &&
175378fb282bSParav Pandit 		(ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
175478fb282bSParav Pandit }
175578fb282bSParav Pandit 
1756fac51590SMatan Barak static bool cma_match_net_dev(const struct rdma_cm_id *id,
1757fac51590SMatan Barak 			      const struct net_device *net_dev,
175878fb282bSParav Pandit 			      const struct cma_req_info *req)
17594c21b5bcSHaggai Eran {
1760fac51590SMatan Barak 	const struct rdma_addr *addr = &id->route.addr;
17614c21b5bcSHaggai Eran 
17624c21b5bcSHaggai Eran 	if (!net_dev)
1763d274e45cSParav Pandit 		/* This request is an AF_IB request */
176478fb282bSParav Pandit 		return (!id->port_num || id->port_num == req->port) &&
1765d274e45cSParav Pandit 		       (addr->src_addr.ss_family == AF_IB);
17664c21b5bcSHaggai Eran 
1767643d213aSParav Pandit 	/*
176878fb282bSParav Pandit 	 * If the request is not for IPv6 link local, allow the request
176978fb282bSParav Pandit 	 * to match any netdevice of the single or multi-port rdma device.
177078fb282bSParav Pandit 	 */
177178fb282bSParav Pandit 	if (!cma_is_req_ipv6_ll(req))
177278fb282bSParav Pandit 		return true;
177378fb282bSParav Pandit 	/*
1774643d213aSParav Pandit 	 * Net namespaces must match, and if the listener is listening
1775643d213aSParav Pandit 	 * on a specific netdevice then the netdevice must match as well.
1776643d213aSParav Pandit 	 */
1777643d213aSParav Pandit 	if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1778643d213aSParav Pandit 	    (!!addr->dev_addr.bound_dev_if ==
1779643d213aSParav Pandit 	     (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
1780643d213aSParav Pandit 		return true;
1781643d213aSParav Pandit 	else
1782643d213aSParav Pandit 		return false;
17834c21b5bcSHaggai Eran }
17844c21b5bcSHaggai Eran 
17854c21b5bcSHaggai Eran static struct rdma_id_private *cma_find_listener(
17864c21b5bcSHaggai Eran 		const struct rdma_bind_list *bind_list,
17874c21b5bcSHaggai Eran 		const struct ib_cm_id *cm_id,
17884c21b5bcSHaggai Eran 		const struct ib_cm_event *ib_event,
17894c21b5bcSHaggai Eran 		const struct cma_req_info *req,
17904c21b5bcSHaggai Eran 		const struct net_device *net_dev)
17914c21b5bcSHaggai Eran {
17924c21b5bcSHaggai Eran 	struct rdma_id_private *id_priv, *id_priv_dev;
17934c21b5bcSHaggai Eran 
1794730c8912SMark Zhang 	lockdep_assert_held(&lock);
1795730c8912SMark Zhang 
17964c21b5bcSHaggai Eran 	if (!bind_list)
17974c21b5bcSHaggai Eran 		return ERR_PTR(-EINVAL);
17984c21b5bcSHaggai Eran 
17994c21b5bcSHaggai Eran 	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
18004c21b5bcSHaggai Eran 		if (cma_match_private_data(id_priv, ib_event->private_data)) {
18014c21b5bcSHaggai Eran 			if (id_priv->id.device == cm_id->device &&
180278fb282bSParav Pandit 			    cma_match_net_dev(&id_priv->id, net_dev, req))
18034c21b5bcSHaggai Eran 				return id_priv;
18044c21b5bcSHaggai Eran 			list_for_each_entry(id_priv_dev,
18054c21b5bcSHaggai Eran 					    &id_priv->listen_list,
180699cfddb8SJason Gunthorpe 					    listen_item) {
18074c21b5bcSHaggai Eran 				if (id_priv_dev->id.device == cm_id->device &&
180878fb282bSParav Pandit 				    cma_match_net_dev(&id_priv_dev->id,
180978fb282bSParav Pandit 						      net_dev, req))
18104c21b5bcSHaggai Eran 					return id_priv_dev;
18114c21b5bcSHaggai Eran 			}
18124c21b5bcSHaggai Eran 		}
18134c21b5bcSHaggai Eran 	}
18144c21b5bcSHaggai Eran 
18154c21b5bcSHaggai Eran 	return ERR_PTR(-EINVAL);
18164c21b5bcSHaggai Eran }
18174c21b5bcSHaggai Eran 
1818e7ff98aeSParav Pandit static struct rdma_id_private *
181985463316SParav Pandit cma_ib_id_from_event(struct ib_cm_id *cm_id,
1820e7ff98aeSParav Pandit 		     const struct ib_cm_event *ib_event,
182141ab1cb7SParav Pandit 		     struct cma_req_info *req,
18220b3ca768SHaggai Eran 		     struct net_device **net_dev)
18234c21b5bcSHaggai Eran {
18244c21b5bcSHaggai Eran 	struct rdma_bind_list *bind_list;
18254c21b5bcSHaggai Eran 	struct rdma_id_private *id_priv;
18264c21b5bcSHaggai Eran 	int err;
18274c21b5bcSHaggai Eran 
182841ab1cb7SParav Pandit 	err = cma_save_req_info(ib_event, req);
18294c21b5bcSHaggai Eran 	if (err)
18304c21b5bcSHaggai Eran 		return ERR_PTR(err);
18314c21b5bcSHaggai Eran 
183241ab1cb7SParav Pandit 	*net_dev = cma_get_net_dev(ib_event, req);
18330b3ca768SHaggai Eran 	if (IS_ERR(*net_dev)) {
18340b3ca768SHaggai Eran 		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
18354c21b5bcSHaggai Eran 			/* Assuming the protocol is AF_IB */
18360b3ca768SHaggai Eran 			*net_dev = NULL;
18374c21b5bcSHaggai Eran 		} else {
18380b3ca768SHaggai Eran 			return ERR_CAST(*net_dev);
18394c21b5bcSHaggai Eran 		}
18404c21b5bcSHaggai Eran 	}
18414c21b5bcSHaggai Eran 
1842730c8912SMark Zhang 	mutex_lock(&lock);
18432918c1a9SParav Pandit 	/*
18442918c1a9SParav Pandit 	 * The net namespace might be deleted while the route lookup or
18452918c1a9SParav Pandit 	 * cm_id lookup is in progress. Therefore, perform the netdevice
18462918c1a9SParav Pandit 	 * validation and the cm_id lookup under the rcu lock.
18472918c1a9SParav Pandit 	 * RCU lock along with netdevice state check, synchronizes with
18482918c1a9SParav Pandit 	 * netdevice migrating to different net namespace and also avoids
18492918c1a9SParav Pandit 	 * case where net namespace doesn't get deleted while lookup is in
18502918c1a9SParav Pandit 	 * progress.
18512918c1a9SParav Pandit 	 * If the device state is not IFF_UP, its properties such as ifindex
18522918c1a9SParav Pandit 	 * and nd_net cannot be trusted to remain valid without rcu lock.
18532918c1a9SParav Pandit 	 * change_net_namespace() in net/core/dev.c synchronizes with
18542918c1a9SParav Pandit 	 * ongoing operations on the net device after the device is closed
18552918c1a9SParav Pandit 	 * by using synchronize_net().
18562918c1a9SParav Pandit 	 */
18572918c1a9SParav Pandit 	rcu_read_lock();
18582918c1a9SParav Pandit 	if (*net_dev) {
18592918c1a9SParav Pandit 		/*
18602918c1a9SParav Pandit 		 * If the netdevice is down, it is likely administratively
18612918c1a9SParav Pandit 		 * down or migrating to a different namespace.
18622918c1a9SParav Pandit 		 * In that case avoid further processing, as the net namespace
18632918c1a9SParav Pandit 		 * or ifindex may change.
18642918c1a9SParav Pandit 		 */
18652918c1a9SParav Pandit 		if (((*net_dev)->flags & IFF_UP) == 0) {
18662918c1a9SParav Pandit 			id_priv = ERR_PTR(-EHOSTUNREACH);
18672918c1a9SParav Pandit 			goto err;
18682918c1a9SParav Pandit 		}
18692918c1a9SParav Pandit 
18702918c1a9SParav Pandit 		if (!validate_net_dev(*net_dev,
187127cfde79SMichael Guralnik 				 (struct sockaddr *)&req->src_addr_storage,
187227cfde79SMichael Guralnik 				 (struct sockaddr *)&req->listen_addr_storage)) {
18732918c1a9SParav Pandit 			id_priv = ERR_PTR(-EHOSTUNREACH);
18742918c1a9SParav Pandit 			goto err;
18752918c1a9SParav Pandit 		}
18762918c1a9SParav Pandit 	}
18772918c1a9SParav Pandit 
1878fa20105eSGuy Shapiro 	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
187941ab1cb7SParav Pandit 				rdma_ps_from_service_id(req->service_id),
188041ab1cb7SParav Pandit 				cma_port_from_service_id(req->service_id));
188141ab1cb7SParav Pandit 	id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
18822918c1a9SParav Pandit err:
18832918c1a9SParav Pandit 	rcu_read_unlock();
1884730c8912SMark Zhang 	mutex_unlock(&lock);
1885b3b51f9fSHaggai Eran 	if (IS_ERR(id_priv) && *net_dev) {
1886be688195SHaggai Eran 		dev_put(*net_dev);
1887be688195SHaggai Eran 		*net_dev = NULL;
1888be688195SHaggai Eran 	}
18894c21b5bcSHaggai Eran 	return id_priv;
18904c21b5bcSHaggai Eran }
18914c21b5bcSHaggai Eran 
1892c0b64f58SBart Van Assche static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
1893e51060f0SSean Hefty {
1894e8160e15SSean Hefty 	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
1895e51060f0SSean Hefty }
1896e51060f0SSean Hefty 
1897e51060f0SSean Hefty static void cma_cancel_route(struct rdma_id_private *id_priv)
1898e51060f0SSean Hefty {
1899fe53ba2fSMichael Wang 	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
1900e51060f0SSean Hefty 		if (id_priv->query)
1901e51060f0SSean Hefty 			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
1902e51060f0SSean Hefty 	}
1903e51060f0SSean Hefty }
1904e51060f0SSean Hefty 
1905ca465e1fSTao Liu static void _cma_cancel_listens(struct rdma_id_private *id_priv)
1906e51060f0SSean Hefty {
1907e51060f0SSean Hefty 	struct rdma_id_private *dev_id_priv;
1908e51060f0SSean Hefty 
1909ca465e1fSTao Liu 	lockdep_assert_held(&lock);
1910ca465e1fSTao Liu 
1911d02d1f53SSean Hefty 	/*
1912d02d1f53SSean Hefty 	 * Remove from listen_any_list to prevent added devices from spawning
1913d02d1f53SSean Hefty 	 * additional listen requests.
1914d02d1f53SSean Hefty 	 */
191599cfddb8SJason Gunthorpe 	list_del_init(&id_priv->listen_any_item);
1916e51060f0SSean Hefty 
1917e51060f0SSean Hefty 	while (!list_empty(&id_priv->listen_list)) {
191899cfddb8SJason Gunthorpe 		dev_id_priv =
191999cfddb8SJason Gunthorpe 			list_first_entry(&id_priv->listen_list,
192099cfddb8SJason Gunthorpe 					 struct rdma_id_private, listen_item);
1921d02d1f53SSean Hefty 		/* sync with device removal to avoid duplicate destruction */
192299cfddb8SJason Gunthorpe 		list_del_init(&dev_id_priv->device_item);
192399cfddb8SJason Gunthorpe 		list_del_init(&dev_id_priv->listen_item);
1924d02d1f53SSean Hefty 		mutex_unlock(&lock);
1925d02d1f53SSean Hefty 
1926d02d1f53SSean Hefty 		rdma_destroy_id(&dev_id_priv->id);
1927d02d1f53SSean Hefty 		mutex_lock(&lock);
1928e51060f0SSean Hefty 	}
1929ca465e1fSTao Liu }
1930ca465e1fSTao Liu 
1931ca465e1fSTao Liu static void cma_cancel_listens(struct rdma_id_private *id_priv)
1932ca465e1fSTao Liu {
1933ca465e1fSTao Liu 	mutex_lock(&lock);
1934ca465e1fSTao Liu 	_cma_cancel_listens(id_priv);
1935e51060f0SSean Hefty 	mutex_unlock(&lock);
1936e51060f0SSean Hefty }
1937e51060f0SSean Hefty 
1938e51060f0SSean Hefty static void cma_cancel_operation(struct rdma_id_private *id_priv,
1939550e5ca7SNir Muchtar 				 enum rdma_cm_state state)
1940e51060f0SSean Hefty {
1941e51060f0SSean Hefty 	switch (state) {
1942550e5ca7SNir Muchtar 	case RDMA_CM_ADDR_QUERY:
1943305d568bSJason Gunthorpe 		/*
1944305d568bSJason Gunthorpe 		 * We can avoid doing the rdma_addr_cancel() based on state,
1945305d568bSJason Gunthorpe 		 * only RDMA_CM_ADDR_QUERY has a work that could still execute.
1946305d568bSJason Gunthorpe 		 * Notice that the addr_handler work could still be exiting
1947305d568bSJason Gunthorpe 		 * outside this state, however due to the interaction with the
1948305d568bSJason Gunthorpe 		 * handler_mutex the work is guaranteed not to touch id_priv
1949305d568bSJason Gunthorpe 		 * during exit.
1950305d568bSJason Gunthorpe 		 */
1951e51060f0SSean Hefty 		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
1952e51060f0SSean Hefty 		break;
1953550e5ca7SNir Muchtar 	case RDMA_CM_ROUTE_QUERY:
1954e51060f0SSean Hefty 		cma_cancel_route(id_priv);
1955e51060f0SSean Hefty 		break;
1956550e5ca7SNir Muchtar 	case RDMA_CM_LISTEN:
1957f4753834SSean Hefty 		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
1958e51060f0SSean Hefty 			cma_cancel_listens(id_priv);
1959e51060f0SSean Hefty 		break;
1960e51060f0SSean Hefty 	default:
1961e51060f0SSean Hefty 		break;
1962e51060f0SSean Hefty 	}
1963e51060f0SSean Hefty }
1964e51060f0SSean Hefty 
1965e51060f0SSean Hefty static void cma_release_port(struct rdma_id_private *id_priv)
1966e51060f0SSean Hefty {
1967e51060f0SSean Hefty 	struct rdma_bind_list *bind_list = id_priv->bind_list;
1968fa20105eSGuy Shapiro 	struct net *net = id_priv->id.route.addr.dev_addr.net;
1969e51060f0SSean Hefty 
1970e51060f0SSean Hefty 	if (!bind_list)
1971e51060f0SSean Hefty 		return;
1972e51060f0SSean Hefty 
1973e51060f0SSean Hefty 	mutex_lock(&lock);
1974e51060f0SSean Hefty 	hlist_del(&id_priv->node);
1975e51060f0SSean Hefty 	if (hlist_empty(&bind_list->owners)) {
1976fa20105eSGuy Shapiro 		cma_ps_remove(net, bind_list->ps, bind_list->port);
1977e51060f0SSean Hefty 		kfree(bind_list);
1978e51060f0SSean Hefty 	}
1979e51060f0SSean Hefty 	mutex_unlock(&lock);
1980e51060f0SSean Hefty }
1981e51060f0SSean Hefty 
19823788d299SJason Gunthorpe static void destroy_mc(struct rdma_id_private *id_priv,
198388145678SParav Pandit 		       struct cma_multicast *mc)
198488145678SParav Pandit {
19852cc74e1eSChristoph Lameter 	bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
19862cc74e1eSChristoph Lameter 
1987b5de0c60SJason Gunthorpe 	if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
1988b5de0c60SJason Gunthorpe 		ib_sa_free_multicast(mc->sa_mc);
19893788d299SJason Gunthorpe 
1990b5de0c60SJason Gunthorpe 	if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
19913788d299SJason Gunthorpe 		struct rdma_dev_addr *dev_addr =
19923788d299SJason Gunthorpe 			&id_priv->id.route.addr.dev_addr;
199388145678SParav Pandit 		struct net_device *ndev = NULL;
199488145678SParav Pandit 
199588145678SParav Pandit 		if (dev_addr->bound_dev_if)
19963788d299SJason Gunthorpe 			ndev = dev_get_by_index(dev_addr->net,
19973788d299SJason Gunthorpe 						dev_addr->bound_dev_if);
1998d9e410ebSMaor Gottlieb 		if (ndev && !send_only) {
1999d9e410ebSMaor Gottlieb 			enum ib_gid_type gid_type;
2000b5de0c60SJason Gunthorpe 			union ib_gid mgid;
2001b5de0c60SJason Gunthorpe 
2002d9e410ebSMaor Gottlieb 			gid_type = id_priv->cma_dev->default_gid_type
2003d9e410ebSMaor Gottlieb 					   [id_priv->id.port_num -
2004d9e410ebSMaor Gottlieb 					    rdma_start_port(
2005d9e410ebSMaor Gottlieb 						    id_priv->cma_dev->device)];
2006d9e410ebSMaor Gottlieb 			cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
2007d9e410ebSMaor Gottlieb 					  gid_type);
2008b5de0c60SJason Gunthorpe 			cma_igmp_send(ndev, &mgid, false);
200988145678SParav Pandit 		}
2010d9e410ebSMaor Gottlieb 		dev_put(ndev);
2011fe454dc3SAvihai Horon 
2012fe454dc3SAvihai Horon 		cancel_work_sync(&mc->iboe_join.work);
201388145678SParav Pandit 	}
2014b5de0c60SJason Gunthorpe 	kfree(mc);
201588145678SParav Pandit }
201688145678SParav Pandit 
2017c8f6a362SSean Hefty static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
2018c8f6a362SSean Hefty {
2019c8f6a362SSean Hefty 	struct cma_multicast *mc;
2020c8f6a362SSean Hefty 
2021c8f6a362SSean Hefty 	while (!list_empty(&id_priv->mc_list)) {
20223788d299SJason Gunthorpe 		mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
20233788d299SJason Gunthorpe 				      list);
2024c8f6a362SSean Hefty 		list_del(&mc->list);
20253788d299SJason Gunthorpe 		destroy_mc(id_priv, mc);
2026c8f6a362SSean Hefty 	}
2027bee3c3c9SMoni Shoua }
2028c8f6a362SSean Hefty 
2029f6a9d47aSJason Gunthorpe static void _destroy_id(struct rdma_id_private *id_priv,
2030f6a9d47aSJason Gunthorpe 			enum rdma_cm_state state)
2031e51060f0SSean Hefty {
2032e51060f0SSean Hefty 	cma_cancel_operation(id_priv, state);
2033e51060f0SSean Hefty 
20343d828754SLeon Romanovsky 	rdma_restrack_del(&id_priv->res);
2035fc008bdbSPatrisious Haddad 	cma_remove_id_from_tree(id_priv);
2036ed7a01fdSLeon Romanovsky 	if (id_priv->cma_dev) {
203772219ceaSMichael Wang 		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
20380c9361fcSJack Morgenstein 			if (id_priv->cm_id.ib)
2039e51060f0SSean Hefty 				ib_destroy_cm_id(id_priv->cm_id.ib);
204004215330SMichael Wang 		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
20410c9361fcSJack Morgenstein 			if (id_priv->cm_id.iw)
204207ebafbaSTom Tucker 				iw_destroy_cm_id(id_priv->cm_id.iw);
2043e51060f0SSean Hefty 		}
2044c8f6a362SSean Hefty 		cma_leave_mc_groups(id_priv);
2045a396d43aSSean Hefty 		cma_release_dev(id_priv);
2046e51060f0SSean Hefty 	}
2047e51060f0SSean Hefty 
2048e51060f0SSean Hefty 	cma_release_port(id_priv);
2049e368d23fSParav Pandit 	cma_id_put(id_priv);
2050e51060f0SSean Hefty 	wait_for_completion(&id_priv->comp);
2051e51060f0SSean Hefty 
2052d02d1f53SSean Hefty 	if (id_priv->internal_id)
2053e368d23fSParav Pandit 		cma_id_put(id_priv->id.context);
2054d02d1f53SSean Hefty 
2055e51060f0SSean Hefty 	kfree(id_priv->id.route.path_rec);
20565a374949SMark Zhang 	kfree(id_priv->id.route.path_rec_inbound);
20575a374949SMark Zhang 	kfree(id_priv->id.route.path_rec_outbound);
20584ed13a5fSParav Pandit 
2059fa20105eSGuy Shapiro 	put_net(id_priv->id.route.addr.dev_addr.net);
2060e51060f0SSean Hefty 	kfree(id_priv);
2061e51060f0SSean Hefty }
2062f6a9d47aSJason Gunthorpe 
2063f6a9d47aSJason Gunthorpe /*
2064f6a9d47aSJason Gunthorpe  * destroy an ID from within the handler_mutex. This ensures that no other
2065f6a9d47aSJason Gunthorpe  * handlers can start running concurrently.
2066f6a9d47aSJason Gunthorpe  */
2067f6a9d47aSJason Gunthorpe static void destroy_id_handler_unlock(struct rdma_id_private *id_priv)
2068f6a9d47aSJason Gunthorpe 	__releases(&idprv->handler_mutex)
2069f6a9d47aSJason Gunthorpe {
2070f6a9d47aSJason Gunthorpe 	enum rdma_cm_state state;
2071f6a9d47aSJason Gunthorpe 	unsigned long flags;
2072f6a9d47aSJason Gunthorpe 
2073f6a9d47aSJason Gunthorpe 	trace_cm_id_destroy(id_priv);
2074f6a9d47aSJason Gunthorpe 
2075f6a9d47aSJason Gunthorpe 	/*
2076f6a9d47aSJason Gunthorpe 	 * Setting the state to destroyed under the handler mutex provides a
2077f6a9d47aSJason Gunthorpe 	 * fence against calling handler callbacks. If this is invoked due to
2078f6a9d47aSJason Gunthorpe 	 * the failure of a handler callback then it guarantees that no future
2079f6a9d47aSJason Gunthorpe 	 * handlers will be called.
2080f6a9d47aSJason Gunthorpe 	 */
2081f6a9d47aSJason Gunthorpe 	lockdep_assert_held(&id_priv->handler_mutex);
2082f6a9d47aSJason Gunthorpe 	spin_lock_irqsave(&id_priv->lock, flags);
2083f6a9d47aSJason Gunthorpe 	state = id_priv->state;
2084f6a9d47aSJason Gunthorpe 	id_priv->state = RDMA_CM_DESTROYING;
2085f6a9d47aSJason Gunthorpe 	spin_unlock_irqrestore(&id_priv->lock, flags);
2086f6a9d47aSJason Gunthorpe 	mutex_unlock(&id_priv->handler_mutex);
2087f6a9d47aSJason Gunthorpe 	_destroy_id(id_priv, state);
2088f6a9d47aSJason Gunthorpe }
2089f6a9d47aSJason Gunthorpe 
2090f6a9d47aSJason Gunthorpe void rdma_destroy_id(struct rdma_cm_id *id)
2091f6a9d47aSJason Gunthorpe {
2092f6a9d47aSJason Gunthorpe 	struct rdma_id_private *id_priv =
2093f6a9d47aSJason Gunthorpe 		container_of(id, struct rdma_id_private, id);
2094f6a9d47aSJason Gunthorpe 
2095f6a9d47aSJason Gunthorpe 	mutex_lock(&id_priv->handler_mutex);
2096f6a9d47aSJason Gunthorpe 	destroy_id_handler_unlock(id_priv);
2097f6a9d47aSJason Gunthorpe }
2098e51060f0SSean Hefty EXPORT_SYMBOL(rdma_destroy_id);
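/*
 * Illustrative sketch (not from this file): a kernel consumer must not call
 * rdma_destroy_id() from inside its own event handler, since the handler runs
 * under handler_mutex; returning a non-zero value instead makes the core
 * destroy the cm_id via destroy_id_handler_unlock(). The handler name below,
 * example_cm_handler(), is hypothetical.
 *
 *	static int example_cm_handler(struct rdma_cm_id *id,
 *				      struct rdma_cm_event *event)
 *	{
 *		if (event->event == RDMA_CM_EVENT_REJECTED)
 *			return -ECONNRESET;	// non-zero: core destroys the id
 *		return 0;			// zero: the id stays alive
 *	}
 */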
2099e51060f0SSean Hefty 
2100e51060f0SSean Hefty static int cma_rep_recv(struct rdma_id_private *id_priv)
2101e51060f0SSean Hefty {
2102e51060f0SSean Hefty 	int ret;
2103e51060f0SSean Hefty 
21045851bb89SSean Hefty 	ret = cma_modify_qp_rtr(id_priv, NULL);
2105e51060f0SSean Hefty 	if (ret)
2106e51060f0SSean Hefty 		goto reject;
2107e51060f0SSean Hefty 
21085851bb89SSean Hefty 	ret = cma_modify_qp_rts(id_priv, NULL);
2109e51060f0SSean Hefty 	if (ret)
2110e51060f0SSean Hefty 		goto reject;
2111e51060f0SSean Hefty 
2112ed999f82SChuck Lever 	trace_cm_send_rtu(id_priv);
2113e51060f0SSean Hefty 	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
2114e51060f0SSean Hefty 	if (ret)
2115e51060f0SSean Hefty 		goto reject;
2116e51060f0SSean Hefty 
2117e51060f0SSean Hefty 	return 0;
2118e51060f0SSean Hefty reject:
2119498683c6SMoni Shoua 	pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
2120c5483388SSean Hefty 	cma_modify_qp_err(id_priv);
2121ed999f82SChuck Lever 	trace_cm_send_rej(id_priv);
2122e51060f0SSean Hefty 	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
2123e51060f0SSean Hefty 		       NULL, 0, NULL, 0);
2124e51060f0SSean Hefty 	return ret;
2125e51060f0SSean Hefty }
2126e51060f0SSean Hefty 
2127a1b1b61fSSean Hefty static void cma_set_rep_event_data(struct rdma_cm_event *event,
2128e7ff98aeSParav Pandit 				   const struct ib_cm_rep_event_param *rep_data,
2129a1b1b61fSSean Hefty 				   void *private_data)
2130a1b1b61fSSean Hefty {
2131a1b1b61fSSean Hefty 	event->param.conn.private_data = private_data;
2132a1b1b61fSSean Hefty 	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
2133a1b1b61fSSean Hefty 	event->param.conn.responder_resources = rep_data->responder_resources;
2134a1b1b61fSSean Hefty 	event->param.conn.initiator_depth = rep_data->initiator_depth;
2135a1b1b61fSSean Hefty 	event->param.conn.flow_control = rep_data->flow_control;
2136a1b1b61fSSean Hefty 	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
2137a1b1b61fSSean Hefty 	event->param.conn.srq = rep_data->srq;
2138a1b1b61fSSean Hefty 	event->param.conn.qp_num = rep_data->remote_qpn;
2139a20652e1SLeon Romanovsky 
2140a20652e1SLeon Romanovsky 	event->ece.vendor_id = rep_data->ece.vendor_id;
2141a20652e1SLeon Romanovsky 	event->ece.attr_mod = rep_data->ece.attr_mod;
2142a1b1b61fSSean Hefty }
2143a1b1b61fSSean Hefty 
2144ed999f82SChuck Lever static int cma_cm_event_handler(struct rdma_id_private *id_priv,
2145ed999f82SChuck Lever 				struct rdma_cm_event *event)
2146ed999f82SChuck Lever {
2147ed999f82SChuck Lever 	int ret;
2148ed999f82SChuck Lever 
21493647a28dSJason Gunthorpe 	lockdep_assert_held(&id_priv->handler_mutex);
21503647a28dSJason Gunthorpe 
2151ed999f82SChuck Lever 	trace_cm_event_handler(id_priv, event);
2152ed999f82SChuck Lever 	ret = id_priv->id.event_handler(&id_priv->id, event);
2153ed999f82SChuck Lever 	trace_cm_event_done(id_priv, event, ret);
2154ed999f82SChuck Lever 	return ret;
2155ed999f82SChuck Lever }
2156ed999f82SChuck Lever 
2157e7ff98aeSParav Pandit static int cma_ib_handler(struct ib_cm_id *cm_id,
2158e7ff98aeSParav Pandit 			  const struct ib_cm_event *ib_event)
2159e51060f0SSean Hefty {
2160e51060f0SSean Hefty 	struct rdma_id_private *id_priv = cm_id->context;
21617582df82SParav Pandit 	struct rdma_cm_event event = {};
21622a7cec53SJason Gunthorpe 	enum rdma_cm_state state;
2163f6a9d47aSJason Gunthorpe 	int ret;
2164e51060f0SSean Hefty 
216537e07cdaSBart Van Assche 	mutex_lock(&id_priv->handler_mutex);
21662a7cec53SJason Gunthorpe 	state = READ_ONCE(id_priv->state);
216738ca83a5SAmir Vadai 	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
21682a7cec53SJason Gunthorpe 	     state != RDMA_CM_CONNECT) ||
216938ca83a5SAmir Vadai 	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
21702a7cec53SJason Gunthorpe 	     state != RDMA_CM_DISCONNECT))
217137e07cdaSBart Van Assche 		goto out;
2172e51060f0SSean Hefty 
2173e51060f0SSean Hefty 	switch (ib_event->event) {
2174e51060f0SSean Hefty 	case IB_CM_REQ_ERROR:
2175e51060f0SSean Hefty 	case IB_CM_REP_ERROR:
2176a1b1b61fSSean Hefty 		event.event = RDMA_CM_EVENT_UNREACHABLE;
2177a1b1b61fSSean Hefty 		event.status = -ETIMEDOUT;
2178e51060f0SSean Hefty 		break;
2179e51060f0SSean Hefty 	case IB_CM_REP_RECEIVED:
21802a7cec53SJason Gunthorpe 		if (state == RDMA_CM_CONNECT &&
2181ed999f82SChuck Lever 		    (id_priv->id.qp_type != IB_QPT_UD)) {
2182ed999f82SChuck Lever 			trace_cm_send_mra(id_priv);
218361c0ddbeSMoni Shoua 			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
2184ed999f82SChuck Lever 		}
218501602f11SSean Hefty 		if (id_priv->id.qp) {
2186a1b1b61fSSean Hefty 			event.status = cma_rep_recv(id_priv);
2187a1b1b61fSSean Hefty 			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
2188e51060f0SSean Hefty 						     RDMA_CM_EVENT_ESTABLISHED;
218901602f11SSean Hefty 		} else {
2190a1b1b61fSSean Hefty 			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
219101602f11SSean Hefty 		}
2192a1b1b61fSSean Hefty 		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
2193a1b1b61fSSean Hefty 				       ib_event->private_data);
2194e51060f0SSean Hefty 		break;
2195e51060f0SSean Hefty 	case IB_CM_RTU_RECEIVED:
21960fe313b0SSean Hefty 	case IB_CM_USER_ESTABLISHED:
21970fe313b0SSean Hefty 		event.event = RDMA_CM_EVENT_ESTABLISHED;
2198e51060f0SSean Hefty 		break;
2199e51060f0SSean Hefty 	case IB_CM_DREQ_ERROR:
2200df561f66SGustavo A. R. Silva 		event.status = -ETIMEDOUT;
2201df561f66SGustavo A. R. Silva 		fallthrough;
2202e51060f0SSean Hefty 	case IB_CM_DREQ_RECEIVED:
2203e51060f0SSean Hefty 	case IB_CM_DREP_RECEIVED:
2204550e5ca7SNir Muchtar 		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
2205550e5ca7SNir Muchtar 				   RDMA_CM_DISCONNECT))
2206e51060f0SSean Hefty 			goto out;
2207a1b1b61fSSean Hefty 		event.event = RDMA_CM_EVENT_DISCONNECTED;
2208e51060f0SSean Hefty 		break;
2209e51060f0SSean Hefty 	case IB_CM_TIMEWAIT_EXIT:
221038ca83a5SAmir Vadai 		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
221138ca83a5SAmir Vadai 		break;
2212e51060f0SSean Hefty 	case IB_CM_MRA_RECEIVED:
2213e51060f0SSean Hefty 		/* ignore event */
2214e51060f0SSean Hefty 		goto out;
2215e51060f0SSean Hefty 	case IB_CM_REJ_RECEIVED:
2216498683c6SMoni Shoua 		pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
2217498683c6SMoni Shoua 										ib_event->param.rej_rcvd.reason));
2218c5483388SSean Hefty 		cma_modify_qp_err(id_priv);
2219a1b1b61fSSean Hefty 		event.status = ib_event->param.rej_rcvd.reason;
2220a1b1b61fSSean Hefty 		event.event = RDMA_CM_EVENT_REJECTED;
2221a1b1b61fSSean Hefty 		event.param.conn.private_data = ib_event->private_data;
2222a1b1b61fSSean Hefty 		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
2223e51060f0SSean Hefty 		break;
2224e51060f0SSean Hefty 	default:
2225aba25a3eSParav Pandit 		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
2226e51060f0SSean Hefty 		       ib_event->event);
2227e51060f0SSean Hefty 		goto out;
2228e51060f0SSean Hefty 	}
2229e51060f0SSean Hefty 
2230ed999f82SChuck Lever 	ret = cma_cm_event_handler(id_priv, &event);
2231e51060f0SSean Hefty 	if (ret) {
2232e51060f0SSean Hefty 		/* Destroy the CM ID by returning a non-zero value. */
2233e51060f0SSean Hefty 		id_priv->cm_id.ib = NULL;
2234f6a9d47aSJason Gunthorpe 		destroy_id_handler_unlock(id_priv);
2235e51060f0SSean Hefty 		return ret;
2236e51060f0SSean Hefty 	}
2237e51060f0SSean Hefty out:
2238de910bd9SOr Gerlitz 	mutex_unlock(&id_priv->handler_mutex);
2239f6a9d47aSJason Gunthorpe 	return 0;
2240e51060f0SSean Hefty }
2241e51060f0SSean Hefty 
2242e7ff98aeSParav Pandit static struct rdma_id_private *
224385463316SParav Pandit cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
2244e7ff98aeSParav Pandit 		   const struct ib_cm_event *ib_event,
22450b3ca768SHaggai Eran 		   struct net_device *net_dev)
2246e51060f0SSean Hefty {
224700313983SSteve Wise 	struct rdma_id_private *listen_id_priv;
2248e51060f0SSean Hefty 	struct rdma_id_private *id_priv;
2249e51060f0SSean Hefty 	struct rdma_cm_id *id;
2250e51060f0SSean Hefty 	struct rdma_route *rt;
22510c505f70SHaggai Eran 	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
22529fdca4daSDasaratharaman Chandramouli 	struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
2253d3957b86SMajd Dibbiny 	const __be64 service_id =
2254d3957b86SMajd Dibbiny 		ib_event->param.req_rcvd.primary_path->service_id;
225564c5e613SOr Gerlitz 	int ret;
2256e51060f0SSean Hefty 
225700313983SSteve Wise 	listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
2258b09c4d70SLeon Romanovsky 	id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net,
2259fa20105eSGuy Shapiro 				   listen_id->event_handler, listen_id->context,
2260b09c4d70SLeon Romanovsky 				   listen_id->ps,
2261b09c4d70SLeon Romanovsky 				   ib_event->param.req_rcvd.qp_type,
2262b09c4d70SLeon Romanovsky 				   listen_id_priv);
2263b09c4d70SLeon Romanovsky 	if (IS_ERR(id_priv))
22640c9361fcSJack Morgenstein 		return NULL;
22653f168d2bSKrishna Kumar 
2266b09c4d70SLeon Romanovsky 	id = &id_priv->id;
22670c505f70SHaggai Eran 	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
22680c505f70SHaggai Eran 			      (struct sockaddr *)&id->route.addr.dst_addr,
22690c505f70SHaggai Eran 			      listen_id, ib_event, ss_family, service_id))
2270fbaa1a6dSSean Hefty 		goto err;
22713f168d2bSKrishna Kumar 
22723f168d2bSKrishna Kumar 	rt = &id->route;
2273bf9a9928SMark Zhang 	rt->num_pri_alt_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
2274bf9a9928SMark Zhang 	rt->path_rec = kmalloc_array(rt->num_pri_alt_paths,
2275bf9a9928SMark Zhang 				     sizeof(*rt->path_rec), GFP_KERNEL);
22763f168d2bSKrishna Kumar 	if (!rt->path_rec)
22770c9361fcSJack Morgenstein 		goto err;
22783f168d2bSKrishna Kumar 
22799fdca4daSDasaratharaman Chandramouli 	rt->path_rec[0] = *path;
2280bf9a9928SMark Zhang 	if (rt->num_pri_alt_paths == 2)
2281e51060f0SSean Hefty 		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
2282e51060f0SSean Hefty 
22830b3ca768SHaggai Eran 	if (net_dev) {
228477addc52SParav Pandit 		rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
22850b3ca768SHaggai Eran 	} else {
2286b8cab5daSHaggai Eran 		if (!cma_protocol_roce(listen_id) &&
2287b8cab5daSHaggai Eran 		    cma_any_addr(cma_src_addr(id_priv))) {
2288b8cab5daSHaggai Eran 			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
2289b8cab5daSHaggai Eran 			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
2290b8cab5daSHaggai Eran 			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
2291b8cab5daSHaggai Eran 		} else if (!cma_any_addr(cma_src_addr(id_priv))) {
2292b8cab5daSHaggai Eran 			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
2293b8cab5daSHaggai Eran 			if (ret)
2294b8cab5daSHaggai Eran 				goto err;
2295b8cab5daSHaggai Eran 		}
22966f8372b6SSean Hefty 	}
22976f8372b6SSean Hefty 	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
2298e51060f0SSean Hefty 
2299550e5ca7SNir Muchtar 	id_priv->state = RDMA_CM_CONNECT;
2300e51060f0SSean Hefty 	return id_priv;
23013f168d2bSKrishna Kumar 
23023f168d2bSKrishna Kumar err:
23030c9361fcSJack Morgenstein 	rdma_destroy_id(id);
2304e51060f0SSean Hefty 	return NULL;
2305e51060f0SSean Hefty }
2306e51060f0SSean Hefty 
2307e7ff98aeSParav Pandit static struct rdma_id_private *
230885463316SParav Pandit cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
2309e7ff98aeSParav Pandit 		  const struct ib_cm_event *ib_event,
23100b3ca768SHaggai Eran 		  struct net_device *net_dev)
2311628e5f6dSSean Hefty {
2312e7ff98aeSParav Pandit 	const struct rdma_id_private *listen_id_priv;
2313628e5f6dSSean Hefty 	struct rdma_id_private *id_priv;
2314628e5f6dSSean Hefty 	struct rdma_cm_id *id;
23150c505f70SHaggai Eran 	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
2316fa20105eSGuy Shapiro 	struct net *net = listen_id->route.addr.dev_addr.net;
2317628e5f6dSSean Hefty 	int ret;
2318628e5f6dSSean Hefty 
231900313983SSteve Wise 	listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
2320b09c4d70SLeon Romanovsky 	id_priv = __rdma_create_id(net, listen_id->event_handler,
2321b09c4d70SLeon Romanovsky 				   listen_id->context, listen_id->ps, IB_QPT_UD,
2322b09c4d70SLeon Romanovsky 				   listen_id_priv);
2323b09c4d70SLeon Romanovsky 	if (IS_ERR(id_priv))
2324628e5f6dSSean Hefty 		return NULL;
2325628e5f6dSSean Hefty 
2326b09c4d70SLeon Romanovsky 	id = &id_priv->id;
23270c505f70SHaggai Eran 	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
23280c505f70SHaggai Eran 			      (struct sockaddr *)&id->route.addr.dst_addr,
23290c505f70SHaggai Eran 			      listen_id, ib_event, ss_family,
23300c505f70SHaggai Eran 			      ib_event->param.sidr_req_rcvd.service_id))
2331628e5f6dSSean Hefty 		goto err;
2332628e5f6dSSean Hefty 
23330b3ca768SHaggai Eran 	if (net_dev) {
233477addc52SParav Pandit 		rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
23350b3ca768SHaggai Eran 	} else {
2336b8cab5daSHaggai Eran 		if (!cma_any_addr(cma_src_addr(id_priv))) {
2337b8cab5daSHaggai Eran 			ret = cma_translate_addr(cma_src_addr(id_priv),
23380b3ca768SHaggai Eran 						 &id->route.addr.dev_addr);
2339b8cab5daSHaggai Eran 			if (ret)
2340b8cab5daSHaggai Eran 				goto err;
2341b8cab5daSHaggai Eran 		}
23426f8372b6SSean Hefty 	}
2343628e5f6dSSean Hefty 
2344550e5ca7SNir Muchtar 	id_priv->state = RDMA_CM_CONNECT;
2345628e5f6dSSean Hefty 	return id_priv;
2346628e5f6dSSean Hefty err:
2347628e5f6dSSean Hefty 	rdma_destroy_id(id);
2348628e5f6dSSean Hefty 	return NULL;
2349628e5f6dSSean Hefty }
2350628e5f6dSSean Hefty 
2351a1b1b61fSSean Hefty static void cma_set_req_event_data(struct rdma_cm_event *event,
2352e7ff98aeSParav Pandit 				   const struct ib_cm_req_event_param *req_data,
2353a1b1b61fSSean Hefty 				   void *private_data, int offset)
2354a1b1b61fSSean Hefty {
2355a1b1b61fSSean Hefty 	event->param.conn.private_data = private_data + offset;
2356a1b1b61fSSean Hefty 	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
2357a1b1b61fSSean Hefty 	event->param.conn.responder_resources = req_data->responder_resources;
2358a1b1b61fSSean Hefty 	event->param.conn.initiator_depth = req_data->initiator_depth;
2359a1b1b61fSSean Hefty 	event->param.conn.flow_control = req_data->flow_control;
2360a1b1b61fSSean Hefty 	event->param.conn.retry_count = req_data->retry_count;
2361a1b1b61fSSean Hefty 	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
2362a1b1b61fSSean Hefty 	event->param.conn.srq = req_data->srq;
2363a1b1b61fSSean Hefty 	event->param.conn.qp_num = req_data->remote_qpn;
2364a20652e1SLeon Romanovsky 
2365a20652e1SLeon Romanovsky 	event->ece.vendor_id = req_data->ece.vendor_id;
2366a20652e1SLeon Romanovsky 	event->ece.attr_mod = req_data->ece.attr_mod;
2367a1b1b61fSSean Hefty }
2368a1b1b61fSSean Hefty 
236985463316SParav Pandit static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
2370e7ff98aeSParav Pandit 				    const struct ib_cm_event *ib_event)
23719595480cSHefty, Sean {
23724dd81e89SSean Hefty 	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
23739595480cSHefty, Sean 		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
23749595480cSHefty, Sean 		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
23759595480cSHefty, Sean 		 (id->qp_type == IB_QPT_UD)) ||
23769595480cSHefty, Sean 		(!id->qp_type));
23779595480cSHefty, Sean }
23789595480cSHefty, Sean 
237985463316SParav Pandit static int cma_ib_req_handler(struct ib_cm_id *cm_id,
2380e7ff98aeSParav Pandit 			      const struct ib_cm_event *ib_event)
2381e51060f0SSean Hefty {
238237e07cdaSBart Van Assche 	struct rdma_id_private *listen_id, *conn_id = NULL;
23837582df82SParav Pandit 	struct rdma_cm_event event = {};
238441ab1cb7SParav Pandit 	struct cma_req_info req = {};
23850b3ca768SHaggai Eran 	struct net_device *net_dev;
2386c0b64f58SBart Van Assche 	u8 offset;
2387c0b64f58SBart Van Assche 	int ret;
2388e51060f0SSean Hefty 
238941ab1cb7SParav Pandit 	listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
23904c21b5bcSHaggai Eran 	if (IS_ERR(listen_id))
23914c21b5bcSHaggai Eran 		return PTR_ERR(listen_id);
23924c21b5bcSHaggai Eran 
2393ed999f82SChuck Lever 	trace_cm_req_handler(listen_id, ib_event->event);
239485463316SParav Pandit 	if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
23950b3ca768SHaggai Eran 		ret = -EINVAL;
23960b3ca768SHaggai Eran 		goto net_dev_put;
23970b3ca768SHaggai Eran 	}
23989595480cSHefty, Sean 
239937e07cdaSBart Van Assche 	mutex_lock(&listen_id->handler_mutex);
2400d490ee52SJason Gunthorpe 	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
24010b3ca768SHaggai Eran 		ret = -ECONNABORTED;
2402f6a9d47aSJason Gunthorpe 		goto err_unlock;
24030b3ca768SHaggai Eran 	}
2404e51060f0SSean Hefty 
2405e8160e15SSean Hefty 	offset = cma_user_data_offset(listen_id);
2406628e5f6dSSean Hefty 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
24079595480cSHefty, Sean 	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
240885463316SParav Pandit 		conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
2409628e5f6dSSean Hefty 		event.param.ud.private_data = ib_event->private_data + offset;
2410628e5f6dSSean Hefty 		event.param.ud.private_data_len =
2411628e5f6dSSean Hefty 				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
2412628e5f6dSSean Hefty 	} else {
241385463316SParav Pandit 		conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
2414628e5f6dSSean Hefty 		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
2415628e5f6dSSean Hefty 				       ib_event->private_data, offset);
2416628e5f6dSSean Hefty 	}
2417e51060f0SSean Hefty 	if (!conn_id) {
2418e51060f0SSean Hefty 		ret = -ENOMEM;
2419f6a9d47aSJason Gunthorpe 		goto err_unlock;
2420e51060f0SSean Hefty 	}
2421e51060f0SSean Hefty 
2422de910bd9SOr Gerlitz 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
242341ab1cb7SParav Pandit 	ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
2424f6a9d47aSJason Gunthorpe 	if (ret) {
2425f6a9d47aSJason Gunthorpe 		destroy_id_handler_unlock(conn_id);
2426f6a9d47aSJason Gunthorpe 		goto err_unlock;
2427f6a9d47aSJason Gunthorpe 	}
2428e51060f0SSean Hefty 
2429e51060f0SSean Hefty 	conn_id->cm_id.ib = cm_id;
2430e51060f0SSean Hefty 	cm_id->context = conn_id;
2431e51060f0SSean Hefty 	cm_id->cm_handler = cma_ib_handler;
2432e51060f0SSean Hefty 
2433ed999f82SChuck Lever 	ret = cma_cm_event_handler(conn_id, &event);
2434f6a9d47aSJason Gunthorpe 	if (ret) {
2435f6a9d47aSJason Gunthorpe 		/* Destroy the CM ID by returning a non-zero value. */
2436f6a9d47aSJason Gunthorpe 		conn_id->cm_id.ib = NULL;
2437f6a9d47aSJason Gunthorpe 		mutex_unlock(&listen_id->handler_mutex);
2438f6a9d47aSJason Gunthorpe 		destroy_id_handler_unlock(conn_id);
2439f6a9d47aSJason Gunthorpe 		goto net_dev_put;
2440f6a9d47aSJason Gunthorpe 	}
2441f6a9d47aSJason Gunthorpe 
24422a7cec53SJason Gunthorpe 	if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
24432a7cec53SJason Gunthorpe 	    conn_id->id.qp_type != IB_QPT_UD) {
2444ed999f82SChuck Lever 		trace_cm_send_mra(cm_id->context);
2445ead595aeSSean Hefty 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
2446ed999f82SChuck Lever 	}
2447de910bd9SOr Gerlitz 	mutex_unlock(&conn_id->handler_mutex);
2448a1a733f6SKrishna Kumar 
2449f6a9d47aSJason Gunthorpe err_unlock:
2450de910bd9SOr Gerlitz 	mutex_unlock(&listen_id->handler_mutex);
24510b3ca768SHaggai Eran 
24520b3ca768SHaggai Eran net_dev_put:
24530b3ca768SHaggai Eran 	dev_put(net_dev);
24540b3ca768SHaggai Eran 
2455e51060f0SSean Hefty 	return ret;
2456e51060f0SSean Hefty }
2457e51060f0SSean Hefty 
2458cf53936fSSean Hefty __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
2459e51060f0SSean Hefty {
2460496ce3ceSSean Hefty 	if (addr->sa_family == AF_IB)
2461496ce3ceSSean Hefty 		return ((struct sockaddr_ib *) addr)->sib_sid;
2462496ce3ceSSean Hefty 
2463cf53936fSSean Hefty 	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
2464e51060f0SSean Hefty }
2465cf53936fSSean Hefty EXPORT_SYMBOL(rdma_get_service_id);
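/*
 * Illustrative example: for non-AF_IB addresses the service ID packs the
 * port space above the 16-bit port number. Assuming an id using RDMA_PS_TCP
 * (0x0106) and a port of 5000:
 *
 *	__be64 sid = rdma_get_service_id(id, addr);
 *	// be64_to_cpu(sid) == ((u64)0x0106 << 16) + 5000 == 0x01061388
 */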
2466e51060f0SSean Hefty 
2467411460acSParav Pandit void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
2468411460acSParav Pandit 		    union ib_gid *dgid)
2469411460acSParav Pandit {
2470411460acSParav Pandit 	struct rdma_addr *addr = &cm_id->route.addr;
2471411460acSParav Pandit 
2472411460acSParav Pandit 	if (!cm_id->device) {
2473411460acSParav Pandit 		if (sgid)
2474411460acSParav Pandit 			memset(sgid, 0, sizeof(*sgid));
2475411460acSParav Pandit 		if (dgid)
2476411460acSParav Pandit 			memset(dgid, 0, sizeof(*dgid));
2477411460acSParav Pandit 		return;
2478411460acSParav Pandit 	}
2479411460acSParav Pandit 
2480411460acSParav Pandit 	if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
2481411460acSParav Pandit 		if (sgid)
2482411460acSParav Pandit 			rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
2483411460acSParav Pandit 		if (dgid)
2484411460acSParav Pandit 			rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
2485411460acSParav Pandit 	} else {
2486411460acSParav Pandit 		if (sgid)
2487411460acSParav Pandit 			rdma_addr_get_sgid(&addr->dev_addr, sgid);
2488411460acSParav Pandit 		if (dgid)
2489411460acSParav Pandit 			rdma_addr_get_dgid(&addr->dev_addr, dgid);
2490411460acSParav Pandit 	}
2491411460acSParav Pandit }
2492411460acSParav Pandit EXPORT_SYMBOL(rdma_read_gids);
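/*
 * Illustrative sketch: either output pointer may be NULL if the caller only
 * needs one GID. For RoCE the GIDs are derived from the IP addresses above;
 * otherwise they are read from the resolved device address.
 *
 *	union ib_gid sgid, dgid;
 *
 *	rdma_read_gids(cm_id, &sgid, &dgid);
 */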
2493411460acSParav Pandit 
249407ebafbaSTom Tucker static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
249507ebafbaSTom Tucker {
249607ebafbaSTom Tucker 	struct rdma_id_private *id_priv = iw_id->context;
24977582df82SParav Pandit 	struct rdma_cm_event event = {};
249807ebafbaSTom Tucker 	int ret = 0;
249924d44a39SSteve Wise 	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
250024d44a39SSteve Wise 	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
250107ebafbaSTom Tucker 
250237e07cdaSBart Van Assche 	mutex_lock(&id_priv->handler_mutex);
25032a7cec53SJason Gunthorpe 	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
250437e07cdaSBart Van Assche 		goto out;
250507ebafbaSTom Tucker 
250607ebafbaSTom Tucker 	switch (iw_event->event) {
250707ebafbaSTom Tucker 	case IW_CM_EVENT_CLOSE:
2508a1b1b61fSSean Hefty 		event.event = RDMA_CM_EVENT_DISCONNECTED;
250907ebafbaSTom Tucker 		break;
251007ebafbaSTom Tucker 	case IW_CM_EVENT_CONNECT_REPLY:
251124d44a39SSteve Wise 		memcpy(cma_src_addr(id_priv), laddr,
251224d44a39SSteve Wise 		       rdma_addr_size(laddr));
251324d44a39SSteve Wise 		memcpy(cma_dst_addr(id_priv), raddr,
251424d44a39SSteve Wise 		       rdma_addr_size(raddr));
2515881a045fSSteve Wise 		switch (iw_event->status) {
2516881a045fSSteve Wise 		case 0:
2517a1b1b61fSSean Hefty 			event.event = RDMA_CM_EVENT_ESTABLISHED;
25183ebeebc3SKumar Sanghvi 			event.param.conn.initiator_depth = iw_event->ird;
25193ebeebc3SKumar Sanghvi 			event.param.conn.responder_resources = iw_event->ord;
252007ebafbaSTom Tucker 			break;
2521881a045fSSteve Wise 		case -ECONNRESET:
2522881a045fSSteve Wise 		case -ECONNREFUSED:
2523881a045fSSteve Wise 			event.event = RDMA_CM_EVENT_REJECTED;
2524881a045fSSteve Wise 			break;
2525881a045fSSteve Wise 		case -ETIMEDOUT:
2526881a045fSSteve Wise 			event.event = RDMA_CM_EVENT_UNREACHABLE;
2527881a045fSSteve Wise 			break;
2528881a045fSSteve Wise 		default:
2529881a045fSSteve Wise 			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
2530881a045fSSteve Wise 			break;
2531881a045fSSteve Wise 		}
2532881a045fSSteve Wise 		break;
253307ebafbaSTom Tucker 	case IW_CM_EVENT_ESTABLISHED:
2534a1b1b61fSSean Hefty 		event.event = RDMA_CM_EVENT_ESTABLISHED;
25353ebeebc3SKumar Sanghvi 		event.param.conn.initiator_depth = iw_event->ird;
25363ebeebc3SKumar Sanghvi 		event.param.conn.responder_resources = iw_event->ord;
253707ebafbaSTom Tucker 		break;
253807ebafbaSTom Tucker 	default:
2539671a6cc2SLeon Romanovsky 		goto out;
254007ebafbaSTom Tucker 	}
254107ebafbaSTom Tucker 
2542a1b1b61fSSean Hefty 	event.status = iw_event->status;
2543a1b1b61fSSean Hefty 	event.param.conn.private_data = iw_event->private_data;
2544a1b1b61fSSean Hefty 	event.param.conn.private_data_len = iw_event->private_data_len;
2545ed999f82SChuck Lever 	ret = cma_cm_event_handler(id_priv, &event);
254607ebafbaSTom Tucker 	if (ret) {
254707ebafbaSTom Tucker 		/* Destroy the CM ID by returning a non-zero value. */
254807ebafbaSTom Tucker 		id_priv->cm_id.iw = NULL;
2549f6a9d47aSJason Gunthorpe 		destroy_id_handler_unlock(id_priv);
255007ebafbaSTom Tucker 		return ret;
255107ebafbaSTom Tucker 	}
255207ebafbaSTom Tucker 
255337e07cdaSBart Van Assche out:
2554de910bd9SOr Gerlitz 	mutex_unlock(&id_priv->handler_mutex);
255507ebafbaSTom Tucker 	return ret;
255607ebafbaSTom Tucker }
255707ebafbaSTom Tucker 
255807ebafbaSTom Tucker static int iw_conn_req_handler(struct iw_cm_id *cm_id,
255907ebafbaSTom Tucker 			       struct iw_cm_event *iw_event)
256007ebafbaSTom Tucker {
256107ebafbaSTom Tucker 	struct rdma_id_private *listen_id, *conn_id;
25627582df82SParav Pandit 	struct rdma_cm_event event = {};
256337e07cdaSBart Van Assche 	int ret = -ECONNABORTED;
256424d44a39SSteve Wise 	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
256524d44a39SSteve Wise 	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
256607ebafbaSTom Tucker 
25677582df82SParav Pandit 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
25687582df82SParav Pandit 	event.param.conn.private_data = iw_event->private_data;
25697582df82SParav Pandit 	event.param.conn.private_data_len = iw_event->private_data_len;
25707582df82SParav Pandit 	event.param.conn.initiator_depth = iw_event->ird;
25717582df82SParav Pandit 	event.param.conn.responder_resources = iw_event->ord;
25727582df82SParav Pandit 
257307ebafbaSTom Tucker 	listen_id = cm_id->context;
257437e07cdaSBart Van Assche 
257537e07cdaSBart Van Assche 	mutex_lock(&listen_id->handler_mutex);
2576d490ee52SJason Gunthorpe 	if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
257737e07cdaSBart Van Assche 		goto out;
257807ebafbaSTom Tucker 
257907ebafbaSTom Tucker 	/* Create a new RDMA id for the new IW CM ID */
2580b09c4d70SLeon Romanovsky 	conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
2581fa20105eSGuy Shapiro 				   listen_id->id.event_handler,
2582b09c4d70SLeon Romanovsky 				   listen_id->id.context, RDMA_PS_TCP,
2583b09c4d70SLeon Romanovsky 				   IB_QPT_RC, listen_id);
2584b09c4d70SLeon Romanovsky 	if (IS_ERR(conn_id)) {
258507ebafbaSTom Tucker 		ret = -ENOMEM;
258607ebafbaSTom Tucker 		goto out;
258707ebafbaSTom Tucker 	}
2588de910bd9SOr Gerlitz 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
2589550e5ca7SNir Muchtar 	conn_id->state = RDMA_CM_CONNECT;
259007ebafbaSTom Tucker 
2591575c7e58SParav Pandit 	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
259207ebafbaSTom Tucker 	if (ret) {
2593f6a9d47aSJason Gunthorpe 		mutex_unlock(&listen_id->handler_mutex);
2594f6a9d47aSJason Gunthorpe 		destroy_id_handler_unlock(conn_id);
2595f6a9d47aSJason Gunthorpe 		return ret;
259607ebafbaSTom Tucker 	}
259707ebafbaSTom Tucker 
259841ab1cb7SParav Pandit 	ret = cma_iw_acquire_dev(conn_id, listen_id);
259907ebafbaSTom Tucker 	if (ret) {
2600f6a9d47aSJason Gunthorpe 		mutex_unlock(&listen_id->handler_mutex);
2601f6a9d47aSJason Gunthorpe 		destroy_id_handler_unlock(conn_id);
2602f6a9d47aSJason Gunthorpe 		return ret;
260307ebafbaSTom Tucker 	}
260407ebafbaSTom Tucker 
260507ebafbaSTom Tucker 	conn_id->cm_id.iw = cm_id;
260607ebafbaSTom Tucker 	cm_id->context = conn_id;
260707ebafbaSTom Tucker 	cm_id->cm_handler = cma_iw_handler;
260807ebafbaSTom Tucker 
260924d44a39SSteve Wise 	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
261024d44a39SSteve Wise 	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
261107ebafbaSTom Tucker 
2612ed999f82SChuck Lever 	ret = cma_cm_event_handler(conn_id, &event);
261307ebafbaSTom Tucker 	if (ret) {
261407ebafbaSTom Tucker 		/* User wants to destroy the CM ID */
261507ebafbaSTom Tucker 		conn_id->cm_id.iw = NULL;
2616b66f31efSBart Van Assche 		mutex_unlock(&listen_id->handler_mutex);
2617f6a9d47aSJason Gunthorpe 		destroy_id_handler_unlock(conn_id);
2618b66f31efSBart Van Assche 		return ret;
261907ebafbaSTom Tucker 	}
262007ebafbaSTom Tucker 
2621de910bd9SOr Gerlitz 	mutex_unlock(&conn_id->handler_mutex);
2622de910bd9SOr Gerlitz 
262307ebafbaSTom Tucker out:
2624de910bd9SOr Gerlitz 	mutex_unlock(&listen_id->handler_mutex);
262507ebafbaSTom Tucker 	return ret;
262607ebafbaSTom Tucker }
262707ebafbaSTom Tucker 
2628e51060f0SSean Hefty static int cma_ib_listen(struct rdma_id_private *id_priv)
2629e51060f0SSean Hefty {
2630e51060f0SSean Hefty 	struct sockaddr *addr;
26310c9361fcSJack Morgenstein 	struct ib_cm_id	*id;
2632e51060f0SSean Hefty 	__be64 svc_id;
2633e51060f0SSean Hefty 
2634f4753834SSean Hefty 	addr = cma_src_addr(id_priv);
2635cf53936fSSean Hefty 	svc_id = rdma_get_service_id(&id_priv->id, addr);
263685463316SParav Pandit 	id = ib_cm_insert_listen(id_priv->id.device,
263785463316SParav Pandit 				 cma_ib_req_handler, svc_id);
263851efe394SHaggai Eran 	if (IS_ERR(id))
263951efe394SHaggai Eran 		return PTR_ERR(id);
264051efe394SHaggai Eran 	id_priv->cm_id.ib = id;
2641e51060f0SSean Hefty 
264251efe394SHaggai Eran 	return 0;
2643e51060f0SSean Hefty }
2644e51060f0SSean Hefty 
264507ebafbaSTom Tucker static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
264607ebafbaSTom Tucker {
264707ebafbaSTom Tucker 	int ret;
26480c9361fcSJack Morgenstein 	struct iw_cm_id	*id;
264907ebafbaSTom Tucker 
26500c9361fcSJack Morgenstein 	id = iw_create_cm_id(id_priv->id.device,
265107ebafbaSTom Tucker 			     iw_conn_req_handler,
265207ebafbaSTom Tucker 			     id_priv);
26530c9361fcSJack Morgenstein 	if (IS_ERR(id))
26540c9361fcSJack Morgenstein 		return PTR_ERR(id);
26550c9361fcSJack Morgenstein 
2656ca0c448dSHåkon Bugge 	mutex_lock(&id_priv->qp_mutex);
265768cdba06SSteve Wise 	id->tos = id_priv->tos;
2658926ba19bSSteve Wise 	id->tos_set = id_priv->tos_set;
2659ca0c448dSHåkon Bugge 	mutex_unlock(&id_priv->qp_mutex);
2660e35ecb46SBernard Metzler 	id->afonly = id_priv->afonly;
26610c9361fcSJack Morgenstein 	id_priv->cm_id.iw = id;
266207ebafbaSTom Tucker 
266324d44a39SSteve Wise 	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
266424d44a39SSteve Wise 	       rdma_addr_size(cma_src_addr(id_priv)));
266507ebafbaSTom Tucker 
266607ebafbaSTom Tucker 	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
266707ebafbaSTom Tucker 
266807ebafbaSTom Tucker 	if (ret) {
266907ebafbaSTom Tucker 		iw_destroy_cm_id(id_priv->cm_id.iw);
267007ebafbaSTom Tucker 		id_priv->cm_id.iw = NULL;
267107ebafbaSTom Tucker 	}
267207ebafbaSTom Tucker 
267307ebafbaSTom Tucker 	return ret;
267407ebafbaSTom Tucker }
267507ebafbaSTom Tucker 
2676e51060f0SSean Hefty static int cma_listen_handler(struct rdma_cm_id *id,
2677e51060f0SSean Hefty 			      struct rdma_cm_event *event)
2678e51060f0SSean Hefty {
2679e51060f0SSean Hefty 	struct rdma_id_private *id_priv = id->context;
2680e51060f0SSean Hefty 
2681d54f23c0SJason Gunthorpe 	/* Listening IDs are always destroyed on removal */
2682d54f23c0SJason Gunthorpe 	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
2683d54f23c0SJason Gunthorpe 		return -1;
2684d54f23c0SJason Gunthorpe 
2685e51060f0SSean Hefty 	id->context = id_priv->id.context;
2686e51060f0SSean Hefty 	id->event_handler = id_priv->id.event_handler;
2687ed999f82SChuck Lever 	trace_cm_event_handler(id_priv, event);
2688e51060f0SSean Hefty 	return id_priv->id.event_handler(id, event);
2689e51060f0SSean Hefty }
2690e51060f0SSean Hefty 
2691c80a0c52SLeon Romanovsky static int cma_listen_on_dev(struct rdma_id_private *id_priv,
2692dd37d2f5SJason Gunthorpe 			     struct cma_device *cma_dev,
2693dd37d2f5SJason Gunthorpe 			     struct rdma_id_private **to_destroy)
2694e51060f0SSean Hefty {
2695e51060f0SSean Hefty 	struct rdma_id_private *dev_id_priv;
2696fa20105eSGuy Shapiro 	struct net *net = id_priv->id.route.addr.dev_addr.net;
2697e51060f0SSean Hefty 	int ret;
2698e51060f0SSean Hefty 
2699730c8912SMark Zhang 	lockdep_assert_held(&lock);
2700730c8912SMark Zhang 
2701dd37d2f5SJason Gunthorpe 	*to_destroy = NULL;
270272219ceaSMichael Wang 	if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
2703c80a0c52SLeon Romanovsky 		return 0;
270494d0c939SSean Hefty 
2705b09c4d70SLeon Romanovsky 	dev_id_priv =
2706b09c4d70SLeon Romanovsky 		__rdma_create_id(net, cma_listen_handler, id_priv,
2707b09c4d70SLeon Romanovsky 				 id_priv->id.ps, id_priv->id.qp_type, id_priv);
2708b09c4d70SLeon Romanovsky 	if (IS_ERR(dev_id_priv))
2709c80a0c52SLeon Romanovsky 		return PTR_ERR(dev_id_priv);
2710e51060f0SSean Hefty 
2711550e5ca7SNir Muchtar 	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
2712f4753834SSean Hefty 	memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
2713f4753834SSean Hefty 	       rdma_addr_size(cma_src_addr(id_priv)));
2714e51060f0SSean Hefty 
2715045959dbSMatan Barak 	_cma_attach_to_dev(dev_id_priv, cma_dev);
2716cb5cd0eaSShay Drory 	rdma_restrack_add(&dev_id_priv->res);
2717e368d23fSParav Pandit 	cma_id_get(id_priv);
2718d02d1f53SSean Hefty 	dev_id_priv->internal_id = 1;
27195b0ec991SSean Hefty 	dev_id_priv->afonly = id_priv->afonly;
2720ca0c448dSHåkon Bugge 	mutex_lock(&id_priv->qp_mutex);
27219491128fSSteve Wise 	dev_id_priv->tos_set = id_priv->tos_set;
27229491128fSSteve Wise 	dev_id_priv->tos = id_priv->tos;
2723ca0c448dSHåkon Bugge 	mutex_unlock(&id_priv->qp_mutex);
2724e51060f0SSean Hefty 
2725b09c4d70SLeon Romanovsky 	ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
2726e51060f0SSean Hefty 	if (ret)
2727c80a0c52SLeon Romanovsky 		goto err_listen;
272899cfddb8SJason Gunthorpe 	list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list);
2729c80a0c52SLeon Romanovsky 	return 0;
2730c80a0c52SLeon Romanovsky err_listen:
2731dd37d2f5SJason Gunthorpe 	/* Caller must destroy this after releasing the lock */
2732dd37d2f5SJason Gunthorpe 	*to_destroy = dev_id_priv;
2733c80a0c52SLeon Romanovsky 	dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret);
2734c80a0c52SLeon Romanovsky 	return ret;
2735e51060f0SSean Hefty }
2736e51060f0SSean Hefty 
2737c80a0c52SLeon Romanovsky static int cma_listen_on_all(struct rdma_id_private *id_priv)
2738e51060f0SSean Hefty {
2739dd37d2f5SJason Gunthorpe 	struct rdma_id_private *to_destroy;
2740e51060f0SSean Hefty 	struct cma_device *cma_dev;
2741c80a0c52SLeon Romanovsky 	int ret;
2742e51060f0SSean Hefty 
2743e51060f0SSean Hefty 	mutex_lock(&lock);
274499cfddb8SJason Gunthorpe 	list_add_tail(&id_priv->listen_any_item, &listen_any_list);
2745c80a0c52SLeon Romanovsky 	list_for_each_entry(cma_dev, &dev_list, list) {
2746dd37d2f5SJason Gunthorpe 		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
2747dd37d2f5SJason Gunthorpe 		if (ret) {
2748dd37d2f5SJason Gunthorpe 			/* Prevent racing with cma_process_remove() */
2749dd37d2f5SJason Gunthorpe 			if (to_destroy)
275099cfddb8SJason Gunthorpe 				list_del_init(&to_destroy->device_item);
2751c80a0c52SLeon Romanovsky 			goto err_listen;
2752c80a0c52SLeon Romanovsky 		}
2753dd37d2f5SJason Gunthorpe 	}
2754e51060f0SSean Hefty 	mutex_unlock(&lock);
2755c80a0c52SLeon Romanovsky 	return 0;
2756c80a0c52SLeon Romanovsky 
2757c80a0c52SLeon Romanovsky err_listen:
2758ca465e1fSTao Liu 	_cma_cancel_listens(id_priv);
2759c80a0c52SLeon Romanovsky 	mutex_unlock(&lock);
2760dd37d2f5SJason Gunthorpe 	if (to_destroy)
2761dd37d2f5SJason Gunthorpe 		rdma_destroy_id(&to_destroy->id);
2762c80a0c52SLeon Romanovsky 	return ret;
2763e51060f0SSean Hefty }
2764e51060f0SSean Hefty 
2765a81c994dSSean Hefty void rdma_set_service_type(struct rdma_cm_id *id, int tos)
2766a81c994dSSean Hefty {
2767a81c994dSSean Hefty 	struct rdma_id_private *id_priv;
2768a81c994dSSean Hefty 
2769a81c994dSSean Hefty 	id_priv = container_of(id, struct rdma_id_private, id);
2770ca0c448dSHåkon Bugge 	mutex_lock(&id_priv->qp_mutex);
2771a81c994dSSean Hefty 	id_priv->tos = (u8) tos;
277289052d78SMajd Dibbiny 	id_priv->tos_set = true;
2773ca0c448dSHåkon Bugge 	mutex_unlock(&id_priv->qp_mutex);
2774a81c994dSSean Hefty }
2775a81c994dSSean Hefty EXPORT_SYMBOL(rdma_set_service_type);
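/*
 * Illustrative sketch: the ToS value only takes effect when the route is
 * resolved (for example, as the path qos_class for an AF_INET id in
 * cma_query_ib_route() below), so it should be set before
 * rdma_resolve_route(). The ToS and timeout values are arbitrary examples.
 *
 *	rdma_set_service_type(id, 32);
 *	ret = rdma_resolve_route(id, 2000);	// timeout in msecs
 */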
2776a81c994dSSean Hefty 
27772c1619edSDanit Goldberg /**
27782c1619edSDanit Goldberg  * rdma_set_ack_timeout() - Set the ack timeout of QP associated
27792c1619edSDanit Goldberg  *                          with a connection identifier.
27802c1619edSDanit Goldberg  * @id: Communication identifier to associate the ack timeout with.
27812c1619edSDanit Goldberg  * @timeout: Ack timeout to set on the QP, expressed as 4.096 * 2^(timeout) usec.
27822c1619edSDanit Goldberg  *
27832c1619edSDanit Goldberg  * This function should be called before rdma_connect() on the active side,
27842c1619edSDanit Goldberg  * and before rdma_accept() on the passive side. It is applicable to the
27852c1619edSDanit Goldberg  * primary path only. The timeout affects only the local side of the QP; it is
2786e1ee1e62SDag Moxnes  * not negotiated with the remote side, and zero disables the timer. If it is
2787e1ee1e62SDag Moxnes  * set before rdma_resolve_route, the value will also be used to determine
2788e1ee1e62SDag Moxnes  * PacketLifeTime for RoCE.
27892c1619edSDanit Goldberg  *
27902c1619edSDanit Goldberg  * Return: 0 for success
27912c1619edSDanit Goldberg  */
27922c1619edSDanit Goldberg int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
27932c1619edSDanit Goldberg {
27942c1619edSDanit Goldberg 	struct rdma_id_private *id_priv;
27952c1619edSDanit Goldberg 
2796748663c8SHåkon Bugge 	if (id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_INI)
27972c1619edSDanit Goldberg 		return -EINVAL;
27982c1619edSDanit Goldberg 
27992c1619edSDanit Goldberg 	id_priv = container_of(id, struct rdma_id_private, id);
2800ca0c448dSHåkon Bugge 	mutex_lock(&id_priv->qp_mutex);
28012c1619edSDanit Goldberg 	id_priv->timeout = timeout;
28022c1619edSDanit Goldberg 	id_priv->timeout_set = true;
2803ca0c448dSHåkon Bugge 	mutex_unlock(&id_priv->qp_mutex);
28042c1619edSDanit Goldberg 
28052c1619edSDanit Goldberg 	return 0;
28062c1619edSDanit Goldberg }
28072c1619edSDanit Goldberg EXPORT_SYMBOL(rdma_set_ack_timeout);
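/*
 * Illustrative example: the timeout argument is the IBTA local ACK timeout
 * exponent, so the effective wait is roughly 4.096 usec * 2^timeout.
 *
 *	rdma_set_ack_timeout(id, 14);	// ~4.096 us * 2^14 ~= 67 ms
 *	rdma_set_ack_timeout(id, 0);	// zero disables the timer
 */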
28082c1619edSDanit Goldberg 
28093aeffc46SHåkon Bugge /**
28103aeffc46SHåkon Bugge  * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
28113aeffc46SHåkon Bugge  *			      QP associated with a connection identifier.
28123aeffc46SHåkon Bugge  * @id: Communication identifier to associate the minimum RNR timer with.
28133aeffc46SHåkon Bugge  * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
28143aeffc46SHåkon Bugge  *		   Timer Field" in the IBTA specification.
28153aeffc46SHåkon Bugge  *
28163aeffc46SHåkon Bugge  * This function should be called before rdma_connect() on the active
28173aeffc46SHåkon Bugge  * side, and before rdma_accept() on the passive side. The timer value
28183aeffc46SHåkon Bugge  * will be associated with the local QP. When the QP receives a send it is
28193aeffc46SHåkon Bugge  * not ready to handle, typically because the receive queue is empty, an RNR
28203aeffc46SHåkon Bugge  * Retry NAK is returned to the requester with the min_rnr_timer
28213aeffc46SHåkon Bugge  * encoded. The requester will then wait at least the time specified
28223aeffc46SHåkon Bugge  * in the NAK before retrying. The default is zero, which translates
28233aeffc46SHåkon Bugge  * to a minimum RNR Timer value of 655 ms.
28243aeffc46SHåkon Bugge  *
28253aeffc46SHåkon Bugge  * Return: 0 for success
28263aeffc46SHåkon Bugge  */
28273aeffc46SHåkon Bugge int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
28283aeffc46SHåkon Bugge {
28293aeffc46SHåkon Bugge 	struct rdma_id_private *id_priv;
28303aeffc46SHåkon Bugge 
28313aeffc46SHåkon Bugge 	/* It is a five-bit value */
28323aeffc46SHåkon Bugge 	if (min_rnr_timer & 0xe0)
28333aeffc46SHåkon Bugge 		return -EINVAL;
28343aeffc46SHåkon Bugge 
28353aeffc46SHåkon Bugge 	if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
28363aeffc46SHåkon Bugge 		return -EINVAL;
28373aeffc46SHåkon Bugge 
28383aeffc46SHåkon Bugge 	id_priv = container_of(id, struct rdma_id_private, id);
2839ca0c448dSHåkon Bugge 	mutex_lock(&id_priv->qp_mutex);
28403aeffc46SHåkon Bugge 	id_priv->min_rnr_timer = min_rnr_timer;
28413aeffc46SHåkon Bugge 	id_priv->min_rnr_timer_set = true;
2842ca0c448dSHåkon Bugge 	mutex_unlock(&id_priv->qp_mutex);
28433aeffc46SHåkon Bugge 
28443aeffc46SHåkon Bugge 	return 0;
28453aeffc46SHåkon Bugge }
28463aeffc46SHåkon Bugge EXPORT_SYMBOL(rdma_set_min_rnr_timer);
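/*
 * Illustrative sketch: the argument is the 5-bit IBTA RNR NAK timer encoding,
 * not a duration; the named encodings are in enum ib_rnr_timeout
 * (include/rdma/ib_verbs.h). For a receiver that may be slow to post receive
 * WRs:
 *
 *	rdma_set_min_rnr_timer(id, IB_RNR_TIMER_122_88);	// ~122.88 ms
 */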
28473aeffc46SHåkon Bugge 
2848ccae0447SMark Zhang static int route_set_path_rec_inbound(struct cma_work *work,
28495a374949SMark Zhang 				      struct sa_path_rec *path_rec)
28505a374949SMark Zhang {
28515a374949SMark Zhang 	struct rdma_route *route = &work->id->id.route;
28525a374949SMark Zhang 
28535a374949SMark Zhang 	if (!route->path_rec_inbound) {
28545a374949SMark Zhang 		route->path_rec_inbound =
28555a374949SMark Zhang 			kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
28565a374949SMark Zhang 		if (!route->path_rec_inbound)
2857ccae0447SMark Zhang 			return -ENOMEM;
28585a374949SMark Zhang 	}
28595a374949SMark Zhang 
28605a374949SMark Zhang 	*route->path_rec_inbound = *path_rec;
2861ccae0447SMark Zhang 	return 0;
28625a374949SMark Zhang }
28635a374949SMark Zhang 
2864ccae0447SMark Zhang static int route_set_path_rec_outbound(struct cma_work *work,
28655a374949SMark Zhang 				       struct sa_path_rec *path_rec)
28665a374949SMark Zhang {
28675a374949SMark Zhang 	struct rdma_route *route = &work->id->id.route;
28685a374949SMark Zhang 
28695a374949SMark Zhang 	if (!route->path_rec_outbound) {
28705a374949SMark Zhang 		route->path_rec_outbound =
28715a374949SMark Zhang 			kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
28725a374949SMark Zhang 		if (!route->path_rec_outbound)
2873ccae0447SMark Zhang 			return -ENOMEM;
28745a374949SMark Zhang 	}
28755a374949SMark Zhang 
28765a374949SMark Zhang 	*route->path_rec_outbound = *path_rec;
2877ccae0447SMark Zhang 	return 0;
28785a374949SMark Zhang }
28795a374949SMark Zhang 
2880c2f8fc4eSDasaratharaman Chandramouli static void cma_query_handler(int status, struct sa_path_rec *path_rec,
2881ccae0447SMark Zhang 			      unsigned int num_prs, void *context)
2882e51060f0SSean Hefty {
2883e51060f0SSean Hefty 	struct cma_work *work = context;
2884e51060f0SSean Hefty 	struct rdma_route *route;
28855a374949SMark Zhang 	int i;
2886e51060f0SSean Hefty 
2887e51060f0SSean Hefty 	route = &work->id->id.route;
2888e51060f0SSean Hefty 
28895a374949SMark Zhang 	if (status)
28905a374949SMark Zhang 		goto fail;
28915a374949SMark Zhang 
28925a374949SMark Zhang 	for (i = 0; i < num_prs; i++) {
28935a374949SMark Zhang 		if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
28945a374949SMark Zhang 			*route->path_rec = path_rec[i];
28955a374949SMark Zhang 		else if (path_rec[i].flags & IB_PATH_INBOUND)
2896ccae0447SMark Zhang 			status = route_set_path_rec_inbound(work, &path_rec[i]);
28975a374949SMark Zhang 		else if (path_rec[i].flags & IB_PATH_OUTBOUND)
2898ccae0447SMark Zhang 			status = route_set_path_rec_outbound(work,
2899ccae0447SMark Zhang 							     &path_rec[i]);
2900ccae0447SMark Zhang 		else
29015a374949SMark Zhang 			status = -EINVAL;
2902ccae0447SMark Zhang 
2903ccae0447SMark Zhang 		if (status)
29045a374949SMark Zhang 			goto fail;
29055a374949SMark Zhang 	}
29065a374949SMark Zhang 
2907bf9a9928SMark Zhang 	route->num_pri_alt_paths = 1;
29085a374949SMark Zhang 	queue_work(cma_wq, &work->work);
29095a374949SMark Zhang 	return;
29105a374949SMark Zhang 
29115a374949SMark Zhang fail:
2912550e5ca7SNir Muchtar 	work->old_state = RDMA_CM_ROUTE_QUERY;
2913550e5ca7SNir Muchtar 	work->new_state = RDMA_CM_ADDR_RESOLVED;
2914e51060f0SSean Hefty 	work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
29158f0472d3SSean Hefty 	work->event.status = status;
2916498683c6SMoni Shoua 	pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
2917498683c6SMoni Shoua 			     status);
2918e51060f0SSean Hefty 	queue_work(cma_wq, &work->work);
2919e51060f0SSean Hefty }
2920e51060f0SSean Hefty 
2921dbace111SLeon Romanovsky static int cma_query_ib_route(struct rdma_id_private *id_priv,
2922dbace111SLeon Romanovsky 			      unsigned long timeout_ms, struct cma_work *work)
2923e51060f0SSean Hefty {
2924f4753834SSean Hefty 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2925c2f8fc4eSDasaratharaman Chandramouli 	struct sa_path_rec path_rec;
2926a81c994dSSean Hefty 	ib_sa_comp_mask comp_mask;
2927a81c994dSSean Hefty 	struct sockaddr_in6 *sin6;
2928f68194caSSean Hefty 	struct sockaddr_ib *sib;
2929e51060f0SSean Hefty 
2930e51060f0SSean Hefty 	memset(&path_rec, 0, sizeof path_rec);
29314c33bd19SDasaratharaman Chandramouli 
29324c33bd19SDasaratharaman Chandramouli 	if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
29334c33bd19SDasaratharaman Chandramouli 		path_rec.rec_type = SA_PATH_REC_TYPE_OPA;
29344c33bd19SDasaratharaman Chandramouli 	else
29359fdca4daSDasaratharaman Chandramouli 		path_rec.rec_type = SA_PATH_REC_TYPE_IB;
2936f4753834SSean Hefty 	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
2937f4753834SSean Hefty 	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
2938f4753834SSean Hefty 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2939e51060f0SSean Hefty 	path_rec.numb_path = 1;
2940962063e6SSean Hefty 	path_rec.reversible = 1;
2941d3957b86SMajd Dibbiny 	path_rec.service_id = rdma_get_service_id(&id_priv->id,
2942d3957b86SMajd Dibbiny 						  cma_dst_addr(id_priv));
2943a81c994dSSean Hefty 
2944a81c994dSSean Hefty 	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
2945a81c994dSSean Hefty 		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
2946a81c994dSSean Hefty 		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
2947a81c994dSSean Hefty 
2948f68194caSSean Hefty 	switch (cma_family(id_priv)) {
2949f68194caSSean Hefty 	case AF_INET:
2950a81c994dSSean Hefty 		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
2951a81c994dSSean Hefty 		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
2952f68194caSSean Hefty 		break;
2953f68194caSSean Hefty 	case AF_INET6:
2954f4753834SSean Hefty 		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
2955a81c994dSSean Hefty 		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
2956a81c994dSSean Hefty 		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2957f68194caSSean Hefty 		break;
2958f68194caSSean Hefty 	case AF_IB:
2959f68194caSSean Hefty 		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
2960f68194caSSean Hefty 		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
2961f68194caSSean Hefty 		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2962f68194caSSean Hefty 		break;
2963a81c994dSSean Hefty 	}
2964e51060f0SSean Hefty 
2965c1a0b23bSMichael S. Tsirkin 	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
2966e51060f0SSean Hefty 					       id_priv->id.port_num, &path_rec,
2967a81c994dSSean Hefty 					       comp_mask, timeout_ms,
2968a81c994dSSean Hefty 					       GFP_KERNEL, cma_query_handler,
2969a81c994dSSean Hefty 					       work, &id_priv->query);
2970e51060f0SSean Hefty 
2971e51060f0SSean Hefty 	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
2972e51060f0SSean Hefty }
2973e51060f0SSean Hefty 
2974fe454dc3SAvihai Horon static void cma_iboe_join_work_handler(struct work_struct *work)
2975fe454dc3SAvihai Horon {
2976fe454dc3SAvihai Horon 	struct cma_multicast *mc =
2977fe454dc3SAvihai Horon 		container_of(work, struct cma_multicast, iboe_join.work);
2978fe454dc3SAvihai Horon 	struct rdma_cm_event *event = &mc->iboe_join.event;
2979fe454dc3SAvihai Horon 	struct rdma_id_private *id_priv = mc->id_priv;
2980fe454dc3SAvihai Horon 	int ret;
2981fe454dc3SAvihai Horon 
2982fe454dc3SAvihai Horon 	mutex_lock(&id_priv->handler_mutex);
2983fe454dc3SAvihai Horon 	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
2984fe454dc3SAvihai Horon 	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
2985fe454dc3SAvihai Horon 		goto out_unlock;
2986fe454dc3SAvihai Horon 
2987fe454dc3SAvihai Horon 	ret = cma_cm_event_handler(id_priv, event);
2988fe454dc3SAvihai Horon 	WARN_ON(ret);
2989fe454dc3SAvihai Horon 
2990fe454dc3SAvihai Horon out_unlock:
2991fe454dc3SAvihai Horon 	mutex_unlock(&id_priv->handler_mutex);
2992fe454dc3SAvihai Horon 	if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
2993fe454dc3SAvihai Horon 		rdma_destroy_ah_attr(&event->param.ud.ah_attr);
2994fe454dc3SAvihai Horon }
2995fe454dc3SAvihai Horon 
2996c4028958SDavid Howells static void cma_work_handler(struct work_struct *_work)
2997e51060f0SSean Hefty {
2998c4028958SDavid Howells 	struct cma_work *work = container_of(_work, struct cma_work, work);
2999e51060f0SSean Hefty 	struct rdma_id_private *id_priv = work->id;
3000e51060f0SSean Hefty 
3001de910bd9SOr Gerlitz 	mutex_lock(&id_priv->handler_mutex);
30027e85bcdaSJason Gunthorpe 	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
30037e85bcdaSJason Gunthorpe 	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
30047e85bcdaSJason Gunthorpe 		goto out_unlock;
30057e85bcdaSJason Gunthorpe 	if (work->old_state != 0 || work->new_state != 0) {
3006e51060f0SSean Hefty 		if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
3007f6a9d47aSJason Gunthorpe 			goto out_unlock;
3008e51060f0SSean Hefty 	}
3009e51060f0SSean Hefty 
3010e51060f0SSean Hefty 	if (cma_cm_event_handler(id_priv, &work->event)) {
3011e51060f0SSean Hefty 		cma_id_put(id_priv);
3012e51060f0SSean Hefty 		destroy_id_handler_unlock(id_priv);
3013e51060f0SSean Hefty 		goto out_free;
3014e51060f0SSean Hefty 	}
3015f6a9d47aSJason Gunthorpe 
3016f6a9d47aSJason Gunthorpe out_unlock:
3017de910bd9SOr Gerlitz 	mutex_unlock(&id_priv->handler_mutex);
3018e368d23fSParav Pandit 	cma_id_put(id_priv);
3019f6a9d47aSJason Gunthorpe out_free:
3020b5de0c60SJason Gunthorpe 	if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
3021b5de0c60SJason Gunthorpe 		rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
3022dd5bdff8SOr Gerlitz 	kfree(work);
3023dd5bdff8SOr Gerlitz }
3024dd5bdff8SOr Gerlitz 
3025981b5a23SParav Pandit static void cma_init_resolve_route_work(struct cma_work *work,
3026981b5a23SParav Pandit 					struct rdma_id_private *id_priv)
3027981b5a23SParav Pandit {
3028981b5a23SParav Pandit 	work->id = id_priv;
3029981b5a23SParav Pandit 	INIT_WORK(&work->work, cma_work_handler);
3030981b5a23SParav Pandit 	work->old_state = RDMA_CM_ROUTE_QUERY;
3031981b5a23SParav Pandit 	work->new_state = RDMA_CM_ROUTE_RESOLVED;
3032981b5a23SParav Pandit 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
3033981b5a23SParav Pandit }
3034981b5a23SParav Pandit 
3035081ea519SParav Pandit static void enqueue_resolve_addr_work(struct cma_work *work,
3036981b5a23SParav Pandit 				      struct rdma_id_private *id_priv)
3037981b5a23SParav Pandit {
3038e368d23fSParav Pandit 	/* Balances with cma_id_put() in cma_work_handler */
3039e368d23fSParav Pandit 	cma_id_get(id_priv);
3040081ea519SParav Pandit 
3041981b5a23SParav Pandit 	work->id = id_priv;
3042981b5a23SParav Pandit 	INIT_WORK(&work->work, cma_work_handler);
3043981b5a23SParav Pandit 	work->old_state = RDMA_CM_ADDR_QUERY;
3044981b5a23SParav Pandit 	work->new_state = RDMA_CM_ADDR_RESOLVED;
3045981b5a23SParav Pandit 	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
3046081ea519SParav Pandit 
3047081ea519SParav Pandit 	queue_work(cma_wq, &work->work);
3048981b5a23SParav Pandit }
3049981b5a23SParav Pandit 
3050dbace111SLeon Romanovsky static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
3051dbace111SLeon Romanovsky 				unsigned long timeout_ms)
3052e51060f0SSean Hefty {
3053e51060f0SSean Hefty 	struct rdma_route *route = &id_priv->id.route;
3054e51060f0SSean Hefty 	struct cma_work *work;
3055e51060f0SSean Hefty 	int ret;
3056e51060f0SSean Hefty 
3057e51060f0SSean Hefty 	work = kzalloc(sizeof *work, GFP_KERNEL);
3058e51060f0SSean Hefty 	if (!work)
3059e51060f0SSean Hefty 		return -ENOMEM;
3060e51060f0SSean Hefty 
3061981b5a23SParav Pandit 	cma_init_resolve_route_work(work, id_priv);
3062e51060f0SSean Hefty 
306374f160eaSGerd Rausch 	if (!route->path_rec)
3064e51060f0SSean Hefty 		route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
3065e51060f0SSean Hefty 	if (!route->path_rec) {
3066e51060f0SSean Hefty 		ret = -ENOMEM;
3067e51060f0SSean Hefty 		goto err1;
3068e51060f0SSean Hefty 	}
3069e51060f0SSean Hefty 
3070e51060f0SSean Hefty 	ret = cma_query_ib_route(id_priv, timeout_ms, work);
3071e51060f0SSean Hefty 	if (ret)
3072e51060f0SSean Hefty 		goto err2;
3073e51060f0SSean Hefty 
3074e51060f0SSean Hefty 	return 0;
3075e51060f0SSean Hefty err2:
3076e51060f0SSean Hefty 	kfree(route->path_rec);
3077e51060f0SSean Hefty 	route->path_rec = NULL;
3078e51060f0SSean Hefty err1:
3079e51060f0SSean Hefty 	kfree(work);
3080e51060f0SSean Hefty 	return ret;
3081e51060f0SSean Hefty }
3082e51060f0SSean Hefty 
30839327c7afSParav Pandit static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
30849327c7afSParav Pandit 					   unsigned long supported_gids,
30859327c7afSParav Pandit 					   enum ib_gid_type default_gid)
30869327c7afSParav Pandit {
30879327c7afSParav Pandit 	if ((network_type == RDMA_NETWORK_IPV4 ||
30889327c7afSParav Pandit 	     network_type == RDMA_NETWORK_IPV6) &&
30899327c7afSParav Pandit 	    test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
30909327c7afSParav Pandit 		return IB_GID_TYPE_ROCE_UDP_ENCAP;
30919327c7afSParav Pandit 
30929327c7afSParav Pandit 	return default_gid;
30939327c7afSParav Pandit }
30949327c7afSParav Pandit 
30959327c7afSParav Pandit /*
30969327c7afSParav Pandit  * cma_iboe_set_path_rec_l2_fields() is a helper function that sets the
30979327c7afSParav Pandit  * path record type based on the GID type.
30989327c7afSParav Pandit  * It also sets up the other L2 fields of the path record, including the
30999327c7afSParav Pandit  * destination MAC address and netdev ifindex.
31009327c7afSParav Pandit  * It returns the netdev of the bound interface for this path record entry.
31019327c7afSParav Pandit  */
31029327c7afSParav Pandit static struct net_device *
31039327c7afSParav Pandit cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
31049327c7afSParav Pandit {
31059327c7afSParav Pandit 	struct rdma_route *route = &id_priv->id.route;
31069327c7afSParav Pandit 	enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
31079327c7afSParav Pandit 	struct rdma_addr *addr = &route->addr;
31089327c7afSParav Pandit 	unsigned long supported_gids;
31099327c7afSParav Pandit 	struct net_device *ndev;
31109327c7afSParav Pandit 
31119327c7afSParav Pandit 	if (!addr->dev_addr.bound_dev_if)
31129327c7afSParav Pandit 		return NULL;
31139327c7afSParav Pandit 
31149327c7afSParav Pandit 	ndev = dev_get_by_index(addr->dev_addr.net,
31159327c7afSParav Pandit 				addr->dev_addr.bound_dev_if);
31169327c7afSParav Pandit 	if (!ndev)
31179327c7afSParav Pandit 		return NULL;
31189327c7afSParav Pandit 
31199327c7afSParav Pandit 	supported_gids = roce_gid_type_mask_support(id_priv->id.device,
31209327c7afSParav Pandit 						    id_priv->id.port_num);
31219327c7afSParav Pandit 	gid_type = cma_route_gid_type(addr->dev_addr.network,
31229327c7afSParav Pandit 				      supported_gids,
31239327c7afSParav Pandit 				      id_priv->gid_type);
31249327c7afSParav Pandit 	/* Use the hint from IP Stack to select GID Type */
31259327c7afSParav Pandit 	if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
31269327c7afSParav Pandit 		gid_type = ib_network_to_gid_type(addr->dev_addr.network);
31279327c7afSParav Pandit 	route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
31289327c7afSParav Pandit 
3129114cc9c4SParav Pandit 	route->path_rec->roce.route_resolved = true;
31309327c7afSParav Pandit 	sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
31319327c7afSParav Pandit 	return ndev;
31329327c7afSParav Pandit }
31339327c7afSParav Pandit 
3134fe75889fSParav Pandit int rdma_set_ib_path(struct rdma_cm_id *id,
3135fe75889fSParav Pandit 		     struct sa_path_rec *path_rec)
3136e51060f0SSean Hefty {
3137e51060f0SSean Hefty 	struct rdma_id_private *id_priv;
31388d20a1f0SParav Pandit 	struct net_device *ndev;
3139e51060f0SSean Hefty 	int ret;
3140e51060f0SSean Hefty 
3141e51060f0SSean Hefty 	id_priv = container_of(id, struct rdma_id_private, id);
3142550e5ca7SNir Muchtar 	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
3143550e5ca7SNir Muchtar 			   RDMA_CM_ROUTE_RESOLVED))
3144e51060f0SSean Hefty 		return -EINVAL;
3145e51060f0SSean Hefty 
3146fe75889fSParav Pandit 	id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
31479893e742SJulia Lawall 				     GFP_KERNEL);
3148e51060f0SSean Hefty 	if (!id->route.path_rec) {
3149e51060f0SSean Hefty 		ret = -ENOMEM;
3150e51060f0SSean Hefty 		goto err;
3151e51060f0SSean Hefty 	}
3152e51060f0SSean Hefty 
31538d20a1f0SParav Pandit 	if (rdma_protocol_roce(id->device, id->port_num)) {
31548d20a1f0SParav Pandit 		ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
31558d20a1f0SParav Pandit 		if (!ndev) {
31568d20a1f0SParav Pandit 			ret = -ENODEV;
31578d20a1f0SParav Pandit 			goto err_free;
31588d20a1f0SParav Pandit 		}
31598d20a1f0SParav Pandit 		dev_put(ndev);
31608d20a1f0SParav Pandit 	}
31618d20a1f0SParav Pandit 
3162bf9a9928SMark Zhang 	id->route.num_pri_alt_paths = 1;
3163e51060f0SSean Hefty 	return 0;
31648d20a1f0SParav Pandit 
31658d20a1f0SParav Pandit err_free:
31668d20a1f0SParav Pandit 	kfree(id->route.path_rec);
31678d20a1f0SParav Pandit 	id->route.path_rec = NULL;
3168e51060f0SSean Hefty err:
3169550e5ca7SNir Muchtar 	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
3170e51060f0SSean Hefty 	return ret;
3171e51060f0SSean Hefty }
3172fe75889fSParav Pandit EXPORT_SYMBOL(rdma_set_ib_path);
3173e51060f0SSean Hefty 
3174d6f91252SLeon Romanovsky static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
317507ebafbaSTom Tucker {
317607ebafbaSTom Tucker 	struct cma_work *work;
317707ebafbaSTom Tucker 
317807ebafbaSTom Tucker 	work = kzalloc(sizeof *work, GFP_KERNEL);
317907ebafbaSTom Tucker 	if (!work)
318007ebafbaSTom Tucker 		return -ENOMEM;
318107ebafbaSTom Tucker 
3182981b5a23SParav Pandit 	cma_init_resolve_route_work(work, id_priv);
318307ebafbaSTom Tucker 	queue_work(cma_wq, &work->work);
318407ebafbaSTom Tucker 	return 0;
318507ebafbaSTom Tucker }
318607ebafbaSTom Tucker 
3187d3bd9396SParav Pandit static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
3188eb072c4bSEyal Perry {
3189eb072c4bSEyal Perry 	struct net_device *dev;
3190eb072c4bSEyal Perry 
3191d3bd9396SParav Pandit 	dev = vlan_dev_real_dev(vlan_ndev);
3192eb072c4bSEyal Perry 	if (dev->num_tc)
3193eb072c4bSEyal Perry 		return netdev_get_prio_tc_map(dev, prio);
3194eb072c4bSEyal Perry 
3195d3bd9396SParav Pandit 	return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
3196eb072c4bSEyal Perry 		VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3197d3bd9396SParav Pandit }
3198d3bd9396SParav Pandit 
3199d3bd9396SParav Pandit struct iboe_prio_tc_map {
3200d3bd9396SParav Pandit 	int input_prio;
3201d3bd9396SParav Pandit 	int output_tc;
3202d3bd9396SParav Pandit 	bool found;
3203d3bd9396SParav Pandit };
3204d3bd9396SParav Pandit 
3205eff74233STaehee Yoo static int get_lower_vlan_dev_tc(struct net_device *dev,
3206eff74233STaehee Yoo 				 struct netdev_nested_priv *priv)
3207d3bd9396SParav Pandit {
3208eff74233STaehee Yoo 	struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;
3209d3bd9396SParav Pandit 
3210d3bd9396SParav Pandit 	if (is_vlan_dev(dev))
3211d3bd9396SParav Pandit 		map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
3212d3bd9396SParav Pandit 	else if (dev->num_tc)
3213d3bd9396SParav Pandit 		map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
3214d3bd9396SParav Pandit 	else
3215d3bd9396SParav Pandit 		map->output_tc = 0;
3216d3bd9396SParav Pandit 	/* We are interested only in the first-level VLAN device, so always
3217d3bd9396SParav Pandit 	 * return 1 to stop iterating over the next level of devices.
3218d3bd9396SParav Pandit 	 */
3219d3bd9396SParav Pandit 	map->found = true;
3220d3bd9396SParav Pandit 	return 1;
3221d3bd9396SParav Pandit }
3222d3bd9396SParav Pandit 
3223d3bd9396SParav Pandit static int iboe_tos_to_sl(struct net_device *ndev, int tos)
3224d3bd9396SParav Pandit {
3225d3bd9396SParav Pandit 	struct iboe_prio_tc_map prio_tc_map = {};
3226d3bd9396SParav Pandit 	int prio = rt_tos2priority(tos);
3227eff74233STaehee Yoo 	struct netdev_nested_priv priv;
3228d3bd9396SParav Pandit 
3229d3bd9396SParav Pandit 	/* If VLAN device, get it directly from the VLAN netdev */
3230d3bd9396SParav Pandit 	if (is_vlan_dev(ndev))
3231d3bd9396SParav Pandit 		return get_vlan_ndev_tc(ndev, prio);
3232d3bd9396SParav Pandit 
3233d3bd9396SParav Pandit 	prio_tc_map.input_prio = prio;
3234eff74233STaehee Yoo 	priv.data = (void *)&prio_tc_map;
3235d3bd9396SParav Pandit 	rcu_read_lock();
3236d3bd9396SParav Pandit 	netdev_walk_all_lower_dev_rcu(ndev,
3237d3bd9396SParav Pandit 				      get_lower_vlan_dev_tc,
3238eff74233STaehee Yoo 				      &priv);
3239d3bd9396SParav Pandit 	rcu_read_unlock();
3240d3bd9396SParav Pandit 	/* If a map is found from a lower device, use it; otherwise
3241d3bd9396SParav Pandit 	 * continue with the current netdevice to get the priority-to-TC map.
3242d3bd9396SParav Pandit 	 */
3243d3bd9396SParav Pandit 	if (prio_tc_map.found)
3244d3bd9396SParav Pandit 		return prio_tc_map.output_tc;
3245d3bd9396SParav Pandit 	else if (ndev->num_tc)
3246d3bd9396SParav Pandit 		return netdev_get_prio_tc_map(ndev, prio);
3247d3bd9396SParav Pandit 	else
3248eb072c4bSEyal Perry 		return 0;
3249eb072c4bSEyal Perry }
3250eb072c4bSEyal Perry 
3251f6653405SMark Zhang static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
3252f6653405SMark Zhang {
3253f6653405SMark Zhang 	struct sockaddr_in6 *addr6;
3254f6653405SMark Zhang 	u16 dport, sport;
3255f6653405SMark Zhang 	u32 hash, fl;
3256f6653405SMark Zhang 
3257f6653405SMark Zhang 	addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
3258f6653405SMark Zhang 	fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
3259f6653405SMark Zhang 	if ((cma_family(id_priv) != AF_INET6) || !fl) {
3260f6653405SMark Zhang 		dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
3261f6653405SMark Zhang 		sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
3262f6653405SMark Zhang 		hash = (u32)sport * 31 + dport;
3263f6653405SMark Zhang 		fl = hash & IB_GRH_FLOWLABEL_MASK;
3264f6653405SMark Zhang 	}
3265f6653405SMark Zhang 
3266f6653405SMark Zhang 	return cpu_to_be32(fl);
3267f6653405SMark Zhang }
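
/*
 * Worked example (editor's illustration, not part of the original source):
 * for a non-IPv6 destination, or an IPv6 one without a flow label set, the
 * label is derived from the port pair.  With sport = 0x1234 and dport = 0x5678:
 *
 *	hash = 0x1234 * 31 + 0x5678 = 166596 = 0x28ac4
 *	fl   = 0x28ac4 & IB_GRH_FLOWLABEL_MASK (20 bits) = 0x28ac4
 *
 * Drivers typically fold this flow label into the RoCEv2 UDP source port,
 * so the same address tuple keeps hashing to the same network path.
 */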
3268f6653405SMark Zhang 
32693c86aa70SEli Cohen static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
32703c86aa70SEli Cohen {
32713c86aa70SEli Cohen 	struct rdma_route *route = &id_priv->id.route;
32723c86aa70SEli Cohen 	struct rdma_addr *addr = &route->addr;
32733c86aa70SEli Cohen 	struct cma_work *work;
32743c86aa70SEli Cohen 	int ret;
32754367ec7fSParav Pandit 	struct net_device *ndev;
32764367ec7fSParav Pandit 
327789052d78SMajd Dibbiny 	u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
327889052d78SMajd Dibbiny 					rdma_start_port(id_priv->cma_dev->device)];
3279ca0c448dSHåkon Bugge 	u8 tos;
3280dd5f03beSMatan Barak 
3281ca0c448dSHåkon Bugge 	mutex_lock(&id_priv->qp_mutex);
3282ca0c448dSHåkon Bugge 	tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
3283ca0c448dSHåkon Bugge 	mutex_unlock(&id_priv->qp_mutex);
32843c86aa70SEli Cohen 
32853c86aa70SEli Cohen 	work = kzalloc(sizeof *work, GFP_KERNEL);
32863c86aa70SEli Cohen 	if (!work)
32873c86aa70SEli Cohen 		return -ENOMEM;
32883c86aa70SEli Cohen 
32893c86aa70SEli Cohen 	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
32903c86aa70SEli Cohen 	if (!route->path_rec) {
32913c86aa70SEli Cohen 		ret = -ENOMEM;
32923c86aa70SEli Cohen 		goto err1;
32933c86aa70SEli Cohen 	}
32943c86aa70SEli Cohen 
3295bf9a9928SMark Zhang 	route->num_pri_alt_paths = 1;
32963c86aa70SEli Cohen 
32979327c7afSParav Pandit 	ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
329823d70503SWei Yongjun 	if (!ndev) {
329923d70503SWei Yongjun 		ret = -ENODEV;
330023d70503SWei Yongjun 		goto err2;
330123d70503SWei Yongjun 	}
330220029832SMatan Barak 
33037b85627bSMoni Shoua 	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
33047b85627bSMoni Shoua 		    &route->path_rec->sgid);
33057b85627bSMoni Shoua 	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
33067b85627bSMoni Shoua 		    &route->path_rec->dgid);
3307af7bd463SEli Cohen 
3308c3efe750SMatan Barak 	if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
3309c865f246SSomnath Kotur 		/* TODO: get the hoplimit from the inet/inet6 device */
3310c3efe750SMatan Barak 		route->path_rec->hop_limit = addr->dev_addr.hoplimit;
3311c3efe750SMatan Barak 	else
3312af7bd463SEli Cohen 		route->path_rec->hop_limit = 1;
3313af7bd463SEli Cohen 	route->path_rec->reversible = 1;
3314af7bd463SEli Cohen 	route->path_rec->pkey = cpu_to_be16(0xffff);
3315af7bd463SEli Cohen 	route->path_rec->mtu_selector = IB_SA_EQ;
331689052d78SMajd Dibbiny 	route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
331789052d78SMajd Dibbiny 	route->path_rec->traffic_class = tos;
33183c86aa70SEli Cohen 	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
33193c86aa70SEli Cohen 	route->path_rec->rate_selector = IB_SA_EQ;
332058030c76SMark Zhang 	route->path_rec->rate = IB_RATE_PORT_CURRENT;
33213c86aa70SEli Cohen 	dev_put(ndev);
33223c86aa70SEli Cohen 	route->path_rec->packet_life_time_selector = IB_SA_EQ;
3323e1ee1e62SDag Moxnes 	/* In case ACK timeout is set, use this value to calculate
3324e1ee1e62SDag Moxnes 	 * PacketLifeTime.  As per IBTA 12.7.34,
3325e1ee1e62SDag Moxnes 	 * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay).
3326e1ee1e62SDag Moxnes 	 * Assuming a negligible local ACK delay, we can use
3327e1ee1e62SDag Moxnes 	 * PacketLifeTime = local ACK timeout/2
3328e1ee1e62SDag Moxnes 	 * as a reasonable approximation for RoCE networks.
3329e1ee1e62SDag Moxnes 	 */
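	/*
	 * Illustrative arithmetic (editor's note, not in the original source):
	 * both values are 5-bit exponents encoding 4.096 us * 2^value, so
	 * subtracting one from the exponent halves the time; e.g. a local ACK
	 * timeout of 14 (~67 ms) yields packet_life_time = 13 (~33.6 ms).
	 */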
3330ca0c448dSHåkon Bugge 	mutex_lock(&id_priv->qp_mutex);
3331e84045eaSHåkon Bugge 	if (id_priv->timeout_set && id_priv->timeout)
3332e84045eaSHåkon Bugge 		route->path_rec->packet_life_time = id_priv->timeout - 1;
3333e84045eaSHåkon Bugge 	else
3334e84045eaSHåkon Bugge 		route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
3335ca0c448dSHåkon Bugge 	mutex_unlock(&id_priv->qp_mutex);
3336e1ee1e62SDag Moxnes 
33373c86aa70SEli Cohen 	if (!route->path_rec->mtu) {
33383c86aa70SEli Cohen 		ret = -EINVAL;
33393c86aa70SEli Cohen 		goto err2;
33403c86aa70SEli Cohen 	}
33413c86aa70SEli Cohen 
3342f6653405SMark Zhang 	if (rdma_protocol_roce_udp_encap(id_priv->id.device,
3343f6653405SMark Zhang 					 id_priv->id.port_num))
3344f6653405SMark Zhang 		route->path_rec->flow_label =
3345f6653405SMark Zhang 			cma_get_roce_udp_flow_label(id_priv);
3346f6653405SMark Zhang 
3347981b5a23SParav Pandit 	cma_init_resolve_route_work(work, id_priv);
33483c86aa70SEli Cohen 	queue_work(cma_wq, &work->work);
33493c86aa70SEli Cohen 
33503c86aa70SEli Cohen 	return 0;
33513c86aa70SEli Cohen 
33523c86aa70SEli Cohen err2:
33533c86aa70SEli Cohen 	kfree(route->path_rec);
33543c86aa70SEli Cohen 	route->path_rec = NULL;
3355bf9a9928SMark Zhang 	route->num_pri_alt_paths = 0;
33563c86aa70SEli Cohen err1:
33573c86aa70SEli Cohen 	kfree(work);
33583c86aa70SEli Cohen 	return ret;
33593c86aa70SEli Cohen }
33603c86aa70SEli Cohen 
3361dbace111SLeon Romanovsky int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
3362e51060f0SSean Hefty {
3363e51060f0SSean Hefty 	struct rdma_id_private *id_priv;
3364e51060f0SSean Hefty 	int ret;
3365e51060f0SSean Hefty 
33665f5a6509SHåkon Bugge 	if (!timeout_ms)
33675f5a6509SHåkon Bugge 		return -EINVAL;
33685f5a6509SHåkon Bugge 
3369e51060f0SSean Hefty 	id_priv = container_of(id, struct rdma_id_private, id);
3370550e5ca7SNir Muchtar 	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
3371e51060f0SSean Hefty 		return -EINVAL;
3372e51060f0SSean Hefty 
3373e368d23fSParav Pandit 	cma_id_get(id_priv);
3374fe53ba2fSMichael Wang 	if (rdma_cap_ib_sa(id->device, id->port_num))
3375e51060f0SSean Hefty 		ret = cma_resolve_ib_route(id_priv, timeout_ms);
3376fc008bdbSPatrisious Haddad 	else if (rdma_protocol_roce(id->device, id->port_num)) {
33773c86aa70SEli Cohen 		ret = cma_resolve_iboe_route(id_priv);
3378fc008bdbSPatrisious Haddad 		if (!ret)
3379fc008bdbSPatrisious Haddad 			cma_add_id_to_tree(id_priv);
3380fc008bdbSPatrisious Haddad 	}
3381c72f2189SMichael Wang 	else if (rdma_protocol_iwarp(id->device, id->port_num))
3382d6f91252SLeon Romanovsky 		ret = cma_resolve_iw_route(id_priv);
3383c72f2189SMichael Wang 	else
3384e51060f0SSean Hefty 		ret = -ENOSYS;
3385c72f2189SMichael Wang 
3386e51060f0SSean Hefty 	if (ret)
3387e51060f0SSean Hefty 		goto err;
3388e51060f0SSean Hefty 
3389e51060f0SSean Hefty 	return 0;
3390e51060f0SSean Hefty err:
3391550e5ca7SNir Muchtar 	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
3392e368d23fSParav Pandit 	cma_id_put(id_priv);
3393e51060f0SSean Hefty 	return ret;
3394e51060f0SSean Hefty }
3395e51060f0SSean Hefty EXPORT_SYMBOL(rdma_resolve_route);
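
/*
 * Minimal ULP-side sketch (editor's illustration, not part of cma.c; the
 * handler name is hypothetical): rdma_resolve_route() is normally called
 * from the rdma_cm event handler once RDMA_CM_EVENT_ADDR_RESOLVED has been
 * delivered, and RDMA_CM_EVENT_ROUTE_RESOLVED then signals completion.
 */
static int example_cm_event_handler(struct rdma_cm_id *id,
				    struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		/*
		 * id is now in RDMA_CM_ADDR_RESOLVED; start route resolution.
		 * A non-zero return tells the core to tear the id down.
		 */
		return rdma_resolve_route(id, 2000);
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		/* id->route.path_rec is populated; the ULP may now connect */
		return 0;
	default:
		return 0;
	}
}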
3396e51060f0SSean Hefty 
33976a3e362dSSean Hefty static void cma_set_loopback(struct sockaddr *addr)
33986a3e362dSSean Hefty {
33996a3e362dSSean Hefty 	switch (addr->sa_family) {
34006a3e362dSSean Hefty 	case AF_INET:
34016a3e362dSSean Hefty 		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
34026a3e362dSSean Hefty 		break;
34036a3e362dSSean Hefty 	case AF_INET6:
34046a3e362dSSean Hefty 		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
34056a3e362dSSean Hefty 			      0, 0, 0, htonl(1));
34066a3e362dSSean Hefty 		break;
34076a3e362dSSean Hefty 	default:
34086a3e362dSSean Hefty 		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
34096a3e362dSSean Hefty 			    0, 0, 0, htonl(1));
34106a3e362dSSean Hefty 		break;
34116a3e362dSSean Hefty 	}
34126a3e362dSSean Hefty }
34136a3e362dSSean Hefty 
3414e51060f0SSean Hefty static int cma_bind_loopback(struct rdma_id_private *id_priv)
3415e51060f0SSean Hefty {
3416b0569e40SSean Hefty 	struct cma_device *cma_dev, *cur_dev;
3417f0ee3404SMichael S. Tsirkin 	union ib_gid gid;
3418102c5ce0SJack Wang 	enum ib_port_state port_state;
3419cc055dd3SParav Pandit 	unsigned int p;
3420e51060f0SSean Hefty 	u16 pkey;
3421e51060f0SSean Hefty 	int ret;
3422e51060f0SSean Hefty 
3423b0569e40SSean Hefty 	cma_dev = NULL;
3424e51060f0SSean Hefty 	mutex_lock(&lock);
3425b0569e40SSean Hefty 	list_for_each_entry(cur_dev, &dev_list, list) {
3426b0569e40SSean Hefty 		if (cma_family(id_priv) == AF_IB &&
342772219ceaSMichael Wang 		    !rdma_cap_ib_cm(cur_dev->device, 1))
3428b0569e40SSean Hefty 			continue;
3429b0569e40SSean Hefty 
3430b0569e40SSean Hefty 		if (!cma_dev)
3431b0569e40SSean Hefty 			cma_dev = cur_dev;
3432b0569e40SSean Hefty 
3433cc055dd3SParav Pandit 		rdma_for_each_port (cur_dev->device, p) {
3434102c5ce0SJack Wang 			if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
3435102c5ce0SJack Wang 			    port_state == IB_PORT_ACTIVE) {
3436b0569e40SSean Hefty 				cma_dev = cur_dev;
3437b0569e40SSean Hefty 				goto port_found;
3438b0569e40SSean Hefty 			}
3439b0569e40SSean Hefty 		}
3440b0569e40SSean Hefty 	}
3441b0569e40SSean Hefty 
3442b0569e40SSean Hefty 	if (!cma_dev) {
3443e82153b5SKrishna Kumar 		ret = -ENODEV;
3444e82153b5SKrishna Kumar 		goto out;
3445e82153b5SKrishna Kumar 	}
3446e51060f0SSean Hefty 
3447e51060f0SSean Hefty 	p = 1;
3448e51060f0SSean Hefty 
3449e51060f0SSean Hefty port_found:
34501dfce294SParav Pandit 	ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
3451e51060f0SSean Hefty 	if (ret)
3452e51060f0SSean Hefty 		goto out;
3453e51060f0SSean Hefty 
3454e51060f0SSean Hefty 	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
3455e51060f0SSean Hefty 	if (ret)
3456e51060f0SSean Hefty 		goto out;
3457e51060f0SSean Hefty 
34586f8372b6SSean Hefty 	id_priv->id.route.addr.dev_addr.dev_type =
345921655afcSMichael Wang 		(rdma_protocol_ib(cma_dev->device, p)) ?
34606f8372b6SSean Hefty 		ARPHRD_INFINIBAND : ARPHRD_ETHER;
34616f8372b6SSean Hefty 
34626f8372b6SSean Hefty 	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
3463e51060f0SSean Hefty 	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
3464e51060f0SSean Hefty 	id_priv->id.port_num = p;
3465e51060f0SSean Hefty 	cma_attach_to_dev(id_priv, cma_dev);
3466cb5cd0eaSShay Drory 	rdma_restrack_add(&id_priv->res);
3467f4753834SSean Hefty 	cma_set_loopback(cma_src_addr(id_priv));
3468e51060f0SSean Hefty out:
3469e51060f0SSean Hefty 	mutex_unlock(&lock);
3470e51060f0SSean Hefty 	return ret;
3471e51060f0SSean Hefty }
3472e51060f0SSean Hefty 
3473e51060f0SSean Hefty static void addr_handler(int status, struct sockaddr *src_addr,
3474e51060f0SSean Hefty 			 struct rdma_dev_addr *dev_addr, void *context)
3475e51060f0SSean Hefty {
3476e51060f0SSean Hefty 	struct rdma_id_private *id_priv = context;
34777582df82SParav Pandit 	struct rdma_cm_event event = {};
34785fc01fb8SMyungho Jung 	struct sockaddr *addr;
34795fc01fb8SMyungho Jung 	struct sockaddr_storage old_addr;
3480e51060f0SSean Hefty 
3481de910bd9SOr Gerlitz 	mutex_lock(&id_priv->handler_mutex);
3482550e5ca7SNir Muchtar 	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
3483550e5ca7SNir Muchtar 			   RDMA_CM_ADDR_RESOLVED))
348461a73c70SSean Hefty 		goto out;
348561a73c70SSean Hefty 
34865fc01fb8SMyungho Jung 	/*
34875fc01fb8SMyungho Jung 	 * Store the previous src address, so that if we fail to acquire a
34885fc01fb8SMyungho Jung 	 * matching rdma device, the old address can be restored, which allows
34895fc01fb8SMyungho Jung 	 * the cma listen operation to be canceled correctly.
34905fc01fb8SMyungho Jung 	 */
34915fc01fb8SMyungho Jung 	addr = cma_src_addr(id_priv);
34925fc01fb8SMyungho Jung 	memcpy(&old_addr, addr, rdma_addr_size(addr));
34935fc01fb8SMyungho Jung 	memcpy(addr, src_addr, rdma_addr_size(src_addr));
3494498683c6SMoni Shoua 	if (!status && !id_priv->cma_dev) {
3495ff11c6cdSParav Pandit 		status = cma_acquire_dev_by_src_ip(id_priv);
3496498683c6SMoni Shoua 		if (status)
3497498683c6SMoni Shoua 			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
3498498683c6SMoni Shoua 					     status);
3499cb5cd0eaSShay Drory 		rdma_restrack_add(&id_priv->res);
3500a6e4d254SHåkon Bugge 	} else if (status) {
3501498683c6SMoni Shoua 		pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
3502498683c6SMoni Shoua 	}
3503e51060f0SSean Hefty 
3504e51060f0SSean Hefty 	if (status) {
35055fc01fb8SMyungho Jung 		memcpy(addr, &old_addr,
35065fc01fb8SMyungho Jung 		       rdma_addr_size((struct sockaddr *)&old_addr));
3507550e5ca7SNir Muchtar 		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
3508550e5ca7SNir Muchtar 				   RDMA_CM_ADDR_BOUND))
3509e51060f0SSean Hefty 			goto out;
3510a1b1b61fSSean Hefty 		event.event = RDMA_CM_EVENT_ADDR_ERROR;
3511a1b1b61fSSean Hefty 		event.status = status;
35127b85627bSMoni Shoua 	} else
3513a1b1b61fSSean Hefty 		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
3514e51060f0SSean Hefty 
3515ed999f82SChuck Lever 	if (cma_cm_event_handler(id_priv, &event)) {
3516f6a9d47aSJason Gunthorpe 		destroy_id_handler_unlock(id_priv);
3517e51060f0SSean Hefty 		return;
3518e51060f0SSean Hefty 	}
3519e51060f0SSean Hefty out:
3520de910bd9SOr Gerlitz 	mutex_unlock(&id_priv->handler_mutex);
3521e51060f0SSean Hefty }
3522e51060f0SSean Hefty 
3523e51060f0SSean Hefty static int cma_resolve_loopback(struct rdma_id_private *id_priv)
3524e51060f0SSean Hefty {
3525e51060f0SSean Hefty 	struct cma_work *work;
3526f0ee3404SMichael S. Tsirkin 	union ib_gid gid;
3527e51060f0SSean Hefty 	int ret;
3528e51060f0SSean Hefty 
3529e51060f0SSean Hefty 	work = kzalloc(sizeof *work, GFP_KERNEL);
3530e51060f0SSean Hefty 	if (!work)
3531e51060f0SSean Hefty 		return -ENOMEM;
3532e51060f0SSean Hefty 
3533e51060f0SSean Hefty 	if (!id_priv->cma_dev) {
3534e51060f0SSean Hefty 		ret = cma_bind_loopback(id_priv);
3535e51060f0SSean Hefty 		if (ret)
3536e51060f0SSean Hefty 			goto err;
3537e51060f0SSean Hefty 	}
3538e51060f0SSean Hefty 
35396f8372b6SSean Hefty 	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
35406f8372b6SSean Hefty 	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
3541e51060f0SSean Hefty 
3542081ea519SParav Pandit 	enqueue_resolve_addr_work(work, id_priv);
3543e51060f0SSean Hefty 	return 0;
3544e51060f0SSean Hefty err:
3545e51060f0SSean Hefty 	kfree(work);
3546e51060f0SSean Hefty 	return ret;
3547e51060f0SSean Hefty }
3548e51060f0SSean Hefty 
3549f17df3b0SSean Hefty static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
3550f17df3b0SSean Hefty {
3551f17df3b0SSean Hefty 	struct cma_work *work;
3552f17df3b0SSean Hefty 	int ret;
3553f17df3b0SSean Hefty 
3554f17df3b0SSean Hefty 	work = kzalloc(sizeof *work, GFP_KERNEL);
3555f17df3b0SSean Hefty 	if (!work)
3556f17df3b0SSean Hefty 		return -ENOMEM;
3557f17df3b0SSean Hefty 
3558f17df3b0SSean Hefty 	if (!id_priv->cma_dev) {
3559f17df3b0SSean Hefty 		ret = cma_resolve_ib_dev(id_priv);
3560f17df3b0SSean Hefty 		if (ret)
3561f17df3b0SSean Hefty 			goto err;
3562f17df3b0SSean Hefty 	}
3563f17df3b0SSean Hefty 
3564f17df3b0SSean Hefty 	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
3565f17df3b0SSean Hefty 		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
3566f17df3b0SSean Hefty 
3567081ea519SParav Pandit 	enqueue_resolve_addr_work(work, id_priv);
3568f17df3b0SSean Hefty 	return 0;
3569f17df3b0SSean Hefty err:
3570f17df3b0SSean Hefty 	kfree(work);
3571f17df3b0SSean Hefty 	return ret;
3572f17df3b0SSean Hefty }
3573f17df3b0SSean Hefty 
3574a9bb7912SHefty, Sean int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
3575a9bb7912SHefty, Sean {
3576a9bb7912SHefty, Sean 	struct rdma_id_private *id_priv;
3577a9bb7912SHefty, Sean 	unsigned long flags;
3578a9bb7912SHefty, Sean 	int ret;
3579a9bb7912SHefty, Sean 
3580a9bb7912SHefty, Sean 	id_priv = container_of(id, struct rdma_id_private, id);
3581a9bb7912SHefty, Sean 	spin_lock_irqsave(&id_priv->lock, flags);
3582d490ee52SJason Gunthorpe 	if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
3583d490ee52SJason Gunthorpe 	    id_priv->state == RDMA_CM_IDLE) {
3584a9bb7912SHefty, Sean 		id_priv->reuseaddr = reuse;
3585a9bb7912SHefty, Sean 		ret = 0;
3586a9bb7912SHefty, Sean 	} else {
3587a9bb7912SHefty, Sean 		ret = -EINVAL;
3588a9bb7912SHefty, Sean 	}
3589a9bb7912SHefty, Sean 	spin_unlock_irqrestore(&id_priv->lock, flags);
3590a9bb7912SHefty, Sean 	return ret;
3591a9bb7912SHefty, Sean }
3592a9bb7912SHefty, Sean EXPORT_SYMBOL(rdma_set_reuseaddr);
3593a9bb7912SHefty, Sean 
359468602120SSean Hefty int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
359568602120SSean Hefty {
359668602120SSean Hefty 	struct rdma_id_private *id_priv;
359768602120SSean Hefty 	unsigned long flags;
359868602120SSean Hefty 	int ret;
359968602120SSean Hefty 
360068602120SSean Hefty 	id_priv = container_of(id, struct rdma_id_private, id);
360168602120SSean Hefty 	spin_lock_irqsave(&id_priv->lock, flags);
360268602120SSean Hefty 	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
360368602120SSean Hefty 		id_priv->options |= (1 << CMA_OPTION_AFONLY);
360468602120SSean Hefty 		id_priv->afonly = afonly;
360568602120SSean Hefty 		ret = 0;
360668602120SSean Hefty 	} else {
360768602120SSean Hefty 		ret = -EINVAL;
360868602120SSean Hefty 	}
360968602120SSean Hefty 	spin_unlock_irqrestore(&id_priv->lock, flags);
361068602120SSean Hefty 	return ret;
361168602120SSean Hefty }
361268602120SSean Hefty EXPORT_SYMBOL(rdma_set_afonly);
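
/*
 * Ordering sketch (editor's illustration, not part of cma.c; the function
 * name is hypothetical): both option setters are state-checked, so a ULP
 * typically sets them before binding and listening.  Error handling for the
 * setters is omitted for brevity.
 */
static int example_listen_setup(struct rdma_cm_id *id, struct sockaddr *addr,
				int backlog)
{
	int ret;

	rdma_set_afonly(id, 1);		/* allowed while IDLE or ADDR_BOUND */
	rdma_set_reuseaddr(id, 1);	/* allowed in any state except LISTEN */
	ret = rdma_bind_addr(id, addr);
	if (ret)
		return ret;
	return rdma_listen(id, backlog);
}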
361368602120SSean Hefty 
3614e51060f0SSean Hefty static void cma_bind_port(struct rdma_bind_list *bind_list,
3615e51060f0SSean Hefty 			  struct rdma_id_private *id_priv)
3616e51060f0SSean Hefty {
361758afdcb7SSean Hefty 	struct sockaddr *addr;
361858afdcb7SSean Hefty 	struct sockaddr_ib *sib;
361958afdcb7SSean Hefty 	u64 sid, mask;
362058afdcb7SSean Hefty 	__be16 port;
3621e51060f0SSean Hefty 
3622730c8912SMark Zhang 	lockdep_assert_held(&lock);
3623730c8912SMark Zhang 
3624f4753834SSean Hefty 	addr = cma_src_addr(id_priv);
362558afdcb7SSean Hefty 	port = htons(bind_list->port);
362658afdcb7SSean Hefty 
362758afdcb7SSean Hefty 	switch (addr->sa_family) {
362858afdcb7SSean Hefty 	case AF_INET:
362958afdcb7SSean Hefty 		((struct sockaddr_in *) addr)->sin_port = port;
363058afdcb7SSean Hefty 		break;
363158afdcb7SSean Hefty 	case AF_INET6:
363258afdcb7SSean Hefty 		((struct sockaddr_in6 *) addr)->sin6_port = port;
363358afdcb7SSean Hefty 		break;
363458afdcb7SSean Hefty 	case AF_IB:
363558afdcb7SSean Hefty 		sib = (struct sockaddr_ib *) addr;
363658afdcb7SSean Hefty 		sid = be64_to_cpu(sib->sib_sid);
363758afdcb7SSean Hefty 		mask = be64_to_cpu(sib->sib_sid_mask);
363858afdcb7SSean Hefty 		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
363958afdcb7SSean Hefty 		sib->sib_sid_mask = cpu_to_be64(~0ULL);
364058afdcb7SSean Hefty 		break;
364158afdcb7SSean Hefty 	}
3642e51060f0SSean Hefty 	id_priv->bind_list = bind_list;
3643e51060f0SSean Hefty 	hlist_add_head(&id_priv->node, &bind_list->owners);
3644e51060f0SSean Hefty }
3645e51060f0SSean Hefty 
36462253fc0cSSteve Wise static int cma_alloc_port(enum rdma_ucm_port_space ps,
3647aac978e1SHaggai Eran 			  struct rdma_id_private *id_priv, unsigned short snum)
3648e51060f0SSean Hefty {
3649e51060f0SSean Hefty 	struct rdma_bind_list *bind_list;
36503b069c5dSTejun Heo 	int ret;
3651e51060f0SSean Hefty 
3652730c8912SMark Zhang 	lockdep_assert_held(&lock);
3653730c8912SMark Zhang 
3654cb164b8cSSean Hefty 	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
3655e51060f0SSean Hefty 	if (!bind_list)
3656e51060f0SSean Hefty 		return -ENOMEM;
3657e51060f0SSean Hefty 
3658fa20105eSGuy Shapiro 	ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
3659fa20105eSGuy Shapiro 			   snum);
36603b069c5dSTejun Heo 	if (ret < 0)
36613b069c5dSTejun Heo 		goto err;
3662e51060f0SSean Hefty 
3663e51060f0SSean Hefty 	bind_list->ps = ps;
3664061ccb52SLeon Romanovsky 	bind_list->port = snum;
3665e51060f0SSean Hefty 	cma_bind_port(bind_list, id_priv);
3666e51060f0SSean Hefty 	return 0;
36673b069c5dSTejun Heo err:
3668aedec080SSean Hefty 	kfree(bind_list);
36693b069c5dSTejun Heo 	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
3670aedec080SSean Hefty }
3671aedec080SSean Hefty 
367219b752a1SMoni Shoua static int cma_port_is_unique(struct rdma_bind_list *bind_list,
367319b752a1SMoni Shoua 			      struct rdma_id_private *id_priv)
367419b752a1SMoni Shoua {
367519b752a1SMoni Shoua 	struct rdma_id_private *cur_id;
367619b752a1SMoni Shoua 	struct sockaddr  *daddr = cma_dst_addr(id_priv);
367719b752a1SMoni Shoua 	struct sockaddr  *saddr = cma_src_addr(id_priv);
367819b752a1SMoni Shoua 	__be16 dport = cma_port(daddr);
367919b752a1SMoni Shoua 
3680730c8912SMark Zhang 	lockdep_assert_held(&lock);
3681730c8912SMark Zhang 
368219b752a1SMoni Shoua 	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
368319b752a1SMoni Shoua 		struct sockaddr  *cur_daddr = cma_dst_addr(cur_id);
368419b752a1SMoni Shoua 		struct sockaddr  *cur_saddr = cma_src_addr(cur_id);
368519b752a1SMoni Shoua 		__be16 cur_dport = cma_port(cur_daddr);
368619b752a1SMoni Shoua 
368719b752a1SMoni Shoua 		if (id_priv == cur_id)
368819b752a1SMoni Shoua 			continue;
368919b752a1SMoni Shoua 
369019b752a1SMoni Shoua 		/* different dest port -> unique */
36919dea9a2fSTatyana Nikolova 		if (!cma_any_port(daddr) &&
36929dea9a2fSTatyana Nikolova 		    !cma_any_port(cur_daddr) &&
369319b752a1SMoni Shoua 		    (dport != cur_dport))
369419b752a1SMoni Shoua 			continue;
369519b752a1SMoni Shoua 
369619b752a1SMoni Shoua 		/* different src address -> unique */
369719b752a1SMoni Shoua 		if (!cma_any_addr(saddr) &&
369819b752a1SMoni Shoua 		    !cma_any_addr(cur_saddr) &&
369919b752a1SMoni Shoua 		    cma_addr_cmp(saddr, cur_saddr))
370019b752a1SMoni Shoua 			continue;
370119b752a1SMoni Shoua 
370219b752a1SMoni Shoua 		/* different dst address -> unique */
37039dea9a2fSTatyana Nikolova 		if (!cma_any_addr(daddr) &&
37049dea9a2fSTatyana Nikolova 		    !cma_any_addr(cur_daddr) &&
370519b752a1SMoni Shoua 		    cma_addr_cmp(daddr, cur_daddr))
370619b752a1SMoni Shoua 			continue;
370719b752a1SMoni Shoua 
370819b752a1SMoni Shoua 		return -EADDRNOTAVAIL;
370919b752a1SMoni Shoua 	}
371019b752a1SMoni Shoua 	return 0;
371119b752a1SMoni Shoua }
371219b752a1SMoni Shoua 
37132253fc0cSSteve Wise static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
3714aac978e1SHaggai Eran 			      struct rdma_id_private *id_priv)
3715aedec080SSean Hefty {
37165d7220e8STetsuo Handa 	static unsigned int last_used_port;
37175d7220e8STetsuo Handa 	int low, high, remaining;
37185d7220e8STetsuo Handa 	unsigned int rover;
3719fa20105eSGuy Shapiro 	struct net *net = id_priv->id.route.addr.dev_addr.net;
3720aedec080SSean Hefty 
3721730c8912SMark Zhang 	lockdep_assert_held(&lock);
3722730c8912SMark Zhang 
3723fa20105eSGuy Shapiro 	inet_get_local_port_range(net, &low, &high);
37245d7220e8STetsuo Handa 	remaining = (high - low) + 1;
3725e8a533cbSJason A. Donenfeld 	rover = get_random_u32_inclusive(low, remaining + low - 1);
37265d7220e8STetsuo Handa retry:
372719b752a1SMoni Shoua 	if (last_used_port != rover) {
372819b752a1SMoni Shoua 		struct rdma_bind_list *bind_list;
372919b752a1SMoni Shoua 		int ret;
373019b752a1SMoni Shoua 
373119b752a1SMoni Shoua 		bind_list = cma_ps_find(net, ps, (unsigned short)rover);
373219b752a1SMoni Shoua 
373319b752a1SMoni Shoua 		if (!bind_list) {
373419b752a1SMoni Shoua 			ret = cma_alloc_port(ps, id_priv, rover);
373519b752a1SMoni Shoua 		} else {
373619b752a1SMoni Shoua 			ret = cma_port_is_unique(bind_list, id_priv);
373719b752a1SMoni Shoua 			if (!ret)
373819b752a1SMoni Shoua 				cma_bind_port(bind_list, id_priv);
373919b752a1SMoni Shoua 		}
37405d7220e8STetsuo Handa 		/*
37415d7220e8STetsuo Handa 		 * Remember the previously used port number in order to avoid
37425d7220e8STetsuo Handa 		 * re-using the same port immediately after it is closed.
37435d7220e8STetsuo Handa 		 */
37445d7220e8STetsuo Handa 		if (!ret)
37455d7220e8STetsuo Handa 			last_used_port = rover;
37465d7220e8STetsuo Handa 		if (ret != -EADDRNOTAVAIL)
37475d7220e8STetsuo Handa 			return ret;
37485d7220e8STetsuo Handa 	}
37495d7220e8STetsuo Handa 	if (--remaining) {
37505d7220e8STetsuo Handa 		rover++;
37515d7220e8STetsuo Handa 		if ((rover < low) || (rover > high))
37525d7220e8STetsuo Handa 			rover = low;
3753aedec080SSean Hefty 		goto retry;
3754aedec080SSean Hefty 	}
37555d7220e8STetsuo Handa 	return -EADDRNOTAVAIL;
3756e51060f0SSean Hefty }
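
/*
 * Worked example (editor's note, not in the original source, assuming the
 * default ip_local_port_range of 32768-60999): remaining = 28232 and rover
 * starts at a random port in [32768, 60999]; the loop then advances rover
 * linearly, wrapping back to the low end, until a free (or shareable) port
 * is found or the whole range has been tried and -EADDRNOTAVAIL is returned.
 */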
3757e51060f0SSean Hefty 
3758a9bb7912SHefty, Sean /*
3759a9bb7912SHefty, Sean  * Check that the requested port is available.  This is called when trying to
3760a9bb7912SHefty, Sean  * bind to a specific port, or when trying to listen on a bound port.  In
3761a9bb7912SHefty, Sean  * the latter case, the provided id_priv may already be on the bind_list, but
3762a9bb7912SHefty, Sean  * we still need to check that it's okay to start listening.
3763a9bb7912SHefty, Sean  */
3764a9bb7912SHefty, Sean static int cma_check_port(struct rdma_bind_list *bind_list,
3765a9bb7912SHefty, Sean 			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
3766e51060f0SSean Hefty {
3767e51060f0SSean Hefty 	struct rdma_id_private *cur_id;
376843b752daSHefty, Sean 	struct sockaddr *addr, *cur_addr;
3769e51060f0SSean Hefty 
3770730c8912SMark Zhang 	lockdep_assert_held(&lock);
3771730c8912SMark Zhang 
3772f4753834SSean Hefty 	addr = cma_src_addr(id_priv);
3773b67bfe0dSSasha Levin 	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
3774a9bb7912SHefty, Sean 		if (id_priv == cur_id)
3775a9bb7912SHefty, Sean 			continue;
3776a9bb7912SHefty, Sean 
3777d490ee52SJason Gunthorpe 		if (reuseaddr && cur_id->reuseaddr)
37785b0ec991SSean Hefty 			continue;
37795b0ec991SSean Hefty 
3780f4753834SSean Hefty 		cur_addr = cma_src_addr(cur_id);
37815b0ec991SSean Hefty 		if (id_priv->afonly && cur_id->afonly &&
37825b0ec991SSean Hefty 		    (addr->sa_family != cur_addr->sa_family))
37835b0ec991SSean Hefty 			continue;
37845b0ec991SSean Hefty 
37855b0ec991SSean Hefty 		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
3786e51060f0SSean Hefty 			return -EADDRNOTAVAIL;
3787e51060f0SSean Hefty 
378843b752daSHefty, Sean 		if (!cma_addr_cmp(addr, cur_addr))
3789e51060f0SSean Hefty 			return -EADDRINUSE;
3790e51060f0SSean Hefty 	}
3791e51060f0SSean Hefty 	return 0;
3792e51060f0SSean Hefty }
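
/*
 * Summary (editor's note, not in the original source): two ids may share a
 * port only if the requesting id and the existing owner both have reuseaddr
 * set, or both are confined (afonly) to different address families, or
 * neither source address is a wildcard and the source addresses differ.
 */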
3793e51060f0SSean Hefty 
37942253fc0cSSteve Wise static int cma_use_port(enum rdma_ucm_port_space ps,
3795aac978e1SHaggai Eran 			struct rdma_id_private *id_priv)
3796a9bb7912SHefty, Sean {
3797a9bb7912SHefty, Sean 	struct rdma_bind_list *bind_list;
3798a9bb7912SHefty, Sean 	unsigned short snum;
3799a9bb7912SHefty, Sean 	int ret;
3800a9bb7912SHefty, Sean 
3801730c8912SMark Zhang 	lockdep_assert_held(&lock);
3802730c8912SMark Zhang 
3803f4753834SSean Hefty 	snum = ntohs(cma_port(cma_src_addr(id_priv)));
3804a9bb7912SHefty, Sean 	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
3805a9bb7912SHefty, Sean 		return -EACCES;
3806a9bb7912SHefty, Sean 
3807fa20105eSGuy Shapiro 	bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
3808a9bb7912SHefty, Sean 	if (!bind_list) {
3809a9bb7912SHefty, Sean 		ret = cma_alloc_port(ps, id_priv, snum);
3810a9bb7912SHefty, Sean 	} else {
3811a9bb7912SHefty, Sean 		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
3812a9bb7912SHefty, Sean 		if (!ret)
3813a9bb7912SHefty, Sean 			cma_bind_port(bind_list, id_priv);
3814a9bb7912SHefty, Sean 	}
3815a9bb7912SHefty, Sean 	return ret;
3816a9bb7912SHefty, Sean }
3817a9bb7912SHefty, Sean 
38182253fc0cSSteve Wise static enum rdma_ucm_port_space
38192253fc0cSSteve Wise cma_select_inet_ps(struct rdma_id_private *id_priv)
382058afdcb7SSean Hefty {
382158afdcb7SSean Hefty 	switch (id_priv->id.ps) {
382258afdcb7SSean Hefty 	case RDMA_PS_TCP:
382358afdcb7SSean Hefty 	case RDMA_PS_UDP:
382458afdcb7SSean Hefty 	case RDMA_PS_IPOIB:
382558afdcb7SSean Hefty 	case RDMA_PS_IB:
3826aac978e1SHaggai Eran 		return id_priv->id.ps;
382758afdcb7SSean Hefty 	default:
3828aac978e1SHaggai Eran 
3829aac978e1SHaggai Eran 		return 0;
383058afdcb7SSean Hefty 	}
383158afdcb7SSean Hefty }
383258afdcb7SSean Hefty 
38332253fc0cSSteve Wise static enum rdma_ucm_port_space
38342253fc0cSSteve Wise cma_select_ib_ps(struct rdma_id_private *id_priv)
383558afdcb7SSean Hefty {
38362253fc0cSSteve Wise 	enum rdma_ucm_port_space ps = 0;
383758afdcb7SSean Hefty 	struct sockaddr_ib *sib;
383858afdcb7SSean Hefty 	u64 sid_ps, mask, sid;
383958afdcb7SSean Hefty 
3840f4753834SSean Hefty 	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
384158afdcb7SSean Hefty 	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
384258afdcb7SSean Hefty 	sid = be64_to_cpu(sib->sib_sid) & mask;
384358afdcb7SSean Hefty 
384458afdcb7SSean Hefty 	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
384558afdcb7SSean Hefty 		sid_ps = RDMA_IB_IP_PS_IB;
3846aac978e1SHaggai Eran 		ps = RDMA_PS_IB;
384758afdcb7SSean Hefty 	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
384858afdcb7SSean Hefty 		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
384958afdcb7SSean Hefty 		sid_ps = RDMA_IB_IP_PS_TCP;
3850aac978e1SHaggai Eran 		ps = RDMA_PS_TCP;
385158afdcb7SSean Hefty 	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
385258afdcb7SSean Hefty 		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
385358afdcb7SSean Hefty 		sid_ps = RDMA_IB_IP_PS_UDP;
3854aac978e1SHaggai Eran 		ps = RDMA_PS_UDP;
385558afdcb7SSean Hefty 	}
385658afdcb7SSean Hefty 
385758afdcb7SSean Hefty 	if (ps) {
385858afdcb7SSean Hefty 		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
385958afdcb7SSean Hefty 		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
386058afdcb7SSean Hefty 						be64_to_cpu(sib->sib_sid_mask));
386158afdcb7SSean Hefty 	}
386258afdcb7SSean Hefty 	return ps;
386358afdcb7SSean Hefty }
386458afdcb7SSean Hefty 
3865e51060f0SSean Hefty static int cma_get_port(struct rdma_id_private *id_priv)
3866e51060f0SSean Hefty {
38672253fc0cSSteve Wise 	enum rdma_ucm_port_space ps;
3868e51060f0SSean Hefty 	int ret;
3869e51060f0SSean Hefty 
3870f4753834SSean Hefty 	if (cma_family(id_priv) != AF_IB)
387158afdcb7SSean Hefty 		ps = cma_select_inet_ps(id_priv);
387258afdcb7SSean Hefty 	else
387358afdcb7SSean Hefty 		ps = cma_select_ib_ps(id_priv);
387458afdcb7SSean Hefty 	if (!ps)
3875e51060f0SSean Hefty 		return -EPROTONOSUPPORT;
3876e51060f0SSean Hefty 
3877e51060f0SSean Hefty 	mutex_lock(&lock);
3878f4753834SSean Hefty 	if (cma_any_port(cma_src_addr(id_priv)))
3879aedec080SSean Hefty 		ret = cma_alloc_any_port(ps, id_priv);
3880e51060f0SSean Hefty 	else
3881e51060f0SSean Hefty 		ret = cma_use_port(ps, id_priv);
3882e51060f0SSean Hefty 	mutex_unlock(&lock);
3883e51060f0SSean Hefty 
3884e51060f0SSean Hefty 	return ret;
3885e51060f0SSean Hefty }
3886e51060f0SSean Hefty 
3887d14714dfSSean Hefty static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
3888d14714dfSSean Hefty 			       struct sockaddr *addr)
3889d14714dfSSean Hefty {
3890d90f9b35SRoland Dreier #if IS_ENABLED(CONFIG_IPV6)
3891d14714dfSSean Hefty 	struct sockaddr_in6 *sin6;
3892d14714dfSSean Hefty 
3893d14714dfSSean Hefty 	if (addr->sa_family != AF_INET6)
3894d14714dfSSean Hefty 		return 0;
3895d14714dfSSean Hefty 
3896d14714dfSSean Hefty 	sin6 = (struct sockaddr_in6 *) addr;
38975462edddSSomnath Kotur 
38985462edddSSomnath Kotur 	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
38995462edddSSomnath Kotur 		return 0;
39005462edddSSomnath Kotur 
39015462edddSSomnath Kotur 	if (!sin6->sin6_scope_id)
3902d14714dfSSean Hefty 			return -EINVAL;
3903d14714dfSSean Hefty 
3904d14714dfSSean Hefty 	dev_addr->bound_dev_if = sin6->sin6_scope_id;
3905d14714dfSSean Hefty #endif
3906d14714dfSSean Hefty 	return 0;
3907d14714dfSSean Hefty }
3908d14714dfSSean Hefty 
3909a9bb7912SHefty, Sean int rdma_listen(struct rdma_cm_id *id, int backlog)
3910a9bb7912SHefty, Sean {
3911732d41c5SJason Gunthorpe 	struct rdma_id_private *id_priv =
3912732d41c5SJason Gunthorpe 		container_of(id, struct rdma_id_private, id);
3913a9bb7912SHefty, Sean 	int ret;
3914a9bb7912SHefty, Sean 
3915732d41c5SJason Gunthorpe 	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
3916bc0bdc5aSJason Gunthorpe 		struct sockaddr_in any_in = {
3917bc0bdc5aSJason Gunthorpe 			.sin_family = AF_INET,
3918bc0bdc5aSJason Gunthorpe 			.sin_addr.s_addr = htonl(INADDR_ANY),
3919bc0bdc5aSJason Gunthorpe 		};
3920bc0bdc5aSJason Gunthorpe 
3921732d41c5SJason Gunthorpe 		/* For a well behaved ULP state will be RDMA_CM_IDLE */
3922bc0bdc5aSJason Gunthorpe 		ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
3923a9bb7912SHefty, Sean 		if (ret)
3924a9bb7912SHefty, Sean 			return ret;
3925732d41c5SJason Gunthorpe 		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
3926732d41c5SJason Gunthorpe 					   RDMA_CM_LISTEN)))
3927a9bb7912SHefty, Sean 			return -EINVAL;
3928a9bb7912SHefty, Sean 	}
3929a9bb7912SHefty, Sean 
3930d490ee52SJason Gunthorpe 	/*
3931d490ee52SJason Gunthorpe 	 * Once the ID reaches RDMA_CM_LISTEN it is no longer allowed to be
3932d490ee52SJason Gunthorpe 	 * reusable, and it has to be unique in the bind list.
3933d490ee52SJason Gunthorpe 	 */
3934a9bb7912SHefty, Sean 	if (id_priv->reuseaddr) {
3935d490ee52SJason Gunthorpe 		mutex_lock(&lock);
3936d490ee52SJason Gunthorpe 		ret = cma_check_port(id_priv->bind_list, id_priv, 0);
3937d490ee52SJason Gunthorpe 		if (!ret)
3938d490ee52SJason Gunthorpe 			id_priv->reuseaddr = 0;
3939d490ee52SJason Gunthorpe 		mutex_unlock(&lock);
3940a9bb7912SHefty, Sean 		if (ret)
3941a9bb7912SHefty, Sean 			goto err;
3942a9bb7912SHefty, Sean 	}
3943a9bb7912SHefty, Sean 
3944a9bb7912SHefty, Sean 	id_priv->backlog = backlog;
3945889d916bSShay Drory 	if (id_priv->cma_dev) {
394672219ceaSMichael Wang 		if (rdma_cap_ib_cm(id->device, 1)) {
3947a9bb7912SHefty, Sean 			ret = cma_ib_listen(id_priv);
3948a9bb7912SHefty, Sean 			if (ret)
3949a9bb7912SHefty, Sean 				goto err;
395004215330SMichael Wang 		} else if (rdma_cap_iw_cm(id->device, 1)) {
3951a9bb7912SHefty, Sean 			ret = cma_iw_listen(id_priv, backlog);
3952a9bb7912SHefty, Sean 			if (ret)
3953a9bb7912SHefty, Sean 				goto err;
395421655afcSMichael Wang 		} else {
3955a9bb7912SHefty, Sean 			ret = -ENOSYS;
3956a9bb7912SHefty, Sean 			goto err;
3957a9bb7912SHefty, Sean 		}
3958c80a0c52SLeon Romanovsky 	} else {
3959c80a0c52SLeon Romanovsky 		ret = cma_listen_on_all(id_priv);
3960c80a0c52SLeon Romanovsky 		if (ret)
3961c80a0c52SLeon Romanovsky 			goto err;
3962c80a0c52SLeon Romanovsky 	}
3963a9bb7912SHefty, Sean 
3964a9bb7912SHefty, Sean 	return 0;
3965a9bb7912SHefty, Sean err:
3966a9bb7912SHefty, Sean 	id_priv->backlog = 0;
3967d490ee52SJason Gunthorpe 	/*
3968d490ee52SJason Gunthorpe 	 * None of the failure paths that lead here allow the req_handlers to
3969d490ee52SJason Gunthorpe 	 * have run.
3970d490ee52SJason Gunthorpe 	 */
3971550e5ca7SNir Muchtar 	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
3972a9bb7912SHefty, Sean 	return ret;
3973a9bb7912SHefty, Sean }
3974a9bb7912SHefty, Sean EXPORT_SYMBOL(rdma_listen);
3975a9bb7912SHefty, Sean 
39768d037973SPatrisious Haddad static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
39778d037973SPatrisious Haddad 			      struct sockaddr *addr, const struct sockaddr *daddr)
3978e51060f0SSean Hefty {
39798d037973SPatrisious Haddad 	struct sockaddr *id_daddr;
3980e51060f0SSean Hefty 	int ret;
3981e51060f0SSean Hefty 
3982680f920aSSean Hefty 	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
3983680f920aSSean Hefty 	    addr->sa_family != AF_IB)
3984e51060f0SSean Hefty 		return -EAFNOSUPPORT;
3985e51060f0SSean Hefty 
3986550e5ca7SNir Muchtar 	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
3987e51060f0SSean Hefty 		return -EINVAL;
3988e51060f0SSean Hefty 
39898d037973SPatrisious Haddad 	ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
3990d14714dfSSean Hefty 	if (ret)
3991d14714dfSSean Hefty 		goto err1;
3992d14714dfSSean Hefty 
39937b85627bSMoni Shoua 	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
39948523c048SSean Hefty 	if (!cma_any_addr(addr)) {
39958d037973SPatrisious Haddad 		ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
3996255d0c14SKrishna Kumar 		if (ret)
3997255d0c14SKrishna Kumar 			goto err1;
3998255d0c14SKrishna Kumar 
3999ff11c6cdSParav Pandit 		ret = cma_acquire_dev_by_src_ip(id_priv);
4000e51060f0SSean Hefty 		if (ret)
4001255d0c14SKrishna Kumar 			goto err1;
4002e51060f0SSean Hefty 	}
4003e51060f0SSean Hefty 
400468602120SSean Hefty 	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
40055b0ec991SSean Hefty 		if (addr->sa_family == AF_INET)
40065b0ec991SSean Hefty 			id_priv->afonly = 1;
40075b0ec991SSean Hefty #if IS_ENABLED(CONFIG_IPV6)
4008fa20105eSGuy Shapiro 		else if (addr->sa_family == AF_INET6) {
4009fa20105eSGuy Shapiro 			struct net *net = id_priv->id.route.addr.dev_addr.net;
4010fa20105eSGuy Shapiro 
4011fa20105eSGuy Shapiro 			id_priv->afonly = net->ipv6.sysctl.bindv6only;
4012fa20105eSGuy Shapiro 		}
40135b0ec991SSean Hefty #endif
401468602120SSean Hefty 	}
40158d037973SPatrisious Haddad 	id_daddr = cma_dst_addr(id_priv);
40168d037973SPatrisious Haddad 	if (daddr != id_daddr)
40178d037973SPatrisious Haddad 		memcpy(id_daddr, daddr, rdma_addr_size(addr));
40188d037973SPatrisious Haddad 	id_daddr->sa_family = addr->sa_family;
40199dea9a2fSTatyana Nikolova 
4020e51060f0SSean Hefty 	ret = cma_get_port(id_priv);
4021e51060f0SSean Hefty 	if (ret)
4022255d0c14SKrishna Kumar 		goto err2;
4023e51060f0SSean Hefty 
4024cb5cd0eaSShay Drory 	if (!cma_any_addr(addr))
4025cb5cd0eaSShay Drory 		rdma_restrack_add(&id_priv->res);
4026e51060f0SSean Hefty 	return 0;
4027255d0c14SKrishna Kumar err2:
4028ed7a01fdSLeon Romanovsky 	if (id_priv->cma_dev)
4029a396d43aSSean Hefty 		cma_release_dev(id_priv);
4030255d0c14SKrishna Kumar err1:
4031550e5ca7SNir Muchtar 	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
4032e51060f0SSean Hefty 	return ret;
4033e51060f0SSean Hefty }
40348d037973SPatrisious Haddad 
40358d037973SPatrisious Haddad static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
40368d037973SPatrisious Haddad 			 const struct sockaddr *dst_addr)
40378d037973SPatrisious Haddad {
40388d037973SPatrisious Haddad 	struct rdma_id_private *id_priv =
40398d037973SPatrisious Haddad 		container_of(id, struct rdma_id_private, id);
40408d037973SPatrisious Haddad 	struct sockaddr_storage zero_sock = {};
40418d037973SPatrisious Haddad 
40428d037973SPatrisious Haddad 	if (src_addr && src_addr->sa_family)
40438d037973SPatrisious Haddad 		return rdma_bind_addr_dst(id_priv, src_addr, dst_addr);
40448d037973SPatrisious Haddad 
40458d037973SPatrisious Haddad 	/*
40468d037973SPatrisious Haddad 	 * When src_addr is not specified, automatically supply a wildcard (any) address
40478d037973SPatrisious Haddad 	 */
40488d037973SPatrisious Haddad 	zero_sock.ss_family = dst_addr->sa_family;
40498d037973SPatrisious Haddad 	if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
40508d037973SPatrisious Haddad 		struct sockaddr_in6 *src_addr6 =
40518d037973SPatrisious Haddad 			(struct sockaddr_in6 *)&zero_sock;
40528d037973SPatrisious Haddad 		struct sockaddr_in6 *dst_addr6 =
40538d037973SPatrisious Haddad 			(struct sockaddr_in6 *)dst_addr;
40548d037973SPatrisious Haddad 
40558d037973SPatrisious Haddad 		src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
40568d037973SPatrisious Haddad 		if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
40578d037973SPatrisious Haddad 			id->route.addr.dev_addr.bound_dev_if =
40588d037973SPatrisious Haddad 				dst_addr6->sin6_scope_id;
40598d037973SPatrisious Haddad 	} else if (dst_addr->sa_family == AF_IB) {
40608d037973SPatrisious Haddad 		((struct sockaddr_ib *)&zero_sock)->sib_pkey =
40618d037973SPatrisious Haddad 			((struct sockaddr_ib *)dst_addr)->sib_pkey;
40628d037973SPatrisious Haddad 	}
40638d037973SPatrisious Haddad 	return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr);
40648d037973SPatrisious Haddad }
40658d037973SPatrisious Haddad 
40668d037973SPatrisious Haddad /*
40678d037973SPatrisious Haddad  * If required, resolve the source address for bind and leave the id_priv in
40688d037973SPatrisious Haddad  * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
40698d037973SPatrisious Haddad  * calls made by the ULP; a previously bound ID will not be re-bound, and
40708d037973SPatrisious Haddad  * src_addr is ignored.
40718d037973SPatrisious Haddad  */
40728d037973SPatrisious Haddad static int resolve_prepare_src(struct rdma_id_private *id_priv,
40738d037973SPatrisious Haddad 			       struct sockaddr *src_addr,
40748d037973SPatrisious Haddad 			       const struct sockaddr *dst_addr)
40758d037973SPatrisious Haddad {
40768d037973SPatrisious Haddad 	int ret;
40778d037973SPatrisious Haddad 
40788d037973SPatrisious Haddad 	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
40798d037973SPatrisious Haddad 		/* For a well behaved ULP state will be RDMA_CM_IDLE */
40808d037973SPatrisious Haddad 		ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
40818d037973SPatrisious Haddad 		if (ret)
40828d037973SPatrisious Haddad 			return ret;
40838d037973SPatrisious Haddad 		if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
40848d037973SPatrisious Haddad 					   RDMA_CM_ADDR_QUERY)))
40858d037973SPatrisious Haddad 			return -EINVAL;
40868d037973SPatrisious Haddad 
40870e158630SShiraz Saleem 	} else {
40880e158630SShiraz Saleem 		memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
40898d037973SPatrisious Haddad 	}
40908d037973SPatrisious Haddad 
40918d037973SPatrisious Haddad 	if (cma_family(id_priv) != dst_addr->sa_family) {
40928d037973SPatrisious Haddad 		ret = -EINVAL;
40938d037973SPatrisious Haddad 		goto err_state;
40948d037973SPatrisious Haddad 	}
40958d037973SPatrisious Haddad 	return 0;
40968d037973SPatrisious Haddad 
40978d037973SPatrisious Haddad err_state:
40988d037973SPatrisious Haddad 	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
40998d037973SPatrisious Haddad 	return ret;
41008d037973SPatrisious Haddad }
41018d037973SPatrisious Haddad 
41028d037973SPatrisious Haddad int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
41038d037973SPatrisious Haddad 		      const struct sockaddr *dst_addr, unsigned long timeout_ms)
41048d037973SPatrisious Haddad {
41058d037973SPatrisious Haddad 	struct rdma_id_private *id_priv =
41068d037973SPatrisious Haddad 		container_of(id, struct rdma_id_private, id);
41078d037973SPatrisious Haddad 	int ret;
41088d037973SPatrisious Haddad 
41098d037973SPatrisious Haddad 	ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
41108d037973SPatrisious Haddad 	if (ret)
41118d037973SPatrisious Haddad 		return ret;
41128d037973SPatrisious Haddad 
41138d037973SPatrisious Haddad 	if (cma_any_addr(dst_addr)) {
41148d037973SPatrisious Haddad 		ret = cma_resolve_loopback(id_priv);
41158d037973SPatrisious Haddad 	} else {
41168d037973SPatrisious Haddad 		if (dst_addr->sa_family == AF_IB) {
41178d037973SPatrisious Haddad 			ret = cma_resolve_ib_addr(id_priv);
41188d037973SPatrisious Haddad 		} else {
41198d037973SPatrisious Haddad 			/*
41208d037973SPatrisious Haddad 			 * The FSM can return to RDMA_CM_ADDR_BOUND after
41218d037973SPatrisious Haddad 			 * rdma_resolve_ip() is called, e.g. through the error
41228d037973SPatrisious Haddad 			 * path in addr_handler(). If this happens, the existing
41238d037973SPatrisious Haddad 			 * request must be canceled before issuing a new one.
41248d037973SPatrisious Haddad 			 * Since canceling a request is a bit slow and this
41258d037973SPatrisious Haddad 			 * oddball path is rare, keep track of whether a request
41268d037973SPatrisious Haddad 			 * has been issued. This tracking ends up being permanent
41278d037973SPatrisious Haddad 			 * state, since this is the only cancel and it happens
41288d037973SPatrisious Haddad 			 * immediately before rdma_resolve_ip().
41298d037973SPatrisious Haddad 			 */
41308d037973SPatrisious Haddad 			if (id_priv->used_resolve_ip)
41318d037973SPatrisious Haddad 				rdma_addr_cancel(&id->route.addr.dev_addr);
41328d037973SPatrisious Haddad 			else
41338d037973SPatrisious Haddad 				id_priv->used_resolve_ip = 1;
41348d037973SPatrisious Haddad 			ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
41358d037973SPatrisious Haddad 					      &id->route.addr.dev_addr,
41368d037973SPatrisious Haddad 					      timeout_ms, addr_handler,
41378d037973SPatrisious Haddad 					      false, id_priv);
41388d037973SPatrisious Haddad 		}
41398d037973SPatrisious Haddad 	}
41408d037973SPatrisious Haddad 	if (ret)
41418d037973SPatrisious Haddad 		goto err;
41428d037973SPatrisious Haddad 
41438d037973SPatrisious Haddad 	return 0;
41448d037973SPatrisious Haddad err:
41458d037973SPatrisious Haddad 	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
41468d037973SPatrisious Haddad 	return ret;
41478d037973SPatrisious Haddad }
41488d037973SPatrisious Haddad EXPORT_SYMBOL(rdma_resolve_addr);
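
/*
 * Usage note (editor's illustration, not part of cma.c): a ULP may pass a
 * NULL or unspecified src_addr, e.g.
 *
 *	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *)&dst, 2000);
 *
 * in which case cma_bind_addr() above binds a wildcard source address of
 * dst's address family before the asynchronous resolution starts, and the
 * outcome is reported via RDMA_CM_EVENT_ADDR_RESOLVED or ADDR_ERROR.
 */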
41498d037973SPatrisious Haddad 
41508d037973SPatrisious Haddad int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
41518d037973SPatrisious Haddad {
41528d037973SPatrisious Haddad 	struct rdma_id_private *id_priv =
41538d037973SPatrisious Haddad 		container_of(id, struct rdma_id_private, id);
41548d037973SPatrisious Haddad 
41558d037973SPatrisious Haddad 	return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv));
41568d037973SPatrisious Haddad }
4157e51060f0SSean Hefty EXPORT_SYMBOL(rdma_bind_addr);
4158e51060f0SSean Hefty 
4159f4753834SSean Hefty static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
4160e51060f0SSean Hefty {
4161e51060f0SSean Hefty 	struct cma_hdr *cma_hdr;
4162e51060f0SSean Hefty 
416301602f11SSean Hefty 	cma_hdr = hdr;
416401602f11SSean Hefty 	cma_hdr->cma_version = CMA_VERSION;
4165f4753834SSean Hefty 	if (cma_family(id_priv) == AF_INET) {
41661f5175adSAleksey Senin 		struct sockaddr_in *src4, *dst4;
41671f5175adSAleksey Senin 
4168f4753834SSean Hefty 		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
4169f4753834SSean Hefty 		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
4170e51060f0SSean Hefty 
4171e51060f0SSean Hefty 		cma_set_ip_ver(cma_hdr, 4);
4172e51060f0SSean Hefty 		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
4173e51060f0SSean Hefty 		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
4174e51060f0SSean Hefty 		cma_hdr->port = src4->sin_port;
4175e8160e15SSean Hefty 	} else if (cma_family(id_priv) == AF_INET6) {
41761f5175adSAleksey Senin 		struct sockaddr_in6 *src6, *dst6;
41771f5175adSAleksey Senin 
4178f4753834SSean Hefty 		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
4179f4753834SSean Hefty 		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
41801f5175adSAleksey Senin 
41811f5175adSAleksey Senin 		cma_set_ip_ver(cma_hdr, 6);
41821f5175adSAleksey Senin 		cma_hdr->src_addr.ip6 = src6->sin6_addr;
41831f5175adSAleksey Senin 		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
41841f5175adSAleksey Senin 		cma_hdr->port = src6->sin6_port;
41851f5175adSAleksey Senin 	}
4186e51060f0SSean Hefty 	return 0;
4187e51060f0SSean Hefty }
4188e51060f0SSean Hefty 
4189628e5f6dSSean Hefty static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
4190e7ff98aeSParav Pandit 				const struct ib_cm_event *ib_event)
4191628e5f6dSSean Hefty {
4192628e5f6dSSean Hefty 	struct rdma_id_private *id_priv = cm_id->context;
41937582df82SParav Pandit 	struct rdma_cm_event event = {};
4194e7ff98aeSParav Pandit 	const struct ib_cm_sidr_rep_event_param *rep =
4195e7ff98aeSParav Pandit 				&ib_event->param.sidr_rep_rcvd;
4196f6a9d47aSJason Gunthorpe 	int ret;
4197628e5f6dSSean Hefty 
419837e07cdaSBart Van Assche 	mutex_lock(&id_priv->handler_mutex);
41992a7cec53SJason Gunthorpe 	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
420037e07cdaSBart Van Assche 		goto out;
4201628e5f6dSSean Hefty 
4202628e5f6dSSean Hefty 	switch (ib_event->event) {
4203628e5f6dSSean Hefty 	case IB_CM_SIDR_REQ_ERROR:
4204628e5f6dSSean Hefty 		event.event = RDMA_CM_EVENT_UNREACHABLE;
4205628e5f6dSSean Hefty 		event.status = -ETIMEDOUT;
4206628e5f6dSSean Hefty 		break;
4207628e5f6dSSean Hefty 	case IB_CM_SIDR_REP_RECEIVED:
4208628e5f6dSSean Hefty 		event.param.ud.private_data = ib_event->private_data;
4209628e5f6dSSean Hefty 		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
4210628e5f6dSSean Hefty 		if (rep->status != IB_SIDR_SUCCESS) {
4211628e5f6dSSean Hefty 			event.event = RDMA_CM_EVENT_UNREACHABLE;
4212628e5f6dSSean Hefty 			event.status = ib_event->param.sidr_rep_rcvd.status;
4213498683c6SMoni Shoua 			pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
4214498683c6SMoni Shoua 					     event.status);
4215628e5f6dSSean Hefty 			break;
4216628e5f6dSSean Hefty 		}
42175c438135SSean Hefty 		ret = cma_set_qkey(id_priv, rep->qkey);
4218d2ca39f2SYossi Etigin 		if (ret) {
4219498683c6SMoni Shoua 			pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
4220d2ca39f2SYossi Etigin 			event.event = RDMA_CM_EVENT_ADDR_ERROR;
42215c438135SSean Hefty 			event.status = ret;
4222628e5f6dSSean Hefty 			break;
4223628e5f6dSSean Hefty 		}
42244ad6a024SParav Pandit 		ib_init_ah_attr_from_path(id_priv->id.device,
42254ad6a024SParav Pandit 					  id_priv->id.port_num,
4226628e5f6dSSean Hefty 					  id_priv->id.route.path_rec,
422739839107SParav Pandit 					  &event.param.ud.ah_attr,
422839839107SParav Pandit 					  rep->sgid_attr);
4229628e5f6dSSean Hefty 		event.param.ud.qp_num = rep->qpn;
4230628e5f6dSSean Hefty 		event.param.ud.qkey = rep->qkey;
4231628e5f6dSSean Hefty 		event.event = RDMA_CM_EVENT_ESTABLISHED;
4232628e5f6dSSean Hefty 		event.status = 0;
4233628e5f6dSSean Hefty 		break;
4234628e5f6dSSean Hefty 	default:
4235aba25a3eSParav Pandit 		pr_err("RDMA CMA: unexpected IB CM event: %d\n",
4236628e5f6dSSean Hefty 		       ib_event->event);
4237628e5f6dSSean Hefty 		goto out;
4238628e5f6dSSean Hefty 	}
4239628e5f6dSSean Hefty 
4240ed999f82SChuck Lever 	ret = cma_cm_event_handler(id_priv, &event);
4241aa74f487SParav Pandit 
4242aa74f487SParav Pandit 	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
4243628e5f6dSSean Hefty 	if (ret) {
4244628e5f6dSSean Hefty 		/* Destroy the CM ID by returning a non-zero value. */
4245628e5f6dSSean Hefty 		id_priv->cm_id.ib = NULL;
4246f6a9d47aSJason Gunthorpe 		destroy_id_handler_unlock(id_priv);
4247628e5f6dSSean Hefty 		return ret;
4248628e5f6dSSean Hefty 	}
4249628e5f6dSSean Hefty out:
4250de910bd9SOr Gerlitz 	mutex_unlock(&id_priv->handler_mutex);
4251f6a9d47aSJason Gunthorpe 	return 0;
4252628e5f6dSSean Hefty }
4253628e5f6dSSean Hefty 
4254628e5f6dSSean Hefty static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
4255628e5f6dSSean Hefty 			      struct rdma_conn_param *conn_param)
4256628e5f6dSSean Hefty {
4257628e5f6dSSean Hefty 	struct ib_cm_sidr_req_param req;
42580c9361fcSJack Morgenstein 	struct ib_cm_id	*id;
4259e511d1aeSSean Hefty 	void *private_data;
4260c0b64f58SBart Van Assche 	u8 offset;
4261c0b64f58SBart Van Assche 	int ret;
4262628e5f6dSSean Hefty 
4263e511d1aeSSean Hefty 	memset(&req, 0, sizeof req);
4264e8160e15SSean Hefty 	offset = cma_user_data_offset(id_priv);
42658d0d2b0fSHåkon Bugge 	if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
426604ded167SSean Hefty 		return -EINVAL;
426704ded167SSean Hefty 
4268e8160e15SSean Hefty 	if (req.private_data_len) {
4269e511d1aeSSean Hefty 		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
4270e511d1aeSSean Hefty 		if (!private_data)
4271628e5f6dSSean Hefty 			return -ENOMEM;
4272e8160e15SSean Hefty 	} else {
4273e511d1aeSSean Hefty 		private_data = NULL;
4274e8160e15SSean Hefty 	}
4275628e5f6dSSean Hefty 
4276628e5f6dSSean Hefty 	if (conn_param->private_data && conn_param->private_data_len)
4277e511d1aeSSean Hefty 		memcpy(private_data + offset, conn_param->private_data,
4278e511d1aeSSean Hefty 		       conn_param->private_data_len);
4279628e5f6dSSean Hefty 
4280e511d1aeSSean Hefty 	if (private_data) {
4281e511d1aeSSean Hefty 		ret = cma_format_hdr(private_data, id_priv);
4282628e5f6dSSean Hefty 		if (ret)
4283628e5f6dSSean Hefty 			goto out;
4284e511d1aeSSean Hefty 		req.private_data = private_data;
4285e8160e15SSean Hefty 	}
4286628e5f6dSSean Hefty 
42870c9361fcSJack Morgenstein 	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
42880c9361fcSJack Morgenstein 			     id_priv);
42890c9361fcSJack Morgenstein 	if (IS_ERR(id)) {
42900c9361fcSJack Morgenstein 		ret = PTR_ERR(id);
4291628e5f6dSSean Hefty 		goto out;
4292628e5f6dSSean Hefty 	}
42930c9361fcSJack Morgenstein 	id_priv->cm_id.ib = id;
4294628e5f6dSSean Hefty 
4295f4753834SSean Hefty 	req.path = id_priv->id.route.path_rec;
4296815d456eSParav Pandit 	req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
4297cf53936fSSean Hefty 	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
4298628e5f6dSSean Hefty 	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
4299628e5f6dSSean Hefty 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
4300628e5f6dSSean Hefty 
4301ed999f82SChuck Lever 	trace_cm_send_sidr_req(id_priv);
4302628e5f6dSSean Hefty 	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
4303628e5f6dSSean Hefty 	if (ret) {
4304628e5f6dSSean Hefty 		ib_destroy_cm_id(id_priv->cm_id.ib);
4305628e5f6dSSean Hefty 		id_priv->cm_id.ib = NULL;
4306628e5f6dSSean Hefty 	}
4307628e5f6dSSean Hefty out:
4308e511d1aeSSean Hefty 	kfree(private_data);
4309628e5f6dSSean Hefty 	return ret;
4310628e5f6dSSean Hefty }
4311628e5f6dSSean Hefty 
4312e51060f0SSean Hefty static int cma_connect_ib(struct rdma_id_private *id_priv,
4313e51060f0SSean Hefty 			  struct rdma_conn_param *conn_param)
4314e51060f0SSean Hefty {
4315e51060f0SSean Hefty 	struct ib_cm_req_param req;
4316e51060f0SSean Hefty 	struct rdma_route *route;
4317e51060f0SSean Hefty 	void *private_data;
43180c9361fcSJack Morgenstein 	struct ib_cm_id	*id;
4319c0b64f58SBart Van Assche 	u8 offset;
4320c0b64f58SBart Van Assche 	int ret;
4321e51060f0SSean Hefty 
4322e51060f0SSean Hefty 	memset(&req, 0, sizeof req);
4323e8160e15SSean Hefty 	offset = cma_user_data_offset(id_priv);
43248d0d2b0fSHåkon Bugge 	if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
432504ded167SSean Hefty 		return -EINVAL;
432604ded167SSean Hefty 
4327e8160e15SSean Hefty 	if (req.private_data_len) {
4328e51060f0SSean Hefty 		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
4329e51060f0SSean Hefty 		if (!private_data)
4330e51060f0SSean Hefty 			return -ENOMEM;
4331e8160e15SSean Hefty 	} else {
4332e8160e15SSean Hefty 		private_data = NULL;
4333e8160e15SSean Hefty 	}
4334e51060f0SSean Hefty 
4335e51060f0SSean Hefty 	if (conn_param->private_data && conn_param->private_data_len)
4336e51060f0SSean Hefty 		memcpy(private_data + offset, conn_param->private_data,
4337e51060f0SSean Hefty 		       conn_param->private_data_len);
4338e51060f0SSean Hefty 
43390c9361fcSJack Morgenstein 	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
43400c9361fcSJack Morgenstein 	if (IS_ERR(id)) {
43410c9361fcSJack Morgenstein 		ret = PTR_ERR(id);
4342e51060f0SSean Hefty 		goto out;
4343e51060f0SSean Hefty 	}
43440c9361fcSJack Morgenstein 	id_priv->cm_id.ib = id;
4345e51060f0SSean Hefty 
4346e51060f0SSean Hefty 	route = &id_priv->id.route;
4347e8160e15SSean Hefty 	if (private_data) {
4348f4753834SSean Hefty 		ret = cma_format_hdr(private_data, id_priv);
4349e51060f0SSean Hefty 		if (ret)
4350e51060f0SSean Hefty 			goto out;
4351e51060f0SSean Hefty 		req.private_data = private_data;
4352e8160e15SSean Hefty 	}
4353e51060f0SSean Hefty 
4354e51060f0SSean Hefty 	req.primary_path = &route->path_rec[0];
4355eb8336dbSMark Zhang 	req.primary_path_inbound = route->path_rec_inbound;
4356eb8336dbSMark Zhang 	req.primary_path_outbound = route->path_rec_outbound;
4357bf9a9928SMark Zhang 	if (route->num_pri_alt_paths == 2)
4358e51060f0SSean Hefty 		req.alternate_path = &route->path_rec[1];
4359e51060f0SSean Hefty 
4360815d456eSParav Pandit 	req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
4361815d456eSParav Pandit 	/* Alternate path SGID attribute currently unsupported */
4362cf53936fSSean Hefty 	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
4363e51060f0SSean Hefty 	req.qp_num = id_priv->qp_num;
436418c441a6SSean Hefty 	req.qp_type = id_priv->id.qp_type;
4365e51060f0SSean Hefty 	req.starting_psn = id_priv->seq_num;
4366e51060f0SSean Hefty 	req.responder_resources = conn_param->responder_resources;
4367e51060f0SSean Hefty 	req.initiator_depth = conn_param->initiator_depth;
4368e51060f0SSean Hefty 	req.flow_control = conn_param->flow_control;
43694ede178aSSean Hefty 	req.retry_count = min_t(u8, 7, conn_param->retry_count);
43704ede178aSSean Hefty 	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
4371e51060f0SSean Hefty 	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
4372e51060f0SSean Hefty 	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
4373e51060f0SSean Hefty 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
4374e51060f0SSean Hefty 	req.srq = id_priv->srq ? 1 : 0;
4375a20652e1SLeon Romanovsky 	req.ece.vendor_id = id_priv->ece.vendor_id;
4376a20652e1SLeon Romanovsky 	req.ece.attr_mod = id_priv->ece.attr_mod;
4377e51060f0SSean Hefty 
4378ed999f82SChuck Lever 	trace_cm_send_req(id_priv);
4379e51060f0SSean Hefty 	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
4380e51060f0SSean Hefty out:
43810c9361fcSJack Morgenstein 	if (ret && !IS_ERR(id)) {
43820c9361fcSJack Morgenstein 		ib_destroy_cm_id(id);
4383675a027cSKrishna Kumar 		id_priv->cm_id.ib = NULL;
4384675a027cSKrishna Kumar 	}
4385675a027cSKrishna Kumar 
4386e51060f0SSean Hefty 	kfree(private_data);
4387e51060f0SSean Hefty 	return ret;
4388e51060f0SSean Hefty }
4389e51060f0SSean Hefty 
439007ebafbaSTom Tucker static int cma_connect_iw(struct rdma_id_private *id_priv,
439107ebafbaSTom Tucker 			  struct rdma_conn_param *conn_param)
439207ebafbaSTom Tucker {
439307ebafbaSTom Tucker 	struct iw_cm_id *cm_id;
439407ebafbaSTom Tucker 	int ret;
439507ebafbaSTom Tucker 	struct iw_cm_conn_param iw_param;
439607ebafbaSTom Tucker 
439707ebafbaSTom Tucker 	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
43980c9361fcSJack Morgenstein 	if (IS_ERR(cm_id))
43990c9361fcSJack Morgenstein 		return PTR_ERR(cm_id);
440007ebafbaSTom Tucker 
4401ca0c448dSHåkon Bugge 	mutex_lock(&id_priv->qp_mutex);
440268cdba06SSteve Wise 	cm_id->tos = id_priv->tos;
4403926ba19bSSteve Wise 	cm_id->tos_set = id_priv->tos_set;
4404ca0c448dSHåkon Bugge 	mutex_unlock(&id_priv->qp_mutex);
4405ca0c448dSHåkon Bugge 
440607ebafbaSTom Tucker 	id_priv->cm_id.iw = cm_id;
440707ebafbaSTom Tucker 
440824d44a39SSteve Wise 	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
440924d44a39SSteve Wise 	       rdma_addr_size(cma_src_addr(id_priv)));
441024d44a39SSteve Wise 	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
441124d44a39SSteve Wise 	       rdma_addr_size(cma_dst_addr(id_priv)));
441207ebafbaSTom Tucker 
44135851bb89SSean Hefty 	ret = cma_modify_qp_rtr(id_priv, conn_param);
4414675a027cSKrishna Kumar 	if (ret)
4415675a027cSKrishna Kumar 		goto out;
441607ebafbaSTom Tucker 
4417f45ee80eSHefty, Sean 	if (conn_param) {
441807ebafbaSTom Tucker 		iw_param.ord = conn_param->initiator_depth;
441907ebafbaSTom Tucker 		iw_param.ird = conn_param->responder_resources;
442007ebafbaSTom Tucker 		iw_param.private_data = conn_param->private_data;
442107ebafbaSTom Tucker 		iw_param.private_data_len = conn_param->private_data_len;
4422f45ee80eSHefty, Sean 		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
4423f45ee80eSHefty, Sean 	} else {
4424f45ee80eSHefty, Sean 		memset(&iw_param, 0, sizeof iw_param);
442507ebafbaSTom Tucker 		iw_param.qpn = id_priv->qp_num;
4426f45ee80eSHefty, Sean 	}
442707ebafbaSTom Tucker 	ret = iw_cm_connect(cm_id, &iw_param);
442807ebafbaSTom Tucker out:
44290c9361fcSJack Morgenstein 	if (ret) {
4430675a027cSKrishna Kumar 		iw_destroy_cm_id(cm_id);
4431675a027cSKrishna Kumar 		id_priv->cm_id.iw = NULL;
4432675a027cSKrishna Kumar 	}
443307ebafbaSTom Tucker 	return ret;
443407ebafbaSTom Tucker }
443507ebafbaSTom Tucker 
4436071ba4ccSJason Gunthorpe /**
4437071ba4ccSJason Gunthorpe  * rdma_connect_locked - Initiate an active connection request.
4438071ba4ccSJason Gunthorpe  * @id: Connection identifier to connect.
4439071ba4ccSJason Gunthorpe  * @conn_param: Connection information used for connected QPs.
4440071ba4ccSJason Gunthorpe  *
4441071ba4ccSJason Gunthorpe  * Same as rdma_connect() but can only be called from the
4442071ba4ccSJason Gunthorpe  * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
4443071ba4ccSJason Gunthorpe  */
4444071ba4ccSJason Gunthorpe int rdma_connect_locked(struct rdma_cm_id *id,
4445071ba4ccSJason Gunthorpe 			struct rdma_conn_param *conn_param)
4446e51060f0SSean Hefty {
44472a7cec53SJason Gunthorpe 	struct rdma_id_private *id_priv =
44482a7cec53SJason Gunthorpe 		container_of(id, struct rdma_id_private, id);
4449e51060f0SSean Hefty 	int ret;
4450e51060f0SSean Hefty 
4451071ba4ccSJason Gunthorpe 	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
4452071ba4ccSJason Gunthorpe 		return -EINVAL;
4453e51060f0SSean Hefty 
4454e51060f0SSean Hefty 	if (!id->qp) {
4455e51060f0SSean Hefty 		id_priv->qp_num = conn_param->qp_num;
4456e51060f0SSean Hefty 		id_priv->srq = conn_param->srq;
4457e51060f0SSean Hefty 	}
4458e51060f0SSean Hefty 
445972219ceaSMichael Wang 	if (rdma_cap_ib_cm(id->device, id->port_num)) {
4460b26f9b99SSean Hefty 		if (id->qp_type == IB_QPT_UD)
4461628e5f6dSSean Hefty 			ret = cma_resolve_ib_udp(id_priv, conn_param);
4462628e5f6dSSean Hefty 		else
4463e51060f0SSean Hefty 			ret = cma_connect_ib(id_priv, conn_param);
4464b6eb7011SWenpeng Liang 	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
446507ebafbaSTom Tucker 		ret = cma_connect_iw(id_priv, conn_param);
4466b6eb7011SWenpeng Liang 	} else {
4467e51060f0SSean Hefty 		ret = -ENOSYS;
4468b6eb7011SWenpeng Liang 	}
4469e51060f0SSean Hefty 	if (ret)
44702a7cec53SJason Gunthorpe 		goto err_state;
4471e51060f0SSean Hefty 	return 0;
44722a7cec53SJason Gunthorpe err_state:
4473550e5ca7SNir Muchtar 	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
4474071ba4ccSJason Gunthorpe 	return ret;
4475071ba4ccSJason Gunthorpe }
4476071ba4ccSJason Gunthorpe EXPORT_SYMBOL(rdma_connect_locked);
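/*
 * Illustrative sketch (editor's addition): connecting directly from the
 * RDMA_CM_EVENT_ROUTE_RESOLVED callback, where the handler_mutex is
 * already held, so rdma_connect_locked() must be used instead of
 * rdma_connect().  The conn_param values are assumptions for the example.
 *
 *	static int example_cm_handler(struct rdma_cm_id *id,
 *				      struct rdma_cm_event *event)
 *	{
 *		struct rdma_conn_param conn_param = {};
 *
 *		if (event->event == RDMA_CM_EVENT_ROUTE_RESOLVED) {
 *			conn_param.responder_resources = 1;
 *			conn_param.initiator_depth = 1;
 *			conn_param.retry_count = 7;
 *			return rdma_connect_locked(id, &conn_param);
 *		}
 *		return 0;
 *	}
 */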
4477071ba4ccSJason Gunthorpe 
4478071ba4ccSJason Gunthorpe /**
4479071ba4ccSJason Gunthorpe  * rdma_connect - Initiate an active connection request.
4480071ba4ccSJason Gunthorpe  * @id: Connection identifier to connect.
4481071ba4ccSJason Gunthorpe  * @conn_param: Connection information used for connected QPs.
4482071ba4ccSJason Gunthorpe  *
4483071ba4ccSJason Gunthorpe  * Users must have resolved a route to the destination address by calling
4484071ba4ccSJason Gunthorpe  * rdma_resolve_route() before calling this routine.
4485071ba4ccSJason Gunthorpe  *
4486071ba4ccSJason Gunthorpe  * This call will either connect to a remote QP or obtain remote QP information
4487071ba4ccSJason Gunthorpe  * for unconnected rdma_cm_id's.  The actual operation is based on the
4488071ba4ccSJason Gunthorpe  * rdma_cm_id's port space.
4489071ba4ccSJason Gunthorpe  */
4490071ba4ccSJason Gunthorpe int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
4491071ba4ccSJason Gunthorpe {
4492071ba4ccSJason Gunthorpe 	struct rdma_id_private *id_priv =
4493071ba4ccSJason Gunthorpe 		container_of(id, struct rdma_id_private, id);
4494071ba4ccSJason Gunthorpe 	int ret;
4495071ba4ccSJason Gunthorpe 
4496071ba4ccSJason Gunthorpe 	mutex_lock(&id_priv->handler_mutex);
4497071ba4ccSJason Gunthorpe 	ret = rdma_connect_locked(id, conn_param);
44982a7cec53SJason Gunthorpe 	mutex_unlock(&id_priv->handler_mutex);
4499e51060f0SSean Hefty 	return ret;
4500e51060f0SSean Hefty }
4501e51060f0SSean Hefty EXPORT_SYMBOL(rdma_connect);
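/*
 * Illustrative sketch (editor's addition): issuing the connect from process
 * context, after the ULP has seen RDMA_CM_EVENT_ROUTE_RESOLVED and created
 * its QP.  The field values and the "hello" payload are assumptions for the
 * example.
 *
 *	struct rdma_conn_param conn_param = {};
 *
 *	conn_param.private_data = &hello;	// ULP-defined payload
 *	conn_param.private_data_len = sizeof(hello);
 *	conn_param.responder_resources = 1;
 *	conn_param.initiator_depth = 1;
 *	conn_param.retry_count = 7;
 *	conn_param.rnr_retry_count = 7;
 *	ret = rdma_connect(id, &conn_param);	// must not hold handler_mutex
 */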
4502e51060f0SSean Hefty 
450334e2ab57SLeon Romanovsky /**
450434e2ab57SLeon Romanovsky  * rdma_connect_ece - Initiate an active connection request with ECE data.
450534e2ab57SLeon Romanovsky  * @id: Connection identifier to connect.
450634e2ab57SLeon Romanovsky  * @conn_param: Connection information used for connected QPs.
450734e2ab57SLeon Romanovsky  * @ece: ECE parameters
450834e2ab57SLeon Romanovsky  *
450934e2ab57SLeon Romanovsky  * See rdma_connect() explanation.
451034e2ab57SLeon Romanovsky  */
451134e2ab57SLeon Romanovsky int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
451234e2ab57SLeon Romanovsky 		     struct rdma_ucm_ece *ece)
451334e2ab57SLeon Romanovsky {
451434e2ab57SLeon Romanovsky 	struct rdma_id_private *id_priv =
451534e2ab57SLeon Romanovsky 		container_of(id, struct rdma_id_private, id);
451634e2ab57SLeon Romanovsky 
451734e2ab57SLeon Romanovsky 	id_priv->ece.vendor_id = ece->vendor_id;
451834e2ab57SLeon Romanovsky 	id_priv->ece.attr_mod = ece->attr_mod;
451934e2ab57SLeon Romanovsky 
452034e2ab57SLeon Romanovsky 	return rdma_connect(id, conn_param);
452134e2ab57SLeon Romanovsky }
452234e2ab57SLeon Romanovsky EXPORT_SYMBOL(rdma_connect_ece);
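/*
 * Illustrative sketch (editor's addition): same flow as rdma_connect(), but
 * carrying ECE data.  The vendor_id and attr_mod values are placeholders,
 * not real vendor assignments.
 *
 *	struct rdma_ucm_ece ece = {
 *		.vendor_id = 0x1234,
 *		.attr_mod = 1,
 *	};
 *
 *	ret = rdma_connect_ece(id, &conn_param, &ece);
 */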
452334e2ab57SLeon Romanovsky 
4524e51060f0SSean Hefty static int cma_accept_ib(struct rdma_id_private *id_priv,
4525e51060f0SSean Hefty 			 struct rdma_conn_param *conn_param)
4526e51060f0SSean Hefty {
4527e51060f0SSean Hefty 	struct ib_cm_rep_param rep;
45285851bb89SSean Hefty 	int ret;
4529e51060f0SSean Hefty 
45305851bb89SSean Hefty 	ret = cma_modify_qp_rtr(id_priv, conn_param);
4531e51060f0SSean Hefty 	if (ret)
45320fe313b0SSean Hefty 		goto out;
45330fe313b0SSean Hefty 
45345851bb89SSean Hefty 	ret = cma_modify_qp_rts(id_priv, conn_param);
45350fe313b0SSean Hefty 	if (ret)
45360fe313b0SSean Hefty 		goto out;
45370fe313b0SSean Hefty 
4538e51060f0SSean Hefty 	memset(&rep, 0, sizeof rep);
4539e51060f0SSean Hefty 	rep.qp_num = id_priv->qp_num;
4540e51060f0SSean Hefty 	rep.starting_psn = id_priv->seq_num;
4541e51060f0SSean Hefty 	rep.private_data = conn_param->private_data;
4542e51060f0SSean Hefty 	rep.private_data_len = conn_param->private_data_len;
4543e51060f0SSean Hefty 	rep.responder_resources = conn_param->responder_resources;
4544e51060f0SSean Hefty 	rep.initiator_depth = conn_param->initiator_depth;
4545e51060f0SSean Hefty 	rep.failover_accepted = 0;
4546e51060f0SSean Hefty 	rep.flow_control = conn_param->flow_control;
45474ede178aSSean Hefty 	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
4548e51060f0SSean Hefty 	rep.srq = id_priv->srq ? 1 : 0;
45490cb15372SLeon Romanovsky 	rep.ece.vendor_id = id_priv->ece.vendor_id;
45500cb15372SLeon Romanovsky 	rep.ece.attr_mod = id_priv->ece.attr_mod;
4551e51060f0SSean Hefty 
4552ed999f82SChuck Lever 	trace_cm_send_rep(id_priv);
45530fe313b0SSean Hefty 	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
45540fe313b0SSean Hefty out:
45550fe313b0SSean Hefty 	return ret;
4556e51060f0SSean Hefty }
4557e51060f0SSean Hefty 
455807ebafbaSTom Tucker static int cma_accept_iw(struct rdma_id_private *id_priv,
455907ebafbaSTom Tucker 		  struct rdma_conn_param *conn_param)
456007ebafbaSTom Tucker {
456107ebafbaSTom Tucker 	struct iw_cm_conn_param iw_param;
456207ebafbaSTom Tucker 	int ret;
456307ebafbaSTom Tucker 
4564f2625f7dSSteve Wise 	if (!conn_param)
4565f2625f7dSSteve Wise 		return -EINVAL;
4566f2625f7dSSteve Wise 
45675851bb89SSean Hefty 	ret = cma_modify_qp_rtr(id_priv, conn_param);
456807ebafbaSTom Tucker 	if (ret)
456907ebafbaSTom Tucker 		return ret;
457007ebafbaSTom Tucker 
457107ebafbaSTom Tucker 	iw_param.ord = conn_param->initiator_depth;
457207ebafbaSTom Tucker 	iw_param.ird = conn_param->responder_resources;
457307ebafbaSTom Tucker 	iw_param.private_data = conn_param->private_data;
457407ebafbaSTom Tucker 	iw_param.private_data_len = conn_param->private_data_len;
4575b6eb7011SWenpeng Liang 	if (id_priv->id.qp)
457607ebafbaSTom Tucker 		iw_param.qpn = id_priv->qp_num;
4577b6eb7011SWenpeng Liang 	else
457807ebafbaSTom Tucker 		iw_param.qpn = conn_param->qp_num;
457907ebafbaSTom Tucker 
458007ebafbaSTom Tucker 	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
458107ebafbaSTom Tucker }
458207ebafbaSTom Tucker 
4583628e5f6dSSean Hefty static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
45845c438135SSean Hefty 			     enum ib_cm_sidr_status status, u32 qkey,
4585628e5f6dSSean Hefty 			     const void *private_data, int private_data_len)
4586628e5f6dSSean Hefty {
4587628e5f6dSSean Hefty 	struct ib_cm_sidr_rep_param rep;
4588d2ca39f2SYossi Etigin 	int ret;
4589628e5f6dSSean Hefty 
4590628e5f6dSSean Hefty 	memset(&rep, 0, sizeof rep);
4591628e5f6dSSean Hefty 	rep.status = status;
4592628e5f6dSSean Hefty 	if (status == IB_SIDR_SUCCESS) {
459358e84f6bSMark Zhang 		if (qkey)
45945c438135SSean Hefty 			ret = cma_set_qkey(id_priv, qkey);
459558e84f6bSMark Zhang 		else
459658e84f6bSMark Zhang 			ret = cma_set_default_qkey(id_priv);
4597d2ca39f2SYossi Etigin 		if (ret)
4598d2ca39f2SYossi Etigin 			return ret;
4599628e5f6dSSean Hefty 		rep.qp_num = id_priv->qp_num;
4600c8f6a362SSean Hefty 		rep.qkey = id_priv->qkey;
46010cb15372SLeon Romanovsky 
46020cb15372SLeon Romanovsky 		rep.ece.vendor_id = id_priv->ece.vendor_id;
46030cb15372SLeon Romanovsky 		rep.ece.attr_mod = id_priv->ece.attr_mod;
4604628e5f6dSSean Hefty 	}
46050cb15372SLeon Romanovsky 
4606628e5f6dSSean Hefty 	rep.private_data = private_data;
4607628e5f6dSSean Hefty 	rep.private_data_len = private_data_len;
4608628e5f6dSSean Hefty 
4609ed999f82SChuck Lever 	trace_cm_send_sidr_rep(id_priv);
4610628e5f6dSSean Hefty 	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
4611628e5f6dSSean Hefty }
4612628e5f6dSSean Hefty 
4613b09c4d70SLeon Romanovsky /**
4614b09c4d70SLeon Romanovsky  * rdma_accept - Called to accept a connection request or response.
4615b09c4d70SLeon Romanovsky  * @id: Connection identifier associated with the request.
4616b09c4d70SLeon Romanovsky  * @conn_param: Information needed to establish the connection.  This must be
4617b09c4d70SLeon Romanovsky  *   provided if accepting a connection request.  If accepting a connection
4618b09c4d70SLeon Romanovsky  *   response, this parameter must be NULL.
4619b09c4d70SLeon Romanovsky  *
4620b09c4d70SLeon Romanovsky  * Typically, this routine is only called by the listener to accept a connection
4621b09c4d70SLeon Romanovsky  * request.  It must also be called on the active side of a connection if the
4622b09c4d70SLeon Romanovsky  * user is performing their own QP transitions.
4623b09c4d70SLeon Romanovsky  *
4624b09c4d70SLeon Romanovsky  * In the case of error, a reject message is sent to the remote side and the
4625b09c4d70SLeon Romanovsky  * state of the qp associated with the id is modified to error, such that any
4626b09c4d70SLeon Romanovsky  * previously posted receive buffers would be flushed.
4627b09c4d70SLeon Romanovsky  *
4628b09c4d70SLeon Romanovsky  * This function is for use by kernel ULPs and must be called from under the
4629b09c4d70SLeon Romanovsky  * handler callback.
4630b09c4d70SLeon Romanovsky  */
4631b09c4d70SLeon Romanovsky int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
4632e51060f0SSean Hefty {
4633d114c6feSJason Gunthorpe 	struct rdma_id_private *id_priv =
4634d114c6feSJason Gunthorpe 		container_of(id, struct rdma_id_private, id);
4635e51060f0SSean Hefty 	int ret;
4636e51060f0SSean Hefty 
4637d114c6feSJason Gunthorpe 	lockdep_assert_held(&id_priv->handler_mutex);
463883e9502dSNir Muchtar 
4639d114c6feSJason Gunthorpe 	if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
4640e51060f0SSean Hefty 		return -EINVAL;
4641e51060f0SSean Hefty 
4642e51060f0SSean Hefty 	if (!id->qp && conn_param) {
4643e51060f0SSean Hefty 		id_priv->qp_num = conn_param->qp_num;
4644e51060f0SSean Hefty 		id_priv->srq = conn_param->srq;
4645e51060f0SSean Hefty 	}
4646e51060f0SSean Hefty 
464772219ceaSMichael Wang 	if (rdma_cap_ib_cm(id->device, id->port_num)) {
4648f45ee80eSHefty, Sean 		if (id->qp_type == IB_QPT_UD) {
4649f45ee80eSHefty, Sean 			if (conn_param)
4650628e5f6dSSean Hefty 				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
46515c438135SSean Hefty 							conn_param->qkey,
4652628e5f6dSSean Hefty 							conn_param->private_data,
4653628e5f6dSSean Hefty 							conn_param->private_data_len);
4654f45ee80eSHefty, Sean 			else
4655f45ee80eSHefty, Sean 				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
46565c438135SSean Hefty 							0, NULL, 0);
4657f45ee80eSHefty, Sean 		} else {
4658f45ee80eSHefty, Sean 			if (conn_param)
4659e51060f0SSean Hefty 				ret = cma_accept_ib(id_priv, conn_param);
4660e51060f0SSean Hefty 			else
4661e51060f0SSean Hefty 				ret = cma_rep_recv(id_priv);
4662f45ee80eSHefty, Sean 		}
4663b6eb7011SWenpeng Liang 	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
466407ebafbaSTom Tucker 		ret = cma_accept_iw(id_priv, conn_param);
4665b6eb7011SWenpeng Liang 	} else {
4666e51060f0SSean Hefty 		ret = -ENOSYS;
4667b6eb7011SWenpeng Liang 	}
4668e51060f0SSean Hefty 	if (ret)
4669e51060f0SSean Hefty 		goto reject;
4670e51060f0SSean Hefty 
4671e51060f0SSean Hefty 	return 0;
4672e51060f0SSean Hefty reject:
4673c5483388SSean Hefty 	cma_modify_qp_err(id_priv);
46748094ba0aSLeon Romanovsky 	rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
4675e51060f0SSean Hefty 	return ret;
4676e51060f0SSean Hefty }
4677b09c4d70SLeon Romanovsky EXPORT_SYMBOL(rdma_accept);
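/*
 * Illustrative sketch (editor's addition): accepting from the listener's
 * RDMA_CM_EVENT_CONNECT_REQUEST callback, where the handler_mutex is
 * already held as rdma_accept() requires.  QP setup is elided and the
 * conn_param handling is an assumption for the example.
 *
 *	static int example_listen_handler(struct rdma_cm_id *id,
 *					  struct rdma_cm_event *event)
 *	{
 *		struct rdma_conn_param conn_param = {};
 *
 *		if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
 *			conn_param.responder_resources =
 *				event->param.conn.responder_resources;
 *			conn_param.initiator_depth =
 *				event->param.conn.initiator_depth;
 *			if (rdma_accept(id, &conn_param))
 *				return -ECONNABORTED; // nonzero destroys the id
 *		}
 *		return 0;
 *	}
 */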
4678e51060f0SSean Hefty 
4679b09c4d70SLeon Romanovsky int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
4680b09c4d70SLeon Romanovsky 		    struct rdma_ucm_ece *ece)
46810cb15372SLeon Romanovsky {
46820cb15372SLeon Romanovsky 	struct rdma_id_private *id_priv =
46830cb15372SLeon Romanovsky 		container_of(id, struct rdma_id_private, id);
46840cb15372SLeon Romanovsky 
46850cb15372SLeon Romanovsky 	id_priv->ece.vendor_id = ece->vendor_id;
46860cb15372SLeon Romanovsky 	id_priv->ece.attr_mod = ece->attr_mod;
46870cb15372SLeon Romanovsky 
4688b09c4d70SLeon Romanovsky 	return rdma_accept(id, conn_param);
46890cb15372SLeon Romanovsky }
4690b09c4d70SLeon Romanovsky EXPORT_SYMBOL(rdma_accept_ece);
46910cb15372SLeon Romanovsky 
4692d114c6feSJason Gunthorpe void rdma_lock_handler(struct rdma_cm_id *id)
4693d114c6feSJason Gunthorpe {
4694d114c6feSJason Gunthorpe 	struct rdma_id_private *id_priv =
4695d114c6feSJason Gunthorpe 		container_of(id, struct rdma_id_private, id);
4696d114c6feSJason Gunthorpe 
4697d114c6feSJason Gunthorpe 	mutex_lock(&id_priv->handler_mutex);
4698d114c6feSJason Gunthorpe }
4699d114c6feSJason Gunthorpe EXPORT_SYMBOL(rdma_lock_handler);
4700d114c6feSJason Gunthorpe 
4701d114c6feSJason Gunthorpe void rdma_unlock_handler(struct rdma_cm_id *id)
4702d114c6feSJason Gunthorpe {
4703d114c6feSJason Gunthorpe 	struct rdma_id_private *id_priv =
4704d114c6feSJason Gunthorpe 		container_of(id, struct rdma_id_private, id);
4705d114c6feSJason Gunthorpe 
4706d114c6feSJason Gunthorpe 	mutex_unlock(&id_priv->handler_mutex);
4707d114c6feSJason Gunthorpe }
4708d114c6feSJason Gunthorpe EXPORT_SYMBOL(rdma_unlock_handler);
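/*
 * Illustrative sketch (editor's addition): a ULP that defers the accept out
 * of the CONNECT_REQUEST callback can retake the handler lock later and then
 * call rdma_accept(), which asserts that the lock is held.  The deferred-work
 * context and the queue_is_ready() check are hypothetical.
 *
 *	rdma_lock_handler(id);
 *	if (queue_is_ready(queue))		// hypothetical ULP check
 *		ret = rdma_accept(id, &conn_param);
 *	else
 *		ret = rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
 *	rdma_unlock_handler(id);
 */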
47090fe313b0SSean Hefty 
47100fe313b0SSean Hefty int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
47110fe313b0SSean Hefty {
4712e51060f0SSean Hefty 	struct rdma_id_private *id_priv;
4713e51060f0SSean Hefty 	int ret;
4714e51060f0SSean Hefty 
4715e51060f0SSean Hefty 	id_priv = container_of(id, struct rdma_id_private, id);
4716e51060f0SSean Hefty 	if (!id_priv->cm_id.ib)
4717e51060f0SSean Hefty 		return -EINVAL;
4718e51060f0SSean Hefty 
47190c9361fcSJack Morgenstein 	switch (id->device->node_type) {
4720e51060f0SSean Hefty 	case RDMA_NODE_IB_CA:
4721e51060f0SSean Hefty 		ret = ib_cm_notify(id_priv->cm_id.ib, event);
472272219ceaSMichael Wang 		break;
4723ed999f82SChuck Lever 	default:
47245c438135SSean Hefty 		ret = 0;
4725e51060f0SSean Hefty 		break;
4726ed999f82SChuck Lever 	}
4727ed999f82SChuck Lever 	return ret;
4728628e5f6dSSean Hefty }
4729628e5f6dSSean Hefty EXPORT_SYMBOL(rdma_notify);
4730628e5f6dSSean Hefty 
4731ed999f82SChuck Lever int rdma_reject(struct rdma_cm_id *id, const void *private_data,
47328094ba0aSLeon Romanovsky 		u8 private_data_len, u8 reason)
4733e51060f0SSean Hefty {
4734e51060f0SSean Hefty 	struct rdma_id_private *id_priv;
473507ebafbaSTom Tucker 	int ret;
473607ebafbaSTom Tucker 
473707ebafbaSTom Tucker 	id_priv = container_of(id, struct rdma_id_private, id);
473807ebafbaSTom Tucker 	if (!id_priv->cm_id.ib)
4739e51060f0SSean Hefty 		return -EINVAL;
4740e51060f0SSean Hefty 
4741e51060f0SSean Hefty 	if (rdma_cap_ib_cm(id->device, id->port_num)) {
4742e51060f0SSean Hefty 		if (id->qp_type == IB_QPT_UD) {
4743e51060f0SSean Hefty 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
4744e51060f0SSean Hefty 						private_data, private_data_len);
4745e51060f0SSean Hefty 		} else {
4746e51060f0SSean Hefty 			trace_cm_send_rej(id_priv);
47478094ba0aSLeon Romanovsky 			ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
47488094ba0aSLeon Romanovsky 					     private_data, private_data_len);
4749e51060f0SSean Hefty 		}
475004215330SMichael Wang 	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4751e51060f0SSean Hefty 		ret = iw_cm_reject(id_priv->cm_id.iw,
4752e51060f0SSean Hefty 				   private_data, private_data_len);
4753b6eb7011SWenpeng Liang 	} else {
4754e51060f0SSean Hefty 		ret = -ENOSYS;
4755b6eb7011SWenpeng Liang 	}
475621655afcSMichael Wang 
4757e51060f0SSean Hefty 	return ret;
4758e51060f0SSean Hefty }
4759e51060f0SSean Hefty EXPORT_SYMBOL(rdma_reject);
4760e51060f0SSean Hefty 
4761e51060f0SSean Hefty int rdma_disconnect(struct rdma_cm_id *id)
4762e51060f0SSean Hefty {
4763e51060f0SSean Hefty 	struct rdma_id_private *id_priv;
4764e51060f0SSean Hefty 	int ret;
4765e51060f0SSean Hefty 
4766e51060f0SSean Hefty 	id_priv = container_of(id, struct rdma_id_private, id);
47670c9361fcSJack Morgenstein 	if (!id_priv->cm_id.ib)
4768e51060f0SSean Hefty 		return -EINVAL;
4769e51060f0SSean Hefty 
477072219ceaSMichael Wang 	if (rdma_cap_ib_cm(id->device, id->port_num)) {
4771c5483388SSean Hefty 		ret = cma_modify_qp_err(id_priv);
4772e51060f0SSean Hefty 		if (ret)
4773e51060f0SSean Hefty 			goto out;
4774e51060f0SSean Hefty 		/* Initiate or respond to a disconnect. */
4775ed999f82SChuck Lever 		trace_cm_disconnect(id_priv);
4776ed999f82SChuck Lever 		if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
4777ed999f82SChuck Lever 			if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
4778ed999f82SChuck Lever 				trace_cm_sent_drep(id_priv);
4779ed999f82SChuck Lever 		} else {
4780ed999f82SChuck Lever 			trace_cm_sent_dreq(id_priv);
4781ed999f82SChuck Lever 		}
478204215330SMichael Wang 	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
478307ebafbaSTom Tucker 		ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
478421655afcSMichael Wang 	} else
478507ebafbaSTom Tucker 		ret = -EINVAL;
478621655afcSMichael Wang 
4787e51060f0SSean Hefty out:
4788e51060f0SSean Hefty 	return ret;
4789e51060f0SSean Hefty }
4790e51060f0SSean Hefty EXPORT_SYMBOL(rdma_disconnect);
4791e51060f0SSean Hefty 
4792b5de0c60SJason Gunthorpe static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
4793b5de0c60SJason Gunthorpe 			      struct ib_sa_multicast *multicast,
4794b5de0c60SJason Gunthorpe 			      struct rdma_cm_event *event,
4795b5de0c60SJason Gunthorpe 			      struct cma_multicast *mc)
4796c8f6a362SSean Hefty {
4797b5de0c60SJason Gunthorpe 	struct rdma_dev_addr *dev_addr;
4798b5de0c60SJason Gunthorpe 	enum ib_gid_type gid_type;
4799b5de0c60SJason Gunthorpe 	struct net_device *ndev;
4800c8f6a362SSean Hefty 
480158e84f6bSMark Zhang 	if (status)
4802498683c6SMoni Shoua 		pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
4803498683c6SMoni Shoua 				     status);
4804bee3c3c9SMoni Shoua 
4805b5de0c60SJason Gunthorpe 	event->status = status;
4806b5de0c60SJason Gunthorpe 	event->param.ud.private_data = mc->context;
4807b5de0c60SJason Gunthorpe 	if (status) {
4808b5de0c60SJason Gunthorpe 		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
4809b5de0c60SJason Gunthorpe 		return;
4810c8f6a362SSean Hefty 	}
48115c438135SSean Hefty 
4812b5de0c60SJason Gunthorpe 	dev_addr = &id_priv->id.route.addr.dev_addr;
4813b5de0c60SJason Gunthorpe 	ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
4814b5de0c60SJason Gunthorpe 	gid_type =
4815b5de0c60SJason Gunthorpe 		id_priv->cma_dev
4816b5de0c60SJason Gunthorpe 			->default_gid_type[id_priv->id.port_num -
4817b5de0c60SJason Gunthorpe 					   rdma_start_port(
4818b5de0c60SJason Gunthorpe 						   id_priv->cma_dev->device)];
4819c8f6a362SSean Hefty 
4820b5de0c60SJason Gunthorpe 	event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
4821b5de0c60SJason Gunthorpe 	if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
4822b5de0c60SJason Gunthorpe 				     &multicast->rec, ndev, gid_type,
4823b5de0c60SJason Gunthorpe 				     &event->param.ud.ah_attr)) {
4824b5de0c60SJason Gunthorpe 		event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
4825b5de0c60SJason Gunthorpe 		goto out;
4826b5de0c60SJason Gunthorpe 	}
48276d337179SParav Pandit 
4828b5de0c60SJason Gunthorpe 	event->param.ud.qp_num = 0xFFFFFF;
482958e84f6bSMark Zhang 	event->param.ud.qkey = id_priv->qkey;
4830b5de0c60SJason Gunthorpe 
4831b5de0c60SJason Gunthorpe out:
4832bee3c3c9SMoni Shoua 	dev_put(ndev);
4833b5de0c60SJason Gunthorpe }
4834c8f6a362SSean Hefty 
4835b5de0c60SJason Gunthorpe static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
4836b5de0c60SJason Gunthorpe {
4837b5de0c60SJason Gunthorpe 	struct cma_multicast *mc = multicast->context;
4838b5de0c60SJason Gunthorpe 	struct rdma_id_private *id_priv = mc->id_priv;
4839b5de0c60SJason Gunthorpe 	struct rdma_cm_event event = {};
4840b5de0c60SJason Gunthorpe 	int ret = 0;
4841b5de0c60SJason Gunthorpe 
4842b5de0c60SJason Gunthorpe 	mutex_lock(&id_priv->handler_mutex);
4843b5de0c60SJason Gunthorpe 	if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
4844b5de0c60SJason Gunthorpe 	    READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
4845b5de0c60SJason Gunthorpe 		goto out;
4846b5de0c60SJason Gunthorpe 
484758e84f6bSMark Zhang 	ret = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
484858e84f6bSMark Zhang 	if (!ret) {
4849b5de0c60SJason Gunthorpe 		cma_make_mc_event(status, id_priv, multicast, &event, mc);
4850ed999f82SChuck Lever 		ret = cma_cm_event_handler(id_priv, &event);
485158e84f6bSMark Zhang 	}
4852f685c195SParav Pandit 	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
4853fe454dc3SAvihai Horon 	WARN_ON(ret);
48548aa08602SSean Hefty 
485537e07cdaSBart Van Assche out:
4856de910bd9SOr Gerlitz 	mutex_unlock(&id_priv->handler_mutex);
4857c8f6a362SSean Hefty 	return 0;
4858c8f6a362SSean Hefty }
4859c8f6a362SSean Hefty 
4860c8f6a362SSean Hefty static void cma_set_mgid(struct rdma_id_private *id_priv,
4861c8f6a362SSean Hefty 			 struct sockaddr *addr, union ib_gid *mgid)
4862c8f6a362SSean Hefty {
4863c8f6a362SSean Hefty 	unsigned char mc_map[MAX_ADDR_LEN];
4864c8f6a362SSean Hefty 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4865c8f6a362SSean Hefty 	struct sockaddr_in *sin = (struct sockaddr_in *) addr;
4866c8f6a362SSean Hefty 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
4867c8f6a362SSean Hefty 
4868c8f6a362SSean Hefty 	if (cma_any_addr(addr)) {
4869c8f6a362SSean Hefty 		memset(mgid, 0, sizeof *mgid);
4870c8f6a362SSean Hefty 	} else if ((addr->sa_family == AF_INET6) &&
48711c9b2819SJason Gunthorpe 		   ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
4872c8f6a362SSean Hefty 								 0xFF10A01B)) {
4873c8f6a362SSean Hefty 		/* IPv6 address is an SA assigned MGID. */
4874c8f6a362SSean Hefty 		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
48755bc2b7b3SSean Hefty 	} else if (addr->sa_family == AF_IB) {
48765bc2b7b3SSean Hefty 		memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
4877076dd53bSVarsha Rao 	} else if (addr->sa_family == AF_INET6) {
4878e2e62697SJason Gunthorpe 		ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
4879e2e62697SJason Gunthorpe 		if (id_priv->id.ps == RDMA_PS_UDP)
4880e2e62697SJason Gunthorpe 			mc_map[7] = 0x01;	/* Use RDMA CM signature */
4881e2e62697SJason Gunthorpe 		*mgid = *(union ib_gid *) (mc_map + 4);
4882c8f6a362SSean Hefty 	} else {
4883a9e527e3SRolf Manderscheid 		ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
4884c8f6a362SSean Hefty 		if (id_priv->id.ps == RDMA_PS_UDP)
4885c8f6a362SSean Hefty 			mc_map[7] = 0x01;	/* Use RDMA CM signature */
4886c8f6a362SSean Hefty 		*mgid = *(union ib_gid *) (mc_map + 4);
4887c8f6a362SSean Hefty 	}
4888c8f6a362SSean Hefty }
4889c8f6a362SSean Hefty 
4890c8f6a362SSean Hefty static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
4891c8f6a362SSean Hefty 				 struct cma_multicast *mc)
4892c8f6a362SSean Hefty {
4893c8f6a362SSean Hefty 	struct ib_sa_mcmember_rec rec;
4894c8f6a362SSean Hefty 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4895c8f6a362SSean Hefty 	ib_sa_comp_mask comp_mask;
4896c8f6a362SSean Hefty 	int ret;
4897c8f6a362SSean Hefty 
4898c8f6a362SSean Hefty 	ib_addr_get_mgid(dev_addr, &rec.mgid);
4899c8f6a362SSean Hefty 	ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
4900c8f6a362SSean Hefty 				     &rec.mgid, &rec);
4901c8f6a362SSean Hefty 	if (ret)
4902c8f6a362SSean Hefty 		return ret;
4903c8f6a362SSean Hefty 
490458e84f6bSMark Zhang 	if (!id_priv->qkey) {
490558e84f6bSMark Zhang 		ret = cma_set_default_qkey(id_priv);
49065bc2b7b3SSean Hefty 		if (ret)
49075bc2b7b3SSean Hefty 			return ret;
490858e84f6bSMark Zhang 	}
49095bc2b7b3SSean Hefty 
49103f446754SRoland Dreier 	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
49115bc2b7b3SSean Hefty 	rec.qkey = cpu_to_be32(id_priv->qkey);
49126f8372b6SSean Hefty 	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
4913c8f6a362SSean Hefty 	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
4914ab15c95aSAlex Vesker 	rec.join_state = mc->join_state;
4915ab15c95aSAlex Vesker 
4916c8f6a362SSean Hefty 	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
4917c8f6a362SSean Hefty 		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
4918c8f6a362SSean Hefty 		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
4919c8f6a362SSean Hefty 		    IB_SA_MCMEMBER_REC_FLOW_LABEL |
4920c8f6a362SSean Hefty 		    IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
4921c8f6a362SSean Hefty 
492284adeee9SYossi Etigin 	if (id_priv->id.ps == RDMA_PS_IPOIB)
492384adeee9SYossi Etigin 		comp_mask |= IB_SA_MCMEMBER_REC_RATE |
49242a22fb8cSDotan Barak 			     IB_SA_MCMEMBER_REC_RATE_SELECTOR |
49252a22fb8cSDotan Barak 			     IB_SA_MCMEMBER_REC_MTU_SELECTOR |
49262a22fb8cSDotan Barak 			     IB_SA_MCMEMBER_REC_MTU |
49272a22fb8cSDotan Barak 			     IB_SA_MCMEMBER_REC_HOP_LIMIT;
492884adeee9SYossi Etigin 
4929b5de0c60SJason Gunthorpe 	mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
4930b5de0c60SJason Gunthorpe 					 id_priv->id.port_num, &rec, comp_mask,
4931b5de0c60SJason Gunthorpe 					 GFP_KERNEL, cma_ib_mc_handler, mc);
4932b5de0c60SJason Gunthorpe 	return PTR_ERR_OR_ZERO(mc->sa_mc);
49333c86aa70SEli Cohen }
49343c86aa70SEli Cohen 
4935be1d325aSNoa Osherovich static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
4936be1d325aSNoa Osherovich 			      enum ib_gid_type gid_type)
49373c86aa70SEli Cohen {
49383c86aa70SEli Cohen 	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
49393c86aa70SEli Cohen 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
49403c86aa70SEli Cohen 
49413c86aa70SEli Cohen 	if (cma_any_addr(addr)) {
49423c86aa70SEli Cohen 		memset(mgid, 0, sizeof *mgid);
49433c86aa70SEli Cohen 	} else if (addr->sa_family == AF_INET6) {
49443c86aa70SEli Cohen 		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
49453c86aa70SEli Cohen 	} else {
49465c181bdaSParav Pandit 		mgid->raw[0] =
49475c181bdaSParav Pandit 			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
49485c181bdaSParav Pandit 		mgid->raw[1] =
49495c181bdaSParav Pandit 			(gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
49503c86aa70SEli Cohen 		mgid->raw[2] = 0;
49513c86aa70SEli Cohen 		mgid->raw[3] = 0;
49523c86aa70SEli Cohen 		mgid->raw[4] = 0;
49533c86aa70SEli Cohen 		mgid->raw[5] = 0;
49543c86aa70SEli Cohen 		mgid->raw[6] = 0;
49553c86aa70SEli Cohen 		mgid->raw[7] = 0;
49563c86aa70SEli Cohen 		mgid->raw[8] = 0;
49573c86aa70SEli Cohen 		mgid->raw[9] = 0;
49583c86aa70SEli Cohen 		mgid->raw[10] = 0xff;
49593c86aa70SEli Cohen 		mgid->raw[11] = 0xff;
49603c86aa70SEli Cohen 		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
49613c86aa70SEli Cohen 	}
49623c86aa70SEli Cohen }
49633c86aa70SEli Cohen 
49643c86aa70SEli Cohen static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
49653c86aa70SEli Cohen 				   struct cma_multicast *mc)
49663c86aa70SEli Cohen {
49673c86aa70SEli Cohen 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4968bee3c3c9SMoni Shoua 	int err = 0;
49693c86aa70SEli Cohen 	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
49703c86aa70SEli Cohen 	struct net_device *ndev = NULL;
4971*e0fe97efSMark Zhang 	struct ib_sa_multicast ib = {};
4972bee3c3c9SMoni Shoua 	enum ib_gid_type gid_type;
4973ab15c95aSAlex Vesker 	bool send_only;
4974ab15c95aSAlex Vesker 
4975ab15c95aSAlex Vesker 	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
49763c86aa70SEli Cohen 
4977b5de0c60SJason Gunthorpe 	if (cma_zero_addr(addr))
49783c86aa70SEli Cohen 		return -EINVAL;
49793c86aa70SEli Cohen 
4980be1d325aSNoa Osherovich 	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
4981be1d325aSNoa Osherovich 		   rdma_start_port(id_priv->cma_dev->device)];
4982b5de0c60SJason Gunthorpe 	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
49833c86aa70SEli Cohen 
4984b5de0c60SJason Gunthorpe 	ib.rec.pkey = cpu_to_be16(0xffff);
49853c86aa70SEli Cohen 	if (dev_addr->bound_dev_if)
4986052eac6eSParav Pandit 		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
4987fe454dc3SAvihai Horon 	if (!ndev)
4988fe454dc3SAvihai Horon 		return -ENODEV;
4989fe454dc3SAvihai Horon 
499058030c76SMark Zhang 	ib.rec.rate = IB_RATE_PORT_CURRENT;
4991b5de0c60SJason Gunthorpe 	ib.rec.hop_limit = 1;
4992b5de0c60SJason Gunthorpe 	ib.rec.mtu = iboe_get_mtu(ndev->mtu);
4993bee3c3c9SMoni Shoua 
4994bee3c3c9SMoni Shoua 	if (addr->sa_family == AF_INET) {
4995c65f6c5aSAlex Vesker 		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
4996b5de0c60SJason Gunthorpe 			ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
4997ab15c95aSAlex Vesker 			if (!send_only) {
4998b5de0c60SJason Gunthorpe 				err = cma_igmp_send(ndev, &ib.rec.mgid,
4999bee3c3c9SMoni Shoua 						    true);
5000bee3c3c9SMoni Shoua 			}
5001bee3c3c9SMoni Shoua 		}
5002bee3c3c9SMoni Shoua 	} else {
5003bee3c3c9SMoni Shoua 		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
5004bee3c3c9SMoni Shoua 			err = -ENOTSUPP;
5005bee3c3c9SMoni Shoua 	}
50063c86aa70SEli Cohen 	dev_put(ndev);
5007fe454dc3SAvihai Horon 	if (err || !ib.rec.mtu)
5008fe454dc3SAvihai Horon 		return err ?: -EINVAL;
5009fe454dc3SAvihai Horon 
501058e84f6bSMark Zhang 	if (!id_priv->qkey)
501158e84f6bSMark Zhang 		cma_set_default_qkey(id_priv);
501258e84f6bSMark Zhang 
50137b85627bSMoni Shoua 	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
5014b5de0c60SJason Gunthorpe 		    &ib.rec.port_gid);
5015fe454dc3SAvihai Horon 	INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
5016fe454dc3SAvihai Horon 	cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
5017fe454dc3SAvihai Horon 	queue_work(cma_wq, &mc->iboe_join.work);
50183c86aa70SEli Cohen 	return 0;
50193c86aa70SEli Cohen }
50203c86aa70SEli Cohen 
5021c8f6a362SSean Hefty int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
5022ab15c95aSAlex Vesker 			u8 join_state, void *context)
5023c8f6a362SSean Hefty {
50245cfbf929SJason Gunthorpe 	struct rdma_id_private *id_priv =
50255cfbf929SJason Gunthorpe 		container_of(id, struct rdma_id_private, id);
5026c8f6a362SSean Hefty 	struct cma_multicast *mc;
5027c8f6a362SSean Hefty 	int ret;
5028c8f6a362SSean Hefty 
50291bb5091dSJason Gunthorpe 	/* Not supported for kernel QPs */
50301bb5091dSJason Gunthorpe 	if (WARN_ON(id->qp))
50317688f2c3SLeon Romanovsky 		return -EINVAL;
50327688f2c3SLeon Romanovsky 
50335cfbf929SJason Gunthorpe 	/* ULP is calling this wrong. */
50345cfbf929SJason Gunthorpe 	if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
50355cfbf929SJason Gunthorpe 			    READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
5036c8f6a362SSean Hefty 		return -EINVAL;
5037c8f6a362SSean Hefty 
503858e84f6bSMark Zhang 	if (id_priv->id.qp_type != IB_QPT_UD)
503958e84f6bSMark Zhang 		return -EINVAL;
504058e84f6bSMark Zhang 
5041b5de0c60SJason Gunthorpe 	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
5042c8f6a362SSean Hefty 	if (!mc)
5043c8f6a362SSean Hefty 		return -ENOMEM;
5044c8f6a362SSean Hefty 
5045ef560861SSean Hefty 	memcpy(&mc->addr, addr, rdma_addr_size(addr));
5046c8f6a362SSean Hefty 	mc->context = context;
5047c8f6a362SSean Hefty 	mc->id_priv = id_priv;
5048ab15c95aSAlex Vesker 	mc->join_state = join_state;
5049c8f6a362SSean Hefty 
50505d9fb044SIra Weiny 	if (rdma_protocol_roce(id->device, id->port_num)) {
50513c86aa70SEli Cohen 		ret = cma_iboe_join_multicast(id_priv, mc);
5052c0126915SJason Gunthorpe 		if (ret)
5053c0126915SJason Gunthorpe 			goto out_err;
5054c0126915SJason Gunthorpe 	} else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
50555c9a5282SMichael Wang 		ret = cma_join_ib_multicast(id_priv, mc);
5056c0126915SJason Gunthorpe 		if (ret)
5057c0126915SJason Gunthorpe 			goto out_err;
5058c0126915SJason Gunthorpe 	} else {
5059c8f6a362SSean Hefty 		ret = -ENOSYS;
5060c0126915SJason Gunthorpe 		goto out_err;
5061c8f6a362SSean Hefty 	}
5062c0126915SJason Gunthorpe 
5063c0126915SJason Gunthorpe 	spin_lock(&id_priv->lock);
5064c0126915SJason Gunthorpe 	list_add(&mc->list, &id_priv->mc_list);
5065c0126915SJason Gunthorpe 	spin_unlock(&id_priv->lock);
5066c0126915SJason Gunthorpe 
5067c0126915SJason Gunthorpe 	return 0;
5068c0126915SJason Gunthorpe out_err:
5069c0126915SJason Gunthorpe 	kfree(mc);
5070c8f6a362SSean Hefty 	return ret;
5071c8f6a362SSean Hefty }
5072c8f6a362SSean Hefty EXPORT_SYMBOL(rdma_join_multicast);
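/*
 * Illustrative sketch (editor's addition): joining a multicast group on a
 * UD rdma_cm_id after its address has been bound or resolved.  The group
 * address, join state and context pointer are assumptions for the example.
 *
 *	ret = rdma_join_multicast(id, (struct sockaddr *)&mcast_addr,
 *				  BIT(FULLMEMBER_JOIN), mc_context);
 *	// completion is reported as RDMA_CM_EVENT_MULTICAST_JOIN (or
 *	// RDMA_CM_EVENT_MULTICAST_ERROR) through the id's event handler
 */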
5073c8f6a362SSean Hefty 
5074c8f6a362SSean Hefty void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
5075c8f6a362SSean Hefty {
5076c8f6a362SSean Hefty 	struct rdma_id_private *id_priv;
5077c8f6a362SSean Hefty 	struct cma_multicast *mc;
5078c8f6a362SSean Hefty 
5079c8f6a362SSean Hefty 	id_priv = container_of(id, struct rdma_id_private, id);
5080c8f6a362SSean Hefty 	spin_lock_irq(&id_priv->lock);
5081c8f6a362SSean Hefty 	list_for_each_entry(mc, &id_priv->mc_list, list) {
50823788d299SJason Gunthorpe 		if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
50833788d299SJason Gunthorpe 			continue;
5084c8f6a362SSean Hefty 		list_del(&mc->list);
5085c8f6a362SSean Hefty 		spin_unlock_irq(&id_priv->lock);
5086c8f6a362SSean Hefty 
50873788d299SJason Gunthorpe 		WARN_ON(id_priv->cma_dev->device != id->device);
50883788d299SJason Gunthorpe 		destroy_mc(id_priv, mc);
5089c8f6a362SSean Hefty 		return;
5090c8f6a362SSean Hefty 	}
5091c8f6a362SSean Hefty 	spin_unlock_irq(&id_priv->lock);
5092c8f6a362SSean Hefty }
5093c8f6a362SSean Hefty EXPORT_SYMBOL(rdma_leave_multicast);
5094c8f6a362SSean Hefty 
5095dd5bdff8SOr Gerlitz static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
5096dd5bdff8SOr Gerlitz {
5097dd5bdff8SOr Gerlitz 	struct rdma_dev_addr *dev_addr;
50987e85bcdaSJason Gunthorpe 	struct cma_work *work;
5099dd5bdff8SOr Gerlitz 
5100dd5bdff8SOr Gerlitz 	dev_addr = &id_priv->id.route.addr.dev_addr;
5101dd5bdff8SOr Gerlitz 
51026266ed6eSSean Hefty 	if ((dev_addr->bound_dev_if == ndev->ifindex) &&
5103fa20105eSGuy Shapiro 	    (net_eq(dev_net(ndev), dev_addr->net)) &&
5104dd5bdff8SOr Gerlitz 	    memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
5105aba25a3eSParav Pandit 		pr_info("RDMA CM addr change for ndev %s used by id %p\n",
5106dd5bdff8SOr Gerlitz 			ndev->name, &id_priv->id);
5107dd5bdff8SOr Gerlitz 		work = kzalloc(sizeof *work, GFP_KERNEL);
5108dd5bdff8SOr Gerlitz 		if (!work)
5109dd5bdff8SOr Gerlitz 			return -ENOMEM;
5110dd5bdff8SOr Gerlitz 
51117e85bcdaSJason Gunthorpe 		INIT_WORK(&work->work, cma_work_handler);
5112dd5bdff8SOr Gerlitz 		work->id = id_priv;
5113dd5bdff8SOr Gerlitz 		work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
5114e368d23fSParav Pandit 		cma_id_get(id_priv);
5115dd5bdff8SOr Gerlitz 		queue_work(cma_wq, &work->work);
5116dd5bdff8SOr Gerlitz 	}
5117dd5bdff8SOr Gerlitz 
5118dd5bdff8SOr Gerlitz 	return 0;
5119dd5bdff8SOr Gerlitz }
5120dd5bdff8SOr Gerlitz 
5121dd5bdff8SOr Gerlitz static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
5122351638e7SJiri Pirko 			       void *ptr)
5123dd5bdff8SOr Gerlitz {
5124351638e7SJiri Pirko 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
5125dd5bdff8SOr Gerlitz 	struct cma_device *cma_dev;
5126dd5bdff8SOr Gerlitz 	struct rdma_id_private *id_priv;
5127dd5bdff8SOr Gerlitz 	int ret = NOTIFY_DONE;
5128dd5bdff8SOr Gerlitz 
5129dd5bdff8SOr Gerlitz 	if (event != NETDEV_BONDING_FAILOVER)
5130dd5bdff8SOr Gerlitz 		return NOTIFY_DONE;
5131dd5bdff8SOr Gerlitz 
51323cd96fddSParav Pandit 	if (!netif_is_bond_master(ndev))
5133dd5bdff8SOr Gerlitz 		return NOTIFY_DONE;
5134dd5bdff8SOr Gerlitz 
5135dd5bdff8SOr Gerlitz 	mutex_lock(&lock);
5136dd5bdff8SOr Gerlitz 	list_for_each_entry(cma_dev, &dev_list, list)
513799cfddb8SJason Gunthorpe 		list_for_each_entry(id_priv, &cma_dev->id_list, device_item) {
5138dd5bdff8SOr Gerlitz 			ret = cma_netdev_change(ndev, id_priv);
5139dd5bdff8SOr Gerlitz 			if (ret)
5140dd5bdff8SOr Gerlitz 				goto out;
5141dd5bdff8SOr Gerlitz 		}
5142dd5bdff8SOr Gerlitz 
5143dd5bdff8SOr Gerlitz out:
5144dd5bdff8SOr Gerlitz 	mutex_unlock(&lock);
5145dd5bdff8SOr Gerlitz 	return ret;
5146dd5bdff8SOr Gerlitz }
5147dd5bdff8SOr Gerlitz 
5148925d046eSPatrisious Haddad static void cma_netevent_work_handler(struct work_struct *_work)
5149925d046eSPatrisious Haddad {
5150925d046eSPatrisious Haddad 	struct rdma_id_private *id_priv =
5151925d046eSPatrisious Haddad 		container_of(_work, struct rdma_id_private, id.net_work);
5152925d046eSPatrisious Haddad 	struct rdma_cm_event event = {};
5153925d046eSPatrisious Haddad 
5154925d046eSPatrisious Haddad 	mutex_lock(&id_priv->handler_mutex);
5155925d046eSPatrisious Haddad 
5156925d046eSPatrisious Haddad 	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
5157925d046eSPatrisious Haddad 	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
5158925d046eSPatrisious Haddad 		goto out_unlock;
5159925d046eSPatrisious Haddad 
5160925d046eSPatrisious Haddad 	event.event = RDMA_CM_EVENT_UNREACHABLE;
5161925d046eSPatrisious Haddad 	event.status = -ETIMEDOUT;
5162925d046eSPatrisious Haddad 
5163925d046eSPatrisious Haddad 	if (cma_cm_event_handler(id_priv, &event)) {
5164925d046eSPatrisious Haddad 		__acquire(&id_priv->handler_mutex);
5165925d046eSPatrisious Haddad 		id_priv->cm_id.ib = NULL;
5166925d046eSPatrisious Haddad 		cma_id_put(id_priv);
5167925d046eSPatrisious Haddad 		destroy_id_handler_unlock(id_priv);
5168925d046eSPatrisious Haddad 		return;
5169925d046eSPatrisious Haddad 	}
5170925d046eSPatrisious Haddad 
5171925d046eSPatrisious Haddad out_unlock:
5172925d046eSPatrisious Haddad 	mutex_unlock(&id_priv->handler_mutex);
5173925d046eSPatrisious Haddad 	cma_id_put(id_priv);
5174925d046eSPatrisious Haddad }
5175925d046eSPatrisious Haddad 
5176925d046eSPatrisious Haddad static int cma_netevent_callback(struct notifier_block *self,
5177925d046eSPatrisious Haddad 				 unsigned long event, void *ctx)
5178925d046eSPatrisious Haddad {
5179925d046eSPatrisious Haddad 	struct id_table_entry *ips_node = NULL;
5180925d046eSPatrisious Haddad 	struct rdma_id_private *current_id;
5181925d046eSPatrisious Haddad 	struct neighbour *neigh = ctx;
5182925d046eSPatrisious Haddad 	unsigned long flags;
5183925d046eSPatrisious Haddad 
5184925d046eSPatrisious Haddad 	if (event != NETEVENT_NEIGH_UPDATE)
5185925d046eSPatrisious Haddad 		return NOTIFY_DONE;
5186925d046eSPatrisious Haddad 
5187925d046eSPatrisious Haddad 	spin_lock_irqsave(&id_table_lock, flags);
5188925d046eSPatrisious Haddad 	if (neigh->tbl->family == AF_INET6) {
5189925d046eSPatrisious Haddad 		struct sockaddr_in6 neigh_sock_6;
5190925d046eSPatrisious Haddad 
5191925d046eSPatrisious Haddad 		neigh_sock_6.sin6_family = AF_INET6;
5192925d046eSPatrisious Haddad 		neigh_sock_6.sin6_addr = *(struct in6_addr *)neigh->primary_key;
5193925d046eSPatrisious Haddad 		ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
5194925d046eSPatrisious Haddad 					     (struct sockaddr *)&neigh_sock_6);
5195925d046eSPatrisious Haddad 	} else if (neigh->tbl->family == AF_INET) {
5196925d046eSPatrisious Haddad 		struct sockaddr_in neigh_sock_4;
5197925d046eSPatrisious Haddad 
5198925d046eSPatrisious Haddad 		neigh_sock_4.sin_family = AF_INET;
5199925d046eSPatrisious Haddad 		neigh_sock_4.sin_addr.s_addr = *(__be32 *)(neigh->primary_key);
5200925d046eSPatrisious Haddad 		ips_node = node_from_ndev_ip(&id_table, neigh->dev->ifindex,
5201925d046eSPatrisious Haddad 					     (struct sockaddr *)&neigh_sock_4);
5202925d046eSPatrisious Haddad 	} else
5203925d046eSPatrisious Haddad 		goto out;
5204925d046eSPatrisious Haddad 
5205925d046eSPatrisious Haddad 	if (!ips_node)
5206925d046eSPatrisious Haddad 		goto out;
5207925d046eSPatrisious Haddad 
5208925d046eSPatrisious Haddad 	list_for_each_entry(current_id, &ips_node->id_list, id_list_entry) {
5209925d046eSPatrisious Haddad 		if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr,
5210925d046eSPatrisious Haddad 			   neigh->ha, ETH_ALEN))
5211925d046eSPatrisious Haddad 			continue;
5212925d046eSPatrisious Haddad 		INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler);
5213925d046eSPatrisious Haddad 		cma_id_get(current_id);
5214925d046eSPatrisious Haddad 		queue_work(cma_wq, &current_id->id.net_work);
5215925d046eSPatrisious Haddad 	}
5216925d046eSPatrisious Haddad out:
5217925d046eSPatrisious Haddad 	spin_unlock_irqrestore(&id_table_lock, flags);
5218925d046eSPatrisious Haddad 	return NOTIFY_DONE;
5219925d046eSPatrisious Haddad }
5220925d046eSPatrisious Haddad 
5221dd5bdff8SOr Gerlitz static struct notifier_block cma_nb = {
5222dd5bdff8SOr Gerlitz 	.notifier_call = cma_netdev_callback
5223dd5bdff8SOr Gerlitz };
5224dd5bdff8SOr Gerlitz 
5225925d046eSPatrisious Haddad static struct notifier_block cma_netevent_cb = {
5226925d046eSPatrisious Haddad 	.notifier_call = cma_netevent_callback
5227925d046eSPatrisious Haddad };
5228925d046eSPatrisious Haddad 
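/*
 * Deliver RDMA_CM_EVENT_DEVICE_REMOVAL to one ID.  The state is switched to
 * RDMA_CM_DEVICE_REMOVAL under id_priv->lock so a racing destroy is noticed;
 * if the ULP handler returns non-zero the ID is destroyed here, otherwise
 * any outstanding operation is cancelled.  The reference taken by
 * cma_process_remove() is dropped on every exit path.
 */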
52293647a28dSJason Gunthorpe 	static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
5230e51060f0SSean Hefty {
52313647a28dSJason Gunthorpe 	struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
5232550e5ca7SNir Muchtar 	enum rdma_cm_state state;
52333647a28dSJason Gunthorpe 	unsigned long flags;
5234e51060f0SSean Hefty 
5235de910bd9SOr Gerlitz 	mutex_lock(&id_priv->handler_mutex);
52363647a28dSJason Gunthorpe 	/* Record that we want to remove the device */
52373647a28dSJason Gunthorpe 	spin_lock_irqsave(&id_priv->lock, flags);
52383647a28dSJason Gunthorpe 	state = id_priv->state;
52393647a28dSJason Gunthorpe 	if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) {
52403647a28dSJason Gunthorpe 		spin_unlock_irqrestore(&id_priv->lock, flags);
5241de910bd9SOr Gerlitz 		mutex_unlock(&id_priv->handler_mutex);
52423647a28dSJason Gunthorpe 		cma_id_put(id_priv);
52433647a28dSJason Gunthorpe 		return;
52443647a28dSJason Gunthorpe 	}
52453647a28dSJason Gunthorpe 	id_priv->state = RDMA_CM_DEVICE_REMOVAL;
52463647a28dSJason Gunthorpe 	spin_unlock_irqrestore(&id_priv->lock, flags);
52473647a28dSJason Gunthorpe 
52483647a28dSJason Gunthorpe 	if (cma_cm_event_handler(id_priv, &event)) {
52493647a28dSJason Gunthorpe 		/*
52503647a28dSJason Gunthorpe 		 * At this point the ULP promises it won't call
52513647a28dSJason Gunthorpe 		 * rdma_destroy_id() concurrently
52523647a28dSJason Gunthorpe 		 */
52533647a28dSJason Gunthorpe 		cma_id_put(id_priv);
52543647a28dSJason Gunthorpe 		mutex_unlock(&id_priv->handler_mutex);
5255f6a9d47aSJason Gunthorpe 		trace_cm_id_destroy(id_priv);
5256f6a9d47aSJason Gunthorpe 		_destroy_id(id_priv, state);
52573647a28dSJason Gunthorpe 		return;
52583647a28dSJason Gunthorpe 	}
52593647a28dSJason Gunthorpe 	mutex_unlock(&id_priv->handler_mutex);
52603647a28dSJason Gunthorpe 
52613647a28dSJason Gunthorpe 	/*
52623647a28dSJason Gunthorpe 	 * If this races with destroy then the thread that first moves the state
52633647a28dSJason Gunthorpe 	 * to destroying performs the cancel.
52643647a28dSJason Gunthorpe 	 */
52653647a28dSJason Gunthorpe 	cma_cancel_operation(id_priv, state);
52663647a28dSJason Gunthorpe 	cma_id_put(id_priv);
5267e51060f0SSean Hefty }
5268e51060f0SSean Hefty 
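/*
 * Flush all IDs off a departing cma_device: each one is unlinked under the
 * global lock and handed to cma_send_device_removal_put() with the lock
 * dropped, then the initial device reference is released and we wait for the
 * remaining references to go away before the caller frees the structure.
 */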
5269e51060f0SSean Hefty 	static void cma_process_remove(struct cma_device *cma_dev)
5270e51060f0SSean Hefty {
5271e51060f0SSean Hefty 	mutex_lock(&lock);
5272e51060f0SSean Hefty 	while (!list_empty(&cma_dev->id_list)) {
52733647a28dSJason Gunthorpe 		struct rdma_id_private *id_priv = list_first_entry(
527499cfddb8SJason Gunthorpe 			&cma_dev->id_list, struct rdma_id_private, device_item);
5275e51060f0SSean Hefty 
527699cfddb8SJason Gunthorpe 		list_del_init(&id_priv->listen_item);
527799cfddb8SJason Gunthorpe 		list_del_init(&id_priv->device_item);
5278e368d23fSParav Pandit 		cma_id_get(id_priv);
5279e51060f0SSean Hefty 		mutex_unlock(&lock);
5280e51060f0SSean Hefty 
52813647a28dSJason Gunthorpe 		cma_send_device_removal_put(id_priv);
5282e51060f0SSean Hefty 
5283e51060f0SSean Hefty 		mutex_lock(&lock);
5284e51060f0SSean Hefty 	}
5285e51060f0SSean Hefty 	mutex_unlock(&lock);
5286e51060f0SSean Hefty 
52875ff8c8faSParav Pandit 	cma_dev_put(cma_dev);
5288e51060f0SSean Hefty 	wait_for_completion(&cma_dev->comp);
5289e51060f0SSean Hefty }
5290e51060f0SSean Hefty 
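/* rdma_cm only cares about devices with at least one IB CM or iWARP CM capable port. */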
52914d51c3d9SParav Pandit 	static bool cma_supported(struct ib_device *device)
52924d51c3d9SParav Pandit {
52934d51c3d9SParav Pandit 	u32 i;
52944d51c3d9SParav Pandit 
52954d51c3d9SParav Pandit 	rdma_for_each_port(device, i) {
52964d51c3d9SParav Pandit 		if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i))
52974d51c3d9SParav Pandit 			return true;
52984d51c3d9SParav Pandit 	}
52994d51c3d9SParav Pandit 	return false;
53004d51c3d9SParav Pandit }
53014d51c3d9SParav Pandit 
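/*
 * ib_client add callback.  Allocate a cma_device, choose a default GID type
 * per port (preferring CMA_PREFERRED_ROCE_GID_TYPE when the port supports
 * it) and a default RoCE ToS of 0, publish the device on dev_list, and
 * instantiate per-device listeners for every wildcard (listen-any) ID.
 */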
5302c80a0c52SLeon Romanovsky 	static int cma_add_one(struct ib_device *device)
5303c80a0c52SLeon Romanovsky {
5304dd37d2f5SJason Gunthorpe 	struct rdma_id_private *to_destroy;
5305c80a0c52SLeon Romanovsky 	struct cma_device *cma_dev;
5306c80a0c52SLeon Romanovsky 	struct rdma_id_private *id_priv;
5307c80a0c52SLeon Romanovsky 	unsigned long supported_gids = 0;
5308c80a0c52SLeon Romanovsky 	int ret;
53091fb7f897SMark Bloch 	u32 i;
5310c80a0c52SLeon Romanovsky 
53114d51c3d9SParav Pandit 	if (!cma_supported(device))
53124d51c3d9SParav Pandit 		return -EOPNOTSUPP;
53134d51c3d9SParav Pandit 
5314c80a0c52SLeon Romanovsky 	cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
5315c80a0c52SLeon Romanovsky 	if (!cma_dev)
5316c80a0c52SLeon Romanovsky 		return -ENOMEM;
5317c80a0c52SLeon Romanovsky 
5318c80a0c52SLeon Romanovsky 	cma_dev->device = device;
5319c80a0c52SLeon Romanovsky 	cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
5320c80a0c52SLeon Romanovsky 					    sizeof(*cma_dev->default_gid_type),
5321c80a0c52SLeon Romanovsky 					    GFP_KERNEL);
5322c80a0c52SLeon Romanovsky 	if (!cma_dev->default_gid_type) {
5323c80a0c52SLeon Romanovsky 		ret = -ENOMEM;
5324c80a0c52SLeon Romanovsky 		goto free_cma_dev;
5325c80a0c52SLeon Romanovsky 	}
5326c80a0c52SLeon Romanovsky 
5327c80a0c52SLeon Romanovsky 	cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
5328c80a0c52SLeon Romanovsky 					    sizeof(*cma_dev->default_roce_tos),
5329c80a0c52SLeon Romanovsky 					    GFP_KERNEL);
5330c80a0c52SLeon Romanovsky 	if (!cma_dev->default_roce_tos) {
5331c80a0c52SLeon Romanovsky 		ret = -ENOMEM;
5332c80a0c52SLeon Romanovsky 		goto free_gid_type;
5333c80a0c52SLeon Romanovsky 	}
5334c80a0c52SLeon Romanovsky 
5335c80a0c52SLeon Romanovsky 	rdma_for_each_port (device, i) {
5336c80a0c52SLeon Romanovsky 		supported_gids = roce_gid_type_mask_support(device, i);
5337c80a0c52SLeon Romanovsky 		WARN_ON(!supported_gids);
5338c80a0c52SLeon Romanovsky 		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
5339c80a0c52SLeon Romanovsky 			cma_dev->default_gid_type[i - rdma_start_port(device)] =
5340c80a0c52SLeon Romanovsky 				CMA_PREFERRED_ROCE_GID_TYPE;
5341c80a0c52SLeon Romanovsky 		else
5342c80a0c52SLeon Romanovsky 			cma_dev->default_gid_type[i - rdma_start_port(device)] =
5343c80a0c52SLeon Romanovsky 				find_first_bit(&supported_gids, BITS_PER_LONG);
5344c80a0c52SLeon Romanovsky 		cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
5345c80a0c52SLeon Romanovsky 	}
5346c80a0c52SLeon Romanovsky 
5347c80a0c52SLeon Romanovsky 	init_completion(&cma_dev->comp);
5348c80a0c52SLeon Romanovsky 	refcount_set(&cma_dev->refcount, 1);
5349c80a0c52SLeon Romanovsky 	INIT_LIST_HEAD(&cma_dev->id_list);
5350c80a0c52SLeon Romanovsky 	ib_set_client_data(device, &cma_client, cma_dev);
5351c80a0c52SLeon Romanovsky 
5352c80a0c52SLeon Romanovsky 	mutex_lock(&lock);
5353c80a0c52SLeon Romanovsky 	list_add_tail(&cma_dev->list, &dev_list);
535499cfddb8SJason Gunthorpe 	list_for_each_entry(id_priv, &listen_any_list, listen_any_item) {
5355dd37d2f5SJason Gunthorpe 		ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
5356c80a0c52SLeon Romanovsky 		if (ret)
5357c80a0c52SLeon Romanovsky 			goto free_listen;
5358c80a0c52SLeon Romanovsky 	}
5359c80a0c52SLeon Romanovsky 	mutex_unlock(&lock);
5360c80a0c52SLeon Romanovsky 
5361c80a0c52SLeon Romanovsky 	trace_cm_add_one(device);
5362c80a0c52SLeon Romanovsky 	return 0;
5363c80a0c52SLeon Romanovsky 
5364c80a0c52SLeon Romanovsky free_listen:
5365c80a0c52SLeon Romanovsky 	list_del(&cma_dev->list);
5366c80a0c52SLeon Romanovsky 	mutex_unlock(&lock);
5367c80a0c52SLeon Romanovsky 
5368dd37d2f5SJason Gunthorpe 	/* cma_process_remove() will delete to_destroy */
5369c80a0c52SLeon Romanovsky 	cma_process_remove(cma_dev);
5370c80a0c52SLeon Romanovsky 	kfree(cma_dev->default_roce_tos);
5371c80a0c52SLeon Romanovsky free_gid_type:
5372c80a0c52SLeon Romanovsky 	kfree(cma_dev->default_gid_type);
5373c80a0c52SLeon Romanovsky 
5374c80a0c52SLeon Romanovsky free_cma_dev:
5375c80a0c52SLeon Romanovsky 	kfree(cma_dev);
5376c80a0c52SLeon Romanovsky 	return ret;
5377c80a0c52SLeon Romanovsky }
5378c80a0c52SLeon Romanovsky 
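/*
 * ib_client remove callback: unlink the cma_device, tear down all of its
 * IDs via cma_process_remove(), then free the per-port defaults and the
 * device itself.
 */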
53797c1eb45aSHaggai Eran 	static void cma_remove_one(struct ib_device *device, void *client_data)
5380e51060f0SSean Hefty {
53817c1eb45aSHaggai Eran 	struct cma_device *cma_dev = client_data;
5382e51060f0SSean Hefty 
5383ed999f82SChuck Lever 	trace_cm_remove_one(device);
5384ed999f82SChuck Lever 
5385e51060f0SSean Hefty 	mutex_lock(&lock);
5386e51060f0SSean Hefty 	list_del(&cma_dev->list);
5387e51060f0SSean Hefty 	mutex_unlock(&lock);
5388e51060f0SSean Hefty 
5389e51060f0SSean Hefty 	cma_process_remove(cma_dev);
539089052d78SMajd Dibbiny 	kfree(cma_dev->default_roce_tos);
5391045959dbSMatan Barak 	kfree(cma_dev->default_gid_type);
5392e51060f0SSean Hefty 	kfree(cma_dev);
5393e51060f0SSean Hefty }
5394e51060f0SSean Hefty 
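/* Per-namespace state: one XArray of port bindings for each RDMA port space. */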
53954be74b42SHaggai Eran 	static int cma_init_net(struct net *net)
53964be74b42SHaggai Eran {
53974be74b42SHaggai Eran 	struct cma_pernet *pernet = cma_pernet(net);
53984be74b42SHaggai Eran 
539963826753SMatthew Wilcox 	xa_init(&pernet->tcp_ps);
540063826753SMatthew Wilcox 	xa_init(&pernet->udp_ps);
540163826753SMatthew Wilcox 	xa_init(&pernet->ipoib_ps);
540263826753SMatthew Wilcox 	xa_init(&pernet->ib_ps);
54034be74b42SHaggai Eran 
54044be74b42SHaggai Eran 	return 0;
54054be74b42SHaggai Eran }
54064be74b42SHaggai Eran 
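/* By namespace exit time every port-space binding must already be gone. */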
54074be74b42SHaggai Eran 	static void cma_exit_net(struct net *net)
54084be74b42SHaggai Eran {
54094be74b42SHaggai Eran 	struct cma_pernet *pernet = cma_pernet(net);
54104be74b42SHaggai Eran 
541163826753SMatthew Wilcox 	WARN_ON(!xa_empty(&pernet->tcp_ps));
541263826753SMatthew Wilcox 	WARN_ON(!xa_empty(&pernet->udp_ps));
541363826753SMatthew Wilcox 	WARN_ON(!xa_empty(&pernet->ipoib_ps));
541463826753SMatthew Wilcox 	WARN_ON(!xa_empty(&pernet->ib_ps));
54154be74b42SHaggai Eran }
54164be74b42SHaggai Eran 
54174be74b42SHaggai Eran static struct pernet_operations cma_pernet_operations = {
54184be74b42SHaggai Eran 	.init = cma_init_net,
54194be74b42SHaggai Eran 	.exit = cma_exit_net,
54204be74b42SHaggai Eran 	.id = &cma_pernet_id,
54214be74b42SHaggai Eran 	.size = sizeof(struct cma_pernet),
54224be74b42SHaggai Eran };
54234be74b42SHaggai Eran 
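/*
 * Module init: create the ordered rdma_cm workqueue, then register the
 * pernet subsystem, SA client, netdevice and netevent notifiers, the IB
 * client and configfs, unwinding in the reverse order on any failure.
 */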
5424716abb1fSPeter Huewe 	static int __init cma_init(void)
5425e51060f0SSean Hefty {
54265d7220e8STetsuo Handa 	int ret;
5427227b60f5SStephen Hemminger 
542832ac9e43SJason Gunthorpe 	/*
542932ac9e43SJason Gunthorpe 	 * There is a rare lock ordering dependency in cma_netdev_callback()
543032ac9e43SJason Gunthorpe 	 * that only happens when bonding is enabled. Teach lockdep that rtnl
543132ac9e43SJason Gunthorpe 	 * must never be nested under lock so it can find these without having
543232ac9e43SJason Gunthorpe 	 * to test with bonding.
543332ac9e43SJason Gunthorpe 	 */
543432ac9e43SJason Gunthorpe 	if (IS_ENABLED(CONFIG_LOCKDEP)) {
543532ac9e43SJason Gunthorpe 		rtnl_lock();
543632ac9e43SJason Gunthorpe 		mutex_lock(&lock);
543732ac9e43SJason Gunthorpe 		mutex_unlock(&lock);
543832ac9e43SJason Gunthorpe 		rtnl_unlock();
543932ac9e43SJason Gunthorpe 	}
544032ac9e43SJason Gunthorpe 
5441dee9acbbSBhaktipriya Shridhar 	cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
5442e51060f0SSean Hefty 	if (!cma_wq)
5443e51060f0SSean Hefty 		return -ENOMEM;
5444e51060f0SSean Hefty 
54454be74b42SHaggai Eran 	ret = register_pernet_subsys(&cma_pernet_operations);
54464be74b42SHaggai Eran 	if (ret)
54474be74b42SHaggai Eran 		goto err_wq;
54484be74b42SHaggai Eran 
5449c1a0b23bSMichael S. Tsirkin 	ib_sa_register_client(&sa_client);
5450dd5bdff8SOr Gerlitz 	register_netdevice_notifier(&cma_nb);
5451925d046eSPatrisious Haddad 	register_netevent_notifier(&cma_netevent_cb);
5452c1a0b23bSMichael S. Tsirkin 
5453e51060f0SSean Hefty 	ret = ib_register_client(&cma_client);
5454e51060f0SSean Hefty 	if (ret)
5455e51060f0SSean Hefty 		goto err;
5456753f618aSNir Muchtar 
5457a7bfb93fSzhengbin 	ret = cma_configfs_init();
5458a7bfb93fSzhengbin 	if (ret)
5459a7bfb93fSzhengbin 		goto err_ib;
5460753f618aSNir Muchtar 
5461e51060f0SSean Hefty 	return 0;
5462e51060f0SSean Hefty 
5463a7bfb93fSzhengbin err_ib:
5464a7bfb93fSzhengbin 	ib_unregister_client(&cma_client);
5465e51060f0SSean Hefty err:
5466925d046eSPatrisious Haddad 	unregister_netevent_notifier(&cma_netevent_cb);
5467dd5bdff8SOr Gerlitz 	unregister_netdevice_notifier(&cma_nb);
5468c1a0b23bSMichael S. Tsirkin 	ib_sa_unregister_client(&sa_client);
546944a7b675SChuhong Yuan 	unregister_pernet_subsys(&cma_pernet_operations);
54704be74b42SHaggai Eran err_wq:
5471e51060f0SSean Hefty 	destroy_workqueue(cma_wq);
5472e51060f0SSean Hefty 	return ret;
5473e51060f0SSean Hefty }
5474e51060f0SSean Hefty 
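/* Module exit: tear everything down in the reverse order of cma_init(). */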
5475716abb1fSPeter Huewe 	static void __exit cma_cleanup(void)
5476e51060f0SSean Hefty {
5477045959dbSMatan Barak 	cma_configfs_exit();
5478e51060f0SSean Hefty 	ib_unregister_client(&cma_client);
5479925d046eSPatrisious Haddad 	unregister_netevent_notifier(&cma_netevent_cb);
5480dd5bdff8SOr Gerlitz 	unregister_netdevice_notifier(&cma_nb);
5481c1a0b23bSMichael S. Tsirkin 	ib_sa_unregister_client(&sa_client);
54824be74b42SHaggai Eran 	unregister_pernet_subsys(&cma_pernet_operations);
5483e51060f0SSean Hefty 	destroy_workqueue(cma_wq);
5484e51060f0SSean Hefty }
5485e51060f0SSean Hefty 
5486e51060f0SSean Hefty module_init(cma_init);
5487e51060f0SSean Hefty module_exit(cma_cleanup);
5488