xref: /openbmc/linux/drivers/infiniband/core/cm.c (revision d9f6e12f)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
4  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
5  * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
6  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
7  * Copyright (c) 2019, Mellanox Technologies inc.  All rights reserved.
8  */
9 
10 #include <linux/completion.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/device.h>
13 #include <linux/module.h>
14 #include <linux/err.h>
15 #include <linux/idr.h>
16 #include <linux/interrupt.h>
17 #include <linux/random.h>
18 #include <linux/rbtree.h>
19 #include <linux/spinlock.h>
20 #include <linux/slab.h>
21 #include <linux/sysfs.h>
22 #include <linux/workqueue.h>
23 #include <linux/kdev_t.h>
24 #include <linux/etherdevice.h>
25 
26 #include <rdma/ib_cache.h>
27 #include <rdma/ib_cm.h>
28 #include "cm_msgs.h"
29 #include "core_priv.h"
30 #include "cm_trace.h"
31 
32 MODULE_AUTHOR("Sean Hefty");
33 MODULE_DESCRIPTION("InfiniBand CM");
34 MODULE_LICENSE("Dual BSD/GPL");
35 
36 static const char * const ibcm_rej_reason_strs[] = {
37 	[IB_CM_REJ_NO_QP]			= "no QP",
38 	[IB_CM_REJ_NO_EEC]			= "no EEC",
39 	[IB_CM_REJ_NO_RESOURCES]		= "no resources",
40 	[IB_CM_REJ_TIMEOUT]			= "timeout",
41 	[IB_CM_REJ_UNSUPPORTED]			= "unsupported",
42 	[IB_CM_REJ_INVALID_COMM_ID]		= "invalid comm ID",
43 	[IB_CM_REJ_INVALID_COMM_INSTANCE]	= "invalid comm instance",
44 	[IB_CM_REJ_INVALID_SERVICE_ID]		= "invalid service ID",
45 	[IB_CM_REJ_INVALID_TRANSPORT_TYPE]	= "invalid transport type",
46 	[IB_CM_REJ_STALE_CONN]			= "stale conn",
47 	[IB_CM_REJ_RDC_NOT_EXIST]		= "RDC not exist",
48 	[IB_CM_REJ_INVALID_GID]			= "invalid GID",
49 	[IB_CM_REJ_INVALID_LID]			= "invalid LID",
50 	[IB_CM_REJ_INVALID_SL]			= "invalid SL",
51 	[IB_CM_REJ_INVALID_TRAFFIC_CLASS]	= "invalid traffic class",
52 	[IB_CM_REJ_INVALID_HOP_LIMIT]		= "invalid hop limit",
53 	[IB_CM_REJ_INVALID_PACKET_RATE]		= "invalid packet rate",
54 	[IB_CM_REJ_INVALID_ALT_GID]		= "invalid alt GID",
55 	[IB_CM_REJ_INVALID_ALT_LID]		= "invalid alt LID",
56 	[IB_CM_REJ_INVALID_ALT_SL]		= "invalid alt SL",
57 	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]	= "invalid alt traffic class",
58 	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT]	= "invalid alt hop limit",
59 	[IB_CM_REJ_INVALID_ALT_PACKET_RATE]	= "invalid alt packet rate",
60 	[IB_CM_REJ_PORT_CM_REDIRECT]		= "port CM redirect",
61 	[IB_CM_REJ_PORT_REDIRECT]		= "port redirect",
62 	[IB_CM_REJ_INVALID_MTU]			= "invalid MTU",
63 	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES]	= "insufficient resp resources",
64 	[IB_CM_REJ_CONSUMER_DEFINED]		= "consumer defined",
65 	[IB_CM_REJ_INVALID_RNR_RETRY]		= "invalid RNR retry",
66 	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]	= "duplicate local comm ID",
67 	[IB_CM_REJ_INVALID_CLASS_VERSION]	= "invalid class version",
68 	[IB_CM_REJ_INVALID_FLOW_LABEL]		= "invalid flow label",
69 	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL]	= "invalid alt flow label",
70 	[IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
71 		"vendor option is not supported",
72 };
73 
74 const char *__attribute_const__ ibcm_reject_msg(int reason)
75 {
76 	size_t index = reason;
77 
78 	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
79 	    ibcm_rej_reason_strs[index])
80 		return ibcm_rej_reason_strs[index];
81 	else
82 		return "unrecognized reason";
83 }
84 EXPORT_SYMBOL(ibcm_reject_msg);
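
/*
 * Illustrative sketch (not part of this file): a consumer's CM event
 * handler could translate a received REJ into a readable log line.
 * "my_cm_handler" is a hypothetical name.
 *
 *	static int my_cm_handler(struct ib_cm_id *cm_id,
 *				 const struct ib_cm_event *event)
 *	{
 *		if (event->event == IB_CM_REJ_RECEIVED)
 *			pr_debug("REJ received: %s\n",
 *				 ibcm_reject_msg(event->param.rej_rcvd.reason));
 *		return 0;
 *	}
 */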
85 
86 struct cm_id_private;
87 struct cm_work;
88 static int cm_add_one(struct ib_device *device);
89 static void cm_remove_one(struct ib_device *device, void *client_data);
90 static void cm_process_work(struct cm_id_private *cm_id_priv,
91 			    struct cm_work *work);
92 static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
93 				   struct ib_cm_sidr_rep_param *param);
94 static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
95 			       const void *private_data, u8 private_data_len);
96 static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
97 			       void *private_data, u8 private_data_len);
98 static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
99 			      enum ib_cm_rej_reason reason, void *ari,
100 			      u8 ari_length, const void *private_data,
101 			      u8 private_data_len);
102 
103 static struct ib_client cm_client = {
104 	.name   = "cm",
105 	.add    = cm_add_one,
106 	.remove = cm_remove_one
107 };
108 
109 static struct ib_cm {
110 	spinlock_t lock;
111 	struct list_head device_list;
112 	rwlock_t device_lock;
113 	struct rb_root listen_service_table;
114 	u64 listen_service_id;
115 	/* struct rb_root peer_service_table; todo: fix peer to peer */
116 	struct rb_root remote_qp_table;
117 	struct rb_root remote_id_table;
118 	struct rb_root remote_sidr_table;
119 	struct xarray local_id_table;
120 	u32 local_id_next;
121 	__be32 random_id_operand;
122 	struct list_head timewait_list;
123 	struct workqueue_struct *wq;
124 	/* Serializes CM port state changes */
125 	spinlock_t state_lock;
126 } cm;
127 
128 /* Counter indexes ordered by attribute ID */
129 enum {
130 	CM_REQ_COUNTER,
131 	CM_MRA_COUNTER,
132 	CM_REJ_COUNTER,
133 	CM_REP_COUNTER,
134 	CM_RTU_COUNTER,
135 	CM_DREQ_COUNTER,
136 	CM_DREP_COUNTER,
137 	CM_SIDR_REQ_COUNTER,
138 	CM_SIDR_REP_COUNTER,
139 	CM_LAP_COUNTER,
140 	CM_APR_COUNTER,
141 	CM_ATTR_COUNT,
142 	CM_ATTR_ID_OFFSET = 0x0010,
143 };
144 
145 enum {
146 	CM_XMIT,
147 	CM_XMIT_RETRIES,
148 	CM_RECV,
149 	CM_RECV_DUPLICATES,
150 	CM_COUNTER_GROUPS
151 };
152 
153 static char const counter_group_names[CM_COUNTER_GROUPS]
154 				     [sizeof("cm_rx_duplicates")] = {
155 	"cm_tx_msgs", "cm_tx_retries",
156 	"cm_rx_msgs", "cm_rx_duplicates"
157 };
158 
159 struct cm_counter_group {
160 	struct kobject obj;
161 	atomic_long_t counter[CM_ATTR_COUNT];
162 };
163 
164 struct cm_counter_attribute {
165 	struct attribute attr;
166 	int index;
167 };
168 
169 #define CM_COUNTER_ATTR(_name, _index) \
170 struct cm_counter_attribute cm_##_name##_counter_attr = { \
171 	.attr = { .name = __stringify(_name), .mode = 0444 }, \
172 	.index = _index \
173 }
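
/*
 * For reference, CM_COUNTER_ATTR(req, CM_REQ_COUNTER) below expands to:
 *
 *	struct cm_counter_attribute cm_req_counter_attr = {
 *		.attr = { .name = "req", .mode = 0444 },
 *		.index = CM_REQ_COUNTER
 *	};
 */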
174 
175 static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
176 static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
177 static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
178 static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
179 static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
180 static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
181 static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
182 static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
183 static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
184 static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
185 static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
186 
187 static struct attribute *cm_counter_default_attrs[] = {
188 	&cm_req_counter_attr.attr,
189 	&cm_mra_counter_attr.attr,
190 	&cm_rej_counter_attr.attr,
191 	&cm_rep_counter_attr.attr,
192 	&cm_rtu_counter_attr.attr,
193 	&cm_dreq_counter_attr.attr,
194 	&cm_drep_counter_attr.attr,
195 	&cm_sidr_req_counter_attr.attr,
196 	&cm_sidr_rep_counter_attr.attr,
197 	&cm_lap_counter_attr.attr,
198 	&cm_apr_counter_attr.attr,
199 	NULL
200 };
201 
202 struct cm_port {
203 	struct cm_device *cm_dev;
204 	struct ib_mad_agent *mad_agent;
205 	u8 port_num;
206 	struct list_head cm_priv_prim_list;
207 	struct list_head cm_priv_altr_list;
208 	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
209 };
210 
211 struct cm_device {
212 	struct list_head list;
213 	struct ib_device *ib_device;
214 	u8 ack_delay;
215 	int going_down;
216 	struct cm_port *port[];
217 };
218 
219 struct cm_av {
220 	struct cm_port *port;
221 	union ib_gid dgid;
222 	struct rdma_ah_attr ah_attr;
223 	u16 pkey_index;
224 	u8 timeout;
225 };
226 
227 struct cm_work {
228 	struct delayed_work work;
229 	struct list_head list;
230 	struct cm_port *port;
231 	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
232 	__be32 local_id;			/* Established / timewait */
233 	__be32 remote_id;
234 	struct ib_cm_event cm_event;
235 	struct sa_path_rec path[];
236 };
237 
238 struct cm_timewait_info {
239 	struct cm_work work;
240 	struct list_head list;
241 	struct rb_node remote_qp_node;
242 	struct rb_node remote_id_node;
243 	__be64 remote_ca_guid;
244 	__be32 remote_qpn;
245 	u8 inserted_remote_qp;
246 	u8 inserted_remote_id;
247 };
248 
249 struct cm_id_private {
250 	struct ib_cm_id	id;
251 
252 	struct rb_node service_node;
253 	struct rb_node sidr_id_node;
254 	spinlock_t lock;	/* Do not acquire inside cm.lock */
255 	struct completion comp;
256 	refcount_t refcount;
257 	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
258 	 * Protected by the cm.lock spinlock. */
259 	int listen_sharecount;
260 	struct rcu_head rcu;
261 
262 	struct ib_mad_send_buf *msg;
263 	struct cm_timewait_info *timewait_info;
264 	/* todo: use alternate port on send failure */
265 	struct cm_av av;
266 	struct cm_av alt_av;
267 
268 	void *private_data;
269 	__be64 tid;
270 	__be32 local_qpn;
271 	__be32 remote_qpn;
272 	enum ib_qp_type qp_type;
273 	__be32 sq_psn;
274 	__be32 rq_psn;
275 	int timeout_ms;
276 	enum ib_mtu path_mtu;
277 	__be16 pkey;
278 	u8 private_data_len;
279 	u8 max_cm_retries;
280 	u8 responder_resources;
281 	u8 initiator_depth;
282 	u8 retry_count;
283 	u8 rnr_retry_count;
284 	u8 service_timeout;
285 	u8 target_ack_delay;
286 
287 	struct list_head prim_list;
288 	struct list_head altr_list;
289 	/* Indicates that the send port MAD agent is registered and the av is set */
290 	int prim_send_port_not_ready;
291 	int altr_send_port_not_ready;
292 
293 	struct list_head work_list;
294 	atomic_t work_count;
295 
296 	struct rdma_ucm_ece ece;
297 };
298 
299 static void cm_work_handler(struct work_struct *work);
300 
301 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
302 {
303 	if (refcount_dec_and_test(&cm_id_priv->refcount))
304 		complete(&cm_id_priv->comp);
305 }
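
/*
 * The completion above pairs with cm_destroy_id(), which drops its own
 * reference with cm_deref_id() and then blocks in
 * wait_for_completion(&cm_id_priv->comp) until all remaining holders
 * have dropped theirs.
 */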
306 
307 static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
308 			struct ib_mad_send_buf **msg)
309 {
310 	struct ib_mad_agent *mad_agent;
311 	struct ib_mad_send_buf *m;
312 	struct ib_ah *ah;
313 	struct cm_av *av;
314 	unsigned long flags, flags2;
315 	int ret = 0;
316 
317 	/* don't let the port be released until the agent is down */
318 	spin_lock_irqsave(&cm.state_lock, flags2);
319 	spin_lock_irqsave(&cm.lock, flags);
320 	if (!cm_id_priv->prim_send_port_not_ready)
321 		av = &cm_id_priv->av;
322 	else if (!cm_id_priv->altr_send_port_not_ready &&
323 		 (cm_id_priv->alt_av.port))
324 		av = &cm_id_priv->alt_av;
325 	else {
326 		pr_info("%s: not a valid CM id\n", __func__);
327 		ret = -ENODEV;
328 		spin_unlock_irqrestore(&cm.lock, flags);
329 		goto out;
330 	}
331 	spin_unlock_irqrestore(&cm.lock, flags);
332 	/* Make sure the port hasn't released the MAD agent yet */
333 	mad_agent = cm_id_priv->av.port->mad_agent;
334 	if (!mad_agent) {
335 		pr_info("%s: not a valid MAD agent\n", __func__);
336 		ret = -ENODEV;
337 		goto out;
338 	}
339 	ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
340 	if (IS_ERR(ah)) {
341 		ret = PTR_ERR(ah);
342 		goto out;
343 	}
344 
345 	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
346 			       av->pkey_index,
347 			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
348 			       GFP_ATOMIC,
349 			       IB_MGMT_BASE_VERSION);
350 	if (IS_ERR(m)) {
351 		rdma_destroy_ah(ah, 0);
352 		ret = PTR_ERR(m);
353 		goto out;
354 	}
355 
356 	/* Timeout set by caller if response is expected. */
357 	m->ah = ah;
358 	m->retries = cm_id_priv->max_cm_retries;
359 
360 	refcount_inc(&cm_id_priv->refcount);
361 	m->context[0] = cm_id_priv;
362 	*msg = m;
363 
364 out:
365 	spin_unlock_irqrestore(&cm.state_lock, flags2);
366 	return ret;
367 }
368 
369 static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
370 							   struct ib_mad_recv_wc *mad_recv_wc)
371 {
372 	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
373 				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
374 				  GFP_ATOMIC,
375 				  IB_MGMT_BASE_VERSION);
376 }
377 
378 static int cm_create_response_msg_ah(struct cm_port *port,
379 				     struct ib_mad_recv_wc *mad_recv_wc,
380 				     struct ib_mad_send_buf *msg)
381 {
382 	struct ib_ah *ah;
383 
384 	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
385 				  mad_recv_wc->recv_buf.grh, port->port_num);
386 	if (IS_ERR(ah))
387 		return PTR_ERR(ah);
388 
389 	msg->ah = ah;
390 	return 0;
391 }
392 
393 static void cm_free_msg(struct ib_mad_send_buf *msg)
394 {
395 	if (msg->ah)
396 		rdma_destroy_ah(msg->ah, 0);
397 	if (msg->context[0])
398 		cm_deref_id(msg->context[0]);
399 	ib_free_send_mad(msg);
400 }
401 
402 static int cm_alloc_response_msg(struct cm_port *port,
403 				 struct ib_mad_recv_wc *mad_recv_wc,
404 				 struct ib_mad_send_buf **msg)
405 {
406 	struct ib_mad_send_buf *m;
407 	int ret;
408 
409 	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
410 	if (IS_ERR(m))
411 		return PTR_ERR(m);
412 
413 	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
414 	if (ret) {
415 		cm_free_msg(m);
416 		return ret;
417 	}
418 
419 	*msg = m;
420 	return 0;
421 }
422 
423 static void *cm_copy_private_data(const void *private_data,
424 				  u8 private_data_len)
425 {
426 	void *data;
427 
428 	if (!private_data || !private_data_len)
429 		return NULL;
430 
431 	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
432 	if (!data)
433 		return ERR_PTR(-ENOMEM);
434 
435 	return data;
436 }
437 
438 static void cm_set_private_data(struct cm_id_private *cm_id_priv,
439 				 void *private_data, u8 private_data_len)
440 {
441 	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
442 		kfree(cm_id_priv->private_data);
443 
444 	cm_id_priv->private_data = private_data;
445 	cm_id_priv->private_data_len = private_data_len;
446 }
447 
448 static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
449 			      struct ib_grh *grh, struct cm_av *av)
450 {
451 	struct rdma_ah_attr new_ah_attr;
452 	int ret;
453 
454 	av->port = port;
455 	av->pkey_index = wc->pkey_index;
456 
457 	/*
458 	 * av->ah_attr might already be initialized from a past wc, either for
459 	 * an incoming connect request or while sending out a connect request.
460 	 * So initialize a new ah_attr on the stack. If initialization fails,
461 	 * the old ah_attr is still usable for sending any responses. If it
462 	 * succeeds, the new ah_attr replaces the old one.
463 	 */
464 	ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
465 				      port->port_num, wc,
466 				      grh, &new_ah_attr);
467 	if (ret)
468 		return ret;
469 
470 	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
471 	return 0;
472 }
473 
474 static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
475 				   struct ib_grh *grh, struct cm_av *av)
476 {
477 	av->port = port;
478 	av->pkey_index = wc->pkey_index;
479 	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
480 				       port->port_num, wc,
481 				       grh, &av->ah_attr);
482 }
483 
484 static void add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
485 				   struct cm_av *av, struct cm_port *port)
486 {
487 	unsigned long flags;
488 
489 	spin_lock_irqsave(&cm.lock, flags);
490 	if (&cm_id_priv->av == av)
491 		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
492 	else if (&cm_id_priv->alt_av == av)
493 		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
494 	else
495 		WARN_ON(true);
496 	spin_unlock_irqrestore(&cm.lock, flags);
497 }
498 
499 static struct cm_port *
500 get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
501 {
502 	struct cm_device *cm_dev;
503 	struct cm_port *port = NULL;
504 	unsigned long flags;
505 
506 	if (attr) {
507 		read_lock_irqsave(&cm.device_lock, flags);
508 		list_for_each_entry(cm_dev, &cm.device_list, list) {
509 			if (cm_dev->ib_device == attr->device) {
510 				port = cm_dev->port[attr->port_num - 1];
511 				break;
512 			}
513 		}
514 		read_unlock_irqrestore(&cm.device_lock, flags);
515 	} else {
516 		/* SGID attribute can be NULL under the following
517 		 * conditions:
518 		 * (a) Alternative path
519 		 * (b) IB link layer without GRH
520 		 * (c) LAP send messages
521 		 */
522 		read_lock_irqsave(&cm.device_lock, flags);
523 		list_for_each_entry(cm_dev, &cm.device_list, list) {
524 			attr = rdma_find_gid(cm_dev->ib_device,
525 					     &path->sgid,
526 					     sa_conv_pathrec_to_gid_type(path),
527 					     NULL);
528 			if (!IS_ERR(attr)) {
529 				port = cm_dev->port[attr->port_num - 1];
530 				break;
531 			}
532 		}
533 		read_unlock_irqrestore(&cm.device_lock, flags);
534 		if (port)
535 			rdma_put_gid_attr(attr);
536 	}
537 	return port;
538 }
539 
540 static int cm_init_av_by_path(struct sa_path_rec *path,
541 			      const struct ib_gid_attr *sgid_attr,
542 			      struct cm_av *av,
543 			      struct cm_id_private *cm_id_priv)
544 {
545 	struct rdma_ah_attr new_ah_attr;
546 	struct cm_device *cm_dev;
547 	struct cm_port *port;
548 	int ret;
549 
550 	port = get_cm_port_from_path(path, sgid_attr);
551 	if (!port)
552 		return -EINVAL;
553 	cm_dev = port->cm_dev;
554 
555 	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
556 				  be16_to_cpu(path->pkey), &av->pkey_index);
557 	if (ret)
558 		return ret;
559 
560 	av->port = port;
561 
562 	/*
563 	 * av->ah_attr might already be initialized from a wc or from earlier
564 	 * request processing, and may hold a reference to an sgid_attr.
565 	 * So initialize a new ah_attr on the stack.
566 	 * If initialization fails, the old ah_attr is still usable for
567 	 * sending any responses. If it succeeds, the new ah_attr replaces
568 	 * the old one, so the right ah_attr is used even when returning an
569 	 * error response.
570 	 */
571 	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
572 					&new_ah_attr, sgid_attr);
573 	if (ret)
574 		return ret;
575 
576 	av->timeout = path->packet_life_time + 1;
577 	add_cm_id_to_port_list(cm_id_priv, av, port);
578 	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
579 	return 0;
580 }
581 
582 static u32 cm_local_id(__be32 local_id)
583 {
584 	return (__force u32) (local_id ^ cm.random_id_operand);
585 }
586 
587 static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
588 {
589 	struct cm_id_private *cm_id_priv;
590 
591 	rcu_read_lock();
592 	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
593 	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
594 	    !refcount_inc_not_zero(&cm_id_priv->refcount))
595 		cm_id_priv = NULL;
596 	rcu_read_unlock();
597 
598 	return cm_id_priv;
599 }
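
/*
 * Sketch of the typical caller pattern: the reference taken by
 * refcount_inc_not_zero() above must be returned with cm_deref_id().
 *
 *	cm_id_priv = cm_acquire_id(local_id, remote_id);
 *	if (!cm_id_priv)
 *		return;	(the ID was destroyed or never existed)
 *	(... use cm_id_priv, taking its own lock as needed ...)
 *	cm_deref_id(cm_id_priv);
 */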
600 
601 /*
602  * Trivial helpers to strip endian annotation and compare; the
603  * endianness doesn't actually matter since we just need a stable
604  * order for the RB tree.
605  */
606 static int be32_lt(__be32 a, __be32 b)
607 {
608 	return (__force u32) a < (__force u32) b;
609 }
610 
611 static int be32_gt(__be32 a, __be32 b)
612 {
613 	return (__force u32) a > (__force u32) b;
614 }
615 
616 static int be64_lt(__be64 a, __be64 b)
617 {
618 	return (__force u64) a < (__force u64) b;
619 }
620 
621 static int be64_gt(__be64 a, __be64 b)
622 {
623 	return (__force u64) a > (__force u64) b;
624 }
625 
626 /*
627  * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
628  * if the new ID was inserted, NULL if it could not be inserted due to a
629  * collision, or the existing cm_id_priv ready for shared usage.
630  */
631 static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
632 					      ib_cm_handler shared_handler)
633 {
634 	struct rb_node **link = &cm.listen_service_table.rb_node;
635 	struct rb_node *parent = NULL;
636 	struct cm_id_private *cur_cm_id_priv;
637 	__be64 service_id = cm_id_priv->id.service_id;
638 	__be64 service_mask = cm_id_priv->id.service_mask;
639 	unsigned long flags;
640 
641 	spin_lock_irqsave(&cm.lock, flags);
642 	while (*link) {
643 		parent = *link;
644 		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
645 					  service_node);
646 		if ((cur_cm_id_priv->id.service_mask & service_id) ==
647 		    (service_mask & cur_cm_id_priv->id.service_id) &&
648 		    (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
649 			/*
650 			 * Sharing an ib_cm_id with different handlers is not
651 			 * supported
652 			 */
653 			if (cur_cm_id_priv->id.cm_handler != shared_handler ||
654 			    cur_cm_id_priv->id.context ||
655 			    WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
656 				spin_unlock_irqrestore(&cm.lock, flags);
657 				return NULL;
658 			}
659 			refcount_inc(&cur_cm_id_priv->refcount);
660 			cur_cm_id_priv->listen_sharecount++;
661 			spin_unlock_irqrestore(&cm.lock, flags);
662 			return cur_cm_id_priv;
663 		}
664 
665 		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
666 			link = &(*link)->rb_left;
667 		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
668 			link = &(*link)->rb_right;
669 		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
670 			link = &(*link)->rb_left;
671 		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
672 			link = &(*link)->rb_right;
673 		else
674 			link = &(*link)->rb_right;
675 	}
676 	cm_id_priv->listen_sharecount++;
677 	rb_link_node(&cm_id_priv->service_node, parent, link);
678 	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
679 	spin_unlock_irqrestore(&cm.lock, flags);
680 	return cm_id_priv;
681 }
682 
683 static struct cm_id_private *cm_find_listen(struct ib_device *device,
684 					    __be64 service_id)
685 {
686 	struct rb_node *node = cm.listen_service_table.rb_node;
687 	struct cm_id_private *cm_id_priv;
688 
689 	while (node) {
690 		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
691 		if ((cm_id_priv->id.service_mask & service_id) ==
692 		     cm_id_priv->id.service_id &&
693 		    (cm_id_priv->id.device == device)) {
694 			refcount_inc(&cm_id_priv->refcount);
695 			return cm_id_priv;
696 		}
697 		if (device < cm_id_priv->id.device)
698 			node = node->rb_left;
699 		else if (device > cm_id_priv->id.device)
700 			node = node->rb_right;
701 		else if (be64_lt(service_id, cm_id_priv->id.service_id))
702 			node = node->rb_left;
703 		else if (be64_gt(service_id, cm_id_priv->id.service_id))
704 			node = node->rb_right;
705 		else
706 			node = node->rb_right;
707 	}
708 	return NULL;
709 }
710 
711 static struct cm_timewait_info *
712 cm_insert_remote_id(struct cm_timewait_info *timewait_info)
713 {
714 	struct rb_node **link = &cm.remote_id_table.rb_node;
715 	struct rb_node *parent = NULL;
716 	struct cm_timewait_info *cur_timewait_info;
717 	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
718 	__be32 remote_id = timewait_info->work.remote_id;
719 
720 	while (*link) {
721 		parent = *link;
722 		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
723 					     remote_id_node);
724 		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
725 			link = &(*link)->rb_left;
726 		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
727 			link = &(*link)->rb_right;
728 		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
729 			link = &(*link)->rb_left;
730 		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
731 			link = &(*link)->rb_right;
732 		else
733 			return cur_timewait_info;
734 	}
735 	timewait_info->inserted_remote_id = 1;
736 	rb_link_node(&timewait_info->remote_id_node, parent, link);
737 	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
738 	return NULL;
739 }
740 
741 static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
742 					       __be32 remote_id)
743 {
744 	struct rb_node *node = cm.remote_id_table.rb_node;
745 	struct cm_timewait_info *timewait_info;
746 	struct cm_id_private *res = NULL;
747 
748 	spin_lock_irq(&cm.lock);
749 	while (node) {
750 		timewait_info = rb_entry(node, struct cm_timewait_info,
751 					 remote_id_node);
752 		if (be32_lt(remote_id, timewait_info->work.remote_id))
753 			node = node->rb_left;
754 		else if (be32_gt(remote_id, timewait_info->work.remote_id))
755 			node = node->rb_right;
756 		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
757 			node = node->rb_left;
758 		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
759 			node = node->rb_right;
760 		else {
761 			res = cm_acquire_id(timewait_info->work.local_id,
762 					     timewait_info->work.remote_id);
763 			break;
764 		}
765 	}
766 	spin_unlock_irq(&cm.lock);
767 	return res;
768 }
769 
770 static struct cm_timewait_info *
771 cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
772 {
773 	struct rb_node **link = &cm.remote_qp_table.rb_node;
774 	struct rb_node *parent = NULL;
775 	struct cm_timewait_info *cur_timewait_info;
776 	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
777 	__be32 remote_qpn = timewait_info->remote_qpn;
778 
779 	while (*link) {
780 		parent = *link;
781 		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
782 					     remote_qp_node);
783 		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
784 			link = &(*link)->rb_left;
785 		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
786 			link = &(*link)->rb_right;
787 		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
788 			link = &(*link)->rb_left;
789 		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
790 			link = &(*link)->rb_right;
791 		else
792 			return cur_timewait_info;
793 	}
794 	timewait_info->inserted_remote_qp = 1;
795 	rb_link_node(&timewait_info->remote_qp_node, parent, link);
796 	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
797 	return NULL;
798 }
799 
800 static struct cm_id_private *
801 cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
802 {
803 	struct rb_node **link = &cm.remote_sidr_table.rb_node;
804 	struct rb_node *parent = NULL;
805 	struct cm_id_private *cur_cm_id_priv;
806 	union ib_gid *port_gid = &cm_id_priv->av.dgid;
807 	__be32 remote_id = cm_id_priv->id.remote_id;
808 
809 	while (*link) {
810 		parent = *link;
811 		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
812 					  sidr_id_node);
813 		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
814 			link = &(*link)->rb_left;
815 		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
816 			link = &(*link)->rb_right;
817 		else {
818 			int cmp;
819 			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
820 				     sizeof *port_gid);
821 			if (cmp < 0)
822 				link = &(*link)->rb_left;
823 			else if (cmp > 0)
824 				link = &(*link)->rb_right;
825 			else
826 				return cur_cm_id_priv;
827 		}
828 	}
829 	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
830 	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
831 	return NULL;
832 }
833 
834 static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
835 					      ib_cm_handler cm_handler,
836 					      void *context)
837 {
838 	struct cm_id_private *cm_id_priv;
839 	u32 id;
840 	int ret;
841 
842 	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
843 	if (!cm_id_priv)
844 		return ERR_PTR(-ENOMEM);
845 
846 	cm_id_priv->id.state = IB_CM_IDLE;
847 	cm_id_priv->id.device = device;
848 	cm_id_priv->id.cm_handler = cm_handler;
849 	cm_id_priv->id.context = context;
850 	cm_id_priv->id.remote_cm_qpn = 1;
851 
852 	RB_CLEAR_NODE(&cm_id_priv->service_node);
853 	RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
854 	spin_lock_init(&cm_id_priv->lock);
855 	init_completion(&cm_id_priv->comp);
856 	INIT_LIST_HEAD(&cm_id_priv->work_list);
857 	INIT_LIST_HEAD(&cm_id_priv->prim_list);
858 	INIT_LIST_HEAD(&cm_id_priv->altr_list);
859 	atomic_set(&cm_id_priv->work_count, -1);
860 	refcount_set(&cm_id_priv->refcount, 1);
861 
862 	ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
863 			      &cm.local_id_next, GFP_KERNEL);
864 	if (ret < 0)
865 		goto error;
866 	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
867 
868 	return cm_id_priv;
869 
870 error:
871 	kfree(cm_id_priv);
872 	return ERR_PTR(ret);
873 }
874 
875 /*
876  * Make the ID visible to the MAD handlers and other threads that use the
877  * xarray.
878  */
879 static void cm_finalize_id(struct cm_id_private *cm_id_priv)
880 {
881 	xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
882 		 cm_id_priv, GFP_ATOMIC);
883 }
884 
885 struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
886 				 ib_cm_handler cm_handler,
887 				 void *context)
888 {
889 	struct cm_id_private *cm_id_priv;
890 
891 	cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
892 	if (IS_ERR(cm_id_priv))
893 		return ERR_CAST(cm_id_priv);
894 
895 	cm_finalize_id(cm_id_priv);
896 	return &cm_id_priv->id;
897 }
898 EXPORT_SYMBOL(ib_create_cm_id);
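
/*
 * Illustrative usage sketch ("my_cm_handler" and "my_ctx" are hypothetical):
 *
 *	struct ib_cm_id *cm_id;
 *
 *	cm_id = ib_create_cm_id(device, my_cm_handler, my_ctx);
 *	if (IS_ERR(cm_id))
 *		return PTR_ERR(cm_id);
 *	(... listen or send a REQ ...)
 *	ib_destroy_cm_id(cm_id);
 */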
899 
900 static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
901 {
902 	struct cm_work *work;
903 
904 	if (list_empty(&cm_id_priv->work_list))
905 		return NULL;
906 
907 	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
908 	list_del(&work->list);
909 	return work;
910 }
911 
912 static void cm_free_work(struct cm_work *work)
913 {
914 	if (work->mad_recv_wc)
915 		ib_free_recv_mad(work->mad_recv_wc);
916 	kfree(work);
917 }
918 
919 static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
920 				 struct cm_work *work)
921 	__releases(&cm_id_priv->lock)
922 {
923 	bool immediate;
924 
925 	/*
926 	 * To deliver the event to the user callback we have to drop the
927 	 * spinlock; however, we need to ensure that the user callback is
928 	 * single threaded and receives events in temporal order. If events
929 	 * are already being processed, thread new events onto a list and
930 	 * the thread currently processing will pick them up.
931 	 */
932 	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
933 	if (!immediate) {
934 		list_add_tail(&work->list, &cm_id_priv->work_list);
935 		/*
936 		 * This routine always consumes the incoming reference. Once
937 		 * the work is queued to the work_list, a reference is held by
938 		 * the thread currently running cm_process_work() and this
939 		 * reference is no longer needed.
940 		 */
941 		cm_deref_id(cm_id_priv);
942 	}
943 	spin_unlock_irq(&cm_id_priv->lock);
944 
945 	if (immediate)
946 		cm_process_work(cm_id_priv, work);
947 }
948 
949 static inline int cm_convert_to_ms(int iba_time)
950 {
951 	/* approximate conversion to ms from 4.096us x 2^iba_time */
952 	return 1 << max(iba_time - 8, 0);
953 }
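
/*
 * Worked example: IBA encodes time as 4.096us * 2^iba_time, and
 * 4.096us * 2^8 is roughly 1ms, so subtracting 8 from the exponent
 * approximates milliseconds.  E.g. iba_time = 14 gives 1 << 6 = 64ms,
 * versus the exact 4.096us * 2^14 = 67.1ms.
 */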
954 
955 /*
956  * Calculate: 4.096us x 2^ack_timeout >= 4.096us x 2^ack_delay + 2 x 4.096us x 2^life_time
957  * Because of how ack_timeout is stored, adding one doubles the timeout.
958  * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
959  * increment it (round up) only if the other is within 50%.
960  */
961 static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
962 {
963 	int ack_timeout = packet_life_time + 1;
964 
965 	if (ack_timeout >= ca_ack_delay)
966 		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
967 	else
968 		ack_timeout = ca_ack_delay +
969 			      (ack_timeout >= (ca_ack_delay - 1));
970 
971 	return min(31, ack_timeout);
972 }
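
/*
 * Worked example: with ca_ack_delay = 15 and packet_life_time = 14,
 * ack_timeout starts at 15 (life_time + 1 doubles the one-way time) and
 * is bumped to 16 because ca_ack_delay is within 50%; indeed
 * 2^16 = 2^15 + 2 * 2^14 exactly.  With ca_ack_delay = 10 instead, the
 * bump is skipped and the result stays 15.
 */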
973 
974 static void cm_remove_remote(struct cm_id_private *cm_id_priv)
975 {
976 	struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;
977 
978 	if (timewait_info->inserted_remote_id) {
979 		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
980 		timewait_info->inserted_remote_id = 0;
981 	}
982 
983 	if (timewait_info->inserted_remote_qp) {
984 		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
985 		timewait_info->inserted_remote_qp = 0;
986 	}
987 }
988 
989 static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
990 {
991 	struct cm_timewait_info *timewait_info;
992 
993 	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
994 	if (!timewait_info)
995 		return ERR_PTR(-ENOMEM);
996 
997 	timewait_info->work.local_id = local_id;
998 	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
999 	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
1000 	return timewait_info;
1001 }
1002 
1003 static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
1004 {
1005 	int wait_time;
1006 	unsigned long flags;
1007 	struct cm_device *cm_dev;
1008 
1009 	lockdep_assert_held(&cm_id_priv->lock);
1010 
1011 	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
1012 	if (!cm_dev)
1013 		return;
1014 
1015 	spin_lock_irqsave(&cm.lock, flags);
1016 	cm_remove_remote(cm_id_priv);
1017 	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
1018 	spin_unlock_irqrestore(&cm.lock, flags);
1019 
1020 	/*
1021 	 * The cm_id could be destroyed by the user before we exit timewait.
1022 	 * To protect against this, we search for the cm_id after exiting
1023 	 * timewait before notifying the user that we've exited timewait.
1024 	 */
1025 	cm_id_priv->id.state = IB_CM_TIMEWAIT;
1026 	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
1027 
1028 	/* Check if the device started its remove_one */
1029 	spin_lock_irqsave(&cm.lock, flags);
1030 	if (!cm_dev->going_down)
1031 		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
1032 				   msecs_to_jiffies(wait_time));
1033 	spin_unlock_irqrestore(&cm.lock, flags);
1034 
1035 	/*
1036 	 * The timewait_info is converted into a work and gets freed during
1037 	 * cm_free_work() in cm_timewait_handler().
1038 	 */
1039 	BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
1040 	cm_id_priv->timewait_info = NULL;
1041 }
1042 
1043 static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
1044 {
1045 	unsigned long flags;
1046 
1047 	lockdep_assert_held(&cm_id_priv->lock);
1048 
1049 	cm_id_priv->id.state = IB_CM_IDLE;
1050 	if (cm_id_priv->timewait_info) {
1051 		spin_lock_irqsave(&cm.lock, flags);
1052 		cm_remove_remote(cm_id_priv);
1053 		spin_unlock_irqrestore(&cm.lock, flags);
1054 		kfree(cm_id_priv->timewait_info);
1055 		cm_id_priv->timewait_info = NULL;
1056 	}
1057 }
1058 
1059 static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
1060 {
1061 	struct cm_id_private *cm_id_priv;
1062 	struct cm_work *work;
1063 
1064 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1065 	spin_lock_irq(&cm_id_priv->lock);
1066 retest:
1067 	switch (cm_id->state) {
1068 	case IB_CM_LISTEN:
1069 		spin_lock(&cm.lock);
1070 		if (--cm_id_priv->listen_sharecount > 0) {
1071 			/* The id is still shared. */
1072 			WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
1073 			spin_unlock(&cm.lock);
1074 			spin_unlock_irq(&cm_id_priv->lock);
1075 			cm_deref_id(cm_id_priv);
1076 			return;
1077 		}
1078 		cm_id->state = IB_CM_IDLE;
1079 		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
1080 		RB_CLEAR_NODE(&cm_id_priv->service_node);
1081 		spin_unlock(&cm.lock);
1082 		break;
1083 	case IB_CM_SIDR_REQ_SENT:
1084 		cm_id->state = IB_CM_IDLE;
1085 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1086 		break;
1087 	case IB_CM_SIDR_REQ_RCVD:
1088 		cm_send_sidr_rep_locked(cm_id_priv,
1089 					&(struct ib_cm_sidr_rep_param){
1090 						.status = IB_SIDR_REJECT });
1091 		/* cm_send_sidr_rep_locked will not move to IDLE if it fails */
1092 		cm_id->state = IB_CM_IDLE;
1093 		break;
1094 	case IB_CM_REQ_SENT:
1095 	case IB_CM_MRA_REQ_RCVD:
1096 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1097 		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
1098 				   &cm_id_priv->id.device->node_guid,
1099 				   sizeof(cm_id_priv->id.device->node_guid),
1100 				   NULL, 0);
1101 		break;
1102 	case IB_CM_REQ_RCVD:
1103 		if (err == -ENOMEM) {
1104 			/* Do not reject, to allow future retries. */
1105 			cm_reset_to_idle(cm_id_priv);
1106 		} else {
1107 			cm_send_rej_locked(cm_id_priv,
1108 					   IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
1109 					   NULL, 0);
1110 		}
1111 		break;
1112 	case IB_CM_REP_SENT:
1113 	case IB_CM_MRA_REP_RCVD:
1114 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1115 		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
1116 				   0, NULL, 0);
1117 		goto retest;
1118 	case IB_CM_MRA_REQ_SENT:
1119 	case IB_CM_REP_RCVD:
1120 	case IB_CM_MRA_REP_SENT:
1121 		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
1122 				   0, NULL, 0);
1123 		break;
1124 	case IB_CM_ESTABLISHED:
1125 		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
1126 			cm_id->state = IB_CM_IDLE;
1127 			break;
1128 		}
1129 		cm_send_dreq_locked(cm_id_priv, NULL, 0);
1130 		goto retest;
1131 	case IB_CM_DREQ_SENT:
1132 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1133 		cm_enter_timewait(cm_id_priv);
1134 		goto retest;
1135 	case IB_CM_DREQ_RCVD:
1136 		cm_send_drep_locked(cm_id_priv, NULL, 0);
1137 		WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
1138 		goto retest;
1139 	case IB_CM_TIMEWAIT:
1140 		/*
1141 		 * The cm_acquire_id in cm_timewait_handler will stop working
1142 		 * once we do xa_erase below, so just move to idle here for
1143 		 * consistency.
1144 		 */
1145 		cm_id->state = IB_CM_IDLE;
1146 		break;
1147 	case IB_CM_IDLE:
1148 		break;
1149 	}
1150 	WARN_ON(cm_id->state != IB_CM_IDLE);
1151 
1152 	spin_lock(&cm.lock);
1153 	/* Required for cleanup paths related to cm_req_handler() */
1154 	if (cm_id_priv->timewait_info) {
1155 		cm_remove_remote(cm_id_priv);
1156 		kfree(cm_id_priv->timewait_info);
1157 		cm_id_priv->timewait_info = NULL;
1158 	}
1159 	if (!list_empty(&cm_id_priv->altr_list) &&
1160 	    (!cm_id_priv->altr_send_port_not_ready))
1161 		list_del(&cm_id_priv->altr_list);
1162 	if (!list_empty(&cm_id_priv->prim_list) &&
1163 	    (!cm_id_priv->prim_send_port_not_ready))
1164 		list_del(&cm_id_priv->prim_list);
1165 	WARN_ON(cm_id_priv->listen_sharecount);
1166 	WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
1167 	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
1168 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
1169 	spin_unlock(&cm.lock);
1170 	spin_unlock_irq(&cm_id_priv->lock);
1171 
1172 	xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
1173 	cm_deref_id(cm_id_priv);
1174 	wait_for_completion(&cm_id_priv->comp);
1175 	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
1176 		cm_free_work(work);
1177 
1178 	rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
1179 	rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
1180 	kfree(cm_id_priv->private_data);
1181 	kfree_rcu(cm_id_priv, rcu);
1182 }
1183 
1184 void ib_destroy_cm_id(struct ib_cm_id *cm_id)
1185 {
1186 	cm_destroy_id(cm_id, 0);
1187 }
1188 EXPORT_SYMBOL(ib_destroy_cm_id);
1189 
1190 static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
1191 			  __be64 service_mask)
1192 {
1193 	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
1194 	service_id &= service_mask;
1195 	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
1196 	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
1197 		return -EINVAL;
1198 
1199 	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
1200 		cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
1201 		cm_id_priv->id.service_mask = ~cpu_to_be64(0);
1202 	} else {
1203 		cm_id_priv->id.service_id = service_id;
1204 		cm_id_priv->id.service_mask = service_mask;
1205 	}
1206 	return 0;
1207 }
1208 
1209 /**
1210  * ib_cm_listen - Initiates listening on the specified service ID for
1211  *   connection and service ID resolution requests.
1212  * @cm_id: Connection identifier associated with the listen request.
1213  * @service_id: Service identifier matched against incoming connection
1214  *   and service ID resolution requests.  The service ID should be specified
1215  *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1216  *   assign a service ID to the caller.
1217  * @service_mask: Mask applied to service ID used to listen across a
1218  *   range of service IDs.  If set to 0, the service ID is matched
1219  *   exactly.  This parameter is ignored if %service_id is set to
1220  *   IB_CM_ASSIGN_SERVICE_ID.
1221  */
1222 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
1223 {
1224 	struct cm_id_private *cm_id_priv =
1225 		container_of(cm_id, struct cm_id_private, id);
1226 	unsigned long flags;
1227 	int ret;
1228 
1229 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1230 	if (cm_id_priv->id.state != IB_CM_IDLE) {
1231 		ret = -EINVAL;
1232 		goto out;
1233 	}
1234 
1235 	ret = cm_init_listen(cm_id_priv, service_id, service_mask);
1236 	if (ret)
1237 		goto out;
1238 
1239 	if (!cm_insert_listen(cm_id_priv, NULL)) {
1240 		ret = -EBUSY;
1241 		goto out;
1242 	}
1243 
1244 	cm_id_priv->id.state = IB_CM_LISTEN;
1245 	ret = 0;
1246 
1247 out:
1248 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1249 	return ret;
1250 }
1251 EXPORT_SYMBOL(ib_cm_listen);
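
/*
 * Illustrative sketch (hypothetical service ID): listen for exact matches
 * on one service ID; a zero mask means match exactly.
 *
 *	ret = ib_cm_listen(cm_id, cpu_to_be64(0x1000ULL), 0);
 *	if (ret)
 *		goto err;
 */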
1252 
1253 /**
1254  * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
1255  *			 the given service ID.
1256  *
1257  * If there's an existing ID listening on that same device and service ID,
1258  * return it.
1259  *
1260  * @device: Device associated with the cm_id.  All related communication will
1261  * be associated with the specified device.
1262  * @cm_handler: Callback invoked to notify the user of CM events.
1263  * @service_id: Service identifier matched against incoming connection
1264  *   and service ID resolution requests.  The service ID should be specified
1265  *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
1266  *   assign a service ID to the caller.
1267  *
1268  * Callers should call ib_destroy_cm_id when done with the listener ID.
1269  */
1270 struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
1271 				     ib_cm_handler cm_handler,
1272 				     __be64 service_id)
1273 {
1274 	struct cm_id_private *listen_id_priv;
1275 	struct cm_id_private *cm_id_priv;
1276 	int err = 0;
1277 
1278 	/* Create an ID in advance, since the creation may sleep */
1279 	cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
1280 	if (IS_ERR(cm_id_priv))
1281 		return ERR_CAST(cm_id_priv);
1282 
1283 	err = cm_init_listen(cm_id_priv, service_id, 0);
1284 	if (err)
1285 		return ERR_PTR(err);
1286 
1287 	spin_lock_irq(&cm_id_priv->lock);
1288 	listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
1289 	if (listen_id_priv != cm_id_priv) {
1290 		spin_unlock_irq(&cm_id_priv->lock);
1291 		ib_destroy_cm_id(&cm_id_priv->id);
1292 		if (!listen_id_priv)
1293 			return ERR_PTR(-EINVAL);
1294 		return &listen_id_priv->id;
1295 	}
1296 	cm_id_priv->id.state = IB_CM_LISTEN;
1297 	spin_unlock_irq(&cm_id_priv->lock);
1298 
1299 	/*
1300 	 * A listen ID does not need to be in the xarray since it does not
1301 	 * receive MADs, is not placed in the remote_id or remote_qpn rbtree,
1302 	 * and does not enter timewait.
1303 	 */
1304 
1305 	return &cm_id_priv->id;
1306 }
1307 EXPORT_SYMBOL(ib_cm_insert_listen);
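
/*
 * Illustrative sketch: two calls with the same device, handler, and
 * service ID return the same shared listen ID, with listen_sharecount
 * incremented.  Each consumer still releases its share through
 * ib_destroy_cm_id().
 *
 *	id1 = ib_cm_insert_listen(device, my_handler, svc_id);
 *	id2 = ib_cm_insert_listen(device, my_handler, svc_id);
 *	(id1 == id2 here, assuming both calls succeed)
 */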
1308 
1309 static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
1310 {
1311 	u64 hi_tid, low_tid;
1312 
1313 	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
1314 	low_tid  = (u64)cm_id_priv->id.local_id;
1315 	return cpu_to_be64(hi_tid | low_tid);
1316 }
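
/*
 * Example: if the MAD agent's hi_tid is 0x12345678 and the raw value of
 * local_id is 0x9abcdef0, the TID becomes 0x123456789abcdef0 before the
 * cpu_to_be64() conversion: the high 32 bits identify the sending agent,
 * the low 32 bits the local connection ID.
 */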
1317 
1318 static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
1319 			      __be16 attr_id, __be64 tid)
1320 {
1321 	hdr->base_version  = IB_MGMT_BASE_VERSION;
1322 	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
1323 	hdr->class_version = IB_CM_CLASS_VERSION;
1324 	hdr->method	   = IB_MGMT_METHOD_SEND;
1325 	hdr->attr_id	   = attr_id;
1326 	hdr->tid	   = tid;
1327 }
1328 
1329 static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
1330 				  __be64 tid, u32 attr_mod)
1331 {
1332 	cm_format_mad_hdr(hdr, attr_id, tid);
1333 	hdr->attr_mod = cpu_to_be32(attr_mod);
1334 }
1335 
1336 static void cm_format_req(struct cm_req_msg *req_msg,
1337 			  struct cm_id_private *cm_id_priv,
1338 			  struct ib_cm_req_param *param)
1339 {
1340 	struct sa_path_rec *pri_path = param->primary_path;
1341 	struct sa_path_rec *alt_path = param->alternate_path;
1342 	bool pri_ext = false;
1343 
1344 	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
1345 		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
1346 					      pri_path->opa.slid);
1347 
1348 	cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
1349 			      cm_form_tid(cm_id_priv), param->ece.attr_mod);
1350 
1351 	IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
1352 		be32_to_cpu(cm_id_priv->id.local_id));
1353 	IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
1354 	IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
1355 		be64_to_cpu(cm_id_priv->id.device->node_guid));
1356 	IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
1357 	IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
1358 	IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
1359 		param->remote_cm_response_timeout);
1360 	cm_req_set_qp_type(req_msg, param->qp_type);
1361 	IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
1362 	IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
1363 	IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
1364 		param->local_cm_response_timeout);
1365 	IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
1366 		be16_to_cpu(param->primary_path->pkey));
1367 	IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
1368 		param->primary_path->mtu);
1369 	IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);
1370 
1371 	if (param->qp_type != IB_QPT_XRC_INI) {
1372 		IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
1373 			param->responder_resources);
1374 		IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
1375 		IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
1376 			param->rnr_retry_count);
1377 		IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
1378 	}
1379 
1380 	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
1381 		pri_path->sgid;
1382 	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
1383 		pri_path->dgid;
1384 	if (pri_ext) {
1385 		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
1386 			->global.interface_id =
1387 			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
1388 		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
1389 			->global.interface_id =
1390 			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
1391 	}
1392 	if (pri_path->hop_limit <= 1) {
1393 		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1394 			be16_to_cpu(pri_ext ? 0 :
1395 					      htons(ntohl(sa_path_get_slid(
1396 						      pri_path)))));
1397 		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
1398 			be16_to_cpu(pri_ext ? 0 :
1399 					      htons(ntohl(sa_path_get_dlid(
1400 						      pri_path)))));
1401 	} else {
1402 		/* Work-around until there's a way to obtain remote LID info */
1403 		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
1404 			be16_to_cpu(IB_LID_PERMISSIVE));
1405 		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
1406 			be16_to_cpu(IB_LID_PERMISSIVE));
1407 	}
1408 	IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
1409 		be32_to_cpu(pri_path->flow_label));
1410 	IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
1411 	IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
1412 	IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
1413 	IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
1414 	IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
1415 		(pri_path->hop_limit <= 1));
1416 	IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
1417 		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1418 			       pri_path->packet_life_time));
1419 
1420 	if (alt_path) {
1421 		bool alt_ext = false;
1422 
1423 		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
1424 			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
1425 						      alt_path->opa.slid);
1426 
1427 		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
1428 			alt_path->sgid;
1429 		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
1430 			alt_path->dgid;
1431 		if (alt_ext) {
1432 			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1433 					req_msg)
1434 				->global.interface_id =
1435 				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
1436 			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
1437 					req_msg)
1438 				->global.interface_id =
1439 				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
1440 		}
1441 		if (alt_path->hop_limit <= 1) {
1442 			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
1443 				be16_to_cpu(
1444 					alt_ext ? 0 :
1445 						  htons(ntohl(sa_path_get_slid(
1446 							  alt_path)))));
1447 			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
1448 				be16_to_cpu(
1449 					alt_ext ? 0 :
1450 						  htons(ntohl(sa_path_get_dlid(
1451 							  alt_path)))));
1452 		} else {
1453 			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
1454 				be16_to_cpu(IB_LID_PERMISSIVE));
1455 			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
1456 				be16_to_cpu(IB_LID_PERMISSIVE));
1457 		}
1458 		IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
1459 			be32_to_cpu(alt_path->flow_label));
1460 		IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
1461 		IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
1462 			alt_path->traffic_class);
1463 		IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
1464 			alt_path->hop_limit);
1465 		IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
1466 		IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
1467 			(alt_path->hop_limit <= 1));
1468 		IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
1469 			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
1470 				       alt_path->packet_life_time));
1471 	}
1472 	IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);
1473 
1474 	if (param->private_data && param->private_data_len)
1475 		IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
1476 			    param->private_data_len);
1477 }
1478 
1479 static int cm_validate_req_param(struct ib_cm_req_param *param)
1480 {
1481 	if (!param->primary_path)
1482 		return -EINVAL;
1483 
1484 	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
1485 	    param->qp_type != IB_QPT_XRC_INI)
1486 		return -EINVAL;
1487 
1488 	if (param->private_data &&
1489 	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
1490 		return -EINVAL;
1491 
1492 	if (param->alternate_path &&
1493 	    (param->alternate_path->pkey != param->primary_path->pkey ||
1494 	     param->alternate_path->mtu != param->primary_path->mtu))
1495 		return -EINVAL;
1496 
1497 	return 0;
1498 }
1499 
1500 int ib_send_cm_req(struct ib_cm_id *cm_id,
1501 		   struct ib_cm_req_param *param)
1502 {
1503 	struct cm_id_private *cm_id_priv;
1504 	struct cm_req_msg *req_msg;
1505 	unsigned long flags;
1506 	int ret;
1507 
1508 	ret = cm_validate_req_param(param);
1509 	if (ret)
1510 		return ret;
1511 
1512 	/* Verify that we're not in timewait. */
1513 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1514 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1515 	if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
1516 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1517 		ret = -EINVAL;
1518 		goto out;
1519 	}
1520 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1521 
1522 	cm_id_priv->timewait_info =
1523 		cm_create_timewait_info(cm_id_priv->id.local_id);
1524 	if (IS_ERR(cm_id_priv->timewait_info)) {
1525 		ret = PTR_ERR(cm_id_priv->timewait_info);
1526 		cm_id_priv->timewait_info = NULL;
1527 		goto out;
1528 	}
1529 
1530 	ret = cm_init_av_by_path(param->primary_path,
1531 				 param->ppath_sgid_attr, &cm_id_priv->av,
1532 				 cm_id_priv);
1533 	if (ret)
1534 		goto out;
1535 	if (param->alternate_path) {
1536 		ret = cm_init_av_by_path(param->alternate_path, NULL,
1537 					 &cm_id_priv->alt_av, cm_id_priv);
1538 		if (ret)
1539 			goto out;
1540 	}
1541 	cm_id->service_id = param->service_id;
1542 	cm_id->service_mask = ~cpu_to_be64(0);
1543 	cm_id_priv->timeout_ms = cm_convert_to_ms(
1544 				    param->primary_path->packet_life_time) * 2 +
1545 				 cm_convert_to_ms(
1546 				    param->remote_cm_response_timeout);
1547 	cm_id_priv->max_cm_retries = param->max_cm_retries;
1548 	cm_id_priv->initiator_depth = param->initiator_depth;
1549 	cm_id_priv->responder_resources = param->responder_resources;
1550 	cm_id_priv->retry_count = param->retry_count;
1551 	cm_id_priv->path_mtu = param->primary_path->mtu;
1552 	cm_id_priv->pkey = param->primary_path->pkey;
1553 	cm_id_priv->qp_type = param->qp_type;
1554 
1555 	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
1556 	if (ret)
1557 		goto out;
1558 
1559 	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
1560 	cm_format_req(req_msg, cm_id_priv, param);
1561 	cm_id_priv->tid = req_msg->hdr.tid;
1562 	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
1563 	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
1564 
1565 	cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
1566 	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
1567 
1568 	trace_icm_send_req(&cm_id_priv->id);
1569 	spin_lock_irqsave(&cm_id_priv->lock, flags);
1570 	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
1571 	if (ret) {
1572 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1573 		goto error2;
1574 	}
1575 	BUG_ON(cm_id->state != IB_CM_IDLE);
1576 	cm_id->state = IB_CM_REQ_SENT;
1577 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1578 	return 0;
1579 
1580 error2:	cm_free_msg(cm_id_priv->msg);
1581 out:	return ret;
1582 }
1583 EXPORT_SYMBOL(ib_send_cm_req);
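
/*
 * Illustrative sketch (hypothetical values): a minimal REQ for an RC
 * connection.  Only a subset of ib_cm_req_param fields is shown.
 *
 *	struct ib_cm_req_param param = {
 *		.primary_path		= &path_rec,
 *		.service_id		= cpu_to_be64(0x1000ULL),
 *		.qp_num			= qp->qp_num,
 *		.qp_type		= IB_QPT_RC,
 *		.responder_resources	= 4,
 *		.initiator_depth	= 4,
 *		.retry_count		= 7,
 *		.rnr_retry_count	= 7,
 *		.max_cm_retries		= 15,
 *	};
 *
 *	ret = ib_send_cm_req(cm_id, &param);
 */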
1584 
1585 static int cm_issue_rej(struct cm_port *port,
1586 			struct ib_mad_recv_wc *mad_recv_wc,
1587 			enum ib_cm_rej_reason reason,
1588 			enum cm_msg_response msg_rejected,
1589 			void *ari, u8 ari_length)
1590 {
1591 	struct ib_mad_send_buf *msg = NULL;
1592 	struct cm_rej_msg *rej_msg, *rcv_msg;
1593 	int ret;
1594 
1595 	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
1596 	if (ret)
1597 		return ret;
1598 
1599 	/* We just need common CM header information.  Cast to any message. */
1600 	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
1601 	rej_msg = (struct cm_rej_msg *) msg->mad;
1602 
1603 	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
1604 	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1605 		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
1606 	IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1607 		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
1608 	IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
1609 	IBA_SET(CM_REJ_REASON, rej_msg, reason);
1610 
1611 	if (ari && ari_length) {
1612 		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1613 		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
1614 	}
1615 
1616 	trace_icm_issue_rej(
1617 		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg),
1618 		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
1619 	ret = ib_post_send_mad(msg, NULL);
1620 	if (ret)
1621 		cm_free_msg(msg);
1622 
1623 	return ret;
1624 }
1625 
1626 static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
1627 {
1628 	return ((cpu_to_be16(
1629 			IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
1630 		(ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
1631 					       req_msg))));
1632 }
1633 
1634 static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
1635 				 struct sa_path_rec *path, union ib_gid *gid)
1636 {
1637 	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
1638 		path->rec_type = SA_PATH_REC_TYPE_OPA;
1639 	else
1640 		path->rec_type = SA_PATH_REC_TYPE_IB;
1641 }
1642 
1643 static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
1644 					struct sa_path_rec *primary_path,
1645 					struct sa_path_rec *alt_path)
1646 {
1647 	u32 lid;
1648 
1649 	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1650 		sa_path_set_dlid(primary_path,
1651 				 IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
1652 					 req_msg));
1653 		sa_path_set_slid(primary_path,
1654 				 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
1655 					 req_msg));
1656 	} else {
1657 		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1658 			CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
1659 		sa_path_set_dlid(primary_path, lid);
1660 
1661 		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1662 			CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
1663 		sa_path_set_slid(primary_path, lid);
1664 	}
1665 
1666 	if (!cm_req_has_alt_path(req_msg))
1667 		return;
1668 
1669 	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
1670 		sa_path_set_dlid(alt_path,
1671 				 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
1672 					 req_msg));
1673 		sa_path_set_slid(alt_path,
1674 				 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
1675 					 req_msg));
1676 	} else {
1677 		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1678 			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
1679 		sa_path_set_dlid(alt_path, lid);
1680 
1681 		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
1682 			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
1683 		sa_path_set_slid(alt_path, lid);
1684 	}
1685 }
1686 
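/*
 * Build the primary (and, if present, alternate) sa_path_rec from a
 * received REQ.  The local ACK timeout carried in the REQ is derived
 * from the packet life time plus one (see cm_ack_timeout()), so it is
 * decremented here, never going below zero, to approximate the one-way
 * packet life time.
 */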
1687 static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1688 				     struct sa_path_rec *primary_path,
1689 				     struct sa_path_rec *alt_path)
1690 {
1691 	primary_path->dgid =
1692 		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
1693 	primary_path->sgid =
1694 		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
1695 	primary_path->flow_label =
1696 		cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
1697 	primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
1698 	primary_path->traffic_class =
1699 		IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
1700 	primary_path->reversible = 1;
1701 	primary_path->pkey =
1702 		cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1703 	primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
1704 	primary_path->mtu_selector = IB_SA_EQ;
1705 	primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
1706 	primary_path->rate_selector = IB_SA_EQ;
1707 	primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
1708 	primary_path->packet_life_time_selector = IB_SA_EQ;
1709 	primary_path->packet_life_time =
1710 		IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
1711 	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1712 	primary_path->service_id =
1713 		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
1714 	if (sa_path_is_roce(primary_path))
1715 		primary_path->roce.route_resolved = false;
1716 
1717 	if (cm_req_has_alt_path(req_msg)) {
1718 		alt_path->dgid = *IBA_GET_MEM_PTR(
1719 			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
1720 		alt_path->sgid = *IBA_GET_MEM_PTR(
1721 			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
1722 		alt_path->flow_label = cpu_to_be32(
1723 			IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
1724 		alt_path->hop_limit =
1725 			IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
1726 		alt_path->traffic_class =
1727 			IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
1728 		alt_path->reversible = 1;
1729 		alt_path->pkey =
1730 			cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
1731 		alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
1732 		alt_path->mtu_selector = IB_SA_EQ;
1733 		alt_path->mtu =
1734 			IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
1735 		alt_path->rate_selector = IB_SA_EQ;
1736 		alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
1737 		alt_path->packet_life_time_selector = IB_SA_EQ;
1738 		alt_path->packet_life_time =
1739 			IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
1740 		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1741 		alt_path->service_id =
1742 			cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
1743 
1744 		if (sa_path_is_roce(alt_path))
1745 			alt_path->roce.route_resolved = false;
1746 	}
1747 	cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
1748 }
1749 
1750 static u16 cm_get_bth_pkey(struct cm_work *work)
1751 {
1752 	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
1753 	u8 port_num = work->port->port_num;
1754 	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
1755 	u16 pkey;
1756 	int ret;
1757 
1758 	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
1759 	if (ret) {
1760 		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
1761 				     port_num, pkey_index, ret);
1762 		return 0;
1763 	}
1764 
1765 	return pkey;
1766 }
1767 
1768 /**
1769  * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
1770  * @work: Work completion
1771  * @path: Path record
1772  *
1773  * ULPs (such as IPoIB) do not understand OPA GIDs and will
1774  * reject them as the local_gid will not match the sgid. Therefore,
1775  * change the pathrec's SGID to an IB SGID.
1776  */
1777 static void cm_opa_to_ib_sgid(struct cm_work *work,
1778 			      struct sa_path_rec *path)
1779 {
1780 	struct ib_device *dev = work->port->cm_dev->ib_device;
1781 	u8 port_num = work->port->port_num;
1782 
1783 	if (rdma_cap_opa_ah(dev, port_num) &&
1784 	    (ib_is_opa_gid(&path->sgid))) {
1785 		union ib_gid sgid;
1786 
1787 		if (rdma_query_gid(dev, port_num, 0, &sgid)) {
1788 			dev_warn(&dev->dev,
1789 				 "Error updating sgid in CM request\n");
1790 			return;
1791 		}
1792 
1793 		path->sgid = sgid;
1794 	}
1795 }
1796 
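/*
 * Translate a received REQ into the ib_cm_req_event_param handed to the
 * listener.  The fields are mirrored: the REQ's "local" values describe
 * the remote peer here, and initiator_depth/responder_resources swap
 * roles.
 */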
1797 static void cm_format_req_event(struct cm_work *work,
1798 				struct cm_id_private *cm_id_priv,
1799 				struct ib_cm_id *listen_id)
1800 {
1801 	struct cm_req_msg *req_msg;
1802 	struct ib_cm_req_event_param *param;
1803 
1804 	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1805 	param = &work->cm_event.param.req_rcvd;
1806 	param->listen_id = listen_id;
1807 	param->bth_pkey = cm_get_bth_pkey(work);
1808 	param->port = cm_id_priv->av.port->port_num;
1809 	param->primary_path = &work->path[0];
1810 	cm_opa_to_ib_sgid(work, param->primary_path);
1811 	if (cm_req_has_alt_path(req_msg)) {
1812 		param->alternate_path = &work->path[1];
1813 		cm_opa_to_ib_sgid(work, param->alternate_path);
1814 	} else {
1815 		param->alternate_path = NULL;
1816 	}
1817 	param->remote_ca_guid =
1818 		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
1819 	param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
1820 	param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
1821 	param->qp_type = cm_req_get_qp_type(req_msg);
1822 	param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
1823 	param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
1824 	param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
1825 	param->local_cm_response_timeout =
1826 		IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
1827 	param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
1828 	param->remote_cm_response_timeout =
1829 		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
1830 	param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
1831 	param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
1832 	param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
1833 	param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
1834 	param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
1835 	param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);
1836 
1837 	work->cm_event.private_data =
1838 		IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
1839 }
1840 
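/*
 * Deliver the given event, then drain any events that were queued on the
 * cm_id while the handler ran.  A non-zero handler return value destroys
 * the cm_id.
 */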
1841 static void cm_process_work(struct cm_id_private *cm_id_priv,
1842 			    struct cm_work *work)
1843 {
1844 	int ret;
1845 
1846 	/* We will typically only have the current event to report. */
1847 	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1848 	cm_free_work(work);
1849 
1850 	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1851 		spin_lock_irq(&cm_id_priv->lock);
1852 		work = cm_dequeue_work(cm_id_priv);
1853 		spin_unlock_irq(&cm_id_priv->lock);
1854 		if (!work)
1855 			return;
1856 
1857 		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1858 						&work->cm_event);
1859 		cm_free_work(work);
1860 	}
1861 	cm_deref_id(cm_id_priv);
1862 	if (ret)
1863 		cm_destroy_id(&cm_id_priv->id, ret);
1864 }
1865 
1866 static void cm_format_mra(struct cm_mra_msg *mra_msg,
1867 			  struct cm_id_private *cm_id_priv,
1868 			  enum cm_msg_response msg_mraed, u8 service_timeout,
1869 			  const void *private_data, u8 private_data_len)
1870 {
1871 	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1872 	IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
1873 	IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
1874 		be32_to_cpu(cm_id_priv->id.local_id));
1875 	IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
1876 		be32_to_cpu(cm_id_priv->id.remote_id));
1877 	IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);
1878 
1879 	if (private_data && private_data_len)
1880 		IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
1881 			    private_data_len);
1882 }
1883 
1884 static void cm_format_rej(struct cm_rej_msg *rej_msg,
1885 			  struct cm_id_private *cm_id_priv,
1886 			  enum ib_cm_rej_reason reason, void *ari,
1887 			  u8 ari_length, const void *private_data,
1888 			  u8 private_data_len, enum ib_cm_state state)
1889 {
1890 	lockdep_assert_held(&cm_id_priv->lock);
1891 
1892 	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1893 	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
1894 		be32_to_cpu(cm_id_priv->id.remote_id));
1895 
1896 	switch (state) {
1897 	case IB_CM_REQ_RCVD:
1898 		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, 0);
1899 		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
1900 		break;
1901 	case IB_CM_MRA_REQ_SENT:
1902 		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1903 			be32_to_cpu(cm_id_priv->id.local_id));
1904 		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
1905 		break;
1906 	case IB_CM_REP_RCVD:
1907 	case IB_CM_MRA_REP_SENT:
1908 		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1909 			be32_to_cpu(cm_id_priv->id.local_id));
1910 		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
1911 		break;
1912 	default:
1913 		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
1914 			be32_to_cpu(cm_id_priv->id.local_id));
1915 		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
1916 			CM_MSG_RESPONSE_OTHER);
1917 		break;
1918 	}
1919 
1920 	IBA_SET(CM_REJ_REASON, rej_msg, reason);
1921 	if (ari && ari_length) {
1922 		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
1923 		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
1924 	}
1925 
1926 	if (private_data && private_data_len)
1927 		IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
1928 			    private_data_len);
1929 }
1930 
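/*
 * Respond to a retransmitted REQ: re-send the MRA if one is outstanding,
 * or a stale-connection REJ if the cm_id is already in timewait.  In all
 * other states the duplicate is only counted and dropped.
 */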
1931 static void cm_dup_req_handler(struct cm_work *work,
1932 			       struct cm_id_private *cm_id_priv)
1933 {
1934 	struct ib_mad_send_buf *msg = NULL;
1935 	int ret;
1936 
1937 	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
1938 			counter[CM_REQ_COUNTER]);
1939 
1940 	/* Quick state check to discard duplicate REQs. */
1941 	spin_lock_irq(&cm_id_priv->lock);
1942 	if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
1943 		spin_unlock_irq(&cm_id_priv->lock);
1944 		return;
1945 	}
1946 	spin_unlock_irq(&cm_id_priv->lock);
1947 
1948 	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1949 	if (ret)
1950 		return;
1951 
1952 	spin_lock_irq(&cm_id_priv->lock);
1953 	switch (cm_id_priv->id.state) {
1954 	case IB_CM_MRA_REQ_SENT:
1955 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1956 			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1957 			      cm_id_priv->private_data,
1958 			      cm_id_priv->private_data_len);
1959 		break;
1960 	case IB_CM_TIMEWAIT:
1961 		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
1962 			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
1963 			      IB_CM_TIMEWAIT);
1964 		break;
1965 	default:
1966 		goto unlock;
1967 	}
1968 	spin_unlock_irq(&cm_id_priv->lock);
1969 
1970 	trace_icm_send_dup_req(&cm_id_priv->id);
1971 	ret = ib_post_send_mad(msg, NULL);
1972 	if (ret)
1973 		goto free;
1974 	return;
1975 
1976 unlock:	spin_unlock_irq(&cm_id_priv->lock);
1977 free:	cm_free_msg(msg);
1978 }
1979 
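/*
 * Match a newly received REQ against existing state: discard duplicates
 * via the remote-id table, reject stale connections found via the remote
 * QPN, then look up a listener for the service ID.  Returns the listen
 * cm_id with a reference held, or NULL after the REQ has been answered
 * or dropped.
 */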
1980 static struct cm_id_private *cm_match_req(struct cm_work *work,
1981 					   struct cm_id_private *cm_id_priv)
1982 {
1983 	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1984 	struct cm_timewait_info *timewait_info;
1985 	struct cm_req_msg *req_msg;
1986 
1987 	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1988 
1989 	/* Check for possible duplicate REQ. */
1990 	spin_lock_irq(&cm.lock);
1991 	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1992 	if (timewait_info) {
1993 		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
1994 					   timewait_info->work.remote_id);
1995 		spin_unlock_irq(&cm.lock);
1996 		if (cur_cm_id_priv) {
1997 			cm_dup_req_handler(work, cur_cm_id_priv);
1998 			cm_deref_id(cur_cm_id_priv);
1999 		}
2000 		return NULL;
2001 	}
2002 
2003 	/* Check for stale connections. */
2004 	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2005 	if (timewait_info) {
2006 		cm_remove_remote(cm_id_priv);
2007 		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2008 					   timewait_info->work.remote_id);
2009 
2010 		spin_unlock_irq(&cm.lock);
2011 		cm_issue_rej(work->port, work->mad_recv_wc,
2012 			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
2013 			     NULL, 0);
2014 		if (cur_cm_id_priv) {
2015 			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2016 			cm_deref_id(cur_cm_id_priv);
2017 		}
2018 		return NULL;
2019 	}
2020 
2021 	/* Find matching listen request. */
2022 	listen_cm_id_priv = cm_find_listen(
2023 		cm_id_priv->id.device,
2024 		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
2025 	if (!listen_cm_id_priv) {
2026 		cm_remove_remote(cm_id_priv);
2027 		spin_unlock_irq(&cm.lock);
2028 		cm_issue_rej(work->port, work->mad_recv_wc,
2029 			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
2030 			     NULL, 0);
2031 		return NULL;
2032 	}
2033 	spin_unlock_irq(&cm.lock);
2034 	return listen_cm_id_priv;
2035 }
2036 
2037 /*
2038  * Work-around for inter-subnet connections.  If the LIDs are permissive,
2039  * we need to override the LID/SL data in the REQ with the LID information
2040  * in the work completion.
2041  */
2042 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
2043 {
2044 	if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
2045 		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
2046 					req_msg)) == IB_LID_PERMISSIVE) {
2047 			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
2048 				be16_to_cpu(ib_lid_be16(wc->slid)));
2049 			IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
2050 		}
2051 
2052 		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
2053 					req_msg)) == IB_LID_PERMISSIVE)
2054 			IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
2055 				wc->dlid_path_bits);
2056 	}
2057 
2058 	if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
2059 		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
2060 					req_msg)) == IB_LID_PERMISSIVE) {
2061 			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
2062 				be16_to_cpu(ib_lid_be16(wc->slid)));
2063 			IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
2064 		}
2065 
2066 		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
2067 					req_msg)) == IB_LID_PERMISSIVE)
2068 			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
2069 				wc->dlid_path_bits);
2070 	}
2071 }
2072 
2073 static int cm_req_handler(struct cm_work *work)
2074 {
2075 	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
2076 	struct cm_req_msg *req_msg;
2077 	const struct ib_global_route *grh;
2078 	const struct ib_gid_attr *gid_attr;
2079 	int ret;
2080 
2081 	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
2082 
2083 	cm_id_priv =
2084 		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
2085 	if (IS_ERR(cm_id_priv))
2086 		return PTR_ERR(cm_id_priv);
2087 
2088 	cm_id_priv->id.remote_id =
2089 		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
2090 	cm_id_priv->id.service_id =
2091 		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
2092 	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
2093 	cm_id_priv->tid = req_msg->hdr.tid;
2094 	cm_id_priv->timeout_ms = cm_convert_to_ms(
2095 		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
2096 	cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
2097 	cm_id_priv->remote_qpn =
2098 		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
2099 	cm_id_priv->initiator_depth =
2100 		IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
2101 	cm_id_priv->responder_resources =
2102 		IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
2103 	cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
2104 	cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
2105 	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
2106 	cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
2107 	cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
2108 	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
2109 
2110 	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2111 				      work->mad_recv_wc->recv_buf.grh,
2112 				      &cm_id_priv->av);
2113 	if (ret)
2114 		goto destroy;
2115 	cm_id_priv->timewait_info =
2116 		cm_create_timewait_info(cm_id_priv->id.local_id);
2117 	if (IS_ERR(cm_id_priv->timewait_info)) {
2118 		ret = PTR_ERR(cm_id_priv->timewait_info);
2119 		cm_id_priv->timewait_info = NULL;
2120 		goto destroy;
2121 	}
2122 	cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
2123 	cm_id_priv->timewait_info->remote_ca_guid =
2124 		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
2125 	cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
2126 
2127 	/*
2128 	 * Note that the ID pointer is not in the xarray at this point,
2129 	 * so this set is only visible to the local thread.
2130 	 */
2131 	cm_id_priv->id.state = IB_CM_REQ_RCVD;
2132 
2133 	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
2134 	if (!listen_cm_id_priv) {
2135 		trace_icm_no_listener_err(&cm_id_priv->id);
2136 		cm_id_priv->id.state = IB_CM_IDLE;
2137 		ret = -EINVAL;
2138 		goto destroy;
2139 	}
2140 
2141 	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
2142 
2143 	memset(&work->path[0], 0, sizeof(work->path[0]));
2144 	if (cm_req_has_alt_path(req_msg))
2145 		memset(&work->path[1], 0, sizeof(work->path[1]));
2146 	grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
2147 	gid_attr = grh->sgid_attr;
2148 
2149 	if (gid_attr &&
2150 	    rdma_protocol_roce(work->port->cm_dev->ib_device,
2151 			       work->port->port_num)) {
2152 		work->path[0].rec_type =
2153 			sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
2154 	} else {
2155 		cm_path_set_rec_type(
2156 			work->port->cm_dev->ib_device, work->port->port_num,
2157 			&work->path[0],
2158 			IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
2159 					req_msg));
2160 	}
2161 	if (cm_req_has_alt_path(req_msg))
2162 		work->path[1].rec_type = work->path[0].rec_type;
2163 	cm_format_paths_from_req(req_msg, &work->path[0],
2164 				 &work->path[1]);
2165 	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
2166 		sa_path_set_dmac(&work->path[0],
2167 				 cm_id_priv->av.ah_attr.roce.dmac);
2168 	work->path[0].hop_limit = grh->hop_limit;
2169 	ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
2170 				 cm_id_priv);
2171 	if (ret) {
2172 		int err;
2173 
2174 		err = rdma_query_gid(work->port->cm_dev->ib_device,
2175 				     work->port->port_num, 0,
2176 				     &work->path[0].sgid);
2177 		if (err)
2178 			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2179 				       NULL, 0, NULL, 0);
2180 		else
2181 			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2182 				       &work->path[0].sgid,
2183 				       sizeof(work->path[0].sgid),
2184 				       NULL, 0);
2185 		goto rejected;
2186 	}
2187 	if (cm_req_has_alt_path(req_msg)) {
2188 		ret = cm_init_av_by_path(&work->path[1], NULL,
2189 					 &cm_id_priv->alt_av, cm_id_priv);
2190 		if (ret) {
2191 			ib_send_cm_rej(&cm_id_priv->id,
2192 				       IB_CM_REJ_INVALID_ALT_GID,
2193 				       &work->path[0].sgid,
2194 				       sizeof(work->path[0].sgid), NULL, 0);
2195 			goto rejected;
2196 		}
2197 	}
2198 
2199 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
2200 	cm_id_priv->id.context = listen_cm_id_priv->id.context;
2201 	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
2202 
2203 	/* Now MAD handlers can see the new ID */
2204 	spin_lock_irq(&cm_id_priv->lock);
2205 	cm_finalize_id(cm_id_priv);
2206 
2207 	/* Refcount belongs to the event, pairs with cm_process_work() */
2208 	refcount_inc(&cm_id_priv->refcount);
2209 	cm_queue_work_unlock(cm_id_priv, work);
2210 	/*
2211 	 * Since this ID was just created and was not made visible to other MAD
2212 	 * handlers until the cm_finalize_id() above, we know that
2213 	 * cm_process_work() will deliver the event and that the listen_cm_id
2214 	 * embedded in the event can be derefed here.
2215 	 */
2216 	cm_deref_id(listen_cm_id_priv);
2217 	return 0;
2218 
2219 rejected:
2220 	cm_deref_id(listen_cm_id_priv);
2221 destroy:
2222 	ib_destroy_cm_id(&cm_id_priv->id);
2223 	return ret;
2224 }
2225 
2226 static void cm_format_rep(struct cm_rep_msg *rep_msg,
2227 			  struct cm_id_private *cm_id_priv,
2228 			  struct ib_cm_rep_param *param)
2229 {
2230 	cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
2231 			      param->ece.attr_mod);
2232 	IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
2233 		be32_to_cpu(cm_id_priv->id.local_id));
2234 	IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
2235 		be32_to_cpu(cm_id_priv->id.remote_id));
2236 	IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
2237 	IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
2238 		param->responder_resources);
2239 	IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
2240 		cm_id_priv->av.port->cm_dev->ack_delay);
2241 	IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
2242 	IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
2243 	IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
2244 		be64_to_cpu(cm_id_priv->id.device->node_guid));
2245 
2246 	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2247 		IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
2248 			param->initiator_depth);
2249 		IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
2250 			param->flow_control);
2251 		IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
2252 		IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
2253 	} else {
2254 		IBA_SET(CM_REP_SRQ, rep_msg, 1);
2255 		IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
2256 	}
2257 
2258 	IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
2259 	IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
2260 	IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
2261 
2262 	if (param->private_data && param->private_data_len)
2263 		IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
2264 			    param->private_data_len);
2265 }
2266 
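/*
 * A minimal passive-side usage sketch (illustrative only; qp, my_psn and
 * req below are hypothetical caller state, not names from this file):
 * from a cm_handler that received IB_CM_REQ_RECEIVED, after creating a
 * QP:
 *
 *	struct ib_cm_rep_param rep = {
 *		.qp_num			= qp->qp_num,
 *		.starting_psn		= my_psn,
 *		.responder_resources	= req->responder_resources,
 *		.initiator_depth	= req->initiator_depth,
 *		.rnr_retry_count	= 7,
 *	};
 *	ret = ib_send_cm_rep(cm_id, &rep);
 */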
2267 int ib_send_cm_rep(struct ib_cm_id *cm_id,
2268 		   struct ib_cm_rep_param *param)
2269 {
2270 	struct cm_id_private *cm_id_priv;
2271 	struct ib_mad_send_buf *msg;
2272 	struct cm_rep_msg *rep_msg;
2273 	unsigned long flags;
2274 	int ret;
2275 
2276 	if (param->private_data &&
2277 	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
2278 		return -EINVAL;
2279 
2280 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2281 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2282 	if (cm_id->state != IB_CM_REQ_RCVD &&
2283 	    cm_id->state != IB_CM_MRA_REQ_SENT) {
2284 		trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
2285 		ret = -EINVAL;
2286 		goto out;
2287 	}
2288 
2289 	ret = cm_alloc_msg(cm_id_priv, &msg);
2290 	if (ret)
2291 		goto out;
2292 
2293 	rep_msg = (struct cm_rep_msg *) msg->mad;
2294 	cm_format_rep(rep_msg, cm_id_priv, param);
2295 	msg->timeout_ms = cm_id_priv->timeout_ms;
2296 	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2297 
2298 	trace_icm_send_rep(cm_id);
2299 	ret = ib_post_send_mad(msg, NULL);
2300 	if (ret) {
2301 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2302 		cm_free_msg(msg);
2303 		return ret;
2304 	}
2305 
2306 	cm_id->state = IB_CM_REP_SENT;
2307 	cm_id_priv->msg = msg;
2308 	cm_id_priv->initiator_depth = param->initiator_depth;
2309 	cm_id_priv->responder_resources = param->responder_resources;
2310 	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2311 	WARN_ONCE(param->qp_num & 0xFF000000,
2312 		  "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
2313 		  param->qp_num);
2314 	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2315 
2316 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2317 	return ret;
2318 }
2319 EXPORT_SYMBOL(ib_send_cm_rep);
2320 
2321 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2322 			  struct cm_id_private *cm_id_priv,
2323 			  const void *private_data,
2324 			  u8 private_data_len)
2325 {
2326 	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2327 	IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
2328 		be32_to_cpu(cm_id_priv->id.local_id));
2329 	IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
2330 		be32_to_cpu(cm_id_priv->id.remote_id));
2331 
2332 	if (private_data && private_data_len)
2333 		IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
2334 			    private_data_len);
2335 }
2336 
2337 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2338 		   const void *private_data,
2339 		   u8 private_data_len)
2340 {
2341 	struct cm_id_private *cm_id_priv;
2342 	struct ib_mad_send_buf *msg;
2343 	unsigned long flags;
2344 	void *data;
2345 	int ret;
2346 
2347 	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2348 		return -EINVAL;
2349 
2350 	data = cm_copy_private_data(private_data, private_data_len);
2351 	if (IS_ERR(data))
2352 		return PTR_ERR(data);
2353 
2354 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2355 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2356 	if (cm_id->state != IB_CM_REP_RCVD &&
2357 	    cm_id->state != IB_CM_MRA_REP_SENT) {
2358 		trace_icm_send_cm_rtu_err(cm_id);
2359 		ret = -EINVAL;
2360 		goto error;
2361 	}
2362 
2363 	ret = cm_alloc_msg(cm_id_priv, &msg);
2364 	if (ret)
2365 		goto error;
2366 
2367 	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2368 		      private_data, private_data_len);
2369 
2370 	trace_icm_send_rtu(cm_id);
2371 	ret = ib_post_send_mad(msg, NULL);
2372 	if (ret) {
2373 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2374 		cm_free_msg(msg);
2375 		kfree(data);
2376 		return ret;
2377 	}
2378 
2379 	cm_id->state = IB_CM_ESTABLISHED;
2380 	cm_set_private_data(cm_id_priv, data, private_data_len);
2381 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2382 	return 0;
2383 
2384 error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2385 	kfree(data);
2386 	return ret;
2387 }
2388 EXPORT_SYMBOL(ib_send_cm_rtu);
2389 
2390 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2391 {
2392 	struct cm_rep_msg *rep_msg;
2393 	struct ib_cm_rep_event_param *param;
2394 
2395 	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2396 	param = &work->cm_event.param.rep_rcvd;
2397 	param->remote_ca_guid =
2398 		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2399 	param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
2400 	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2401 	param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
2402 	param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2403 	param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2404 	param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2405 	param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
2406 	param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
2407 	param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2408 	param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
2409 	param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
2410 	param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
2411 	param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
2412 	param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
2413 
2414 	work->cm_event.private_data =
2415 		IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
2416 }
2417 
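/*
 * Respond to a retransmitted REP whose connection has already progressed
 * past IB_CM_REP_RCVD: re-send the RTU if established, or the MRA if one
 * was sent, so the passive side can stop retrying.
 */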
2418 static void cm_dup_rep_handler(struct cm_work *work)
2419 {
2420 	struct cm_id_private *cm_id_priv;
2421 	struct cm_rep_msg *rep_msg;
2422 	struct ib_mad_send_buf *msg = NULL;
2423 	int ret;
2424 
2425 	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2426 	cm_id_priv = cm_acquire_id(
2427 		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
2428 		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
2429 	if (!cm_id_priv)
2430 		return;
2431 
2432 	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2433 			counter[CM_REP_COUNTER]);
2434 	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2435 	if (ret)
2436 		goto deref;
2437 
2438 	spin_lock_irq(&cm_id_priv->lock);
2439 	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2440 		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2441 			      cm_id_priv->private_data,
2442 			      cm_id_priv->private_data_len);
2443 	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2444 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2445 			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2446 			      cm_id_priv->private_data,
2447 			      cm_id_priv->private_data_len);
2448 	else
2449 		goto unlock;
2450 	spin_unlock_irq(&cm_id_priv->lock);
2451 
2452 	trace_icm_send_dup_rep(&cm_id_priv->id);
2453 	ret = ib_post_send_mad(msg, NULL);
2454 	if (ret)
2455 		goto free;
2456 	goto deref;
2457 
2458 unlock:	spin_unlock_irq(&cm_id_priv->lock);
2459 free:	cm_free_msg(msg);
2460 deref:	cm_deref_id(cm_id_priv);
2461 }
2462 
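/*
 * Active-side handling of a received REP: validate the connection state,
 * insert the remote comm ID and QPN into the timewait tables to catch
 * duplicate REPs and stale connections, then record the remote's
 * parameters and queue the event.
 */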
2463 static int cm_rep_handler(struct cm_work *work)
2464 {
2465 	struct cm_id_private *cm_id_priv;
2466 	struct cm_rep_msg *rep_msg;
2467 	int ret;
2468 	struct cm_id_private *cur_cm_id_priv;
2469 	struct cm_timewait_info *timewait_info;
2470 
2471 	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2472 	cm_id_priv = cm_acquire_id(
2473 		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
2474 	if (!cm_id_priv) {
2475 		cm_dup_rep_handler(work);
2476 		trace_icm_remote_no_priv_err(
2477 			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2478 		return -EINVAL;
2479 	}
2480 
2481 	cm_format_rep_event(work, cm_id_priv->qp_type);
2482 
2483 	spin_lock_irq(&cm_id_priv->lock);
2484 	switch (cm_id_priv->id.state) {
2485 	case IB_CM_REQ_SENT:
2486 	case IB_CM_MRA_REQ_RCVD:
2487 		break;
2488 	default:
2489 		ret = -EINVAL;
2490 		trace_icm_rep_unknown_err(
2491 			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2492 			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg),
2493 			cm_id_priv->id.state);
2494 		spin_unlock_irq(&cm_id_priv->lock);
2495 		goto error;
2496 	}
2497 
2498 	cm_id_priv->timewait_info->work.remote_id =
2499 		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2500 	cm_id_priv->timewait_info->remote_ca_guid =
2501 		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2502 	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2503 
2504 	spin_lock(&cm.lock);
2505 	/* Check for duplicate REP. */
2506 	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2507 		spin_unlock(&cm.lock);
2508 		spin_unlock_irq(&cm_id_priv->lock);
2509 		ret = -EINVAL;
2510 		trace_icm_insert_failed_err(
2511 			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2512 		goto error;
2513 	}
2514 	/* Check for a stale connection. */
2515 	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2516 	if (timewait_info) {
2517 		cm_remove_remote(cm_id_priv);
2518 		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2519 					   timewait_info->work.remote_id);
2520 
2521 		spin_unlock(&cm.lock);
2522 		spin_unlock_irq(&cm_id_priv->lock);
2523 		cm_issue_rej(work->port, work->mad_recv_wc,
2524 			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2525 			     NULL, 0);
2526 		ret = -EINVAL;
2527 		trace_icm_staleconn_err(
2528 			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2529 			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2530 
2531 		if (cur_cm_id_priv) {
2532 			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2533 			cm_deref_id(cur_cm_id_priv);
2534 		}
2535 
2536 		goto error;
2537 	}
2538 	spin_unlock(&cm.lock);
2539 
2540 	cm_id_priv->id.state = IB_CM_REP_RCVD;
2541 	cm_id_priv->id.remote_id =
2542 		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2543 	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2544 	cm_id_priv->initiator_depth =
2545 		IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2546 	cm_id_priv->responder_resources =
2547 		IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2548 	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2549 	cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2550 	cm_id_priv->target_ack_delay =
2551 		IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2552 	cm_id_priv->av.timeout =
2553 			cm_ack_timeout(cm_id_priv->target_ack_delay,
2554 				       cm_id_priv->av.timeout - 1);
2555 	cm_id_priv->alt_av.timeout =
2556 			cm_ack_timeout(cm_id_priv->target_ack_delay,
2557 				       cm_id_priv->alt_av.timeout - 1);
2558 
2559 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2560 	cm_queue_work_unlock(cm_id_priv, work);
2561 	return 0;
2562 
2563 error:
2564 	cm_deref_id(cm_id_priv);
2565 	return ret;
2566 }
2567 
2568 static int cm_establish_handler(struct cm_work *work)
2569 {
2570 	struct cm_id_private *cm_id_priv;
2571 
2572 	/* See comment in cm_establish about lookup. */
2573 	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2574 	if (!cm_id_priv)
2575 		return -EINVAL;
2576 
2577 	spin_lock_irq(&cm_id_priv->lock);
2578 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2579 		spin_unlock_irq(&cm_id_priv->lock);
2580 		goto out;
2581 	}
2582 
2583 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2584 	cm_queue_work_unlock(cm_id_priv, work);
2585 	return 0;
2586 out:
2587 	cm_deref_id(cm_id_priv);
2588 	return -EINVAL;
2589 }
2590 
2591 static int cm_rtu_handler(struct cm_work *work)
2592 {
2593 	struct cm_id_private *cm_id_priv;
2594 	struct cm_rtu_msg *rtu_msg;
2595 
2596 	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2597 	cm_id_priv = cm_acquire_id(
2598 		cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
2599 		cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
2600 	if (!cm_id_priv)
2601 		return -EINVAL;
2602 
2603 	work->cm_event.private_data =
2604 		IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
2605 
2606 	spin_lock_irq(&cm_id_priv->lock);
2607 	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2608 	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2609 		spin_unlock_irq(&cm_id_priv->lock);
2610 		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2611 				counter[CM_RTU_COUNTER]);
2612 		goto out;
2613 	}
2614 	cm_id_priv->id.state = IB_CM_ESTABLISHED;
2615 
2616 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2617 	cm_queue_work_unlock(cm_id_priv, work);
2618 	return 0;
2619 out:
2620 	cm_deref_id(cm_id_priv);
2621 	return -EINVAL;
2622 }
2623 
2624 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2625 			  struct cm_id_private *cm_id_priv,
2626 			  const void *private_data,
2627 			  u8 private_data_len)
2628 {
2629 	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2630 			  cm_form_tid(cm_id_priv));
2631 	IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
2632 		be32_to_cpu(cm_id_priv->id.local_id));
2633 	IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
2634 		be32_to_cpu(cm_id_priv->id.remote_id));
2635 	IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
2636 		be32_to_cpu(cm_id_priv->remote_qpn));
2637 
2638 	if (private_data && private_data_len)
2639 		IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
2640 			    private_data_len);
2641 }
2642 
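/*
 * Send a DREQ for an established connection.  If the DREQ cannot be
 * allocated or posted, the cm_id is moved into timewait anyway so that
 * teardown still completes locally.
 */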
2643 static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
2644 			       const void *private_data, u8 private_data_len)
2645 {
2646 	struct ib_mad_send_buf *msg;
2647 	int ret;
2648 
2649 	lockdep_assert_held(&cm_id_priv->lock);
2650 
2651 	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2652 		return -EINVAL;
2653 
2654 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2655 		trace_icm_dreq_skipped(&cm_id_priv->id);
2656 		return -EINVAL;
2657 	}
2658 
2659 	if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2660 	    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2661 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2662 
2663 	ret = cm_alloc_msg(cm_id_priv, &msg);
2664 	if (ret) {
2665 		cm_enter_timewait(cm_id_priv);
2666 		return ret;
2667 	}
2668 
2669 	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2670 		       private_data, private_data_len);
2671 	msg->timeout_ms = cm_id_priv->timeout_ms;
2672 	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2673 
2674 	trace_icm_send_dreq(&cm_id_priv->id);
2675 	ret = ib_post_send_mad(msg, NULL);
2676 	if (ret) {
2677 		cm_enter_timewait(cm_id_priv);
2678 		cm_free_msg(msg);
2679 		return ret;
2680 	}
2681 
2682 	cm_id_priv->id.state = IB_CM_DREQ_SENT;
2683 	cm_id_priv->msg = msg;
2684 	return 0;
2685 }
2686 
2687 int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
2688 		    u8 private_data_len)
2689 {
2690 	struct cm_id_private *cm_id_priv =
2691 		container_of(cm_id, struct cm_id_private, id);
2692 	unsigned long flags;
2693 	int ret;
2694 
2695 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2696 	ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
2697 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2698 	return ret;
2699 }
2700 EXPORT_SYMBOL(ib_send_cm_dreq);
2701 
2702 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2703 			  struct cm_id_private *cm_id_priv,
2704 			  const void *private_data,
2705 			  u8 private_data_len)
2706 {
2707 	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2708 	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2709 		be32_to_cpu(cm_id_priv->id.local_id));
2710 	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2711 		be32_to_cpu(cm_id_priv->id.remote_id));
2712 
2713 	if (private_data && private_data_len)
2714 		IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
2715 			    private_data_len);
2716 }
2717 
2718 static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
2719 			       void *private_data, u8 private_data_len)
2720 {
2721 	struct ib_mad_send_buf *msg;
2722 	int ret;
2723 
2724 	lockdep_assert_held(&cm_id_priv->lock);
2725 
2726 	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2727 		return -EINVAL;
2728 
2729 	if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2730 		trace_icm_send_drep_err(&cm_id_priv->id);
2731 		kfree(private_data);
2732 		return -EINVAL;
2733 	}
2734 
2735 	cm_set_private_data(cm_id_priv, private_data, private_data_len);
2736 	cm_enter_timewait(cm_id_priv);
2737 
2738 	ret = cm_alloc_msg(cm_id_priv, &msg);
2739 	if (ret)
2740 		return ret;
2741 
2742 	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2743 		       private_data, private_data_len);
2744 
2745 	trace_icm_send_drep(&cm_id_priv->id);
2746 	ret = ib_post_send_mad(msg, NULL);
2747 	if (ret) {
2748 		cm_free_msg(msg);
2749 		return ret;
2750 	}
2751 	return 0;
2752 }
2753 
2754 int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
2755 		    u8 private_data_len)
2756 {
2757 	struct cm_id_private *cm_id_priv =
2758 		container_of(cm_id, struct cm_id_private, id);
2759 	unsigned long flags;
2760 	void *data;
2761 	int ret;
2762 
2763 	data = cm_copy_private_data(private_data, private_data_len);
2764 	if (IS_ERR(data))
2765 		return PTR_ERR(data);
2766 
2767 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2768 	ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
2769 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2770 	return ret;
2771 }
2772 EXPORT_SYMBOL(ib_send_cm_drep);
2773 
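/*
 * Answer a DREQ for which no local cm_id exists (e.g. it was already
 * destroyed) so that the remote peer can complete its disconnect.
 */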
2774 static int cm_issue_drep(struct cm_port *port,
2775 			 struct ib_mad_recv_wc *mad_recv_wc)
2776 {
2777 	struct ib_mad_send_buf *msg = NULL;
2778 	struct cm_dreq_msg *dreq_msg;
2779 	struct cm_drep_msg *drep_msg;
2780 	int ret;
2781 
2782 	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2783 	if (ret)
2784 		return ret;
2785 
2786 	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2787 	drep_msg = (struct cm_drep_msg *) msg->mad;
2788 
2789 	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2790 	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2791 		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
2792 	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2793 		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2794 
2795 	trace_icm_issue_drep(
2796 		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2797 		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2798 	ret = ib_post_send_mad(msg, NULL);
2799 	if (ret)
2800 		cm_free_msg(msg);
2801 
2802 	return ret;
2803 }
2804 
2805 static int cm_dreq_handler(struct cm_work *work)
2806 {
2807 	struct cm_id_private *cm_id_priv;
2808 	struct cm_dreq_msg *dreq_msg;
2809 	struct ib_mad_send_buf *msg = NULL;
2810 
2811 	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2812 	cm_id_priv = cm_acquire_id(
2813 		cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
2814 		cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
2815 	if (!cm_id_priv) {
2816 		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2817 				counter[CM_DREQ_COUNTER]);
2818 		cm_issue_drep(work->port, work->mad_recv_wc);
2819 		trace_icm_no_priv_err(
2820 			IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2821 			IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2822 		return -EINVAL;
2823 	}
2824 
2825 	work->cm_event.private_data =
2826 		IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
2827 
2828 	spin_lock_irq(&cm_id_priv->lock);
2829 	if (cm_id_priv->local_qpn !=
2830 	    cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
2831 		goto unlock;
2832 
2833 	switch (cm_id_priv->id.state) {
2834 	case IB_CM_REP_SENT:
2835 	case IB_CM_DREQ_SENT:
2836 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2837 		break;
2838 	case IB_CM_ESTABLISHED:
2839 		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2840 		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2841 			ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2842 		break;
2843 	case IB_CM_MRA_REP_RCVD:
2844 		break;
2845 	case IB_CM_TIMEWAIT:
2846 		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2847 				counter[CM_DREQ_COUNTER]);
2848 		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2849 		if (IS_ERR(msg))
2850 			goto unlock;
2851 
2852 		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2853 			       cm_id_priv->private_data,
2854 			       cm_id_priv->private_data_len);
2855 		spin_unlock_irq(&cm_id_priv->lock);
2856 
2857 		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2858 		    ib_post_send_mad(msg, NULL))
2859 			cm_free_msg(msg);
2860 		goto deref;
2861 	case IB_CM_DREQ_RCVD:
2862 		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2863 				counter[CM_DREQ_COUNTER]);
2864 		goto unlock;
2865 	default:
2866 		trace_icm_dreq_unknown_err(&cm_id_priv->id);
2867 		goto unlock;
2868 	}
2869 	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2870 	cm_id_priv->tid = dreq_msg->hdr.tid;
2871 	cm_queue_work_unlock(cm_id_priv, work);
2872 	return 0;
2873 
2874 unlock:	spin_unlock_irq(&cm_id_priv->lock);
2875 deref:	cm_deref_id(cm_id_priv);
2876 	return -EINVAL;
2877 }
2878 
2879 static int cm_drep_handler(struct cm_work *work)
2880 {
2881 	struct cm_id_private *cm_id_priv;
2882 	struct cm_drep_msg *drep_msg;
2883 
2884 	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2885 	cm_id_priv = cm_acquire_id(
2886 		cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
2887 		cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
2888 	if (!cm_id_priv)
2889 		return -EINVAL;
2890 
2891 	work->cm_event.private_data =
2892 		IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);
2893 
2894 	spin_lock_irq(&cm_id_priv->lock);
2895 	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2896 	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2897 		spin_unlock_irq(&cm_id_priv->lock);
2898 		goto out;
2899 	}
2900 	cm_enter_timewait(cm_id_priv);
2901 
2902 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2903 	cm_queue_work_unlock(cm_id_priv, work);
2904 	return 0;
2905 out:
2906 	cm_deref_id(cm_id_priv);
2907 	return -EINVAL;
2908 }
2909 
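/*
 * Send a REJ appropriate to the current connection state: after our REP
 * has been sent (IB_CM_REP_SENT/IB_CM_MRA_REP_RCVD) the cm_id enters
 * timewait first; in earlier states it is reset to idle.
 */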
2910 static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
2911 			      enum ib_cm_rej_reason reason, void *ari,
2912 			      u8 ari_length, const void *private_data,
2913 			      u8 private_data_len)
2914 {
2915 	enum ib_cm_state state = cm_id_priv->id.state;
2916 	struct ib_mad_send_buf *msg;
2917 	int ret;
2918 
2919 	lockdep_assert_held(&cm_id_priv->lock);
2920 
2921 	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2922 	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2923 		return -EINVAL;
2924 
2925 	switch (state) {
2926 	case IB_CM_REQ_SENT:
2927 	case IB_CM_MRA_REQ_RCVD:
2928 	case IB_CM_REQ_RCVD:
2929 	case IB_CM_MRA_REQ_SENT:
2930 	case IB_CM_REP_RCVD:
2931 	case IB_CM_MRA_REP_SENT:
2932 		cm_reset_to_idle(cm_id_priv);
2933 		ret = cm_alloc_msg(cm_id_priv, &msg);
2934 		if (ret)
2935 			return ret;
2936 		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2937 			      ari, ari_length, private_data, private_data_len,
2938 			      state);
2939 		break;
2940 	case IB_CM_REP_SENT:
2941 	case IB_CM_MRA_REP_RCVD:
2942 		cm_enter_timewait(cm_id_priv);
2943 		ret = cm_alloc_msg(cm_id_priv, &msg);
2944 		if (ret)
2945 			return ret;
2946 		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2947 			      ari, ari_length, private_data, private_data_len,
2948 			      state);
2949 		break;
2950 	default:
2951 		trace_icm_send_unknown_rej_err(&cm_id_priv->id);
2952 		return -EINVAL;
2953 	}
2954 
2955 	trace_icm_send_rej(&cm_id_priv->id, reason);
2956 	ret = ib_post_send_mad(msg, NULL);
2957 	if (ret) {
2958 		cm_free_msg(msg);
2959 		return ret;
2960 	}
2961 
2962 	return 0;
2963 }
2964 
2965 int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
2966 		   void *ari, u8 ari_length, const void *private_data,
2967 		   u8 private_data_len)
2968 {
2969 	struct cm_id_private *cm_id_priv =
2970 		container_of(cm_id, struct cm_id_private, id);
2971 	unsigned long flags;
2972 	int ret;
2973 
2974 	spin_lock_irqsave(&cm_id_priv->lock, flags);
2975 	ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
2976 				 private_data, private_data_len);
2977 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2978 	return ret;
2979 }
2980 EXPORT_SYMBOL(ib_send_cm_rej);
2981 
2982 static void cm_format_rej_event(struct cm_work *work)
2983 {
2984 	struct cm_rej_msg *rej_msg;
2985 	struct ib_cm_rej_event_param *param;
2986 
2987 	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2988 	param = &work->cm_event.param.rej_rcvd;
2989 	param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
2990 	param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
2991 	param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
2992 	work->cm_event.private_data =
2993 		IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
2994 }
2995 
2996 static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2997 {
2998 	struct cm_id_private *cm_id_priv;
2999 	__be32 remote_id;
3000 
3001 	remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
3002 
3003 	if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
3004 		cm_id_priv = cm_find_remote_id(
3005 			*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
3006 			remote_id);
3007 	} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
3008 		   CM_MSG_RESPONSE_REQ)
3009 		cm_id_priv = cm_acquire_id(
3010 			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3011 			0);
3012 	else
3013 		cm_id_priv = cm_acquire_id(
3014 			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3015 			remote_id);
3016 
3017 	return cm_id_priv;
3018 }
3019 
3020 static int cm_rej_handler(struct cm_work *work)
3021 {
3022 	struct cm_id_private *cm_id_priv;
3023 	struct cm_rej_msg *rej_msg;
3024 
3025 	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
3026 	cm_id_priv = cm_acquire_rejected_id(rej_msg);
3027 	if (!cm_id_priv)
3028 		return -EINVAL;
3029 
3030 	cm_format_rej_event(work);
3031 
3032 	spin_lock_irq(&cm_id_priv->lock);
3033 	switch (cm_id_priv->id.state) {
3034 	case IB_CM_REQ_SENT:
3035 	case IB_CM_MRA_REQ_RCVD:
3036 	case IB_CM_REP_SENT:
3037 	case IB_CM_MRA_REP_RCVD:
3038 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3039 		fallthrough;
3040 	case IB_CM_REQ_RCVD:
3041 	case IB_CM_MRA_REQ_SENT:
3042 		if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
3043 			cm_enter_timewait(cm_id_priv);
3044 		else
3045 			cm_reset_to_idle(cm_id_priv);
3046 		break;
3047 	case IB_CM_DREQ_SENT:
3048 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3049 		fallthrough;
3050 	case IB_CM_REP_RCVD:
3051 	case IB_CM_MRA_REP_SENT:
3052 		cm_enter_timewait(cm_id_priv);
3053 		break;
3054 	case IB_CM_ESTABLISHED:
3055 		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
3056 		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
3057 			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
3058 				ib_cancel_mad(cm_id_priv->av.port->mad_agent,
3059 					      cm_id_priv->msg);
3060 			cm_enter_timewait(cm_id_priv);
3061 			break;
3062 		}
3063 		fallthrough;
3064 	default:
3065 		trace_icm_rej_unknown_err(&cm_id_priv->id);
3066 		spin_unlock_irq(&cm_id_priv->lock);
3067 		goto out;
3068 	}
3069 
3070 	cm_queue_work_unlock(cm_id_priv, work);
3071 	return 0;
3072 out:
3073 	cm_deref_id(cm_id_priv);
3074 	return -EINVAL;
3075 }
3076 
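/*
 * Send an MRA for a received REQ, REP or LAP.  If IB_CM_MRA_FLAG_DELAY
 * is set in service_timeout, no MRA is transmitted and the state change,
 * timeout and private data are only recorded locally.
 */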
3077 int ib_send_cm_mra(struct ib_cm_id *cm_id,
3078 		   u8 service_timeout,
3079 		   const void *private_data,
3080 		   u8 private_data_len)
3081 {
3082 	struct cm_id_private *cm_id_priv;
3083 	struct ib_mad_send_buf *msg;
3084 	enum ib_cm_state cm_state;
3085 	enum ib_cm_lap_state lap_state;
3086 	enum cm_msg_response msg_response;
3087 	void *data;
3088 	unsigned long flags;
3089 	int ret;
3090 
3091 	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
3092 		return -EINVAL;
3093 
3094 	data = cm_copy_private_data(private_data, private_data_len);
3095 	if (IS_ERR(data))
3096 		return PTR_ERR(data);
3097 
3098 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3099 
3100 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3101 	switch (cm_id_priv->id.state) {
3102 	case IB_CM_REQ_RCVD:
3103 		cm_state = IB_CM_MRA_REQ_SENT;
3104 		lap_state = cm_id->lap_state;
3105 		msg_response = CM_MSG_RESPONSE_REQ;
3106 		break;
3107 	case IB_CM_REP_RCVD:
3108 		cm_state = IB_CM_MRA_REP_SENT;
3109 		lap_state = cm_id->lap_state;
3110 		msg_response = CM_MSG_RESPONSE_REP;
3111 		break;
3112 	case IB_CM_ESTABLISHED:
3113 		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
3114 			cm_state = cm_id->state;
3115 			lap_state = IB_CM_MRA_LAP_SENT;
3116 			msg_response = CM_MSG_RESPONSE_OTHER;
3117 			break;
3118 		}
3119 		fallthrough;
3120 	default:
3121 		trace_icm_send_mra_unknown_err(&cm_id_priv->id);
3122 		ret = -EINVAL;
3123 		goto error1;
3124 	}
3125 
3126 	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
3127 		ret = cm_alloc_msg(cm_id_priv, &msg);
3128 		if (ret)
3129 			goto error1;
3130 
3131 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3132 			      msg_response, service_timeout,
3133 			      private_data, private_data_len);
3134 		trace_icm_send_mra(cm_id);
3135 		ret = ib_post_send_mad(msg, NULL);
3136 		if (ret)
3137 			goto error2;
3138 	}
3139 
3140 	cm_id->state = cm_state;
3141 	cm_id->lap_state = lap_state;
3142 	cm_id_priv->service_timeout = service_timeout;
3143 	cm_set_private_data(cm_id_priv, data, private_data_len);
3144 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3145 	return 0;
3146 
3147 error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3148 	kfree(data);
3149 	return ret;
3150 
3151 error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3152 	kfree(data);
3153 	cm_free_msg(msg);
3154 	return ret;
3155 }
3156 EXPORT_SYMBOL(ib_send_cm_mra);
3157 
3158 static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
3159 {
3160 	switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
3161 	case CM_MSG_RESPONSE_REQ:
3162 		return cm_acquire_id(
3163 			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3164 			0);
3165 	case CM_MSG_RESPONSE_REP:
3166 	case CM_MSG_RESPONSE_OTHER:
3167 		return cm_acquire_id(
3168 			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3169 			cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
3170 	default:
3171 		return NULL;
3172 	}
3173 }
3174 
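/*
 * Process a received MRA by stretching the timeout of the outstanding
 * REQ, REP or LAP via ib_modify_mad.  The MRAed message type must match
 * the current state, or the MRA is dropped.
 */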
3175 static int cm_mra_handler(struct cm_work *work)
3176 {
3177 	struct cm_id_private *cm_id_priv;
3178 	struct cm_mra_msg *mra_msg;
3179 	int timeout;
3180 
3181 	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
3182 	cm_id_priv = cm_acquire_mraed_id(mra_msg);
3183 	if (!cm_id_priv)
3184 		return -EINVAL;
3185 
3186 	work->cm_event.private_data =
3187 		IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
3188 	work->cm_event.param.mra_rcvd.service_timeout =
3189 		IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
3190 	timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
3191 		  cm_convert_to_ms(cm_id_priv->av.timeout);
3192 
3193 	spin_lock_irq(&cm_id_priv->lock);
3194 	switch (cm_id_priv->id.state) {
3195 	case IB_CM_REQ_SENT:
3196 		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3197 			    CM_MSG_RESPONSE_REQ ||
3198 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
3199 				  cm_id_priv->msg, timeout))
3200 			goto out;
3201 		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
3202 		break;
3203 	case IB_CM_REP_SENT:
3204 		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3205 			    CM_MSG_RESPONSE_REP ||
3206 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
3207 				  cm_id_priv->msg, timeout))
3208 			goto out;
3209 		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
3210 		break;
3211 	case IB_CM_ESTABLISHED:
3212 		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3213 			    CM_MSG_RESPONSE_OTHER ||
3214 		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
3215 		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
3216 				  cm_id_priv->msg, timeout)) {
3217 			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
3218 				atomic_long_inc(&work->port->
3219 						counter_group[CM_RECV_DUPLICATES].
3220 						counter[CM_MRA_COUNTER]);
3221 			goto out;
3222 		}
3223 		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
3224 		break;
3225 	case IB_CM_MRA_REQ_RCVD:
3226 	case IB_CM_MRA_REP_RCVD:
3227 		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3228 				counter[CM_MRA_COUNTER]);
3229 		fallthrough;
3230 	default:
3231 		trace_icm_mra_unknown_err(&cm_id_priv->id);
3232 		goto out;
3233 	}
3234 
3235 	cm_id_priv->msg->context[1] = (void *) (unsigned long)
3236 				      cm_id_priv->id.state;
3237 	cm_queue_work_unlock(cm_id_priv, work);
3238 	return 0;
3239 out:
3240 	spin_unlock_irq(&cm_id_priv->lock);
3241 	cm_deref_id(cm_id_priv);
3242 	return -EINVAL;
3243 }
3244 
3245 static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3246 					struct sa_path_rec *path)
3247 {
3248 	u32 lid;
3249 
3250 	if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3251 		sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
3252 					       lap_msg));
3253 		sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
3254 					       lap_msg));
3255 	} else {
3256 		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3257 			CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
3258 		sa_path_set_dlid(path, lid);
3259 
3260 		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3261 			CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
3262 		sa_path_set_slid(path, lid);
3263 	}
3264 }
3265 
3266 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3267 				    struct sa_path_rec *path,
3268 				    struct cm_lap_msg *lap_msg)
3269 {
3270 	path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
3271 	path->sgid =
3272 		*IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
3273 	path->flow_label =
3274 		cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
3275 	path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
3276 	path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
3277 	path->reversible = 1;
3278 	path->pkey = cm_id_priv->pkey;
3279 	path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
3280 	path->mtu_selector = IB_SA_EQ;
3281 	path->mtu = cm_id_priv->path_mtu;
3282 	path->rate_selector = IB_SA_EQ;
3283 	path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
3284 	path->packet_life_time_selector = IB_SA_EQ;
3285 	path->packet_life_time =
3286 		IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
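	/*
	 * The MAD carries a local ACK timeout exponent, which covers a
	 * round trip; subtracting one halves it back to an approximate
	 * one-way packet life time, clamping at zero.
	 */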
3287 	path->packet_life_time -= (path->packet_life_time > 0);
3288 	cm_format_path_lid_from_lap(lap_msg, path);
3289 }
3290 
3291 static int cm_lap_handler(struct cm_work *work)
3292 {
3293 	struct cm_id_private *cm_id_priv;
3294 	struct cm_lap_msg *lap_msg;
3295 	struct ib_cm_lap_event_param *param;
3296 	struct ib_mad_send_buf *msg = NULL;
3297 	int ret;
3298 
3299 	/* Alternate path messages are currently not supported for the
3300 	 * RoCE link layer.
3301 	 */
3302 	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3303 			       work->port->port_num))
3304 		return -EINVAL;
3305 
3306 	/* TODO: verify the LAP request and send a rejecting APR if invalid. */
3307 	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3308 	cm_id_priv = cm_acquire_id(
3309 		cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
3310 		cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
3311 	if (!cm_id_priv)
3312 		return -EINVAL;
3313 
3314 	param = &work->cm_event.param.lap_rcvd;
3315 	memset(&work->path[0], 0, sizeof(work->path[0]));
3316 	cm_path_set_rec_type(work->port->cm_dev->ib_device,
3317 			     work->port->port_num, &work->path[0],
3318 			     IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
3319 					     lap_msg));
3320 	param->alternate_path = &work->path[0];
3321 	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3322 	work->cm_event.private_data =
3323 		IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
3324 
3325 	spin_lock_irq(&cm_id_priv->lock);
3326 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3327 		goto unlock;
3328 
3329 	switch (cm_id_priv->id.lap_state) {
3330 	case IB_CM_LAP_UNINIT:
3331 	case IB_CM_LAP_IDLE:
3332 		break;
3333 	case IB_CM_MRA_LAP_SENT:
3334 		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3335 				counter[CM_LAP_COUNTER]);
3336 		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3337 		if (IS_ERR(msg))
3338 			goto unlock;
3339 
3340 		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3341 			      CM_MSG_RESPONSE_OTHER,
3342 			      cm_id_priv->service_timeout,
3343 			      cm_id_priv->private_data,
3344 			      cm_id_priv->private_data_len);
3345 		spin_unlock_irq(&cm_id_priv->lock);
3346 
3347 		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3348 		    ib_post_send_mad(msg, NULL))
3349 			cm_free_msg(msg);
3350 		goto deref;
3351 	case IB_CM_LAP_RCVD:
3352 		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3353 				counter[CM_LAP_COUNTER]);
3354 		goto unlock;
3355 	default:
3356 		goto unlock;
3357 	}
3358 
3359 	ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
3360 				 work->mad_recv_wc->recv_buf.grh,
3361 				 &cm_id_priv->av);
3362 	if (ret)
3363 		goto unlock;
3364 
3365 	ret = cm_init_av_by_path(param->alternate_path, NULL,
3366 				 &cm_id_priv->alt_av, cm_id_priv);
3367 	if (ret)
3368 		goto unlock;
3369 
3370 	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3371 	cm_id_priv->tid = lap_msg->hdr.tid;
3372 	cm_queue_work_unlock(cm_id_priv, work);
3373 	return 0;
3374 
3375 unlock:	spin_unlock_irq(&cm_id_priv->lock);
3376 deref:	cm_deref_id(cm_id_priv);
3377 	return -EINVAL;
3378 }
3379 
3380 static int cm_apr_handler(struct cm_work *work)
3381 {
3382 	struct cm_id_private *cm_id_priv;
3383 	struct cm_apr_msg *apr_msg;
3384 
3385 	/* Alternate path messages are currently not supported for the
3386 	 * RoCE link layer.
3387 	 */
3388 	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3389 			       work->port->port_num))
3390 		return -EINVAL;
3391 
3392 	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3393 	cm_id_priv = cm_acquire_id(
3394 		cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
3395 		cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
3396 	if (!cm_id_priv)
3397 		return -EINVAL; /* Unmatched reply. */
3398 
3399 	work->cm_event.param.apr_rcvd.ap_status =
3400 		IBA_GET(CM_APR_AR_STATUS, apr_msg);
3401 	work->cm_event.param.apr_rcvd.apr_info =
3402 		IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
3403 	work->cm_event.param.apr_rcvd.info_len =
3404 		IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
3405 	work->cm_event.private_data =
3406 		IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);
3407 
3408 	spin_lock_irq(&cm_id_priv->lock);
3409 	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3410 	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3411 	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3412 		spin_unlock_irq(&cm_id_priv->lock);
3413 		goto out;
3414 	}
3415 	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3416 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3417 	cm_id_priv->msg = NULL;
3418 	cm_queue_work_unlock(cm_id_priv, work);
3419 	return 0;
3420 out:
3421 	cm_deref_id(cm_id_priv);
3422 	return -EINVAL;
3423 }
3424 
3425 static int cm_timewait_handler(struct cm_work *work)
3426 {
3427 	struct cm_timewait_info *timewait_info;
3428 	struct cm_id_private *cm_id_priv;
3429 
3430 	timewait_info = container_of(work, struct cm_timewait_info, work);
3431 	spin_lock_irq(&cm.lock);
3432 	list_del(&timewait_info->list);
3433 	spin_unlock_irq(&cm.lock);
3434 
3435 	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3436 				   timewait_info->work.remote_id);
3437 	if (!cm_id_priv)
3438 		return -EINVAL;
3439 
3440 	spin_lock_irq(&cm_id_priv->lock);
3441 	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3442 	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3443 		spin_unlock_irq(&cm_id_priv->lock);
3444 		goto out;
3445 	}
3446 	cm_id_priv->id.state = IB_CM_IDLE;
3447 	cm_queue_work_unlock(cm_id_priv, work);
3448 	return 0;
3449 out:
3450 	cm_deref_id(cm_id_priv);
3451 	return -EINVAL;
3452 }
3453 
3454 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3455 			       struct cm_id_private *cm_id_priv,
3456 			       struct ib_cm_sidr_req_param *param)
3457 {
3458 	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3459 			  cm_form_tid(cm_id_priv));
3460 	IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
3461 		be32_to_cpu(cm_id_priv->id.local_id));
3462 	IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
3463 		be16_to_cpu(param->path->pkey));
3464 	IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
3465 		be64_to_cpu(param->service_id));
3466 
3467 	if (param->private_data && param->private_data_len)
3468 		IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
3469 			    param->private_data, param->private_data_len);
3470 }
3471 
3472 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3473 			struct ib_cm_sidr_req_param *param)
3474 {
3475 	struct cm_id_private *cm_id_priv;
3476 	struct ib_mad_send_buf *msg;
3477 	unsigned long flags;
3478 	int ret;
3479 
3480 	if (!param->path || (param->private_data &&
3481 	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3482 		return -EINVAL;
3483 
3484 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3485 	ret = cm_init_av_by_path(param->path, param->sgid_attr,
3486 				 &cm_id_priv->av,
3487 				 cm_id_priv);
3488 	if (ret)
3489 		goto out;
3490 
3491 	cm_id->service_id = param->service_id;
3492 	cm_id->service_mask = ~cpu_to_be64(0);
3493 	cm_id_priv->timeout_ms = param->timeout_ms;
3494 	cm_id_priv->max_cm_retries = param->max_cm_retries;
3495 	ret = cm_alloc_msg(cm_id_priv, &msg);
3496 	if (ret)
3497 		goto out;
3498 
3499 	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3500 			   param);
3501 	msg->timeout_ms = cm_id_priv->timeout_ms;
3502 	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3503 
3504 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3505 	if (cm_id->state == IB_CM_IDLE) {
3506 		trace_icm_send_sidr_req(&cm_id_priv->id);
3507 		ret = ib_post_send_mad(msg, NULL);
3508 	} else {
3509 		ret = -EINVAL;
3510 	}
3511 
3512 	if (ret) {
3513 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3514 		cm_free_msg(msg);
3515 		goto out;
3516 	}
3517 	cm_id->state = IB_CM_SIDR_REQ_SENT;
3518 	cm_id_priv->msg = msg;
3519 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3520 out:
3521 	return ret;
3522 }
3523 EXPORT_SYMBOL(ib_send_cm_sidr_req);
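
/*
 * Illustrative sketch (not kernel documentation): a ULP resolving a
 * service ID to a remote UD QPN via SIDR might issue the request as
 * below; cm_id, path_rec and the service ID value are placeholders
 * assumed to be set up by the caller.
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= &path_rec,
 *		.service_id	= cpu_to_be64(0x1234ULL),
 *		.timeout_ms	= 1000,
 *		.max_cm_retries	= 3,
 *	};
 *
 *	ret = ib_send_cm_sidr_req(cm_id, &param);
 *
 * The answer is delivered to the cm_id's handler as an
 * IB_CM_SIDR_REP_RECEIVED event.
 */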
3524 
3525 static void cm_format_sidr_req_event(struct cm_work *work,
3526 				     const struct cm_id_private *rx_cm_id,
3527 				     struct ib_cm_id *listen_id)
3528 {
3529 	struct cm_sidr_req_msg *sidr_req_msg;
3530 	struct ib_cm_sidr_req_event_param *param;
3531 
3532 	sidr_req_msg = (struct cm_sidr_req_msg *)
3533 				work->mad_recv_wc->recv_buf.mad;
3534 	param = &work->cm_event.param.sidr_req_rcvd;
3535 	param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
3536 	param->listen_id = listen_id;
3537 	param->service_id =
3538 		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3539 	param->bth_pkey = cm_get_bth_pkey(work);
3540 	param->port = work->port->port_num;
3541 	param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
3542 	work->cm_event.private_data =
3543 		IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
3544 }
3545 
3546 static int cm_sidr_req_handler(struct cm_work *work)
3547 {
3548 	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
3549 	struct cm_sidr_req_msg *sidr_req_msg;
3550 	struct ib_wc *wc;
3551 	int ret;
3552 
3553 	cm_id_priv =
3554 		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
3555 	if (IS_ERR(cm_id_priv))
3556 		return PTR_ERR(cm_id_priv);
3557 
3558 	/* Record SGID/SLID and request ID for lookup. */
3559 	sidr_req_msg = (struct cm_sidr_req_msg *)
3560 				work->mad_recv_wc->recv_buf.mad;
3561 
3562 	cm_id_priv->id.remote_id =
3563 		cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
3564 	cm_id_priv->id.service_id =
3565 		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3566 	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3567 	cm_id_priv->tid = sidr_req_msg->hdr.tid;
3568 
3569 	wc = work->mad_recv_wc->wc;
3570 	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3571 	cm_id_priv->av.dgid.global.interface_id = 0;
3572 	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3573 				      work->mad_recv_wc->recv_buf.grh,
3574 				      &cm_id_priv->av);
3575 	if (ret)
3576 		goto out;
3577 
3578 	spin_lock_irq(&cm.lock);
3579 	listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3580 	if (listen_cm_id_priv) {
3581 		spin_unlock_irq(&cm.lock);
3582 		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3583 				counter[CM_SIDR_REQ_COUNTER]);
3584 		goto out; /* Duplicate message. */
3585 	}
3586 	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3587 	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
3588 					   cm_id_priv->id.service_id);
3589 	if (!listen_cm_id_priv) {
3590 		spin_unlock_irq(&cm.lock);
3591 		ib_send_cm_sidr_rep(&cm_id_priv->id,
3592 				    &(struct ib_cm_sidr_rep_param){
3593 					    .status = IB_SIDR_UNSUPPORTED });
3594 		goto out; /* No match. */
3595 	}
3596 	spin_unlock_irq(&cm.lock);
3597 
3598 	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
3599 	cm_id_priv->id.context = listen_cm_id_priv->id.context;
3600 
3601 	/*
3602 	 * A SIDR ID does not need to be in the xarray since it does not receive
3603 	 * mads, is not placed in the remote_id or remote_qpn rbtree, and does
3604 	 * not enter timewait.
3605 	 */
3606 
3607 	cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
3608 	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
3609 	cm_free_work(work);
3610 	/*
3611 	 * A pointer to the listen_cm_id is held in the event, so this deref
3612 	 * must be after the event is delivered above.
3613 	 */
3614 	cm_deref_id(listen_cm_id_priv);
3615 	if (ret)
3616 		cm_destroy_id(&cm_id_priv->id, ret);
3617 	return 0;
3618 out:
3619 	ib_destroy_cm_id(&cm_id_priv->id);
3620 	return -EINVAL;
3621 }
3622 
3623 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3624 			       struct cm_id_private *cm_id_priv,
3625 			       struct ib_cm_sidr_rep_param *param)
3626 {
3627 	cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3628 			      cm_id_priv->tid, param->ece.attr_mod);
3629 	IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
3630 		be32_to_cpu(cm_id_priv->id.remote_id));
3631 	IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
3632 	IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
3633 	IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
3634 		be64_to_cpu(cm_id_priv->id.service_id));
3635 	IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
3636 	IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
3637 		param->ece.vendor_id & 0xFF);
3638 	IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
3639 		(param->ece.vendor_id >> 8) & 0xFF);
3640 
3641 	if (param->info && param->info_length)
3642 		IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
3643 			    param->info, param->info_length);
3644 
3645 	if (param->private_data && param->private_data_len)
3646 		IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
3647 			    param->private_data, param->private_data_len);
3648 }
3649 
3650 static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
3651 				   struct ib_cm_sidr_rep_param *param)
3652 {
3653 	struct ib_mad_send_buf *msg;
3654 	unsigned long flags;
3655 	int ret;
3656 
3657 	lockdep_assert_held(&cm_id_priv->lock);
3658 
3659 	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3660 	    (param->private_data &&
3661 	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3662 		return -EINVAL;
3663 
3664 	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
3665 		return -EINVAL;
3666 
3667 	ret = cm_alloc_msg(cm_id_priv, &msg);
3668 	if (ret)
3669 		return ret;
3670 
3671 	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3672 			   param);
3673 	trace_icm_send_sidr_rep(&cm_id_priv->id);
3674 	ret = ib_post_send_mad(msg, NULL);
3675 	if (ret) {
3676 		cm_free_msg(msg);
3677 		return ret;
3678 	}
3679 	cm_id_priv->id.state = IB_CM_IDLE;
3680 	spin_lock_irqsave(&cm.lock, flags);
3681 	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3682 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3683 		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3684 	}
3685 	spin_unlock_irqrestore(&cm.lock, flags);
3686 	return 0;
3687 }
3688 
3689 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3690 			struct ib_cm_sidr_rep_param *param)
3691 {
3692 	struct cm_id_private *cm_id_priv =
3693 		container_of(cm_id, struct cm_id_private, id);
3694 	unsigned long flags;
3695 	int ret;
3696 
3697 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3698 	ret = cm_send_sidr_rep_locked(cm_id_priv, param);
3699 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3700 	return ret;
3701 }
3702 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
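
/*
 * Illustrative sketch (not kernel documentation): a listener handling
 * an IB_CM_SIDR_REQ_RECEIVED event might reply with the UD QP serving
 * the requested service ID; qp and MY_QKEY are placeholders supplied
 * by the caller.
 *
 *	struct ib_cm_sidr_rep_param rep = {
 *		.status	= IB_SIDR_SUCCESS,
 *		.qp_num	= qp->qp_num,
 *		.qkey	= MY_QKEY,
 *	};
 *
 *	ret = ib_send_cm_sidr_rep(cm_id, &rep);
 */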
3703 
3704 static void cm_format_sidr_rep_event(struct cm_work *work,
3705 				     const struct cm_id_private *cm_id_priv)
3706 {
3707 	struct cm_sidr_rep_msg *sidr_rep_msg;
3708 	struct ib_cm_sidr_rep_event_param *param;
3709 
3710 	sidr_rep_msg = (struct cm_sidr_rep_msg *)
3711 				work->mad_recv_wc->recv_buf.mad;
3712 	param = &work->cm_event.param.sidr_rep_rcvd;
3713 	param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
3714 	param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
3715 	param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
3716 	param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
3717 				      sidr_rep_msg);
3718 	param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
3719 				  sidr_rep_msg);
3720 	param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
3721 	work->cm_event.private_data =
3722 		IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
3723 }
3724 
3725 static int cm_sidr_rep_handler(struct cm_work *work)
3726 {
3727 	struct cm_sidr_rep_msg *sidr_rep_msg;
3728 	struct cm_id_private *cm_id_priv;
3729 
3730 	sidr_rep_msg = (struct cm_sidr_rep_msg *)
3731 				work->mad_recv_wc->recv_buf.mad;
3732 	cm_id_priv = cm_acquire_id(
3733 		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
3734 	if (!cm_id_priv)
3735 		return -EINVAL; /* Unmatched reply. */
3736 
3737 	spin_lock_irq(&cm_id_priv->lock);
3738 	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3739 		spin_unlock_irq(&cm_id_priv->lock);
3740 		goto out;
3741 	}
3742 	cm_id_priv->id.state = IB_CM_IDLE;
3743 	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3744 	spin_unlock_irq(&cm_id_priv->lock);
3745 
3746 	cm_format_sidr_rep_event(work, cm_id_priv);
3747 	cm_process_work(cm_id_priv, work);
3748 	return 0;
3749 out:
3750 	cm_deref_id(cm_id_priv);
3751 	return -EINVAL;
3752 }
3753 
3754 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3755 				  enum ib_wc_status wc_status)
3756 {
3757 	struct cm_id_private *cm_id_priv;
3758 	struct ib_cm_event cm_event;
3759 	enum ib_cm_state state;
3760 	int ret;
3761 
3762 	memset(&cm_event, 0, sizeof cm_event);
3763 	cm_id_priv = msg->context[0];
3764 
3765 	/* Discard old sends or ones without a response. */
3766 	spin_lock_irq(&cm_id_priv->lock);
3767 	state = (enum ib_cm_state) (unsigned long) msg->context[1];
3768 	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3769 		goto discard;
3770 
3771 	trace_icm_mad_send_err(state, wc_status);
3772 	switch (state) {
3773 	case IB_CM_REQ_SENT:
3774 	case IB_CM_MRA_REQ_RCVD:
3775 		cm_reset_to_idle(cm_id_priv);
3776 		cm_event.event = IB_CM_REQ_ERROR;
3777 		break;
3778 	case IB_CM_REP_SENT:
3779 	case IB_CM_MRA_REP_RCVD:
3780 		cm_reset_to_idle(cm_id_priv);
3781 		cm_event.event = IB_CM_REP_ERROR;
3782 		break;
3783 	case IB_CM_DREQ_SENT:
3784 		cm_enter_timewait(cm_id_priv);
3785 		cm_event.event = IB_CM_DREQ_ERROR;
3786 		break;
3787 	case IB_CM_SIDR_REQ_SENT:
3788 		cm_id_priv->id.state = IB_CM_IDLE;
3789 		cm_event.event = IB_CM_SIDR_REQ_ERROR;
3790 		break;
3791 	default:
3792 		goto discard;
3793 	}
3794 	spin_unlock_irq(&cm_id_priv->lock);
3795 	cm_event.param.send_status = wc_status;
3796 
3797 	/* No other events can occur on the cm_id at this point. */
3798 	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3799 	cm_free_msg(msg);
3800 	if (ret)
3801 		ib_destroy_cm_id(&cm_id_priv->id);
3802 	return;
3803 discard:
3804 	spin_unlock_irq(&cm_id_priv->lock);
3805 	cm_free_msg(msg);
3806 }
3807 
3808 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3809 			    struct ib_mad_send_wc *mad_send_wc)
3810 {
3811 	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3812 	struct cm_port *port;
3813 	u16 attr_index;
3814 
3815 	port = mad_agent->context;
3816 	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3817 				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3818 
3819 	/*
3820 	 * If the send was in response to a received message (context[0] is not
3821 	 * set to a cm_id), and is not a REJ, then it is a send that was
3822 	 * manually retried.
3823 	 */
3824 	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3825 		msg->retries = 1;
3826 
3827 	atomic_long_add(1 + msg->retries,
3828 			&port->counter_group[CM_XMIT].counter[attr_index]);
3829 	if (msg->retries)
3830 		atomic_long_add(msg->retries,
3831 				&port->counter_group[CM_XMIT_RETRIES].
3832 				counter[attr_index]);
3833 
3834 	switch (mad_send_wc->status) {
3835 	case IB_WC_SUCCESS:
3836 	case IB_WC_WR_FLUSH_ERR:
3837 		cm_free_msg(msg);
3838 		break;
3839 	default:
3840 		if (msg->context[0] && msg->context[1])
3841 			cm_process_send_error(msg, mad_send_wc->status);
3842 		else
3843 			cm_free_msg(msg);
3844 		break;
3845 	}
3846 }
3847 
3848 static void cm_work_handler(struct work_struct *_work)
3849 {
3850 	struct cm_work *work = container_of(_work, struct cm_work, work.work);
3851 	int ret;
3852 
3853 	switch (work->cm_event.event) {
3854 	case IB_CM_REQ_RECEIVED:
3855 		ret = cm_req_handler(work);
3856 		break;
3857 	case IB_CM_MRA_RECEIVED:
3858 		ret = cm_mra_handler(work);
3859 		break;
3860 	case IB_CM_REJ_RECEIVED:
3861 		ret = cm_rej_handler(work);
3862 		break;
3863 	case IB_CM_REP_RECEIVED:
3864 		ret = cm_rep_handler(work);
3865 		break;
3866 	case IB_CM_RTU_RECEIVED:
3867 		ret = cm_rtu_handler(work);
3868 		break;
3869 	case IB_CM_USER_ESTABLISHED:
3870 		ret = cm_establish_handler(work);
3871 		break;
3872 	case IB_CM_DREQ_RECEIVED:
3873 		ret = cm_dreq_handler(work);
3874 		break;
3875 	case IB_CM_DREP_RECEIVED:
3876 		ret = cm_drep_handler(work);
3877 		break;
3878 	case IB_CM_SIDR_REQ_RECEIVED:
3879 		ret = cm_sidr_req_handler(work);
3880 		break;
3881 	case IB_CM_SIDR_REP_RECEIVED:
3882 		ret = cm_sidr_rep_handler(work);
3883 		break;
3884 	case IB_CM_LAP_RECEIVED:
3885 		ret = cm_lap_handler(work);
3886 		break;
3887 	case IB_CM_APR_RECEIVED:
3888 		ret = cm_apr_handler(work);
3889 		break;
3890 	case IB_CM_TIMEWAIT_EXIT:
3891 		ret = cm_timewait_handler(work);
3892 		break;
3893 	default:
3894 		trace_icm_handler_err(work->cm_event.event);
3895 		ret = -EINVAL;
3896 		break;
3897 	}
3898 	if (ret)
3899 		cm_free_work(work);
3900 }
3901 
3902 static int cm_establish(struct ib_cm_id *cm_id)
3903 {
3904 	struct cm_id_private *cm_id_priv;
3905 	struct cm_work *work;
3906 	unsigned long flags;
3907 	int ret = 0;
3908 	struct cm_device *cm_dev;
3909 
3910 	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3911 	if (!cm_dev)
3912 		return -ENODEV;
3913 
3914 	work = kmalloc(sizeof *work, GFP_ATOMIC);
3915 	if (!work)
3916 		return -ENOMEM;
3917 
3918 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3919 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3920 	switch (cm_id->state) {
3922 	case IB_CM_REP_SENT:
3923 	case IB_CM_MRA_REP_RCVD:
3924 		cm_id->state = IB_CM_ESTABLISHED;
3925 		break;
3926 	case IB_CM_ESTABLISHED:
3927 		ret = -EISCONN;
3928 		break;
3929 	default:
3930 		trace_icm_establish_err(cm_id);
3931 		ret = -EINVAL;
3932 		break;
3933 	}
3934 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3935 
3936 	if (ret) {
3937 		kfree(work);
3938 		goto out;
3939 	}
3940 
3941 	/*
3942 	 * The CM worker thread may try to destroy the cm_id before it
3943 	 * can execute this work item.  To prevent potential deadlock,
3944 	 * we need to find the cm_id once we're in the context of the
3945 	 * worker thread, rather than holding a reference on it.
3946 	 */
3947 	INIT_DELAYED_WORK(&work->work, cm_work_handler);
3948 	work->local_id = cm_id->local_id;
3949 	work->remote_id = cm_id->remote_id;
3950 	work->mad_recv_wc = NULL;
3951 	work->cm_event.event = IB_CM_USER_ESTABLISHED;
3952 
3953 	/* Check if the device started its remove_one */
3954 	spin_lock_irqsave(&cm.lock, flags);
3955 	if (!cm_dev->going_down) {
3956 		queue_delayed_work(cm.wq, &work->work, 0);
3957 	} else {
3958 		kfree(work);
3959 		ret = -ENODEV;
3960 	}
3961 	spin_unlock_irqrestore(&cm.lock, flags);
3962 
3963 out:
3964 	return ret;
3965 }
3966 
3967 static int cm_migrate(struct ib_cm_id *cm_id)
3968 {
3969 	struct cm_id_private *cm_id_priv;
3970 	struct cm_av tmp_av;
3971 	unsigned long flags;
3972 	int tmp_send_port_not_ready;
3973 	int ret = 0;
3974 
3975 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3976 	spin_lock_irqsave(&cm_id_priv->lock, flags);
3977 	if (cm_id->state == IB_CM_ESTABLISHED &&
3978 	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3979 	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
3980 		cm_id->lap_state = IB_CM_LAP_IDLE;
3981 		/* Swap address vector */
3982 		tmp_av = cm_id_priv->av;
3983 		cm_id_priv->av = cm_id_priv->alt_av;
3984 		cm_id_priv->alt_av = tmp_av;
3985 		/* Swap port send ready state */
3986 		tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3987 		cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3988 		cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3989 	} else {
3990 		ret = -EINVAL;
	}
3991 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3992 
3993 	return ret;
3994 }
3995 
3996 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3997 {
3998 	int ret;
3999 
4000 	switch (event) {
4001 	case IB_EVENT_COMM_EST:
4002 		ret = cm_establish(cm_id);
4003 		break;
4004 	case IB_EVENT_PATH_MIG:
4005 		ret = cm_migrate(cm_id);
4006 		break;
4007 	default:
4008 		ret = -EINVAL;
4009 	}
4010 	return ret;
4011 }
4012 EXPORT_SYMBOL(ib_cm_notify);
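
/*
 * Illustrative sketch (not kernel documentation): a ULP typically
 * calls ib_cm_notify() from its QP event handler so that the CM state
 * machine tracks asynchronous connection establishment and path
 * migration, e.g.:
 *
 *	static void my_qp_event_handler(struct ib_event *event, void *ctx)
 *	{
 *		struct ib_cm_id *cm_id = ctx;
 *
 *		if (event->event == IB_EVENT_COMM_EST ||
 *		    event->event == IB_EVENT_PATH_MIG)
 *			ib_cm_notify(cm_id, event->event);
 *	}
 */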
4013 
4014 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
4015 			    struct ib_mad_send_buf *send_buf,
4016 			    struct ib_mad_recv_wc *mad_recv_wc)
4017 {
4018 	struct cm_port *port = mad_agent->context;
4019 	struct cm_work *work;
4020 	enum ib_cm_event_type event;
4021 	bool alt_path = false;
4022 	u16 attr_id;
4023 	int paths = 0;
4024 	int going_down = 0;
4025 
4026 	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
4027 	case CM_REQ_ATTR_ID:
4028 		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
4029 						mad_recv_wc->recv_buf.mad);
4030 		paths = 1 + (alt_path != 0);
4031 		event = IB_CM_REQ_RECEIVED;
4032 		break;
4033 	case CM_MRA_ATTR_ID:
4034 		event = IB_CM_MRA_RECEIVED;
4035 		break;
4036 	case CM_REJ_ATTR_ID:
4037 		event = IB_CM_REJ_RECEIVED;
4038 		break;
4039 	case CM_REP_ATTR_ID:
4040 		event = IB_CM_REP_RECEIVED;
4041 		break;
4042 	case CM_RTU_ATTR_ID:
4043 		event = IB_CM_RTU_RECEIVED;
4044 		break;
4045 	case CM_DREQ_ATTR_ID:
4046 		event = IB_CM_DREQ_RECEIVED;
4047 		break;
4048 	case CM_DREP_ATTR_ID:
4049 		event = IB_CM_DREP_RECEIVED;
4050 		break;
4051 	case CM_SIDR_REQ_ATTR_ID:
4052 		event = IB_CM_SIDR_REQ_RECEIVED;
4053 		break;
4054 	case CM_SIDR_REP_ATTR_ID:
4055 		event = IB_CM_SIDR_REP_RECEIVED;
4056 		break;
4057 	case CM_LAP_ATTR_ID:
4058 		paths = 1;
4059 		event = IB_CM_LAP_RECEIVED;
4060 		break;
4061 	case CM_APR_ATTR_ID:
4062 		event = IB_CM_APR_RECEIVED;
4063 		break;
4064 	default:
4065 		ib_free_recv_mad(mad_recv_wc);
4066 		return;
4067 	}
4068 
4069 	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
4070 	atomic_long_inc(&port->counter_group[CM_RECV].
4071 			counter[attr_id - CM_ATTR_ID_OFFSET]);
4072 
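	/*
	 * Allocate the work item with room for the path records that
	 * will be parsed from this MAD (up to two for a REQ carrying
	 * an alternate path).
	 */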
4073 	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
4074 	if (!work) {
4075 		ib_free_recv_mad(mad_recv_wc);
4076 		return;
4077 	}
4078 
4079 	INIT_DELAYED_WORK(&work->work, cm_work_handler);
4080 	work->cm_event.event = event;
4081 	work->mad_recv_wc = mad_recv_wc;
4082 	work->port = port;
4083 
4084 	/* Check if the device started its remove_one */
4085 	spin_lock_irq(&cm.lock);
4086 	if (!port->cm_dev->going_down)
4087 		queue_delayed_work(cm.wq, &work->work, 0);
4088 	else
4089 		going_down = 1;
4090 	spin_unlock_irq(&cm.lock);
4091 
4092 	if (going_down) {
4093 		kfree(work);
4094 		ib_free_recv_mad(mad_recv_wc);
4095 	}
4096 }
4097 
4098 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
4099 				struct ib_qp_attr *qp_attr,
4100 				int *qp_attr_mask)
4101 {
4102 	unsigned long flags;
4103 	int ret;
4104 
4105 	spin_lock_irqsave(&cm_id_priv->lock, flags);
4106 	switch (cm_id_priv->id.state) {
4107 	case IB_CM_REQ_SENT:
4108 	case IB_CM_MRA_REQ_RCVD:
4109 	case IB_CM_REQ_RCVD:
4110 	case IB_CM_MRA_REQ_SENT:
4111 	case IB_CM_REP_RCVD:
4112 	case IB_CM_MRA_REP_SENT:
4113 	case IB_CM_REP_SENT:
4114 	case IB_CM_MRA_REP_RCVD:
4115 	case IB_CM_ESTABLISHED:
4116 		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
4117 				IB_QP_PKEY_INDEX | IB_QP_PORT;
4118 		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
4119 		if (cm_id_priv->responder_resources)
4120 			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
4121 						    IB_ACCESS_REMOTE_ATOMIC;
4122 		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
4123 		qp_attr->port_num = cm_id_priv->av.port->port_num;
4124 		ret = 0;
4125 		break;
4126 	default:
4127 		trace_icm_qp_init_err(&cm_id_priv->id);
4128 		ret = -EINVAL;
4129 		break;
4130 	}
4131 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4132 	return ret;
4133 }
4134 
4135 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
4136 			       struct ib_qp_attr *qp_attr,
4137 			       int *qp_attr_mask)
4138 {
4139 	unsigned long flags;
4140 	int ret;
4141 
4142 	spin_lock_irqsave(&cm_id_priv->lock, flags);
4143 	switch (cm_id_priv->id.state) {
4144 	case IB_CM_REQ_RCVD:
4145 	case IB_CM_MRA_REQ_SENT:
4146 	case IB_CM_REP_RCVD:
4147 	case IB_CM_MRA_REP_SENT:
4148 	case IB_CM_REP_SENT:
4149 	case IB_CM_MRA_REP_RCVD:
4150 	case IB_CM_ESTABLISHED:
4151 		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
4152 				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
4153 		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
4154 		qp_attr->path_mtu = cm_id_priv->path_mtu;
4155 		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
4156 		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
4157 		if (cm_id_priv->qp_type == IB_QPT_RC ||
4158 		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
4159 			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
4160 					 IB_QP_MIN_RNR_TIMER;
4161 			qp_attr->max_dest_rd_atomic =
4162 					cm_id_priv->responder_resources;
4163 			qp_attr->min_rnr_timer = 0;
4164 		}
4165 		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
4166 			*qp_attr_mask |= IB_QP_ALT_PATH;
4167 			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
4168 			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4169 			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4170 			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4171 		}
4172 		ret = 0;
4173 		break;
4174 	default:
4175 		trace_icm_qp_rtr_err(&cm_id_priv->id);
4176 		ret = -EINVAL;
4177 		break;
4178 	}
4179 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4180 	return ret;
4181 }
4182 
4183 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
4184 			       struct ib_qp_attr *qp_attr,
4185 			       int *qp_attr_mask)
4186 {
4187 	unsigned long flags;
4188 	int ret;
4189 
4190 	spin_lock_irqsave(&cm_id_priv->lock, flags);
4191 	switch (cm_id_priv->id.state) {
4192 	/* Allow transition to RTS before sending REP */
4193 	case IB_CM_REQ_RCVD:
4194 	case IB_CM_MRA_REQ_SENT:
4195 
4196 	case IB_CM_REP_RCVD:
4197 	case IB_CM_MRA_REP_SENT:
4198 	case IB_CM_REP_SENT:
4199 	case IB_CM_MRA_REP_RCVD:
4200 	case IB_CM_ESTABLISHED:
4201 		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
4202 			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
4203 			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
4204 			switch (cm_id_priv->qp_type) {
4205 			case IB_QPT_RC:
4206 			case IB_QPT_XRC_INI:
4207 				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
4208 						 IB_QP_MAX_QP_RD_ATOMIC;
4209 				qp_attr->retry_cnt = cm_id_priv->retry_count;
4210 				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
4211 				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
4212 				fallthrough;
4213 			case IB_QPT_XRC_TGT:
4214 				*qp_attr_mask |= IB_QP_TIMEOUT;
4215 				qp_attr->timeout = cm_id_priv->av.timeout;
4216 				break;
4217 			default:
4218 				break;
4219 			}
4220 			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
4221 				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
4222 				qp_attr->path_mig_state = IB_MIG_REARM;
4223 			}
4224 		} else {
4225 			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
4226 			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
4227 			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
4228 			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
4229 			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
4230 			qp_attr->path_mig_state = IB_MIG_REARM;
4231 		}
4232 		ret = 0;
4233 		break;
4234 	default:
4235 		trace_icm_qp_rts_err(&cm_id_priv->id);
4236 		ret = -EINVAL;
4237 		break;
4238 	}
4239 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
4240 	return ret;
4241 }
4242 
4243 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
4244 		       struct ib_qp_attr *qp_attr,
4245 		       int *qp_attr_mask)
4246 {
4247 	struct cm_id_private *cm_id_priv;
4248 	int ret;
4249 
4250 	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
4251 	switch (qp_attr->qp_state) {
4252 	case IB_QPS_INIT:
4253 		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
4254 		break;
4255 	case IB_QPS_RTR:
4256 		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
4257 		break;
4258 	case IB_QPS_RTS:
4259 		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
4260 		break;
4261 	default:
4262 		ret = -EINVAL;
4263 		break;
4264 	}
4265 	return ret;
4266 }
4267 EXPORT_SYMBOL(ib_cm_init_qp_attr);
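
/*
 * Illustrative sketch (not kernel documentation): a ULP walks its QP
 * through INIT/RTR/RTS by asking the CM for the attributes negotiated
 * at each step and applying them; cm_id and qp are placeholders
 * supplied by the caller.
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask, ret;
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */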
4268 
4269 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
4270 			       char *buf)
4271 {
4272 	struct cm_counter_group *group;
4273 	struct cm_counter_attribute *cm_attr;
4274 
4275 	group = container_of(obj, struct cm_counter_group, obj);
4276 	cm_attr = container_of(attr, struct cm_counter_attribute, attr);
4277 
4278 	return sysfs_emit(buf, "%ld\n",
4279 			  atomic_long_read(&group->counter[cm_attr->index]));
4280 }
4281 
4282 static const struct sysfs_ops cm_counter_ops = {
4283 	.show = cm_show_counter
4284 };
4285 
4286 static struct kobj_type cm_counter_obj_type = {
4287 	.sysfs_ops = &cm_counter_ops,
4288 	.default_attrs = cm_counter_default_attrs
4289 };
4290 
4291 static int cm_create_port_fs(struct cm_port *port)
4292 {
4293 	int i, ret;
4294 
4295 	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
4296 		ret = ib_port_register_module_stat(port->cm_dev->ib_device,
4297 						   port->port_num,
4298 						   &port->counter_group[i].obj,
4299 						   &cm_counter_obj_type,
4300 						   counter_group_names[i]);
4301 		if (ret)
4302 			goto error;
4303 	}
4304 
4305 	return 0;
4306 
4307 error:
4308 	while (i--)
4309 		ib_port_unregister_module_stat(&port->counter_group[i].obj);
4310 	return ret;
4312 }
4313 
4314 static void cm_remove_port_fs(struct cm_port *port)
4315 {
4316 	int i;
4317 
4318 	for (i = 0; i < CM_COUNTER_GROUPS; i++)
4319 		ib_port_unregister_module_stat(&port->counter_group[i].obj);
4321 }
4322 
4323 static int cm_add_one(struct ib_device *ib_device)
4324 {
4325 	struct cm_device *cm_dev;
4326 	struct cm_port *port;
4327 	struct ib_mad_reg_req reg_req = {
4328 		.mgmt_class = IB_MGMT_CLASS_CM,
4329 		.mgmt_class_version = IB_CM_CLASS_VERSION,
4330 	};
4331 	struct ib_port_modify port_modify = {
4332 		.set_port_cap_mask = IB_PORT_CM_SUP
4333 	};
4334 	unsigned long flags;
4335 	int ret;
4336 	int count = 0;
4337 	unsigned int i;
4338 
4339 	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
4340 			 GFP_KERNEL);
4341 	if (!cm_dev)
4342 		return -ENOMEM;
4343 
4344 	cm_dev->ib_device = ib_device;
4345 	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
4346 	cm_dev->going_down = 0;
4347 
4348 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
4349 	rdma_for_each_port (ib_device, i) {
4350 		if (!rdma_cap_ib_cm(ib_device, i))
4351 			continue;
4352 
4353 		port = kzalloc(sizeof *port, GFP_KERNEL);
4354 		if (!port) {
4355 			ret = -ENOMEM;
4356 			goto error1;
4357 		}
4358 
4359 		cm_dev->port[i-1] = port;
4360 		port->cm_dev = cm_dev;
4361 		port->port_num = i;
4362 
4363 		INIT_LIST_HEAD(&port->cm_priv_prim_list);
4364 		INIT_LIST_HEAD(&port->cm_priv_altr_list);
4365 
4366 		ret = cm_create_port_fs(port);
4367 		if (ret)
4368 			goto error1;
4369 
4370 		port->mad_agent = ib_register_mad_agent(ib_device, i,
4371 							IB_QPT_GSI,
4372 							&reg_req,
4373 							0,
4374 							cm_send_handler,
4375 							cm_recv_handler,
4376 							port,
4377 							0);
4378 		if (IS_ERR(port->mad_agent)) {
4379 			ret = PTR_ERR(port->mad_agent);
4380 			goto error2;
4381 		}
4382 
4383 		ret = ib_modify_port(ib_device, i, 0, &port_modify);
4384 		if (ret)
4385 			goto error3;
4386 
4387 		count++;
4388 	}
4389 
4390 	if (!count) {
4391 		ret = -EOPNOTSUPP;
4392 		goto free;
4393 	}
4394 
4395 	ib_set_client_data(ib_device, &cm_client, cm_dev);
4396 
4397 	write_lock_irqsave(&cm.device_lock, flags);
4398 	list_add_tail(&cm_dev->list, &cm.device_list);
4399 	write_unlock_irqrestore(&cm.device_lock, flags);
4400 	return 0;
4401 
4402 error3:
4403 	ib_unregister_mad_agent(port->mad_agent);
4404 error2:
4405 	cm_remove_port_fs(port);
4406 error1:
4407 	port_modify.set_port_cap_mask = 0;
4408 	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
4409 	kfree(port);
4410 	while (--i) {
4411 		if (!rdma_cap_ib_cm(ib_device, i))
4412 			continue;
4413 
4414 		port = cm_dev->port[i-1];
4415 		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4416 		ib_unregister_mad_agent(port->mad_agent);
4417 		cm_remove_port_fs(port);
4418 		kfree(port);
4419 	}
4420 free:
4421 	kfree(cm_dev);
4422 	return ret;
4423 }
4424 
4425 static void cm_remove_one(struct ib_device *ib_device, void *client_data)
4426 {
4427 	struct cm_device *cm_dev = client_data;
4428 	struct cm_port *port;
4429 	struct cm_id_private *cm_id_priv;
4430 	struct ib_mad_agent *cur_mad_agent;
4431 	struct ib_port_modify port_modify = {
4432 		.clr_port_cap_mask = IB_PORT_CM_SUP
4433 	};
4434 	unsigned long flags;
4435 	unsigned int i;
4436 
4437 	write_lock_irqsave(&cm.device_lock, flags);
4438 	list_del(&cm_dev->list);
4439 	write_unlock_irqrestore(&cm.device_lock, flags);
4440 
4441 	spin_lock_irq(&cm.lock);
4442 	cm_dev->going_down = 1;
4443 	spin_unlock_irq(&cm.lock);
4444 
4445 	rdma_for_each_port (ib_device, i) {
4446 		if (!rdma_cap_ib_cm(ib_device, i))
4447 			continue;
4448 
4449 		port = cm_dev->port[i-1];
4450 		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4451 		/* Mark all cm_ids bound to this port as not ready to send */
4452 		spin_lock_irq(&cm.lock);
4453 		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
4454 			cm_id_priv->altr_send_port_not_ready = 1;
4455 		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
4456 			cm_id_priv->prim_send_port_not_ready = 1;
4457 		spin_unlock_irq(&cm.lock);
4458 		/*
4459 		 * Flush the workqueue after setting going_down to ensure
4460 		 * that the receive handler queues no new work; once it
4461 		 * has drained, it is safe to unregister the MAD agent.
4462 		 */
4463 		flush_workqueue(cm.wq);
4464 		spin_lock_irq(&cm.state_lock);
4465 		cur_mad_agent = port->mad_agent;
4466 		port->mad_agent = NULL;
4467 		spin_unlock_irq(&cm.state_lock);
4468 		ib_unregister_mad_agent(cur_mad_agent);
4469 		cm_remove_port_fs(port);
4470 		kfree(port);
4471 	}
4472 
4473 	kfree(cm_dev);
4474 }
4475 
4476 static int __init ib_cm_init(void)
4477 {
4478 	int ret;
4479 
4480 	INIT_LIST_HEAD(&cm.device_list);
4481 	rwlock_init(&cm.device_lock);
4482 	spin_lock_init(&cm.lock);
4483 	spin_lock_init(&cm.state_lock);
4484 	cm.listen_service_table = RB_ROOT;
4485 	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
4486 	cm.remote_id_table = RB_ROOT;
4487 	cm.remote_qp_table = RB_ROOT;
4488 	cm.remote_sidr_table = RB_ROOT;
4489 	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
4490 	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
4491 	INIT_LIST_HEAD(&cm.timewait_list);
4492 
4493 	cm.wq = alloc_workqueue("ib_cm", 0, 1);
4494 	if (!cm.wq) {
4495 		ret = -ENOMEM;
4496 		goto error2;
4497 	}
4498 
4499 	ret = ib_register_client(&cm_client);
4500 	if (ret)
4501 		goto error3;
4502 
4503 	return 0;
4504 error3:
4505 	destroy_workqueue(cm.wq);
4506 error2:
4507 	return ret;
4508 }
4509 
4510 static void __exit ib_cm_cleanup(void)
4511 {
4512 	struct cm_timewait_info *timewait_info, *tmp;
4513 
4514 	spin_lock_irq(&cm.lock);
4515 	list_for_each_entry(timewait_info, &cm.timewait_list, list)
4516 		cancel_delayed_work(&timewait_info->work.work);
4517 	spin_unlock_irq(&cm.lock);
4518 
4519 	ib_unregister_client(&cm_client);
4520 	destroy_workqueue(cm.wq);
4521 
4522 	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
4523 		list_del(&timewait_info->list);
4524 		kfree(timewait_info);
4525 	}
4526 
4527 	WARN_ON(!xa_empty(&cm.local_id_table));
4528 }
4529 
4530 module_init(ib_cm_init);
4531 module_exit(ib_cm_cleanup);
4532