xref: /openbmc/linux/drivers/infiniband/ulp/ipoib/ipoib_cm.c (revision 4da722ca19f30f7db250db808d1ab1703607a932)
1 /*
2  * Copyright (c) 2006 Mellanox Technologies. All rights reserved
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <rdma/ib_cm.h>
34 #include <net/dst.h>
35 #include <net/icmp.h>
36 #include <linux/icmpv6.h>
37 #include <linux/delay.h>
38 #include <linux/slab.h>
39 #include <linux/vmalloc.h>
40 #include <linux/moduleparam.h>
41 #include <linux/sched/signal.h>
42 
43 #include "ipoib.h"
44 
45 int ipoib_max_conn_qp = 128;
46 
47 module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
48 MODULE_PARM_DESC(max_nonsrq_conn_qp,
49 		 "Max number of connected-mode QPs per interface "
50 		 "(applied only if shared receive queue is not available)");
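/*
 * Example (values are illustrative; the IPoIB driver is normally built as
 * the ib_ipoib module):
 *
 *	modprobe ib_ipoib max_nonsrq_conn_qp=64
 *
 * caps the number of connected-mode QPs per interface on HCAs that do not
 * support a shared receive queue.
 */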
51 
52 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
53 static int data_debug_level;
54 
55 module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
56 MODULE_PARM_DESC(cm_data_debug_level,
57 		 "Enable data path debug tracing for connected mode if > 0");
58 #endif
59 
60 #define IPOIB_CM_IETF_ID 0x1000000000000000ULL
61 
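/*
 * Passive (RX) connections are kept on an LRU list.  A connection's
 * timestamp is refreshed at most once per IPOIB_CM_RX_UPDATE_TIME (and the
 * check only runs for one in every four receive completions, per
 * IPOIB_CM_RX_UPDATE_MASK); connections idle for longer than
 * IPOIB_CM_RX_TIMEOUT are moved to the error state by the stale task,
 * which is scheduled every IPOIB_CM_RX_DELAY.
 */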
62 #define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
63 #define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
64 #define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
65 #define IPOIB_CM_RX_UPDATE_MASK (0x3)
66 
67 #define IPOIB_CM_RX_RESERVE     (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
68 
69 static struct ib_qp_attr ipoib_cm_err_attr = {
70 	.qp_state = IB_QPS_ERR
71 };
72 
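/*
 * Receive WRs carry the ring index ORed with IPOIB_OP_CM and IPOIB_OP_RECV
 * in their wr_id.  The drain WR uses the all-ones ID below, which decodes
 * to an index well above ipoib_recvq_size, so its completion is easy to
 * tell apart from an ordinary receive in ipoib_cm_handle_rx_wc().
 */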
73 #define IPOIB_CM_RX_DRAIN_WRID 0xffffffff
74 
75 static struct ib_send_wr ipoib_cm_rx_drain_wr = {
76 	.opcode = IB_WR_SEND,
77 };
78 
79 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
80 			       struct ib_cm_event *event);
81 
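/* Unmap the linear head and the 'frags' page fragments of a CM receive buffer. */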
82 static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
83 				  u64 mapping[IPOIB_CM_RX_SG])
84 {
85 	int i;
86 
87 	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
88 
89 	for (i = 0; i < frags; ++i)
90 		ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
91 }
92 
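/*
 * Repost receive buffer 'id' on the shared receive queue.  On failure the
 * buffer is unmapped and freed, leaving the ring slot empty.
 */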
93 static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
94 {
95 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
96 	struct ib_recv_wr *bad_wr;
97 	int i, ret;
98 
99 	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
100 
101 	for (i = 0; i < priv->cm.num_frags; ++i)
102 		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
103 
104 	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
105 	if (unlikely(ret)) {
106 		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
107 		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
108 				      priv->cm.srq_ring[id].mapping);
109 		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
110 		priv->cm.srq_ring[id].skb = NULL;
111 	}
112 
113 	return ret;
114 }
115 
116 static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
117 					struct ipoib_cm_rx *rx,
118 					struct ib_recv_wr *wr,
119 					struct ib_sge *sge, int id)
120 {
121 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
122 	struct ib_recv_wr *bad_wr;
123 	int i, ret;
124 
125 	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
126 
127 	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
128 		sge[i].addr = rx->rx_ring[id].mapping[i];
129 
130 	ret = ib_post_recv(rx->qp, wr, &bad_wr);
131 	if (unlikely(ret)) {
132 		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
133 		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
134 				      rx->rx_ring[id].mapping);
135 		dev_kfree_skb_any(rx->rx_ring[id].skb);
136 		rx->rx_ring[id].skb = NULL;
137 	}
138 
139 	return ret;
140 }
141 
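/*
 * Allocate and DMA-map a CM receive buffer: a linear head of
 * IPOIB_CM_HEAD_SIZE bytes plus 'frags' full pages attached as skb
 * fragments.  Returns the skb (also stored in rx_ring[id]) or NULL.
 */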
142 static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
143 					     struct ipoib_cm_rx_buf *rx_ring,
144 					     int id, int frags,
145 					     u64 mapping[IPOIB_CM_RX_SG],
146 					     gfp_t gfp)
147 {
148 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
149 	struct sk_buff *skb;
150 	int i;
151 
152 	skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
153 	if (unlikely(!skb))
154 		return NULL;
155 
156 	/*
157 	 * IPoIB adds an IPOIB_ENCAP_LEN byte header; this aligns the
158 	 * IP header to a multiple of 16.
159 	 */
160 	skb_reserve(skb, IPOIB_CM_RX_RESERVE);
161 
162 	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
163 				       DMA_FROM_DEVICE);
164 	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
165 		dev_kfree_skb_any(skb);
166 		return NULL;
167 	}
168 
169 	for (i = 0; i < frags; i++) {
170 		struct page *page = alloc_page(gfp);
171 
172 		if (!page)
173 			goto partial_error;
174 		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
175 
176 		mapping[i + 1] = ib_dma_map_page(priv->ca, page,
177 						 0, PAGE_SIZE, DMA_FROM_DEVICE);
178 		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
179 			goto partial_error;
180 	}
181 
182 	rx_ring[id].skb = skb;
183 	return skb;
184 
185 partial_error:
186 
187 	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
188 
189 	for (; i > 0; --i)
190 		ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
191 
192 	dev_kfree_skb_any(skb);
193 	return NULL;
194 }
195 
196 static void ipoib_cm_free_rx_ring(struct net_device *dev,
197 				  struct ipoib_cm_rx_buf *rx_ring)
198 {
199 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
200 	int i;
201 
202 	for (i = 0; i < ipoib_recvq_size; ++i)
203 		if (rx_ring[i].skb) {
204 			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
205 					      rx_ring[i].mapping);
206 			dev_kfree_skb_any(rx_ring[i].skb);
207 		}
208 
209 	vfree(rx_ring);
210 }
211 
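/*
 * Post a single drain WR to the first QP on the flush list and move the
 * whole flush list onto the drain list.  The drain WR's flush-error
 * completion, recognised by its WR ID in ipoib_cm_handle_rx_wc(), then
 * triggers reaping of the drained connections.
 */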
212 static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
213 {
214 	struct ib_send_wr *bad_wr;
215 	struct ipoib_cm_rx *p;
216 
217 	/* We only reserved 1 extra slot in CQ for drain WRs, so
218 	 * make sure we have at most 1 outstanding WR. */
219 	if (list_empty(&priv->cm.rx_flush_list) ||
220 	    !list_empty(&priv->cm.rx_drain_list))
221 		return;
222 
223 	/*
224 	 * QPs on the flush list are in the error state, so a "flush
225 	 * error" WC is generated immediately for each WR we post.
226 	 */
227 	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
228 	ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
229 	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
230 		ipoib_warn(priv, "failed to post drain wr\n");
231 
232 	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
233 }
234 
235 static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
236 {
237 	struct ipoib_cm_rx *p = ctx;
238 	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
239 	unsigned long flags;
240 
241 	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
242 		return;
243 
244 	spin_lock_irqsave(&priv->lock, flags);
245 	list_move(&p->list, &priv->cm.rx_flush_list);
246 	p->state = IPOIB_CM_RX_FLUSH;
247 	ipoib_cm_start_rx_drain(priv);
248 	spin_unlock_irqrestore(&priv->lock, flags);
249 }
250 
251 static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
252 					   struct ipoib_cm_rx *p)
253 {
254 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
255 	struct ib_qp_init_attr attr = {
256 		.event_handler = ipoib_cm_rx_event_handler,
257 		.send_cq = priv->recv_cq, /* For drain WR */
258 		.recv_cq = priv->recv_cq,
259 		.srq = priv->cm.srq,
260 		.cap.max_send_wr = 1, /* For drain WR */
261 		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
262 		.sq_sig_type = IB_SIGNAL_ALL_WR,
263 		.qp_type = IB_QPT_RC,
264 		.qp_context = p,
265 	};
266 
267 	if (!ipoib_cm_has_srq(dev)) {
268 		attr.cap.max_recv_wr  = ipoib_recvq_size;
269 		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
270 	}
271 
272 	return ib_create_qp(priv->pd, &attr);
273 }
274 
275 static int ipoib_cm_modify_rx_qp(struct net_device *dev,
276 				 struct ib_cm_id *cm_id, struct ib_qp *qp,
277 				 unsigned psn)
278 {
279 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
280 	struct ib_qp_attr qp_attr;
281 	int qp_attr_mask, ret;
282 
283 	qp_attr.qp_state = IB_QPS_INIT;
284 	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
285 	if (ret) {
286 		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
287 		return ret;
288 	}
289 	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
290 	if (ret) {
291 		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
292 		return ret;
293 	}
294 	qp_attr.qp_state = IB_QPS_RTR;
295 	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
296 	if (ret) {
297 		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
298 		return ret;
299 	}
300 	qp_attr.rq_psn = psn;
301 	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
302 	if (ret) {
303 		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
304 		return ret;
305 	}
306 
307 	/*
308 	 * Current Mellanox HCA firmware won't generate completions
309 	 * with error for drain WRs unless the QP has been moved to
310 	 * RTS first. This work-around leaves a window where a QP has
311 	 * moved to error asynchronously, but this will eventually get
312 	 * fixed in firmware, so let's not error out if modify QP
313 	 * fails.
314 	 */
315 	qp_attr.qp_state = IB_QPS_RTS;
316 	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
317 	if (ret) {
318 		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
319 		return 0;
320 	}
321 	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
322 	if (ret) {
323 		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
324 		return 0;
325 	}
326 
327 	return 0;
328 }
329 
330 static void ipoib_cm_init_rx_wr(struct net_device *dev,
331 				struct ib_recv_wr *wr,
332 				struct ib_sge *sge)
333 {
334 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
335 	int i;
336 
337 	for (i = 0; i < priv->cm.num_frags; ++i)
338 		sge[i].lkey = priv->pd->local_dma_lkey;
339 
340 	sge[0].length = IPOIB_CM_HEAD_SIZE;
341 	for (i = 1; i < priv->cm.num_frags; ++i)
342 		sge[i].length = PAGE_SIZE;
343 
344 	wr->next    = NULL;
345 	wr->sg_list = sge;
346 	wr->num_sge = priv->cm.num_frags;
347 }
348 
349 static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
350 				   struct ipoib_cm_rx *rx)
351 {
352 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
353 	struct {
354 		struct ib_recv_wr wr;
355 		struct ib_sge sge[IPOIB_CM_RX_SG];
356 	} *t;
357 	int ret;
358 	int i;
359 
360 	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
361 	if (!rx->rx_ring)
362 		return -ENOMEM;
363 
364 	t = kmalloc(sizeof *t, GFP_KERNEL);
365 	if (!t) {
366 		ret = -ENOMEM;
367 		goto err_free_1;
368 	}
369 
370 	ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);
371 
372 	spin_lock_irq(&priv->lock);
373 
374 	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
375 		spin_unlock_irq(&priv->lock);
376 		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
377 		ret = -EINVAL;
378 		goto err_free;
379 	} else
380 		++priv->cm.nonsrq_conn_qp;
381 
382 	spin_unlock_irq(&priv->lock);
383 
384 	for (i = 0; i < ipoib_recvq_size; ++i) {
385 		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
386 					   rx->rx_ring[i].mapping,
387 					   GFP_KERNEL)) {
388 			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
389 			ret = -ENOMEM;
390 			goto err_count;
391 		}
392 		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
393 		if (ret) {
394 			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
395 				   "failed for buf %d\n", i);
396 			ret = -EIO;
397 			goto err_count;
398 		}
399 	}
400 
401 	rx->recv_count = ipoib_recvq_size;
402 
403 	kfree(t);
404 
405 	return 0;
406 
407 err_count:
408 	spin_lock_irq(&priv->lock);
409 	--priv->cm.nonsrq_conn_qp;
410 	spin_unlock_irq(&priv->lock);
411 
412 err_free:
413 	kfree(t);
414 
415 err_free_1:
416 	ipoib_cm_free_rx_ring(dev, rx->rx_ring);
417 
418 	return ret;
419 }
420 
421 static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
422 			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
423 			     unsigned psn)
424 {
425 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
426 	struct ipoib_cm_data data = {};
427 	struct ib_cm_rep_param rep = {};
428 
429 	data.qpn = cpu_to_be32(priv->qp->qp_num);
430 	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
431 
432 	rep.private_data = &data;
433 	rep.private_data_len = sizeof data;
434 	rep.flow_control = 0;
435 	rep.rnr_retry_count = req->rnr_retry_count;
436 	rep.srq = ipoib_cm_has_srq(dev);
437 	rep.qp_num = qp->qp_num;
438 	rep.starting_psn = psn;
439 	return ib_send_cm_rep(cm_id, &rep);
440 }
441 
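/*
 * Handle an incoming connection request: create and bring up the RX QP,
 * allocate a per-connection receive ring when no SRQ is available, add the
 * connection to the passive list, and reply with a REP carrying our
 * datagram QPN and CM buffer size.
 */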
442 static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
443 {
444 	struct net_device *dev = cm_id->context;
445 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
446 	struct ipoib_cm_rx *p;
447 	unsigned psn;
448 	int ret;
449 
450 	ipoib_dbg(priv, "REQ arrived\n");
451 	p = kzalloc(sizeof *p, GFP_KERNEL);
452 	if (!p)
453 		return -ENOMEM;
454 	p->dev = dev;
455 	p->id = cm_id;
456 	cm_id->context = p;
457 	p->state = IPOIB_CM_RX_LIVE;
458 	p->jiffies = jiffies;
459 	INIT_LIST_HEAD(&p->list);
460 
461 	p->qp = ipoib_cm_create_rx_qp(dev, p);
462 	if (IS_ERR(p->qp)) {
463 		ret = PTR_ERR(p->qp);
464 		goto err_qp;
465 	}
466 
467 	psn = prandom_u32() & 0xffffff;
468 	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
469 	if (ret)
470 		goto err_modify;
471 
472 	if (!ipoib_cm_has_srq(dev)) {
473 		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
474 		if (ret)
475 			goto err_modify;
476 	}
477 
478 	spin_lock_irq(&priv->lock);
479 	queue_delayed_work(priv->wq,
480 			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
481 	/* Add this entry to the head of the passive_ids list, but do not re-add
482 	 * it if IB_EVENT_QP_LAST_WQE_REACHED has already moved it to the flush list. */
483 	p->jiffies = jiffies;
484 	if (p->state == IPOIB_CM_RX_LIVE)
485 		list_move(&p->list, &priv->cm.passive_ids);
486 	spin_unlock_irq(&priv->lock);
487 
488 	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
489 	if (ret) {
490 		ipoib_warn(priv, "failed to send REP: %d\n", ret);
491 		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
492 			ipoib_warn(priv, "unable to move qp to error state\n");
493 	}
494 	return 0;
495 
496 err_modify:
497 	ib_destroy_qp(p->qp);
498 err_qp:
499 	kfree(p);
500 	return ret;
501 }
502 
503 static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
504 			       struct ib_cm_event *event)
505 {
506 	struct ipoib_cm_rx *p;
507 	struct ipoib_dev_priv *priv;
508 
509 	switch (event->event) {
510 	case IB_CM_REQ_RECEIVED:
511 		return ipoib_cm_req_handler(cm_id, event);
512 	case IB_CM_DREQ_RECEIVED:
513 		p = cm_id->context;
514 		ib_send_cm_drep(cm_id, NULL, 0);
515 		/* Fall through */
516 	case IB_CM_REJ_RECEIVED:
517 		p = cm_id->context;
518 		priv = ipoib_priv(p->dev);
519 		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
520 			ipoib_warn(priv, "unable to move qp to error state\n");
521 		/* Fall through */
522 	default:
523 		return 0;
524 	}
525 }
526 /* Adjust length of skb with fragments to match received data */
527 static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
528 			  unsigned int length, struct sk_buff *toskb)
529 {
530 	int i, num_frags;
531 	unsigned int size;
532 
533 	/* put header into skb */
534 	size = min(length, hdr_space);
535 	skb->tail += size;
536 	skb->len += size;
537 	length -= size;
538 
539 	num_frags = skb_shinfo(skb)->nr_frags;
540 	for (i = 0; i < num_frags; i++) {
541 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
542 
543 		if (length == 0) {
544 			/* don't need this page */
545 			skb_fill_page_desc(toskb, i, skb_frag_page(frag),
546 					   0, PAGE_SIZE);
547 			--skb_shinfo(skb)->nr_frags;
548 		} else {
549 			size = min(length, (unsigned) PAGE_SIZE);
550 
551 			skb_frag_size_set(frag, size);
552 			skb->data_len += size;
553 			skb->truesize += size;
554 			skb->len += size;
555 			length -= size;
556 		}
557 	}
558 }
559 
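/*
 * Receive completion handler for connected mode.  Packets shorter than
 * IPOIB_CM_COPYBREAK are copied into a small freshly allocated skb; larger
 * ones hand the filled buffer up the stack while the ring slot is refilled
 * with a new buffer (the old one is reused if that allocation fails).
 */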
560 void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
561 {
562 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
563 	struct ipoib_cm_rx_buf *rx_ring;
564 	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
565 	struct sk_buff *skb, *newskb;
566 	struct ipoib_cm_rx *p;
567 	unsigned long flags;
568 	u64 mapping[IPOIB_CM_RX_SG];
569 	int frags;
570 	int has_srq;
571 	struct sk_buff *small_skb;
572 
573 	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
574 		       wr_id, wc->status);
575 
576 	if (unlikely(wr_id >= ipoib_recvq_size)) {
577 		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
578 			spin_lock_irqsave(&priv->lock, flags);
579 			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
580 			ipoib_cm_start_rx_drain(priv);
581 			queue_work(priv->wq, &priv->cm.rx_reap_task);
582 			spin_unlock_irqrestore(&priv->lock, flags);
583 		} else
584 			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
585 				   wr_id, ipoib_recvq_size);
586 		return;
587 	}
588 
589 	p = wc->qp->qp_context;
590 
591 	has_srq = ipoib_cm_has_srq(dev);
592 	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
593 
594 	skb = rx_ring[wr_id].skb;
595 
596 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
597 		ipoib_dbg(priv, "cm recv error "
598 			   "(status=%d, wrid=%d vend_err %x)\n",
599 			   wc->status, wr_id, wc->vendor_err);
600 		++dev->stats.rx_dropped;
601 		if (has_srq)
602 			goto repost;
603 		else {
604 			if (!--p->recv_count) {
605 				spin_lock_irqsave(&priv->lock, flags);
606 				list_move(&p->list, &priv->cm.rx_reap_list);
607 				spin_unlock_irqrestore(&priv->lock, flags);
608 				queue_work(priv->wq, &priv->cm.rx_reap_task);
609 			}
610 			return;
611 		}
612 	}
613 
614 	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
615 		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
616 			spin_lock_irqsave(&priv->lock, flags);
617 			p->jiffies = jiffies;
618 			/* Move this entry to the list head, but do not re-add it
619 			 * if it has already been moved off the list. */
620 			if (p->state == IPOIB_CM_RX_LIVE)
621 				list_move(&p->list, &priv->cm.passive_ids);
622 			spin_unlock_irqrestore(&priv->lock, flags);
623 		}
624 	}
625 
626 	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
627 		int dlen = wc->byte_len;
628 
629 		small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
630 		if (small_skb) {
631 			skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
632 			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
633 						   dlen, DMA_FROM_DEVICE);
634 			skb_copy_from_linear_data(skb, small_skb->data, dlen);
635 			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
636 						      dlen, DMA_FROM_DEVICE);
637 			skb_put(small_skb, dlen);
638 			skb = small_skb;
639 			goto copied;
640 		}
641 	}
642 
643 	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
644 					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
645 
646 	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
647 				       mapping, GFP_ATOMIC);
648 	if (unlikely(!newskb)) {
649 		/*
650 		 * If we can't allocate a new RX buffer, dump
651 		 * this packet and reuse the old buffer.
652 		 */
653 		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
654 		++dev->stats.rx_dropped;
655 		goto repost;
656 	}
657 
658 	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
659 	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
660 
661 	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
662 		       wc->byte_len, wc->slid);
663 
664 	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
665 
666 copied:
667 	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
668 	skb_add_pseudo_hdr(skb);
669 
670 	++dev->stats.rx_packets;
671 	dev->stats.rx_bytes += skb->len;
672 
673 	skb->dev = dev;
674 	/* XXX get correct PACKET_ type here */
675 	skb->pkt_type = PACKET_HOST;
676 	netif_receive_skb(skb);
677 
678 repost:
679 	if (has_srq) {
680 		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
681 			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
682 				   "for buf %d\n", wr_id);
683 	} else {
684 		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
685 							  &priv->cm.rx_wr,
686 							  priv->cm.rx_sge,
687 							  wr_id))) {
688 			--p->recv_count;
689 			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
690 				   "for buf %d\n", wr_id);
691 		}
692 	}
693 }
694 
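/* Build the gather list for tx_req and post it on the connection's QP. */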
695 static inline int post_send(struct ipoib_dev_priv *priv,
696 			    struct ipoib_cm_tx *tx,
697 			    unsigned int wr_id,
698 			    struct ipoib_tx_buf *tx_req)
699 {
700 	struct ib_send_wr *bad_wr;
701 
702 	ipoib_build_sge(priv, tx_req);
703 
704 	priv->tx_wr.wr.wr_id	= wr_id | IPOIB_OP_CM;
705 
706 	return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
707 }
708 
709 void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
710 {
711 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
712 	struct ipoib_tx_buf *tx_req;
713 	int rc;
714 	unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);
715 
716 	if (unlikely(skb->len > tx->mtu)) {
717 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
718 			   skb->len, tx->mtu);
719 		++dev->stats.tx_dropped;
720 		++dev->stats.tx_errors;
721 		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
722 		return;
723 	}
724 	if (skb_shinfo(skb)->nr_frags > usable_sge) {
725 		if (skb_linearize(skb) < 0) {
726 			ipoib_warn(priv, "skb could not be linearized\n");
727 			++dev->stats.tx_dropped;
728 			++dev->stats.tx_errors;
729 			dev_kfree_skb_any(skb);
730 			return;
731 		}
732 		/* skb_linearize() can succeed yet still leave too many frags */
733 		if (skb_shinfo(skb)->nr_frags > usable_sge) {
734 			ipoib_warn(priv, "too many frags after skb linearize\n");
735 			++dev->stats.tx_dropped;
736 			++dev->stats.tx_errors;
737 			dev_kfree_skb_any(skb);
738 			return;
739 		}
740 	}
741 	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
742 		       tx->tx_head, skb->len, tx->qp->qp_num);
743 
744 	/*
745 	 * We put the skb into the tx_ring _before_ we call post_send()
746 	 * because it's entirely possible that the completion handler will
747 	 * run before we execute anything after the post_send().  That
748 	 * means we have to make sure everything is properly recorded and
749 	 * our state is consistent before we call post_send().
750 	 */
751 	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
752 	tx_req->skb = skb;
753 
754 	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
755 		++dev->stats.tx_errors;
756 		dev_kfree_skb_any(skb);
757 		return;
758 	}
759 
760 	skb_orphan(skb);
761 	skb_dst_drop(skb);
762 
763 	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
764 	if (unlikely(rc)) {
765 		ipoib_warn(priv, "post_send failed, error %d\n", rc);
766 		++dev->stats.tx_errors;
767 		ipoib_dma_unmap_tx(priv, tx_req);
768 		dev_kfree_skb_any(skb);
769 	} else {
770 		netif_trans_update(dev);
771 		++tx->tx_head;
772 
773 		if (++priv->tx_outstanding == ipoib_sendq_size) {
774 			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
775 				  tx->qp->qp_num);
776 			netif_stop_queue(dev);
777 			rc = ib_req_notify_cq(priv->send_cq,
778 				IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
779 			if (rc < 0)
780 				ipoib_warn(priv, "request notify on send CQ failed\n");
781 			else if (rc)
782 				ipoib_send_comp_handler(priv->send_cq, dev);
783 		}
784 	}
785 }
786 
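/*
 * Send completion handler: unmap and free the skb, update counters, wake
 * the net queue once outstanding sends drop back to half the ring size,
 * and tear the connection down on any error other than a flush.
 */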
787 void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
788 {
789 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
790 	struct ipoib_cm_tx *tx = wc->qp->qp_context;
791 	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
792 	struct ipoib_tx_buf *tx_req;
793 	unsigned long flags;
794 
795 	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
796 		       wr_id, wc->status);
797 
798 	if (unlikely(wr_id >= ipoib_sendq_size)) {
799 		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
800 			   wr_id, ipoib_sendq_size);
801 		return;
802 	}
803 
804 	tx_req = &tx->tx_ring[wr_id];
805 
806 	ipoib_dma_unmap_tx(priv, tx_req);
807 
808 	/* FIXME: is this right? Shouldn't we only increment on success? */
809 	++dev->stats.tx_packets;
810 	dev->stats.tx_bytes += tx_req->skb->len;
811 
812 	dev_kfree_skb_any(tx_req->skb);
813 
814 	netif_tx_lock(dev);
815 
816 	++tx->tx_tail;
817 	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
818 	    netif_queue_stopped(dev) &&
819 	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
820 		netif_wake_queue(dev);
821 
822 	if (wc->status != IB_WC_SUCCESS &&
823 	    wc->status != IB_WC_WR_FLUSH_ERR) {
824 		struct ipoib_neigh *neigh;
825 
826 		if (wc->status != IB_WC_RNR_RETRY_EXC_ERR)
827 			ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n",
828 				   wc->status, wr_id, wc->vendor_err);
829 		else
830 			ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n",
831 				  wc->status, wr_id, wc->vendor_err);
832 
833 		spin_lock_irqsave(&priv->lock, flags);
834 		neigh = tx->neigh;
835 
836 		if (neigh) {
837 			neigh->cm = NULL;
838 			ipoib_neigh_free(neigh);
839 
840 			tx->neigh = NULL;
841 		}
842 
843 		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
844 			list_move(&tx->list, &priv->cm.reap_list);
845 			queue_work(priv->wq, &priv->cm.reap_task);
846 		}
847 
848 		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
849 
850 		spin_unlock_irqrestore(&priv->lock, flags);
851 	}
852 
853 	netif_tx_unlock(dev);
854 }
855 
856 int ipoib_cm_dev_open(struct net_device *dev)
857 {
858 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
859 	int ret;
860 
861 	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
862 		return 0;
863 
864 	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
865 	if (IS_ERR(priv->cm.id)) {
866 		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
867 		ret = PTR_ERR(priv->cm.id);
868 		goto err_cm;
869 	}
870 
871 	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
872 			   0);
873 	if (ret) {
874 		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
875 		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
876 		goto err_listen;
877 	}
878 
879 	return 0;
880 
881 err_listen:
882 	ib_destroy_cm_id(priv->cm.id);
883 err_cm:
884 	priv->cm.id = NULL;
885 	return ret;
886 }
887 
888 static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
889 {
890 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
891 	struct ipoib_cm_rx *rx, *n;
892 	LIST_HEAD(list);
893 
894 	spin_lock_irq(&priv->lock);
895 	list_splice_init(&priv->cm.rx_reap_list, &list);
896 	spin_unlock_irq(&priv->lock);
897 
898 	list_for_each_entry_safe(rx, n, &list, list) {
899 		ib_destroy_cm_id(rx->id);
900 		ib_destroy_qp(rx->qp);
901 		if (!ipoib_cm_has_srq(dev)) {
902 			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
903 			spin_lock_irq(&priv->lock);
904 			--priv->cm.nonsrq_conn_qp;
905 			spin_unlock_irq(&priv->lock);
906 		}
907 		kfree(rx);
908 	}
909 }
910 
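/*
 * Shut connected mode down on an interface: stop listening, move every
 * passive connection to the error state, wait up to five seconds for the
 * receive queues to drain, then reap whatever is left.
 */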
911 void ipoib_cm_dev_stop(struct net_device *dev)
912 {
913 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
914 	struct ipoib_cm_rx *p;
915 	unsigned long begin;
916 	int ret;
917 
918 	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
919 		return;
920 
921 	ib_destroy_cm_id(priv->cm.id);
922 	priv->cm.id = NULL;
923 
924 	spin_lock_irq(&priv->lock);
925 	while (!list_empty(&priv->cm.passive_ids)) {
926 		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
927 		list_move(&p->list, &priv->cm.rx_error_list);
928 		p->state = IPOIB_CM_RX_ERROR;
929 		spin_unlock_irq(&priv->lock);
930 		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
931 		if (ret)
932 			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
933 		spin_lock_irq(&priv->lock);
934 	}
935 
936 	/* Wait for all RX to be drained */
937 	begin = jiffies;
938 
939 	while (!list_empty(&priv->cm.rx_error_list) ||
940 	       !list_empty(&priv->cm.rx_flush_list) ||
941 	       !list_empty(&priv->cm.rx_drain_list)) {
942 		if (time_after(jiffies, begin + 5 * HZ)) {
943 			ipoib_warn(priv, "RX drain timing out\n");
944 
945 			/*
946 			 * assume the HW is wedged and just free up everything.
947 			 */
948 			list_splice_init(&priv->cm.rx_flush_list,
949 					 &priv->cm.rx_reap_list);
950 			list_splice_init(&priv->cm.rx_error_list,
951 					 &priv->cm.rx_reap_list);
952 			list_splice_init(&priv->cm.rx_drain_list,
953 					 &priv->cm.rx_reap_list);
954 			break;
955 		}
956 		spin_unlock_irq(&priv->lock);
957 		msleep(1);
958 		ipoib_drain_cq(dev);
959 		spin_lock_irq(&priv->lock);
960 	}
961 
962 	spin_unlock_irq(&priv->lock);
963 
964 	ipoib_cm_free_rx_reap_list(dev);
965 
966 	cancel_delayed_work(&priv->cm.stale_task);
967 }
968 
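/*
 * Active-side REP handler: take the peer's advertised MTU, move our TX QP
 * through RTR and RTS, flush any packets that were queued on the neighbour
 * while the connection was being set up, and send an RTU.
 */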
969 static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
970 {
971 	struct ipoib_cm_tx *p = cm_id->context;
972 	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
973 	struct ipoib_cm_data *data = event->private_data;
974 	struct sk_buff_head skqueue;
975 	struct ib_qp_attr qp_attr;
976 	int qp_attr_mask, ret;
977 	struct sk_buff *skb;
978 
979 	p->mtu = be32_to_cpu(data->mtu);
980 
981 	if (p->mtu <= IPOIB_ENCAP_LEN) {
982 		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
983 			   p->mtu, IPOIB_ENCAP_LEN);
984 		return -EINVAL;
985 	}
986 
987 	qp_attr.qp_state = IB_QPS_RTR;
988 	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
989 	if (ret) {
990 		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
991 		return ret;
992 	}
993 
994 	qp_attr.rq_psn = 0 /* FIXME */;
995 	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
996 	if (ret) {
997 		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
998 		return ret;
999 	}
1000 
1001 	qp_attr.qp_state = IB_QPS_RTS;
1002 	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
1003 	if (ret) {
1004 		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
1005 		return ret;
1006 	}
1007 	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
1008 	if (ret) {
1009 		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
1010 		return ret;
1011 	}
1012 
1013 	skb_queue_head_init(&skqueue);
1014 
1015 	spin_lock_irq(&priv->lock);
1016 	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
1017 	if (p->neigh)
1018 		while ((skb = __skb_dequeue(&p->neigh->queue)))
1019 			__skb_queue_tail(&skqueue, skb);
1020 	spin_unlock_irq(&priv->lock);
1021 
1022 	while ((skb = __skb_dequeue(&skqueue))) {
1023 		skb->dev = p->dev;
1024 		ret = dev_queue_xmit(skb);
1025 		if (ret)
1026 			ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
1027 				   __func__, ret);
1028 	}
1029 
1030 	ret = ib_send_cm_rtu(cm_id, NULL, 0);
1031 	if (ret) {
1032 		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
1033 		return ret;
1034 	}
1035 	return 0;
1036 }
1037 
1038 static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
1039 {
1040 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1041 	struct ib_qp_init_attr attr = {
1042 		.send_cq		= priv->recv_cq,
1043 		.recv_cq		= priv->recv_cq,
1044 		.srq			= priv->cm.srq,
1045 		.cap.max_send_wr	= ipoib_sendq_size,
1046 		.cap.max_send_sge	= 1,
1047 		.sq_sig_type		= IB_SIGNAL_ALL_WR,
1048 		.qp_type		= IB_QPT_RC,
1049 		.qp_context		= tx,
1050 		.create_flags		= IB_QP_CREATE_USE_GFP_NOIO
1051 	};
1052 
1053 	struct ib_qp *tx_qp;
1054 
1055 	if (dev->features & NETIF_F_SG)
1056 		attr.cap.max_send_sge =
1057 			min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
1058 
1059 	tx_qp = ib_create_qp(priv->pd, &attr);
1060 	if (PTR_ERR(tx_qp) == -EINVAL) {
1061 		attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
1062 		tx_qp = ib_create_qp(priv->pd, &attr);
1063 	}
1064 	tx->max_send_sge = attr.cap.max_send_sge;
1065 	return tx_qp;
1066 }
1067 
1068 static int ipoib_cm_send_req(struct net_device *dev,
1069 			     struct ib_cm_id *id, struct ib_qp *qp,
1070 			     u32 qpn,
1071 			     struct sa_path_rec *pathrec)
1072 {
1073 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1074 	struct ipoib_cm_data data = {};
1075 	struct ib_cm_req_param req = {};
1076 
1077 	data.qpn = cpu_to_be32(priv->qp->qp_num);
1078 	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
1079 
1080 	req.primary_path		= pathrec;
1081 	req.alternate_path		= NULL;
1082 	req.service_id			= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
1083 	req.qp_num			= qp->qp_num;
1084 	req.qp_type			= qp->qp_type;
1085 	req.private_data		= &data;
1086 	req.private_data_len		= sizeof data;
1087 	req.flow_control		= 0;
1088 
1089 	req.starting_psn		= 0; /* FIXME */
1090 
1091 	/*
1092 	 * Pick some arbitrary defaults here; we could make these
1093 	 * module parameters if anyone cared about setting them.
1094 	 */
1095 	req.responder_resources		= 4;
1096 	req.remote_cm_response_timeout	= 20;
1097 	req.local_cm_response_timeout	= 20;
1098 	req.retry_count			= 0; /* RFC draft warns against retries */
1099 	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
1100 	req.max_cm_retries		= 15;
1101 	req.srq				= ipoib_cm_has_srq(dev);
1102 	return ib_send_cm_req(id, &req);
1103 }
1104 
1105 static int ipoib_cm_modify_tx_init(struct net_device *dev,
1106 				  struct ib_cm_id *cm_id, struct ib_qp *qp)
1107 {
1108 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1109 	struct ib_qp_attr qp_attr;
1110 	int qp_attr_mask, ret;
1111 	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
1112 	if (ret) {
1113 		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
1114 		return ret;
1115 	}
1116 
1117 	qp_attr.qp_state = IB_QPS_INIT;
1118 	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
1119 	qp_attr.port_num = priv->port;
1120 	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
1121 
1122 	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
1123 	if (ret) {
1124 		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
1125 		return ret;
1126 	}
1127 	return 0;
1128 }
1129 
1130 static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
1131 			    struct sa_path_rec *pathrec)
1132 {
1133 	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
1134 	int ret;
1135 
1136 	p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
1137 			       GFP_NOIO, PAGE_KERNEL);
1138 	if (!p->tx_ring) {
1139 		ret = -ENOMEM;
1140 		goto err_tx;
1141 	}
1142 	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
1143 
1144 	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
1145 	if (IS_ERR(p->qp)) {
1146 		ret = PTR_ERR(p->qp);
1147 		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
1148 		goto err_qp;
1149 	}
1150 
1151 	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
1152 	if (IS_ERR(p->id)) {
1153 		ret = PTR_ERR(p->id);
1154 		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
1155 		goto err_id;
1156 	}
1157 
1158 	ret = ipoib_cm_modify_tx_init(p->dev, p->id,  p->qp);
1159 	if (ret) {
1160 		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
1161 		goto err_modify_send;
1162 	}
1163 
1164 	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
1165 	if (ret) {
1166 		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
1167 		goto err_modify_send;
1168 	}
1169 
1170 	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
1171 		  p->qp->qp_num, pathrec->dgid.raw, qpn);
1172 
1173 	return 0;
1174 
1175 err_modify_send:
1176 	ib_destroy_cm_id(p->id);
1177 err_id:
1178 	p->id = NULL;
1179 	ib_destroy_qp(p->qp);
1180 err_qp:
1181 	p->qp = NULL;
1182 	vfree(p->tx_ring);
1183 err_tx:
1184 	return ret;
1185 }
1186 
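/*
 * Destroy a TX connection: wait up to five seconds for outstanding sends
 * to complete, force-complete whatever remains, then free the QP, CM ID
 * and TX ring.
 */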
1187 static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
1188 {
1189 	struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
1190 	struct ipoib_tx_buf *tx_req;
1191 	unsigned long begin;
1192 
1193 	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
1194 		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);
1195 
1196 	if (p->id)
1197 		ib_destroy_cm_id(p->id);
1198 
1199 	if (p->tx_ring) {
1200 		/* Wait for all sends to complete */
1201 		begin = jiffies;
1202 		while ((int) p->tx_tail - (int) p->tx_head < 0) {
1203 			if (time_after(jiffies, begin + 5 * HZ)) {
1204 				ipoib_warn(priv, "timing out; %d sends not completed\n",
1205 					   p->tx_head - p->tx_tail);
1206 				goto timeout;
1207 			}
1208 
1209 			msleep(1);
1210 		}
1211 	}
1212 
1213 timeout:
1214 
1215 	while ((int) p->tx_tail - (int) p->tx_head < 0) {
1216 		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
1217 		ipoib_dma_unmap_tx(priv, tx_req);
1218 		dev_kfree_skb_any(tx_req->skb);
1219 		++p->tx_tail;
1220 		netif_tx_lock_bh(p->dev);
1221 		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
1222 		    netif_queue_stopped(p->dev) &&
1223 		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
1224 			netif_wake_queue(p->dev);
1225 		netif_tx_unlock_bh(p->dev);
1226 	}
1227 
1228 	if (p->qp)
1229 		ib_destroy_qp(p->qp);
1230 
1231 	vfree(p->tx_ring);
1232 	kfree(p);
1233 }
1234 
1235 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
1236 			       struct ib_cm_event *event)
1237 {
1238 	struct ipoib_cm_tx *tx = cm_id->context;
1239 	struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
1240 	struct net_device *dev = priv->dev;
1241 	struct ipoib_neigh *neigh;
1242 	unsigned long flags;
1243 	int ret;
1244 
1245 	switch (event->event) {
1246 	case IB_CM_DREQ_RECEIVED:
1247 		ipoib_dbg(priv, "DREQ received.\n");
1248 		ib_send_cm_drep(cm_id, NULL, 0);
1249 		break;
1250 	case IB_CM_REP_RECEIVED:
1251 		ipoib_dbg(priv, "REP received.\n");
1252 		ret = ipoib_cm_rep_handler(cm_id, event);
1253 		if (ret)
1254 			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
1255 				       NULL, 0, NULL, 0);
1256 		break;
1257 	case IB_CM_REQ_ERROR:
1258 	case IB_CM_REJ_RECEIVED:
1259 	case IB_CM_TIMEWAIT_EXIT:
1260 		ipoib_dbg(priv, "CM error %d.\n", event->event);
1261 		netif_tx_lock_bh(dev);
1262 		spin_lock_irqsave(&priv->lock, flags);
1263 		neigh = tx->neigh;
1264 
1265 		if (neigh) {
1266 			neigh->cm = NULL;
1267 			ipoib_neigh_free(neigh);
1268 
1269 			tx->neigh = NULL;
1270 		}
1271 
1272 		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
1273 			list_move(&tx->list, &priv->cm.reap_list);
1274 			queue_work(priv->wq, &priv->cm.reap_task);
1275 		}
1276 
1277 		spin_unlock_irqrestore(&priv->lock, flags);
1278 		netif_tx_unlock_bh(dev);
1279 		break;
1280 	default:
1281 		break;
1282 	}
1283 
1284 	return 0;
1285 }
1286 
1287 struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
1288 				       struct ipoib_neigh *neigh)
1289 {
1290 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1291 	struct ipoib_cm_tx *tx;
1292 
1293 	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
1294 	if (!tx)
1295 		return NULL;
1296 
1297 	neigh->cm = tx;
1298 	tx->neigh = neigh;
1299 	tx->path = path;
1300 	tx->dev = dev;
1301 	list_add(&tx->list, &priv->cm.start_list);
1302 	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
1303 	queue_work(priv->wq, &priv->cm.start_task);
1304 	return tx;
1305 }
1306 
1307 void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
1308 {
1309 	struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
1310 	unsigned long flags;
1311 	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
1312 		spin_lock_irqsave(&priv->lock, flags);
1313 		list_move(&tx->list, &priv->cm.reap_list);
1314 		queue_work(priv->wq, &priv->cm.reap_task);
1315 		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
1316 			  tx->neigh->daddr + 4);
1317 		tx->neigh = NULL;
1318 		spin_unlock_irqrestore(&priv->lock, flags);
1319 	}
1320 }
1321 
1322 #define QPN_AND_OPTIONS_OFFSET	4
1323 
1324 static void ipoib_cm_tx_start(struct work_struct *work)
1325 {
1326 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1327 						   cm.start_task);
1328 	struct net_device *dev = priv->dev;
1329 	struct ipoib_neigh *neigh;
1330 	struct ipoib_cm_tx *p;
1331 	unsigned long flags;
1332 	struct ipoib_path *path;
1333 	int ret;
1334 
1335 	struct sa_path_rec pathrec;
1336 	u32 qpn;
1337 
1338 	netif_tx_lock_bh(dev);
1339 	spin_lock_irqsave(&priv->lock, flags);
1340 
1341 	while (!list_empty(&priv->cm.start_list)) {
1342 		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
1343 		list_del_init(&p->list);
1344 		neigh = p->neigh;
1345 
1346 		qpn = IPOIB_QPN(neigh->daddr);
1347 		/*
1348 		 * As long as the search is done under these two locks,
1349 		 * the path's existence guarantees that it is valid.
1350 		 */
1351 		path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
1352 		if (!path) {
1353 			pr_info("%s: ignoring invalid path %pI6\n",
1354 				__func__,
1355 				neigh->daddr + QPN_AND_OPTIONS_OFFSET);
1356 			goto free_neigh;
1357 		}
1358 		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
1359 
1360 		spin_unlock_irqrestore(&priv->lock, flags);
1361 		netif_tx_unlock_bh(dev);
1362 
1363 		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
1364 
1365 		netif_tx_lock_bh(dev);
1366 		spin_lock_irqsave(&priv->lock, flags);
1367 
1368 		if (ret) {
1369 free_neigh:
1370 			neigh = p->neigh;
1371 			if (neigh) {
1372 				neigh->cm = NULL;
1373 				ipoib_neigh_free(neigh);
1374 			}
1375 			list_del(&p->list);
1376 			kfree(p);
1377 		}
1378 	}
1379 
1380 	spin_unlock_irqrestore(&priv->lock, flags);
1381 	netif_tx_unlock_bh(dev);
1382 }
1383 
1384 static void ipoib_cm_tx_reap(struct work_struct *work)
1385 {
1386 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1387 						   cm.reap_task);
1388 	struct net_device *dev = priv->dev;
1389 	struct ipoib_cm_tx *p;
1390 	unsigned long flags;
1391 
1392 	netif_tx_lock_bh(dev);
1393 	spin_lock_irqsave(&priv->lock, flags);
1394 
1395 	while (!list_empty(&priv->cm.reap_list)) {
1396 		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
1397 		list_del_init(&p->list);
1398 		spin_unlock_irqrestore(&priv->lock, flags);
1399 		netif_tx_unlock_bh(dev);
1400 		ipoib_cm_tx_destroy(p);
1401 		netif_tx_lock_bh(dev);
1402 		spin_lock_irqsave(&priv->lock, flags);
1403 	}
1404 
1405 	spin_unlock_irqrestore(&priv->lock, flags);
1406 	netif_tx_unlock_bh(dev);
1407 }
1408 
1409 static void ipoib_cm_skb_reap(struct work_struct *work)
1410 {
1411 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1412 						   cm.skb_task);
1413 	struct net_device *dev = priv->dev;
1414 	struct sk_buff *skb;
1415 	unsigned long flags;
1416 	unsigned mtu = priv->mcast_mtu;
1417 
1418 	netif_tx_lock_bh(dev);
1419 	spin_lock_irqsave(&priv->lock, flags);
1420 
1421 	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
1422 		spin_unlock_irqrestore(&priv->lock, flags);
1423 		netif_tx_unlock_bh(dev);
1424 
1425 		if (skb->protocol == htons(ETH_P_IP))
1426 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1427 #if IS_ENABLED(CONFIG_IPV6)
1428 		else if (skb->protocol == htons(ETH_P_IPV6))
1429 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1430 #endif
1431 		dev_kfree_skb_any(skb);
1432 
1433 		netif_tx_lock_bh(dev);
1434 		spin_lock_irqsave(&priv->lock, flags);
1435 	}
1436 
1437 	spin_unlock_irqrestore(&priv->lock, flags);
1438 	netif_tx_unlock_bh(dev);
1439 }
1440 
1441 void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
1442 			   unsigned int mtu)
1443 {
1444 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1445 	int e = skb_queue_empty(&priv->cm.skb_queue);
1446 
1447 	if (skb_dst(skb))
1448 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1449 
1450 	skb_queue_tail(&priv->cm.skb_queue, skb);
1451 	if (e)
1452 		queue_work(priv->wq, &priv->cm.skb_task);
1453 }
1454 
1455 static void ipoib_cm_rx_reap(struct work_struct *work)
1456 {
1457 	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
1458 						cm.rx_reap_task)->dev);
1459 }
1460 
1461 static void ipoib_cm_stale_task(struct work_struct *work)
1462 {
1463 	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1464 						   cm.stale_task.work);
1465 	struct ipoib_cm_rx *p;
1466 	int ret;
1467 
1468 	spin_lock_irq(&priv->lock);
1469 	while (!list_empty(&priv->cm.passive_ids)) {
1470 		/* List is sorted by LRU, start from tail,
1471 		 * stop when we see a recently used entry */
1472 		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
1473 		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
1474 			break;
1475 		list_move(&p->list, &priv->cm.rx_error_list);
1476 		p->state = IPOIB_CM_RX_ERROR;
1477 		spin_unlock_irq(&priv->lock);
1478 		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
1479 		if (ret)
1480 			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
1481 		spin_lock_irq(&priv->lock);
1482 	}
1483 
1484 	if (!list_empty(&priv->cm.passive_ids))
1485 		queue_delayed_work(priv->wq,
1486 				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
1487 	spin_unlock_irq(&priv->lock);
1488 }
1489 
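/*
 * sysfs "mode" attribute.  For example, assuming the interface is named
 * ib0, the mode can be read and switched from userspace with:
 *
 *	cat /sys/class/net/ib0/mode
 *	echo connected > /sys/class/net/ib0/mode
 */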
1490 static ssize_t show_mode(struct device *d, struct device_attribute *attr,
1491 			 char *buf)
1492 {
1493 	struct net_device *dev = to_net_dev(d);
1494 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1495 
1496 	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
1497 		return sprintf(buf, "connected\n");
1498 	else
1499 		return sprintf(buf, "datagram\n");
1500 }
1501 
1502 static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1503 			const char *buf, size_t count)
1504 {
1505 	struct net_device *dev = to_net_dev(d);
1506 	int ret;
1507 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1508 
1509 	if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
1510 		return -EPERM;
1511 
1512 	if (!rtnl_trylock())
1513 		return restart_syscall();
1514 
1515 	ret = ipoib_set_mode(dev, buf);
1516 
1517 	/* ipoib_set_mode() is expected to return with the rtnl lock still
1518 	 * held; if it returned -EBUSY it has already dropped the lock, so
1519 	 * there is no need to call rtnl_unlock() in that case.
1520 	 */
1521 	if (ret != -EBUSY)
1522 		rtnl_unlock();
1523 
1524 	return (!ret || ret == -EBUSY) ? count : ret;
1525 }
1526 
1527 static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
1528 
1529 int ipoib_cm_add_mode_attr(struct net_device *dev)
1530 {
1531 	return device_create_file(&dev->dev, &dev_attr_mode);
1532 }
1533 
1534 static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
1535 {
1536 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1537 	struct ib_srq_init_attr srq_init_attr = {
1538 		.srq_type = IB_SRQT_BASIC,
1539 		.attr = {
1540 			.max_wr  = ipoib_recvq_size,
1541 			.max_sge = max_sge
1542 		}
1543 	};
1544 
1545 	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
1546 	if (IS_ERR(priv->cm.srq)) {
1547 		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
1548 			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
1549 			       priv->ca->name, PTR_ERR(priv->cm.srq));
1550 		priv->cm.srq = NULL;
1551 		return;
1552 	}
1553 
1554 	priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
1555 	if (!priv->cm.srq_ring) {
1556 		ib_destroy_srq(priv->cm.srq);
1557 		priv->cm.srq = NULL;
1558 		return;
1559 	}
1560 
1561 }
1562 
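/*
 * Per-device connected-mode setup: initialise the CM lists and work items,
 * create an SRQ if the HCA supports one (sizing max_cm_mtu from the SRQ
 * SGE limit), pre-post the SRQ receive ring, and mark the hardware address
 * with IPOIB_FLAGS_RC.
 */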
1563 int ipoib_cm_dev_init(struct net_device *dev)
1564 {
1565 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1566 	int max_srq_sge, i;
1567 
1568 	INIT_LIST_HEAD(&priv->cm.passive_ids);
1569 	INIT_LIST_HEAD(&priv->cm.reap_list);
1570 	INIT_LIST_HEAD(&priv->cm.start_list);
1571 	INIT_LIST_HEAD(&priv->cm.rx_error_list);
1572 	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
1573 	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
1574 	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
1575 	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
1576 	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
1577 	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
1578 	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
1579 	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);
1580 
1581 	skb_queue_head_init(&priv->cm.skb_queue);
1582 
1583 	ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge);
1584 
1585 	max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge);
1586 	ipoib_cm_create_srq(dev, max_srq_sge);
1587 	if (ipoib_cm_has_srq(dev)) {
1588 		priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
1589 		priv->cm.num_frags  = max_srq_sge;
1590 		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
1591 			  priv->cm.max_cm_mtu, priv->cm.num_frags);
1592 	} else {
1593 		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
1594 		priv->cm.num_frags  = IPOIB_CM_RX_SG;
1595 	}
1596 
1597 	ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);
1598 
1599 	if (ipoib_cm_has_srq(dev)) {
1600 		for (i = 0; i < ipoib_recvq_size; ++i) {
1601 			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
1602 						   priv->cm.num_frags - 1,
1603 						   priv->cm.srq_ring[i].mapping,
1604 						   GFP_KERNEL)) {
1605 				ipoib_warn(priv, "failed to allocate "
1606 					   "receive buffer %d\n", i);
1607 				ipoib_cm_dev_cleanup(dev);
1608 				return -ENOMEM;
1609 			}
1610 
1611 			if (ipoib_cm_post_receive_srq(dev, i)) {
1612 				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
1613 					   "failed for buf %d\n", i);
1614 				ipoib_cm_dev_cleanup(dev);
1615 				return -EIO;
1616 			}
1617 		}
1618 	}
1619 
1620 	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
1621 	return 0;
1622 }
1623 
1624 void ipoib_cm_dev_cleanup(struct net_device *dev)
1625 {
1626 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
1627 	int ret;
1628 
1629 	if (!priv->cm.srq)
1630 		return;
1631 
1632 	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
1633 
1634 	ret = ib_destroy_srq(priv->cm.srq);
1635 	if (ret)
1636 		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);
1637 
1638 	priv->cm.srq = NULL;
1639 	if (!priv->cm.srq_ring)
1640 		return;
1641 
1642 	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
1643 	priv->cm.srq_ring = NULL;
1644 }
1645