/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>

#include "ipoib.h"

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL
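/*
 * Connected-mode service IDs are formed by OR-ing the datagram QPN into
 * the low bits of this IETF prefix: we listen on, and connect to,
 * IPOIB_CM_IETF_ID | QPN (see ipoib_cm_dev_open() and ipoib_cm_send_req();
 * RFC 4755 describes the connected-mode protocol).
 */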

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
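/*
 * The stale task retires a passive (RX) connection once it has seen no
 * traffic for IPOIB_CM_RX_TIMEOUT.  To keep the hot path cheap, the
 * timestamp is refreshed on only one receive in four (wr_ids whose low
 * two bits, IPOIB_CM_RX_UPDATE_MASK, are clear), and then only if
 * IPOIB_CM_RX_UPDATE_TIME has elapsed.
 */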

static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

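/*
 * Marker WR used to drain the receive CQ when tearing down RX QPs.
 * It is posted on a QP that is already in the error state, so it
 * completes as a flush error; because RX QPs use the receive CQ as
 * their send CQ, seeing IPOIB_CM_RX_DRAIN_WRID complete means every
 * WC queued before it has already been polled (see
 * ipoib_cm_start_rx_drain() and ipoib_cm_handle_rx_wc()).
 */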
static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
	.opcode = IB_WR_SEND,
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

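/*
 * CM receive buffers are an IPOIB_CM_HEAD_SIZE linear part plus page
 * fragments: mapping[0] is the DMA address of the linear head, and
 * mapping[1..frags] are the DMA addresses of the attached pages.
 */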
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}

static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		sge[i].addr = rx->rx_ring[id].mapping[i];

	ret = ib_post_recv(rx->qp, wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      rx->rx_ring[id].mapping);
		dev_kfree_skb_any(rx->rx_ring[id].skb);
		rx->rx_ring[id].skb = NULL;
	}

	return ret;
}

static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG],
					     gfp_t gfp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(gfp);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	rx_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i > 0; --i)
		ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}

static void ipoib_cm_free_rx_ring(struct net_device *dev,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      rx_ring[i].mapping);
			dev_kfree_skb_any(rx_ring[i].skb);
		}

	vfree(rx_ring);
}

static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on the flush list are in the error state.  This way, a
	 * "flush error" WC will be immediately generated for each WR
	 * we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1, /* FIXME: 0 seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(dev)) {
		attr.cap.max_recv_wr  = ipoib_recvq_size;
		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
	}

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}

static void ipoib_cm_init_rx_wr(struct net_device *dev,
				struct ib_recv_wr *wr,
				struct ib_sge *sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->cm.num_frags; ++i)
		sge[i].lkey = priv->mr->lkey;

	sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < priv->cm.num_frags; ++i)
		sge[i].length = PAGE_SIZE;

	wr->next    = NULL;
	wr->sg_list = sge;
	wr->num_sge = priv->cm.num_frags;
}

static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
				   struct ipoib_cm_rx *rx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
	if (!rx->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		return -ENOMEM;
	}

	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_free;
	}

	ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
					   rx->rx_ring[i].mapping,
					   GFP_KERNEL)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	kfree(t);

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	kfree(t);
	ipoib_cm_free_rx_ring(dev, rx->rx_ring);

	return ret;
}

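/*
 * The REQ/REP private data (struct ipoib_cm_data) advertises our datagram
 * QPN and the receive buffer size we can accept, so the peer can cap the
 * packets it sends on this connection.
 */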
static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(dev);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}

static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = prandom_u32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(dev)) {
		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(priv->wq,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to the passive_ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to the flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}

/*
 * Adjust the length of an skb with page fragments to match the received
 * data.  Pages the payload did not reach are handed over to "toskb" (the
 * freshly allocated replacement receive buffer), so the replacement only
 * needs as many new pages as the data actually consumed.
 */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, skb_frag_page(frag),
					   0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			skb_frag_size_set(frag, size);
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}

void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;
	int has_srq;
	struct sk_buff *small_skb;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(priv->wq, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(dev);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	skb = rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
		++dev->stats.rx_dropped;
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock_irqsave(&priv->lock, flags);
				list_move(&p->list, &priv->cm.rx_reap_list);
				spin_unlock_irqrestore(&priv->lock, flags);
				queue_work(priv->wq, &priv->cm.rx_reap_task);
			}
			return;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to the list head, but do not re-add
			 * it if it has been taken off the list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

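	/*
	 * Copybreak: for small packets it is cheaper to copy the payload
	 * into a fresh small skb and repost the original receive buffer
	 * untouched than to take the page-recycling path below.
	 */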
	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
		int dlen = wc->byte_len;

		small_skb = dev_alloc_skb(dlen + 12);
		if (small_skb) {
			skb_reserve(small_skb, 12);
			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
						   dlen, DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, small_skb->data, dlen);
			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
						      dlen, DMA_FROM_DEVICE);
			skb_put(small_skb, dlen);
			skb = small_skb;
			goto copied;
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
				       mapping, GFP_ATOMIC);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

copied:
	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
							  &priv->cm.rx_wr,
							  priv->cm.rx_sge,
							  wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
}

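/*
 * Post one send WR for a connected-mode skb.  The wr_id carries the
 * tx ring index tagged with IPOIB_OP_CM; callers compute the index as
 * tx_head & (ipoib_sendq_size - 1), which assumes ipoib_sendq_size is
 * rounded up to a power of two when the module parameters are parsed.
 */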
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge[0].addr          = addr;
	priv->tx_sge[0].length        = len;

	priv->tx_wr.num_sge	= 1;
	priv->tx_wr.wr_id	= wr_id | IPOIB_OP_CM;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx_buf *tx_req;
	u64 addr;
	int rc;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping = addr;

	skb_orphan(skb);
	skb_dst_drop(skb);

	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
		       addr, skb->len);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
			rc = ib_req_notify_cq(priv->send_cq,
				IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
			if (rc < 0)
				ipoib_warn(priv, "request notify on send CQ failed\n");
			else if (rc)
				ipoib_send_comp_handler(priv->send_cq, dev);
		}
	}
}

void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	netif_tx_lock(dev);

	++tx->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);

		spin_lock_irqsave(&priv->lock, flags);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			ipoib_neigh_free(neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(priv->wq, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock_irqrestore(&priv->lock, flags);
	}

	netif_tx_unlock(dev);
}

int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
	return ret;
}

static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(dev)) {
			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}

void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		ipoib_drain_cq(dev);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(dev);

	cancel_delayed_work(&priv->cm.stale_task);
}

static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

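/*
 * Create the send-side RC QP.  Connection setup can run on the memory
 * reclaim path (e.g. when paging over IPoIB), so first try
 * IB_QP_CREATE_USE_GFP_NOIO and fall back to a normal allocation on
 * devices that reject that flag.
 */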
static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.send_cq		= priv->recv_cq,
		.recv_cq		= priv->recv_cq,
		.srq			= priv->cm.srq,
		.cap.max_send_wr	= ipoib_sendq_size,
		.cap.max_send_sge	= 1,
		.sq_sig_type		= IB_SIGNAL_ALL_WR,
		.qp_type		= IB_QPT_RC,
		.qp_context		= tx,
		.create_flags		= IB_QP_CREATE_USE_GFP_NOIO
	};

	struct ib_qp *tx_qp;

	tx_qp = ib_create_qp(priv->pd, &attr);
	if (PTR_ERR(tx_qp) == -EINVAL) {
		ipoib_warn(priv, "can't use GFP_NOIO for QPs on device %s, using GFP_KERNEL\n",
			   priv->ca->name);
		attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
		tx_qp = ib_create_qp(priv->pd, &attr);
	}
	return tx_qp;
}

static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path		= pathrec;
	req.alternate_path		= NULL;
	req.service_id			= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num			= qp->qp_num;
	req.qp_type			= qp->qp_type;
	req.private_data		= &data;
	req.private_data_len		= sizeof data;
	req.flow_control		= 0;

	req.starting_psn		= 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources		= 4;
	req.remote_cm_response_timeout	= 20;
	req.local_cm_response_timeout	= 20;
	req.retry_count			= 0; /* RFC draft warns against retries */
	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
	req.max_cm_retries		= 15;
	req.srq				= ipoib_cm_has_srq(dev);
	return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct net_device *dev,
				  struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring,
			       GFP_NOIO, PAGE_KERNEL);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}
	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to init: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
		  p->qp->qp_num, pathrec->dgid.raw, qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
	vfree(p->tx_ring);
err_tx:
	return ret;
}

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
				    DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_req->skb);
		++p->tx_tail;
		netif_tx_lock_bh(p->dev);
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    netif_queue_stopped(p->dev) &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			netif_wake_queue(p->dev);
		netif_tx_unlock_bh(p->dev);
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	vfree(p->tx_ring);
	kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	unsigned long flags;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			ipoib_neigh_free(neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(priv->wq, &priv->cm.reap_task);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(priv->wq, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	unsigned long flags;
	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		spin_lock_irqsave(&priv->lock, flags);
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(priv->wq, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
			  tx->neigh->daddr + 4);
		tx->neigh = NULL;
		spin_unlock_irqrestore(&priv->lock, flags);
	}
}

static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->daddr);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		ret = ipoib_cm_tx_init(p, qpn, &pathrec);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);

		if (ret) {
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				ipoib_neigh_free(neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct net_device *dev = priv->dev;
	struct ipoib_cm_tx *p;
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		ipoib_cm_tx_destroy(p);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;
	unsigned long flags;
	unsigned mtu = priv->mcast_mtu;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
		dev_kfree_skb_any(skb);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(priv->wq, &priv->cm.skb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task)->dev);
}

static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* The list is sorted by LRU, so start from the tail and
		 * stop when we see a recently used entry. */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(priv->wq,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	int ret;

	if (!rtnl_trylock())
		return restart_syscall();

	ret = ipoib_set_mode(dev, buf);

	rtnl_unlock();

	if (!ret)
		return count;

	return ret;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}

static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
		return;
	}
}

int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;
	struct ib_device_attr attr;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	ret = ib_query_device(priv->ca, &attr);
	if (ret) {
		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
		return ret;
	}

	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);

	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
	ipoib_cm_create_srq(dev, attr.max_srq_sge);
	if (ipoib_cm_has_srq(dev)) {
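		/*
		 * Size the connected-mode MTU to what the SRQ can actually
		 * receive; the 0x10 matches the padding IPOIB_CM_MTU reserves
		 * to keep the header aligned.
		 */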
		priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
		priv->cm.num_frags  = attr.max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
		priv->cm.num_frags  = IPOIB_CM_RX_SG;
	}

	ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(dev)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
						   priv->cm.num_frags - 1,
						   priv->cm.srq_ring[i].mapping,
						   GFP_KERNEL)) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(dev, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -EIO;
			}
		}
	}

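	/* Advertise RC (connected mode) support in the first byte of the
	 * hardware address; peers test this via IPOIB_CM_SUPPORTED(). */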
	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}