/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

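/*
 * Allocate an ipoib_ah and create the underlying IB address handle.
 * On failure the ERR_PTR from rdma_create_ah() is propagated, so
 * callers must check the result with IS_ERR().
 */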
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct rdma_ah_attr *attr)
{
	struct ipoib_ah *ah;
	struct ib_ah *vah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->dev       = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	vah = rdma_create_ah(pd, attr);
	if (IS_ERR(vah)) {
		kfree(ah);
		ah = (struct ipoib_ah *)vah;
	} else {
		ah->ah = vah;
		ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah);
	}

	return ah;
}

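/*
 * kref release callback.  The address handle is not destroyed here,
 * since a posted send may still reference it; instead it is queued on
 * dead_ahs and destroyed by the AH reaper once its last send has
 * completed.
 */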
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	ib_dma_unmap_single(priv->ca, mapping[0],
			    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
			    DMA_FROM_DEVICE);
}

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id   = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
	if (unlikely(!skb))
		return NULL;

	/*
	 * The IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, which
	 * is 64-byte aligned.
	 */
	skb_reserve(skb, sizeof(struct ipoib_pseudo_header));

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	priv->rx_ring[id].skb = skb;
	return skb;
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

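/*
 * Handle one UD receive completion: validate the work request id and
 * status, swap in a freshly allocated receive skb (dropping the packet
 * and reusing the old buffer if allocation fails), strip the GRH, and
 * hand the packet to the stack before reposting the receive buffer.
 */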
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;
	union ib_gid *sgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb  = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv,
				   "failed recv event (status=%d, wrid=%d vend_err %#x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);

	skb_put(skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	sgid = &((struct ib_grh *)skb->data)->sgid;

	/*
	 * Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
		int need_repost = 1;

		if ((wc->wc_flags & IB_WC_GRH) &&
		    sgid->global.interface_id != priv->local_gid.global.interface_id)
			need_repost = 0;

		if (need_repost) {
			dev_kfree_skb_any(skb);
			goto repost;
		}
	}

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_add_pseudo_hdr(skb);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		dev->stats.multicast++;

	skb->dev = dev;
	if ((dev->features & NETIF_F_RXCSUM) &&
			likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(&priv->recv_napi, skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n",
			   wr_id);
}

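/*
 * DMA-map the linear part of the skb (if any), then each page
 * fragment.  If any mapping fails, everything mapped so far is
 * unwound and -EIO is returned.
 */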
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca,
						 skb_frag_page(frag),
						 frag->page_offset, skb_frag_size(frag),
						 DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
			struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
				    DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(priv->ca, mapping[i + off],
				  skb_frag_size(frag), DMA_TO_DEVICE);
	}
}

/*
 * As a result of a completion error the QP can be transitioned to the
 * SQE state.  This function checks whether the (send) QP is in SQE
 * state and, if so, moves it back to RTS so that it is functional
 * again.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
	struct ipoib_qp_state_validate *qp_work =
		container_of(work, struct ipoib_qp_state_validate, work);

	struct ipoib_dev_priv *priv = qp_work->priv;
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
			   __func__, ret);
		goto free_res;
	}
	pr_info("%s: QP: 0x%x is in state: %d\n",
		__func__, priv->qp->qp_num, qp_attr.qp_state);

	/* currently we only support the SQE->RTS transition */
	if (qp_attr.qp_state == IB_QPS_SQE) {
		qp_attr.qp_state = IB_QPS_RTS;

		ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
		if (ret) {
			pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
				ret, priv->qp->qp_num);
			goto free_res;
		}
		pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
			__func__, priv->qp->qp_num);
	} else {
		pr_warn("QP (%d) will stay in state: %d\n",
			priv->qp->qp_num, qp_attr.qp_state);
	}

free_res:
	kfree(qp_work);
}

static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;

	if (unlikely(netif_queue_stopped(dev) &&
		     ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) &&
		     test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_qp_state_validate *qp_work;

		ipoib_warn(priv,
			   "failed send event (status=%d, wrid=%d vend_err %#x)\n",
			   wc->status, wr_id, wc->vendor_err);
		qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
		if (!qp_work)
			return;

		INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
		qp_work->priv = priv;
		queue_work(priv->wq, &qp_work->work);
	}
}

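/*
 * Drain up to MAX_SEND_CQE send completions.  Returns nonzero when the
 * poll array was filled completely, i.e. more completions may still be
 * pending and the caller should poll again.
 */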
static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;
	struct ib_wc *wc;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i) {
		wc = priv->send_wc + i;
		if (wc->wr_id & IPOIB_OP_CM)
			ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
		else
			ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
	}
	return n == MAX_SEND_CQE;
}

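/*
 * NAPI receive poll: process up to @budget receive completions.  When
 * the budget is not exhausted, completion notification is re-armed;
 * if events were missed in the meantime, polling resumes.
 */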
int ipoib_rx_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv =
		container_of(napi, struct ipoib_dev_priv, recv_napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done  = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else {
				pr_warn("%s: Got unexpected wqe id\n", __func__);
			}
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}

int ipoib_tx_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv,
						   send_napi);
	struct net_device *dev = priv->dev;
	int n, i;
	struct ib_wc *wc;

poll_more:
	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);

	for (i = 0; i < n; i++) {
		wc = priv->send_wc + i;
		if (wc->wr_id & IPOIB_OP_CM)
			ipoib_cm_handle_tx_wc(dev, wc);
		else
			ipoib_ib_handle_tx_wc(dev, wc);
	}

	if (n < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}
	return n < 0 ? 0 : n;
}

void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr)
{
	struct ipoib_dev_priv *priv = ctx_ptr;

	napi_schedule(&priv->recv_napi);
}

void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr)
{
	struct ipoib_dev_priv *priv = ctx_ptr;

	napi_schedule(&priv->send_napi);
}

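/*
 * Build the gather list for @tx_req and post it as a UD send.  When a
 * GSO header is passed in via @head/@hlen, the work request is posted
 * as IB_WR_LSO so the HCA performs the segmentation.
 */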
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 dqpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	struct sk_buff *skb = tx_req->skb;

	ipoib_build_sge(priv, tx_req);

	priv->tx_wr.wr.wr_id	= wr_id;
	priv->tx_wr.remote_qpn	= dqpn;
	priv->tx_wr.ah		= address;

	if (head) {
		priv->tx_wr.mss		= skb_shinfo(skb)->gso_size;
		priv->tx_wr.header	= head;
		priv->tx_wr.hlen	= hlen;
		priv->tx_wr.wr.opcode	= IB_WR_LSO;
	} else
		priv->tx_wr.wr.opcode	= IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
}

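/*
 * Queue one skb for transmission.  GSO skbs are posted as LSO work
 * requests; oversized non-GSO skbs are dropped.  On success the
 * pre-increment tx_head is returned so the caller can record it
 * (e.g. as the AH's last_send); on a drop the skb is freed.
 */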
int ipoib_send(struct net_device *dev, struct sk_buff *skb,
	       struct ib_ah *address, u32 dqpn)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen, rc;
	void *phead;
	unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return -1;
		}
		phead = NULL;
		hlen  = 0;
	}
	if (skb_shinfo(skb)->nr_frags > usable_sge) {
		if (skb_linearize(skb) < 0) {
			ipoib_warn(priv, "skb could not be linearized\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
		/* Does skb_linearize return ok without reducing nr_frags? */
		if (skb_shinfo(skb)->nr_frags > usable_sge) {
			ipoib_warn(priv, "too many frags after skb linearize\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
	}

	ipoib_dbg_data(priv,
		       "sending packet, length=%d address=%p dqpn=0x%06x\n",
		       skb->len, address, dqpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
	/* tx_head is only advanced after a successful post, but it is
	 * used here to detect a full ring and stop the queue beforehand.
	 */
	if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		netif_stop_queue(dev);
	}

	skb_orphan(skb);
	skb_dst_drop(skb);

	if (netif_queue_stopped(dev))
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
				     IB_CQ_REPORT_MISSED_EVENTS) < 0)
			ipoib_warn(priv, "request notify on send CQ failed\n");

	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
		       address, dqpn, tx_req, phead, hlen);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		rc = 0;
	} else {
		netif_trans_update(dev);

		rc = priv->tx_head;
		++priv->tx_head;
	}
	return rc;
}

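/*
 * Destroy every AH on the dead_ahs list whose last_send has completed
 * (i.e. tx_tail has caught up with it).  The signed subtraction copes
 * with wraparound of the counters.
 */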
static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			rdma_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(priv->wq, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_flush_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	cancel_delayed_work(&priv->ah_reap_task);
	flush_workqueue(priv->wq);
	ipoib_reap_ah(&priv->ah_reap_task.work);
}

static void ipoib_stop_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	ipoib_flush_ah(dev);
}

static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
					struct ib_qp *qp,
					enum ib_qp_state new_state)
{
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
		return;
	}
	/* print according to the new state and the previous state */
	if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
		ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
	else
		ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
			   new_state, qp_attr.qp_state);
}

static void ipoib_napi_enable(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	napi_enable(&priv->recv_napi);
	napi_enable(&priv->send_napi);
}

static void ipoib_napi_disable(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	napi_disable(&priv->recv_napi);
	napi_disable(&priv->send_napi);
}

int ipoib_ib_dev_stop_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		ipoib_napi_disable(dev);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize it once
	 * all work requests have completed or been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv,
				   "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail,
				   recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		usleep_range(1000, 2000);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	priv->rn_ops->ndo_stop(dev);

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
	ipoib_flush_ah(dev);

	return 0;
}

int ipoib_ib_dev_open_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		goto out;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		goto out;
	}

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		ipoib_napi_enable(dev);

	return 0;
out:
	return -1;
}

int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
			   (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(priv->wq, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	if (priv->rn_ops->ndo_open(dev)) {
		pr_warn("%s: Failed to open dev\n", dev->name);
		goto dev_stop;
	}

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;

dev_stop:
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
	ipoib_ib_dev_stop(dev);
	return -1;
}

void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);

	if (!(priv->pkey & 0x7fff) ||
	    ib_find_pkey(priv->ca, priv->port, priv->pkey,
			 &priv->pkey_index)) {
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	} else {
		if (rn->set_id)
			rn->set_id(dev, priv->pkey_index);
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	}
}

void ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	ipoib_mcast_start_thread(dev);
}

void ipoib_ib_dev_down(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);
}

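/*
 * Poll the receive CQ to exhaustion and then drain all pending send
 * completions.  Used while tearing the device down, so successful
 * receive completions are demoted to flush errors (see below).
 */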
void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else {
				pr_warn("%s: Got unexpected wqe id\n", __func__);
			}
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}

/*
 * Takes whatever value is in pkey index 0 and updates priv->pkey.
 * Returns 0 if the pkey value was changed.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
	int result;
	u16 prev_pkey;

	prev_pkey = priv->pkey;
	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
	if (result) {
		ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
			   priv->port, result);
		return result;
	}

	priv->pkey |= 0x8000;

	if (prev_pkey != priv->pkey) {
		ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
			  prev_pkey, priv->pkey);
		/*
		 * Update the pkey in the broadcast address, while making sure to set
		 * the full membership bit, so that we join the right broadcast group.
		 */
		priv->dev->broadcast[8] = priv->pkey >> 8;
		priv->dev->broadcast[9] = priv->pkey & 0xff;
		return 0;
	}

	return 1;
}

/*
 * Returns 0 if the pkey value was found in a different slot.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
	u16 old_index = priv->pkey_index;

	priv->pkey_index = 0;
	ipoib_pkey_dev_check_presence(priv->dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
	    (old_index == priv->pkey_index))
		return 1;
	return 0;
}

/*
 * Returns true if the device address of the ipoib interface has changed
 * and the new address is a valid one (i.e. it is in the GID table);
 * returns false otherwise.
 */
static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
	union ib_gid search_gid;
	union ib_gid gid0;
	union ib_gid *netdev_gid;
	int err;
	u16 index;
	u8 port;
	bool ret = false;

	netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
	if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
		return false;

	netif_addr_lock_bh(priv->dev);

	/* The subnet prefix may have changed; update it now so we won't
	 * have to do it later
	 */
	priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
	netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
	search_gid.global.subnet_prefix = gid0.global.subnet_prefix;

	search_gid.global.interface_id = priv->local_gid.global.interface_id;

	netif_addr_unlock_bh(priv->dev);

	err = ib_find_gid(priv->ca, &search_gid, priv->dev, &port, &index);

	netif_addr_lock_bh(priv->dev);

	if (search_gid.global.interface_id !=
	    priv->local_gid.global.interface_id)
		/* There was a change while we were looking up the gid; bail
		 * out here and let the next work iteration sort it out
		 */
		goto out;

	/* The next section of code needs some background:
	 * Per the IB spec the port GUID can't change if the HCA is powered
	 * on.  The port GUID is the basis for the GID at index 0, which is
	 * the basis for the default device address of an ipoib interface.
	 *
	 * So it seems the flow should be:
	 * if user_changed_dev_addr && gid in gid tbl
	 *	set bit dev_addr_set
	 *	return true
	 * else
	 *	return false
	 *
	 * The issue is that there are devices that don't follow the spec
	 * and change the port GUID when powered, so in order not to break
	 * userspace applications we need to check whether the user wanted
	 * to control the device address, and we assume that if the user
	 * sets the device address back to be based on GID index 0, they
	 * no longer wish to control it.
	 *
	 * If the user doesn't control the device address,
	 * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed, it means
	 * the port GUID has changed and the GID at index 0 has changed as
	 * well, so we need to change priv->local_gid and
	 * priv->dev->dev_addr to reflect the new GID.
	 */
	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
		if (!err && port == priv->port) {
			set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
			if (index == 0)
				clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
					  &priv->flags);
			else
				set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
			ret = true;
		} else {
			ret = false;
		}
	} else {
		if (!err && port == priv->port) {
			ret = true;
		} else {
			if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
				memcpy(&priv->local_gid, &gid0,
				       sizeof(priv->local_gid));
				memcpy(priv->dev->dev_addr + 4, &gid0,
				       sizeof(priv->local_gid));
				ret = true;
			}
		}
	}

out:
	netif_addr_unlock_bh(priv->dev);

	return ret;
}

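/*
 * Core of the flush machinery.  IPOIB_FLUSH_LIGHT revalidates the
 * device address and multicast state, IPOIB_FLUSH_NORMAL additionally
 * brings the IB device down and back up, and IPOIB_FLUSH_HEAVY also
 * restarts the QP when the P_Key has changed.  Child interfaces are
 * flushed first, since they may be up even when the parent is down.
 */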
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				enum ipoib_flush_level level,
				int nesting)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	int result;

	down_read_nested(&priv->vlan_rwsem, nesting);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level, nesting + 1);

	up_read(&priv->vlan_rwsem);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
	    level != IPOIB_FLUSH_HEAVY) {
		/* Make sure the dev_addr is set even if not flushing */
		if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		/* Interface is down; update the pkey and leave. */
		if (level == IPOIB_FLUSH_HEAVY) {
			if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
				update_parent_pkey(priv);
			else
				update_child_pkey(priv);
		} else if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		/* Child devices chase their origin pkey value, while non-child
		 * (parent) devices should always take whatever is present in
		 * pkey index 0.
		 */
		if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
			result = update_child_pkey(priv);
			if (result) {
				/* restart QP only if P_Key index is changed */
				ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
				return;
			}

		} else {
			result = update_parent_pkey(priv);
			/* restart QP only if P_Key value changed */
			if (result) {
				ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
				return;
			}
		}
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		int oper_up;

		ipoib_mark_paths_invalid(dev);
		/* Mark IPoIB operation as down to prevent races between
		 * the flush flow, which leaves the MCG, and on-the-fly joins
		 * that can happen during that time.  The mcast restart task
		 * should deal with join requests we missed.
		 */
		oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		ipoib_mcast_dev_flush(dev);
		if (oper_up)
			set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		ipoib_flush_ah(dev);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev);

	if (level == IPOIB_FLUSH_HEAVY) {
		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
			ipoib_ib_dev_stop(dev);

		if (ipoib_ib_dev_open(dev))
			return;

		if (netif_queue_stopped(dev))
			netif_start_queue(dev);
	}

	/*
	 * The device could have been brought down between the start and when
	 * we get here; don't bring it back up if it's not configured up.
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);
		if (ipoib_dev_addr_changed_valid(priv))
			ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	rtnl_lock();
	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
	rtnl_unlock();
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");
	/*
	 * We must make sure there are no more (path) completions
	 * that may wish to touch priv fields that are no longer valid
	 */
	ipoib_flush_paths(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	/*
	 * None of our AH references are freed until after
	 * ipoib_mcast_dev_flush(), ipoib_flush_paths(), and the neighbor
	 * garbage collection have stopped and been reaped.  That should
	 * all be done by now, so make a final AH flush.
	 */
	ipoib_stop_ah(dev);

	clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	priv->rn_ops->ndo_uninit(dev);

	if (priv->pd) {
		ib_dealloc_pd(priv->pd);
		priv->pd = NULL;
	}
}