/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct rdma_ah_attr *attr)
{
	struct ipoib_ah *ah;
	struct ib_ah *vah;

	ah = kmalloc(sizeof *ah, GFP_KERNEL);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->dev = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	vah = rdma_create_ah(pd, attr);
	if (IS_ERR(vah)) {
		kfree(ah);
		ah = (struct ipoib_ah *)vah;
	} else {
		ah->ah = vah;
		ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah);
	}

	return ah;
}

void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);

	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	ib_dma_unmap_single(priv->ca, mapping[0],
			    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
			    DMA_FROM_DEVICE);
}

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_recv_wr *bad_wr;
	int ret;

	priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
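	/*
	 * If the post failed, unwind immediately: unmap the receive buffer
	 * and free the skb so this ring slot is left empty rather than
	 * holding a dangling DMA mapping.
	 */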
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
	if (unlikely(!skb))
		return NULL;

	/*
	 * The IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, which is
	 * 64-byte aligned.
	 */
	skb_reserve(skb, sizeof(struct ipoib_pseudo_header));

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	priv->rx_ring[id].skb = skb;
	return skb;
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;
	union ib_gid *sgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed recv event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof *mapping);

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);

	skb_put(skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	sgid = &((struct ib_grh *)skb->data)->sgid;

	/*
	 * Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
		int need_repost = 1;

		if ((wc->wc_flags & IB_WC_GRH) &&
		    sgid->global.interface_id != priv->local_gid.global.interface_id)
			need_repost = 0;

		if (need_repost) {
			dev_kfree_skb_any(skb);
			goto repost;
		}
	}

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_add_pseudo_hdr(skb);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	if ((dev->features & NETIF_F_RXCSUM) &&
	    likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(&priv->napi, skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed "
			   "for buf %d\n", wr_id);
}

int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		mapping[i + off] = ib_dma_map_page(ca,
						   skb_frag_page(frag),
						   frag->page_offset, skb_frag_size(frag),
						   DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
	for (; i > 0; --i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
			struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
				    DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(priv->ca, mapping[i + off],
				  skb_frag_size(frag), DMA_TO_DEVICE);
	}
}

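/*
 * Note: QP recovery below is deferred to a work item because it is
 * triggered from the TX completion path, which runs in a context where we
 * must not block, while ib_modify_qp() may sleep (hence also the
 * GFP_ATOMIC allocation at the call site).
 */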
/*
 * As a result of a completion error the QP can be transferred to the SQE
 * state. This function checks whether the (send) QP is in the SQE state
 * and, if so, moves it back to RTS so that it is functional again.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
	struct ipoib_qp_state_validate *qp_work =
		container_of(work, struct ipoib_qp_state_validate, work);

	struct ipoib_dev_priv *priv = qp_work->priv;
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
			   __func__, ret);
		goto free_res;
	}
	pr_info("%s: QP: 0x%x is in state: %d\n",
		__func__, priv->qp->qp_num, qp_attr.qp_state);

	/* currently we only support the SQE->RTS transition */
	if (qp_attr.qp_state == IB_QPS_SQE) {
		qp_attr.qp_state = IB_QPS_RTS;

		ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
		if (ret) {
			pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
				ret, priv->qp->qp_num);
			goto free_res;
		}
		pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
			__func__, priv->qp->qp_num);
	} else {
		pr_warn("QP (%d) will stay in state: %d\n",
			priv->qp->qp_num, qp_attr.qp_state);
	}

free_res:
	kfree(qp_work);
}

static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_qp_state_validate *qp_work;
		ipoib_warn(priv, "failed send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
		qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
		if (!qp_work)
			return;

		INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
		qp_work->priv = priv;
		queue_work(priv->wq, &qp_work->work);
	}
}

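/*
 * The send CQ has no NAPI context of its own: ipoib_send_comp_handler()
 * only arms poll_timer, and send completions are reaped by poll_tx(),
 * which is called from the transmit path and from drain_tx_cq() (the
 * poll_timer function).
 */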
static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i)
		ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

	return n == MAX_SEND_CQE;
}

int ipoib_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else
				ipoib_cm_handle_tx_wc(priv->dev, wc);
		}

		if (n != t)
			break;
	}

	if (done < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}

void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	napi_schedule(&priv->napi);
}

static void drain_tx_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	netif_tx_lock(dev);
	while (poll_tx(priv))
		; /* nothing */

	if (netif_queue_stopped(dev))
		mod_timer(&priv->poll_timer, jiffies + 1);

	netif_tx_unlock(dev);
}

void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev_ptr);

	mod_timer(&priv->poll_timer, jiffies);
}

static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 dqpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct ib_send_wr *bad_wr;
	struct sk_buff *skb = tx_req->skb;

	ipoib_build_sge(priv, tx_req);

	priv->tx_wr.wr.wr_id = wr_id;
	priv->tx_wr.remote_qpn = dqpn;
	priv->tx_wr.ah = address;

	if (head) {
		priv->tx_wr.mss = skb_shinfo(skb)->gso_size;
		priv->tx_wr.header = head;
		priv->tx_wr.hlen = hlen;
		priv->tx_wr.wr.opcode = IB_WR_LSO;
	} else
		priv->tx_wr.wr.opcode = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
}

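/*
 * For GSO skbs the TCP/IP headers are pulled out of the linear data and
 * passed to post_send() separately as phead/hlen so the HCA performs LSO;
 * non-GSO skbs must fit within the multicast MTU plus the IPoIB
 * encapsulation header or they are dropped.
 */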
int ipoib_send(struct net_device *dev, struct sk_buff *skb,
	       struct ib_ah *address, u32 dqpn)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen, rc;
	void *phead;
	unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return -1;
		}
		phead = NULL;
		hlen = 0;
	}
	if (skb_shinfo(skb)->nr_frags > usable_sge) {
		if (skb_linearize(skb) < 0) {
			ipoib_warn(priv, "skb could not be linearized\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
		/* Does skb_linearize return ok without reducing nr_frags? */
		if (skb_shinfo(skb)->nr_frags > usable_sge) {
			ipoib_warn(priv, "too many frags after skb linearize\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
	}

	ipoib_dbg_data(priv,
		       "sending packet, length=%d address=%p dqpn=0x%06x\n",
		       skb->len, address, dqpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

	if (++priv->tx_outstanding == ipoib_sendq_size) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
			ipoib_warn(priv, "request notify on send CQ failed\n");
		netif_stop_queue(dev);
	}

	skb_orphan(skb);
	skb_dst_drop(skb);

	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
		       address, dqpn, tx_req, phead, hlen);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		--priv->tx_outstanding;
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		rc = 0;
	} else {
		netif_trans_update(dev);

		rc = priv->tx_head;
		++priv->tx_head;
	}

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (poll_tx(priv))
			; /* nothing */

	return rc;
}

static void __ipoib_reap_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_ah *ah, *tah;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			rdma_destroy_ah(ah->ah);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
	struct net_device *dev = priv->dev;

	__ipoib_reap_ah(dev);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(priv->wq, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_flush_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	cancel_delayed_work(&priv->ah_reap_task);
	flush_workqueue(priv->wq);
	ipoib_reap_ah(&priv->ah_reap_task.work);
}

static void ipoib_stop_ah(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	ipoib_flush_ah(dev);
}

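/*
 * ipoib_flush_ah() cancels any pending reap work, flushes the workqueue
 * and then runs one reap pass synchronously; ipoib_stop_ah() additionally
 * sets IPOIB_STOP_REAPER first so that ipoib_reap_ah() does not re-queue
 * itself afterwards.
 */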
static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

int ipoib_ib_dev_stop_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_disable(&priv->napi);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize it once all
	 * work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv,
				   "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail,
				   recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				--priv->tx_outstanding;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	del_timer_sync(&priv->poll_timer);
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	priv->rn_ops->ndo_stop(dev);

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
	ipoib_flush_ah(dev);

	return 0;
}

void ipoib_ib_tx_timer_func(unsigned long ctx)
{
	drain_tx_cq((struct net_device *)ctx);
}

int ipoib_ib_dev_open_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		goto out;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		goto out;
	}

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		napi_enable(&priv->napi);

	return 0;
out:
	return -1;
}

int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
			   (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(priv->wq, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));

	if (priv->rn_ops->ndo_open(dev)) {
		pr_warn("%s: Failed to open dev\n", dev->name);
		goto dev_stop;
	}

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;

dev_stop:
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
	ipoib_ib_dev_stop(dev);
	return -1;
}

void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (!(priv->pkey & 0x7fff) ||
	    ib_find_pkey(priv->ca, priv->port, priv->pkey,
			 &priv->pkey_index))
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	else
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}

void ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	ipoib_mcast_start_thread(dev);
}

void ipoib_ib_dev_down(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);
}

void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else
				ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}

/*
 * Takes whatever value is in pkey index 0 and updates priv->pkey;
 * returns 0 if the pkey value was changed.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
	int result;
	u16 prev_pkey;

	prev_pkey = priv->pkey;
	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
	if (result) {
		ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
			   priv->port, result);
		return result;
	}

	priv->pkey |= 0x8000;

	if (prev_pkey != priv->pkey) {
		ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
			  prev_pkey, priv->pkey);
		/*
		 * Update the pkey in the broadcast address, while making sure to set
		 * the full membership bit, so that we join the right broadcast group.
		 */
		priv->dev->broadcast[8] = priv->pkey >> 8;
		priv->dev->broadcast[9] = priv->pkey & 0xff;

		/*
		 * Update the broadcast address in the priv->broadcast object,
		 * in case it already exists; otherwise no one else will do it.
		 */
		if (priv->broadcast) {
			spin_lock_irq(&priv->lock);
			memcpy(priv->broadcast->mcmember.mgid.raw,
			       priv->dev->broadcast + 4,
			       sizeof(union ib_gid));
			spin_unlock_irq(&priv->lock);
		}

		return 0;
	}

	return 1;
}

/*
 * Returns 0 if the pkey value was found in a different slot.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
	u16 old_index = priv->pkey_index;

	priv->pkey_index = 0;
	ipoib_pkey_dev_check_presence(priv->dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
	    (old_index == priv->pkey_index))
		return 1;
	return 0;
}

/*
 * Returns true if the device address of the ipoib interface has changed and
 * the new address is a valid one (i.e. it is in the GID table); returns
 * false otherwise.
 */
static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
	union ib_gid search_gid;
	union ib_gid gid0;
	union ib_gid *netdev_gid;
	int err;
	u16 index;
	u8 port;
	bool ret = false;

	netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
	if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
		return false;

	netif_addr_lock_bh(priv->dev);

	/* The subnet prefix may have changed, update it now so we won't have
	 * to do it later
	 */
	priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
	netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
	search_gid.global.subnet_prefix = gid0.global.subnet_prefix;

	search_gid.global.interface_id = priv->local_gid.global.interface_id;

	netif_addr_unlock_bh(priv->dev);

	err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
			  priv->dev, &port, &index);

	netif_addr_lock_bh(priv->dev);

	if (search_gid.global.interface_id !=
	    priv->local_gid.global.interface_id)
		/* There was a change while we were looking up the gid, bail
		 * here and let the next work sort this out
		 */
		goto out;

	/* The next section of code needs some background:
	 * Per the IB spec the port GUID can't change while the HCA is powered
	 * on.  The port GUID is the basis for the GID at index 0, which is
	 * the basis for the default device address of an ipoib interface.
	 *
	 * So it seems the flow should be:
	 * if user_changed_dev_addr && gid in gid tbl
	 *	set bit dev_addr_set
	 *	return true
	 * else
	 *	return false
	 *
	 * The issue is that there are devices that don't follow the spec and
	 * change the port GUID when the HCA is powered, so in order not to
	 * break userspace applications we need to check whether the user
	 * wanted to control the device address, and we assume that if the
	 * user sets the device address back to be based on GID index 0, they
	 * no longer wish to control it.
	 *
	 * If the user doesn't control the device address,
	 * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed, it means
	 * the port GUID has changed and the GID at index 0 has changed, so
	 * we need to change priv->local_gid and priv->dev->dev_addr to
	 * reflect the new GID.
	 */
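	/*
	 * In short: IPOIB_FLAG_DEV_ADDR_SET records that the current device
	 * address has been validated against the GID table at least once,
	 * and IPOIB_FLAG_DEV_ADDR_CTRL records that this address maps to a
	 * GID other than index 0, i.e. the user took control of it.
	 */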
	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
		if (!err && port == priv->port) {
			set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
			if (index == 0)
				clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
					  &priv->flags);
			else
				set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
			ret = true;
		} else {
			ret = false;
		}
	} else {
		if (!err && port == priv->port) {
			ret = true;
		} else {
			if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
				memcpy(&priv->local_gid, &gid0,
				       sizeof(priv->local_gid));
				memcpy(priv->dev->dev_addr + 4, &gid0,
				       sizeof(priv->local_gid));
				ret = true;
			}
		}
	}

out:
	netif_addr_unlock_bh(priv->dev);

	return ret;
}

static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level,
				 int nesting)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	int result;

	down_read_nested(&priv->vlan_rwsem, nesting);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level, nesting + 1);

	up_read(&priv->vlan_rwsem);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
	    level != IPOIB_FLUSH_HEAVY) {
		/* Make sure the dev_addr is set even if not flushing */
		if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		/* interface is down. update pkey and leave. */
		if (level == IPOIB_FLUSH_HEAVY) {
			if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
				update_parent_pkey(priv);
			else
				update_child_pkey(priv);
		} else if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		/* child devices chase their origin pkey value, while non-child
		 * (parent) devices should always take what is present in pkey
		 * index 0
		 */
		if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
			result = update_child_pkey(priv);
			if (result) {
				/* restart QP only if P_Key index is changed */
				ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
				return;
			}

		} else {
			result = update_parent_pkey(priv);
			/* restart QP only if P_Key value changed */
			if (result) {
				ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
				return;
			}
		}
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		int oper_up;
		ipoib_mark_paths_invalid(dev);
		/* Set IPoIB operation as down to prevent races between the
		 * flush flow, which leaves the MCG, and on-the-fly joins
		 * which can happen during that time.  The mcast restart task
		 * should deal with join requests we missed.
1184 */ 1185 oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); 1186 ipoib_mcast_dev_flush(dev); 1187 if (oper_up) 1188 set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); 1189 ipoib_flush_ah(dev); 1190 } 1191 1192 if (level >= IPOIB_FLUSH_NORMAL) 1193 ipoib_ib_dev_down(dev); 1194 1195 if (level == IPOIB_FLUSH_HEAVY) { 1196 if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) 1197 ipoib_ib_dev_stop(dev); 1198 if (ipoib_ib_dev_open(dev) != 0) 1199 return; 1200 if (netif_queue_stopped(dev)) 1201 netif_start_queue(dev); 1202 } 1203 1204 /* 1205 * The device could have been brought down between the start and when 1206 * we get here, don't bring it back up if it's not configured up 1207 */ 1208 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { 1209 if (level >= IPOIB_FLUSH_NORMAL) 1210 ipoib_ib_dev_up(dev); 1211 if (ipoib_dev_addr_changed_valid(priv)) 1212 ipoib_mcast_restart_task(&priv->restart_task); 1213 } 1214 } 1215 1216 void ipoib_ib_dev_flush_light(struct work_struct *work) 1217 { 1218 struct ipoib_dev_priv *priv = 1219 container_of(work, struct ipoib_dev_priv, flush_light); 1220 1221 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0); 1222 } 1223 1224 void ipoib_ib_dev_flush_normal(struct work_struct *work) 1225 { 1226 struct ipoib_dev_priv *priv = 1227 container_of(work, struct ipoib_dev_priv, flush_normal); 1228 1229 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0); 1230 } 1231 1232 void ipoib_ib_dev_flush_heavy(struct work_struct *work) 1233 { 1234 struct ipoib_dev_priv *priv = 1235 container_of(work, struct ipoib_dev_priv, flush_heavy); 1236 1237 __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0); 1238 } 1239 1240 void ipoib_ib_dev_cleanup(struct net_device *dev) 1241 { 1242 struct ipoib_dev_priv *priv = ipoib_priv(dev); 1243 1244 ipoib_dbg(priv, "cleaning up ib_dev\n"); 1245 /* 1246 * We must make sure there are no more (path) completions 1247 * that may wish to touch priv fields that are no longer valid 1248 */ 1249 ipoib_flush_paths(dev); 1250 1251 ipoib_mcast_stop_thread(dev); 1252 ipoib_mcast_dev_flush(dev); 1253 1254 /* 1255 * All of our ah references aren't free until after 1256 * ipoib_mcast_dev_flush(), ipoib_flush_paths, and 1257 * the neighbor garbage collection is stopped and reaped. 1258 * That should all be done now, so make a final ah flush. 1259 */ 1260 ipoib_stop_ah(dev); 1261 1262 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); 1263 1264 priv->rn_ops->ndo_uninit(dev); 1265 1266 if (priv->pd) { 1267 ib_dealloc_pd(priv->pd); 1268 priv->pd = NULL; 1269 } 1270 } 1271 1272