/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>

#include "ipoib.h"

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
	.opcode = IB_WR_SEND,
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int i;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (i = 0; i < frags; ++i)
		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}

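/*
 * Receive buffer posting.  In SRQ mode a single shared receive queue
 * feeds every connected QP; in non-SRQ mode each ipoib_cm_rx owns a
 * private ring.  Both paths encode the ring index plus the
 * IPOIB_OP_CM | IPOIB_OP_RECV flags into the work request ID so the
 * completion handler can recover the buffer.
 */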
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}

static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		sge[i].addr = rx->rx_ring[id].mapping[i];

	ret = ib_post_recv(rx->qp, wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      rx->rx_ring[id].mapping);
		dev_kfree_skb_any(rx->rx_ring[id].skb);
		rx->rx_ring[id].skb = NULL;
	}

	return ret;
}

static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	rx_ring[id].skb = skb;
	return skb;

partial_error:

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i > 0; --i)
		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}

static void ipoib_cm_free_rx_ring(struct net_device *dev,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].skb) {
			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
					      rx_ring[i].mapping);
			dev_kfree_skb_any(rx_ring[i].skb);
		}

	kfree(rx_ring);
}

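/*
 * RX QP teardown.  Posting a send WR on a QP that is already in the
 * error state produces an immediate flush completion; the idea is
 * that the drain WR below acts as a marker in the CQ shared by the
 * RX QPs, so by the time its completion is polled, the receives
 * flushed before it have been seen as well.
 */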
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on flush list are error state. This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(dev)) {
		attr.cap.max_recv_wr = ipoib_recvq_size;
		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
	}

	return ib_create_qp(priv->pd, &attr);
}

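/*
 * Walk the passive QP through INIT and RTR using the attributes the
 * CM computed for each transition, then move it on to RTS (see the
 * firmware work-around note below for why RTS failures are ignored).
 */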
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}

static void ipoib_cm_init_rx_wr(struct net_device *dev,
				struct ib_recv_wr *wr,
				struct ib_sge *sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->cm.num_frags; ++i)
		sge[i].lkey = priv->mr->lkey;

	sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < priv->cm.num_frags; ++i)
		sge[i].length = PAGE_SIZE;

	wr->next = NULL;
	wr->sg_list = priv->cm.rx_sge;
	wr->num_sge = priv->cm.num_frags;
}

static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
				   struct ipoib_cm_rx *rx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = kcalloc(ipoib_recvq_size, sizeof *rx->rx_ring, GFP_KERNEL);
	if (!rx->rx_ring)
		return -ENOMEM;

	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_free;
	}

	ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
					   rx->rx_ring[i].mapping)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	kfree(t);

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	kfree(t);
	ipoib_cm_free_rx_ring(dev, rx->rx_ring);

	return ret;
}

static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(dev);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}

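/*
 * Passive side of connection setup: a REQ from a remote peer creates
 * a new RC QP, moves it to RTS with a random starting PSN, links the
 * connection onto the passive_ids LRU list, and answers with a REP.
 */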
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(dev)) {
		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}

/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}

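/*
 * Receive completion handler.  Small packets (below
 * IPOIB_CM_COPYBREAK) are copied into a freshly allocated skb so the
 * large multi-page buffer can be reposted as-is; for larger packets
 * a replacement buffer is allocated, the filled skb is passed up the
 * stack, and its unused pages are handed over to the new skb.
 */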
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;
	int has_srq;
	struct sk_buff *small_skb;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(dev);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	skb = rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);
		++dev->stats.rx_dropped;
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock_irqsave(&priv->lock, flags);
				list_move(&p->list, &priv->cm.rx_reap_list);
				spin_unlock_irqrestore(&priv->lock, flags);
				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			}
			return;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
		int dlen = wc->byte_len;

		small_skb = dev_alloc_skb(dlen + 12);
		if (small_skb) {
			skb_reserve(small_skb, 12);
			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
						   dlen, DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, small_skb->data, dlen);
			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
						      dlen, DMA_FROM_DEVICE);
			skb_put(small_skb, dlen);
			skb = small_skb;
			goto copied;
		}
	}

	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

copied:
	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	dev->last_rx = jiffies;
	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
							  &priv->cm.rx_wr,
							  priv->cm.rx_sge,
							  wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
}

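/*
 * Transmit path: each skb is mapped as a single DMA buffer and
 * posted on the connection's RC QP.  tx_head is only advanced after
 * a successful post, and the net queue is stopped once
 * ipoib_sendq_size sends are outstanding.
 */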
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge[0].addr = addr;
	priv->tx_sge[0].length = len;

	priv->tx_wr.num_sge = 1;
	priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}

void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx_buf *tx_req;
	u64 addr;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping = addr;

	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
			       addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++dev->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
		}
	}
}

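/*
 * Send completion handler: unmap and free the skb, wake the net
 * queue once the ring is half empty, and tear the connection down on
 * any error other than a flush.
 */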
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	spin_lock_irqsave(&priv->tx_lock, flags);
	++tx->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			  "(status=%d, wrid=%d vend_err %x)\n",
			  wc->status, wr_id, wc->vendor_err);

		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock(&priv->lock);
	}

	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

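/*
 * Device bring-up and tear-down.  ipoib_cm_dev_open() listens for
 * REQs on the service ID formed from IPOIB_CM_IETF_ID and the local
 * UD QP number; ipoib_cm_dev_stop() moves every passive QP to the
 * error state and waits (with a 5 second timeout) for the drain to
 * finish before reaping.
 */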
int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
	return ret;
}

static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(dev)) {
			ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}

void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		ipoib_drain_cq(dev);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(dev);

	cancel_delayed_work(&priv->cm.stale_task);
}

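/*
 * Active side of connection setup: on REP we validate the advertised
 * MTU, move our TX QP through RTR and RTS, flush any packets that
 * were queued on the neighbour while the connection was forming, and
 * confirm with an RTU.
 */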
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.send_cq = priv->recv_cq,
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = ipoib_sendq_size,
		.cap.max_send_sge = 1,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = tx
	};

	return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	req.primary_path = pathrec;
	req.alternate_path = NULL;
	req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num = qp->qp_num;
	req.qp_type = qp->qp_type;
	req.private_data = &data;
	req.private_data_len = sizeof data;
	req.flow_control = 0;

	req.starting_psn = 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources = 4;
	req.remote_cm_response_timeout = 20;
	req.local_cm_response_timeout = 20;
	req.retry_count = 0; /* RFC draft warns against retries */
	req.rnr_retry_count = 0; /* RFC draft warns against retries */
	req.max_cm_retries = 15;
	req.srq = ipoib_cm_has_srq(dev);
	return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}

static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}
	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to init: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n",
		  p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
	vfree(p->tx_ring);
err_tx:
	return ret;
}

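/*
 * Tear down a TX connection: wait up to 5 seconds for outstanding
 * sends to complete, then reclaim whatever is still on the ring
 * before destroying the QP.
 */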
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long flags;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
				    DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_req->skb);
		++p->tx_tail;
		spin_lock_irqsave(&priv->tx_lock, flags);
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    netif_queue_stopped(p->dev) &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			netif_wake_queue(p->dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	vfree(p->tx_ring);
	kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
		neigh = tx->neigh;

		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);

			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		break;
	default:
		break;
	}

	return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	neigh->cm = tx;
	tx->neigh = neigh;
	tx->path = path;
	tx->dev = dev;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		list_move(&tx->list, &priv->cm.reap_list);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n",
			  IPOIB_GID_ARG(tx->neigh->dgid));
		tx->neigh = NULL;
	}
}

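/*
 * Workqueue handlers.  Connection setup and teardown can sleep, so
 * they run from ipoib_workqueue rather than from the CM callback or
 * the send path; note how the locks are dropped around the blocking
 * calls and re-taken afterwards.
 */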
static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	spin_lock_irqsave(&priv->tx_lock, flags);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->neighbour->ha);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
		spin_unlock(&priv->lock);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		ret = ipoib_cm_tx_init(p, qpn, &pathrec);
		spin_lock_irqsave(&priv->tx_lock, flags);
		spin_lock(&priv->lock);
		if (ret) {
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock(&priv->lock);
	spin_unlock_irqrestore(&priv->tx_lock, flags);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		ipoib_cm_tx_destroy(p);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct sk_buff *skb;

	unsigned mtu = priv->mcast_mtu;

	spin_lock_irq(&priv->tx_lock);
	spin_lock(&priv->lock);
	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock(&priv->lock);
		spin_unlock_irq(&priv->tx_lock);
		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
#endif
		dev_kfree_skb_any(skb);
		spin_lock_irq(&priv->tx_lock);
		spin_lock(&priv->lock);
	}
	spin_unlock(&priv->lock);
	spin_unlock_irq(&priv->tx_lock);
}

void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int e = skb_queue_empty(&priv->cm.skb_queue);

	if (skb->dst)
		skb->dst->ops->update_pmtu(skb->dst, mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (e)
		queue_work(ipoib_workqueue, &priv->cm.skb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task)->dev);
}

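/*
 * Stale connection reaper: passive_ids is kept in LRU order, so we
 * scan from the tail and push any connection idle for longer than
 * IPOIB_CM_RX_TIMEOUT into the error state to be drained and reaped.
 */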
static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}

static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "connected\n");
	else
		return sprintf(buf, "datagram\n");
}

static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");

		rtnl_lock();
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
		rtnl_unlock();
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		return count;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);

		rtnl_lock();
		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
			if (priv->hca_caps & IB_DEVICE_UD_TSO)
				dev->features |= NETIF_F_TSO;
		}
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);

		return count;
	}

	return -EINVAL;
}

static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);

int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}

static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring,
				    GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
	}
}

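/*
 * Per-device init: size the SRQ (if the HCA supports one) from the
 * reported max_srq_sge, derive the largest connected-mode MTU that
 * fits the S/G list, and pre-post the shared receive ring.
 */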
int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;
	struct ib_device_attr attr;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	skb_queue_head_init(&priv->cm.skb_queue);

	ret = ib_query_device(priv->ca, &attr);
	if (ret) {
		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
		return ret;
	}

	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);

	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
	ipoib_cm_create_srq(dev, attr.max_srq_sge);
	if (ipoib_cm_has_srq(dev)) {
		priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
		priv->cm.num_frags = attr.max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
		priv->cm.num_frags = IPOIB_CM_RX_SG;
	}

	ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(dev)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
						   priv->cm.num_frags - 1,
						   priv->cm.srq_ring[i].mapping)) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(dev, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -EIO;
			}
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}

void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}