/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/route.h>
#include <net/ip6_route.h>
#include <net/flow.h>
#include "qedr.h"
#include "qedr_iw_cm.h"

static inline void
qedr_fill_sockaddr4(const struct qed_iwarp_cm_info *cm_info,
		    struct iw_cm_event *event)
{
	struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;

	laddr->sin_family = AF_INET;
	raddr->sin_family = AF_INET;

	laddr->sin_port = htons(cm_info->local_port);
	raddr->sin_port = htons(cm_info->remote_port);

	laddr->sin_addr.s_addr = htonl(cm_info->local_ip[0]);
	raddr->sin_addr.s_addr = htonl(cm_info->remote_ip[0]);
}

static inline void
qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
		    struct iw_cm_event *event)
{
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
	struct sockaddr_in6 *raddr6 =
	    (struct sockaddr_in6 *)&event->remote_addr;
	int i;

	laddr6->sin6_family = AF_INET6;
	raddr6->sin6_family = AF_INET6;

	laddr6->sin6_port = htons(cm_info->local_port);
	raddr6->sin6_port = htons(cm_info->remote_port);

	for (i = 0; i < 4; i++) {
		laddr6->sin6_addr.in6_u.u6_addr32[i] =
		    htonl(cm_info->local_ip[i]);
		raddr6->sin6_addr.in6_u.u6_addr32[i] =
		    htonl(cm_info->remote_ip[i]);
	}
}

static void
qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_listener *listener = (struct qedr_iw_listener *)context;
	struct qedr_dev *dev = listener->dev;
	struct iw_cm_event event;
	struct qedr_iw_ep *ep;

	ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
	if (!ep)
		return;

	ep->dev = dev;
	ep->qed_context = params->ep_context;

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.status = params->status;

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    params->cm_info->ip_version == QED_TCP_IPV4)
		qedr_fill_sockaddr4(params->cm_info, &event);
	else
		qedr_fill_sockaddr6(params->cm_info, &event);

	event.provider_data = (void *)ep;
	event.private_data = (void *)params->cm_info->private_data;
	event.private_data_len = (u8)params->cm_info->private_data_len;
	event.ord = params->cm_info->ord;
	event.ird = params->cm_info->ird;

	listener->cm_id->event_handler(listener->cm_id, &event);
}

static void
qedr_iw_issue_event(void *context,
		    struct qed_iwarp_cm_event_params *params,
		    enum iw_cm_event_type event_type)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct iw_cm_event event;

	memset(&event, 0, sizeof(event));
	event.status = params->status;
	event.event = event_type;

	if (params->cm_info) {
		event.ird = params->cm_info->ird;
		event.ord = params->cm_info->ord;
		event.private_data_len = params->cm_info->private_data_len;
		event.private_data = (void *)params->cm_info->private_data;
	}

	if (ep->cm_id)
		ep->cm_id->event_handler(ep->cm_id, &event);
}

static void
qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;

	if (ep->cm_id) {
		qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);

		ep->cm_id->rem_ref(ep->cm_id);
		ep->cm_id = NULL;
	}
}

static void
qedr_iw_qp_event(void *context,
		 struct qed_iwarp_cm_event_params *params,
		 enum ib_event_type ib_event, char *str)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;
	struct ib_qp *ibqp = &ep->qp->ibqp;
	struct ib_event event;

	DP_NOTICE(dev, "QP error received: %s\n", str);

	if (ibqp->event_handler) {
		event.event = ib_event;
		event.device = ibqp->device;
		event.element.qp = ibqp;
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

struct qedr_discon_work {
	struct work_struct work;
	struct qedr_iw_ep *ep;
	enum qed_iwarp_event_type event;
	int status;
};

static void qedr_iw_disconnect_worker(struct work_struct *work)
{
	struct qedr_discon_work *dwork =
	    container_of(work, struct qedr_discon_work, work);
	struct qed_rdma_modify_qp_in_params qp_params = { 0 };
	struct qedr_iw_ep *ep = dwork->ep;
	struct qedr_dev *dev = ep->dev;
	struct qedr_qp *qp = ep->qp;
	struct iw_cm_event event;

	if (qp->destroyed) {
		kfree(dwork);
		qedr_iw_qp_rem_ref(&qp->ibqp);
		return;
	}

	memset(&event, 0, sizeof(event));
	event.status = dwork->status;
	event.event = IW_CM_EVENT_DISCONNECT;

	/* A zero status means a graceful disconnect was requested; modifying
	 * the QP to SQD is translated to a graceful disconnect. Otherwise a
	 * reset is sent.
	 */
	if (dwork->status)
		qp_params.new_state = QED_ROCE_QP_STATE_ERR;
	else
		qp_params.new_state = QED_ROCE_QP_STATE_SQD;

	kfree(dwork);

	if (ep->cm_id)
		ep->cm_id->event_handler(ep->cm_id, &event);

	SET_FIELD(qp_params.modify_flags,
		  QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);

	dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);

	qedr_iw_qp_rem_ref(&qp->ibqp);
}

static void
qedr_iw_disconnect_event(void *context,
			 struct qed_iwarp_cm_event_params *params)
{
	struct qedr_discon_work *work;
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;
	struct qedr_qp *qp = ep->qp;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	qedr_iw_qp_add_ref(&qp->ibqp);
	work->ep = ep;
	work->event = params->event;
	work->status = params->status;

	INIT_WORK(&work->work, qedr_iw_disconnect_worker);
	queue_work(dev->iwarp_wq, &work->work);
}

static void
qedr_iw_passive_complete(void *context,
			 struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	/* We only reach this state if MPA_REJECT was called on the passive
	 * side; in that case there is no associated QP.
	 */
	if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
		DP_DEBUG(dev, QEDR_MSG_IWARP,
			 "PASSIVE connection refused releasing ep...\n");
		kfree(ep);
		return;
	}

	qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);

	if (params->status < 0)
		qedr_iw_close_event(context, params);
}

static int
qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;
	struct qed_iwarp_send_rtr_in rtr_in;

	rtr_in.ep_context = params->ep_context;

	return dev->ops->iwarp_send_rtr(dev->rdma_ctx, &rtr_in);
}

/* Main CM event callback registered with the qed core: dispatches
 * connection-management events to the iw_cm layer and reports QP
 * errors as IB events.
 */
static int
qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
	struct qedr_dev *dev = ep->dev;

	switch (params->event) {
	case QED_IWARP_EVENT_MPA_REQUEST:
		qedr_iw_mpa_request(context, params);
		break;
	case QED_IWARP_EVENT_ACTIVE_MPA_REPLY:
		qedr_iw_mpa_reply(context, params);
		break;
	case QED_IWARP_EVENT_PASSIVE_COMPLETE:
		ep->during_connect = 0;
		qedr_iw_passive_complete(context, params);
		break;

	case QED_IWARP_EVENT_ACTIVE_COMPLETE:
		ep->during_connect = 0;
		qedr_iw_issue_event(context,
				    params,
				    IW_CM_EVENT_CONNECT_REPLY);
		if (params->status < 0) {
			ep->cm_id->rem_ref(ep->cm_id);
			ep->cm_id = NULL;
		}
		break;
	case QED_IWARP_EVENT_DISCONNECT:
		qedr_iw_disconnect_event(context, params);
		break;
	case QED_IWARP_EVENT_CLOSE:
		ep->during_connect = 0;
		qedr_iw_close_event(context, params);
		break;
	case QED_IWARP_EVENT_RQ_EMPTY:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_RQ_EMPTY");
		break;
	case QED_IWARP_EVENT_IRQ_FULL:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_IRQ_FULL");
		break;
	case QED_IWARP_EVENT_LLP_TIMEOUT:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_LLP_TIMEOUT");
		break;
	case QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
				 "QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR");
		break;
	case QED_IWARP_EVENT_CQ_OVERFLOW:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_CQ_OVERFLOW");
		break;
	case QED_IWARP_EVENT_QP_CATASTROPHIC:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_QP_CATASTROPHIC");
		break;
	case QED_IWARP_EVENT_LOCAL_ACCESS_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_ACCESS_ERR,
				 "QED_IWARP_EVENT_LOCAL_ACCESS_ERROR");
		break;
	case QED_IWARP_EVENT_REMOTE_OPERATION_ERROR:
		qedr_iw_qp_event(context, params, IB_EVENT_QP_FATAL,
				 "QED_IWARP_EVENT_REMOTE_OPERATION_ERROR");
		break;
	case QED_IWARP_EVENT_TERMINATE_RECEIVED:
		DP_NOTICE(dev, "Got terminate message\n");
		break;
	default:
		DP_NOTICE(dev, "Unknown event received %d\n", params->event);
		break;
	}
	return 0;
}

/* Return the VLAN id of the netdev owning the given IPv4 address (0 if none). */
static u16 qedr_iw_get_vlan_ipv4(struct qedr_dev *dev, u32 *addr)
{
	struct net_device *ndev;
	u16 vlan_id = 0;

	ndev = ip_dev_find(&init_net, htonl(addr[0]));

	if (ndev) {
		vlan_id = rdma_vlan_dev_vlan_id(ndev);
		dev_put(ndev);
	}
	if (vlan_id == 0xffff)
		vlan_id = 0;
	return vlan_id;
}

static u16 qedr_iw_get_vlan_ipv6(u32 *addr)
{
	struct net_device *ndev = NULL;
	struct in6_addr laddr6;
	u16 vlan_id = 0;
	int i;

	if (!IS_ENABLED(CONFIG_IPV6))
		return vlan_id;

	for (i = 0; i < 4; i++)
		laddr6.in6_u.u6_addr32[i] = htonl(addr[i]);

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ndev) {
		if (ipv6_chk_addr(&init_net, &laddr6, ndev, 1)) {
			vlan_id = rdma_vlan_dev_vlan_id(ndev);
			break;
		}
	}

	rcu_read_unlock();
	if (vlan_id == 0xffff)
		vlan_id = 0;

	return vlan_id;
}

/* Resolve the destination MAC address over the IPv4 route; if the neighbour
 * entry is not yet valid, trigger neighbour discovery.
 */
static int
qedr_addr4_resolve(struct qedr_dev *dev,
		   struct sockaddr_in *src_in,
		   struct sockaddr_in *dst_in, u8 *dst_mac)
{
	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct neighbour *neigh = NULL;
	struct rtable *rt = NULL;
	int rc = 0;

	rt = ip_route_output(&init_net, dst_ip, src_ip, 0, 0);
	if (IS_ERR(rt)) {
		DP_ERR(dev, "ip_route_output returned error\n");
		return -EINVAL;
	}

	neigh = dst_neigh_lookup(&rt->dst, &dst_ip);

	if (neigh) {
		rcu_read_lock();
		if (neigh->nud_state & NUD_VALID) {
			ether_addr_copy(dst_mac, neigh->ha);
			DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
		} else {
			neigh_event_send(neigh, NULL);
		}
		rcu_read_unlock();
		neigh_release(neigh);
	}

	ip_rt_put(rt);

	return rc;
}

static int
qedr_addr6_resolve(struct qedr_dev *dev,
		   struct sockaddr_in6 *src_in,
		   struct sockaddr_in6 *dst_in, u8 *dst_mac)
{
	struct neighbour *neigh = NULL;
	struct dst_entry *dst;
	struct flowi6 fl6;
	int rc = 0;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_in->sin6_addr;
	fl6.saddr = src_in->sin6_addr;

	dst = ip6_route_output(&init_net, NULL, &fl6);

	if ((!dst) || dst->error) {
		if (dst) {
			dst_release(dst);
			DP_ERR(dev,
			       "ip6_route_output returned dst->error = %d\n",
			       dst->error);
		}
		return -EINVAL;
	}
	neigh = dst_neigh_lookup(dst, &fl6.daddr);
	if (neigh) {
		rcu_read_lock();
		if (neigh->nud_state & NUD_VALID) {
			ether_addr_copy(dst_mac, neigh->ha);
			DP_DEBUG(dev, QEDR_MSG_QP, "mac_addr=[%pM]\n", dst_mac);
		} else {
			neigh_event_send(neigh, NULL);
		}
		rcu_read_unlock();
		neigh_release(neigh);
	}

	dst_release(dst);

	return rc;
}

int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	struct qed_iwarp_connect_out out_params;
	struct qed_iwarp_connect_in in_params;
	struct qed_iwarp_cm_info *cm_info;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct qedr_iw_ep *ep;
	struct qedr_qp *qp;
	int rc = 0;
	int i;

	qp = idr_find(&dev->qpidr.idr, conn_param->qpn);
	if (!qp) {
		DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
		return -EINVAL;
	}

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;

	DP_DEBUG(dev, QEDR_MSG_IWARP, "MAPPED %d %d\n",
		 ntohs(((struct sockaddr_in *)&cm_id->remote_addr)->sin_port),
		 ntohs(raddr->sin_port));

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "Connect source address: %pISpc, remote address: %pISpc\n",
		 &cm_id->local_addr, &cm_id->remote_addr);

	if (!laddr->sin_port || !raddr->sin_port)
		return -EINVAL;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->dev = dev;
	ep->qp = qp;
	qp->ep = ep;
	cm_id->add_ref(cm_id);
	ep->cm_id = cm_id;

	in_params.event_cb = qedr_iw_event_handler;
	in_params.cb_context = ep;

	cm_info = &in_params.cm_info;
	memset(cm_info->local_ip, 0, sizeof(cm_info->local_ip));
	memset(cm_info->remote_ip, 0, sizeof(cm_info->remote_ip));

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    cm_id->remote_addr.ss_family == AF_INET) {
		cm_info->ip_version = QED_TCP_IPV4;

		cm_info->remote_ip[0] = ntohl(raddr->sin_addr.s_addr);
		cm_info->local_ip[0] = ntohl(laddr->sin_addr.s_addr);
		cm_info->remote_port = ntohs(raddr->sin_port);
		cm_info->local_port = ntohs(laddr->sin_port);
		cm_info->vlan = qedr_iw_get_vlan_ipv4(dev, cm_info->local_ip);

		rc = qedr_addr4_resolve(dev, laddr, raddr,
					(u8 *)in_params.remote_mac_addr);

		in_params.mss = dev->iwarp_max_mtu -
		    (sizeof(struct iphdr) + sizeof(struct tcphdr));

	} else {
		in_params.cm_info.ip_version = QED_TCP_IPV6;

		for (i = 0; i < 4; i++) {
			cm_info->remote_ip[i] =
			    ntohl(raddr6->sin6_addr.in6_u.u6_addr32[i]);
			cm_info->local_ip[i] =
			    ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
		}

		cm_info->local_port = ntohs(laddr6->sin6_port);
		cm_info->remote_port = ntohs(raddr6->sin6_port);

		in_params.mss = dev->iwarp_max_mtu -
		    (sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

		cm_info->vlan = qedr_iw_get_vlan_ipv6(cm_info->local_ip);

		rc = qedr_addr6_resolve(dev, laddr6, raddr6,
					(u8 *)in_params.remote_mac_addr);
	}
	if (rc)
		goto err;

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "ord = %d ird=%d private_data=%p private_data_len=%d rq_psn=%d\n",
		 conn_param->ord, conn_param->ird, conn_param->private_data,
		 conn_param->private_data_len, qp->rq_psn);

	cm_info->ord = conn_param->ord;
	cm_info->ird = conn_param->ird;
	cm_info->private_data = conn_param->private_data;
	cm_info->private_data_len = conn_param->private_data_len;
	in_params.qp = qp->qed_qp;
	memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);

	ep->during_connect = 1;
	rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
	if (rc)
		goto err;

	return rc;

err:
	cm_id->rem_ref(cm_id);
	kfree(ep);
	return rc;
}

int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	struct qedr_iw_listener *listener;
	struct qed_iwarp_listen_in iparams;
	struct qed_iwarp_listen_out oparams;
	struct sockaddr_in *laddr;
	struct sockaddr_in6 *laddr6;
	int rc;
	int i;

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;

	DP_DEBUG(dev, QEDR_MSG_IWARP,
		 "Create Listener address: %pISpc\n", &cm_id->local_addr);

	listener = kzalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener)
		return -ENOMEM;

	listener->dev = dev;
	cm_id->add_ref(cm_id);
	listener->cm_id = cm_id;
	listener->backlog = backlog;

	iparams.cb_context = listener;
	iparams.event_cb = qedr_iw_event_handler;
	iparams.max_backlog = backlog;

	if (!IS_ENABLED(CONFIG_IPV6) ||
	    cm_id->local_addr.ss_family == AF_INET) {
		iparams.ip_version = QED_TCP_IPV4;
		memset(iparams.ip_addr, 0, sizeof(iparams.ip_addr));

		iparams.ip_addr[0] = ntohl(laddr->sin_addr.s_addr);
		iparams.port = ntohs(laddr->sin_port);
		iparams.vlan = qedr_iw_get_vlan_ipv4(dev, iparams.ip_addr);
	} else {
		iparams.ip_version = QED_TCP_IPV6;

		for (i = 0; i < 4; i++) {
			iparams.ip_addr[i] =
			    ntohl(laddr6->sin6_addr.in6_u.u6_addr32[i]);
		}

		iparams.port = ntohs(laddr6->sin6_port);

		iparams.vlan = qedr_iw_get_vlan_ipv6(iparams.ip_addr);
	}
	rc = dev->ops->iwarp_create_listen(dev->rdma_ctx, &iparams, &oparams);
	if (rc)
		goto err;

	listener->qed_handle = oparams.handle;
	cm_id->provider_data = listener;
	return rc;

err:
	cm_id->rem_ref(cm_id);
	kfree(listener);
	return rc;
}

int qedr_iw_destroy_listen(struct iw_cm_id *cm_id)
{
	struct qedr_iw_listener *listener = cm_id->provider_data;
	struct qedr_dev *dev = get_qedr_dev(cm_id->device);
	int rc = 0;

	if (listener->qed_handle)
		rc = dev->ops->iwarp_destroy_listen(dev->rdma_ctx,
						    listener->qed_handle);

	cm_id->rem_ref(cm_id);
	return rc;
}

int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
	struct qedr_dev *dev = ep->dev;
	struct qedr_qp *qp;
	struct qed_iwarp_accept_in params;
	int rc;

	DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);

	qp = idr_find(&dev->qpidr.idr, conn_param->qpn);
	if (!qp) {
		DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
		return -EINVAL;
	}

	ep->qp = qp;
	qp->ep = ep;
	cm_id->add_ref(cm_id);
	ep->cm_id = cm_id;

	params.ep_context = ep->qed_context;
	params.cb_context = ep;
	params.qp = ep->qp->qed_qp;
	params.private_data = conn_param->private_data;
	params.private_data_len = conn_param->private_data_len;
	params.ird = conn_param->ird;
	params.ord = conn_param->ord;

	ep->during_connect = 1;
	rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
	if (rc)
		goto err;

	return rc;
err:
	ep->during_connect = 0;
	cm_id->rem_ref(cm_id);
	return rc;
}

int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	struct qedr_iw_ep *ep = (struct qedr_iw_ep *)cm_id->provider_data;
	struct qedr_dev *dev = ep->dev;
	struct qed_iwarp_reject_in params;

	params.ep_context = ep->qed_context;
	params.cb_context = ep;
	params.private_data = pdata;
	params.private_data_len = pdata_len;
	ep->qp = NULL;

	return dev->ops->iwarp_reject(dev->rdma_ctx, &params);
}

void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);

	atomic_inc(&qp->refcnt);
}

void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
{
	struct qedr_qp *qp = get_qedr_qp(ibqp);

	if (atomic_dec_and_test(&qp->refcnt)) {
		spin_lock_irq(&qp->dev->qpidr.idr_lock);
		idr_remove(&qp->dev->qpidr.idr, qp->qp_id);
		spin_unlock_irq(&qp->dev->qpidr.idr_lock);
		kfree(qp);
	}
}

struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
{
	struct qedr_dev *dev = get_qedr_dev(ibdev);

	return idr_find(&dev->qpidr.idr, qpn);
}