1 /* 2 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 #include <linux/module.h> 33 #include <linux/list.h> 34 #include <linux/workqueue.h> 35 #include <linux/skbuff.h> 36 #include <linux/timer.h> 37 #include <linux/notifier.h> 38 #include <linux/inetdevice.h> 39 #include <linux/ip.h> 40 #include <linux/tcp.h> 41 #include <linux/if_vlan.h> 42 43 #include <net/neighbour.h> 44 #include <net/netevent.h> 45 #include <net/route.h> 46 #include <net/tcp.h> 47 #include <net/ip6_route.h> 48 #include <net/addrconf.h> 49 50 #include <rdma/ib_addr.h> 51 52 #include <libcxgb_cm.h> 53 #include "iw_cxgb4.h" 54 #include "clip_tbl.h" 55 56 static char *states[] = { 57 "idle", 58 "listen", 59 "connecting", 60 "mpa_wait_req", 61 "mpa_req_sent", 62 "mpa_req_rcvd", 63 "mpa_rep_sent", 64 "fpdu_mode", 65 "aborting", 66 "closing", 67 "moribund", 68 "dead", 69 NULL, 70 }; 71 72 static int nocong; 73 module_param(nocong, int, 0644); 74 MODULE_PARM_DESC(nocong, "Turn of congestion control (default=0)"); 75 76 static int enable_ecn; 77 module_param(enable_ecn, int, 0644); 78 MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)"); 79 80 static int dack_mode = 1; 81 module_param(dack_mode, int, 0644); 82 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)"); 83 84 uint c4iw_max_read_depth = 32; 85 module_param(c4iw_max_read_depth, int, 0644); 86 MODULE_PARM_DESC(c4iw_max_read_depth, 87 "Per-connection max ORD/IRD (default=32)"); 88 89 static int enable_tcp_timestamps; 90 module_param(enable_tcp_timestamps, int, 0644); 91 MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); 92 93 static int enable_tcp_sack; 94 module_param(enable_tcp_sack, int, 0644); 95 MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); 96 97 static int enable_tcp_window_scaling = 1; 98 module_param(enable_tcp_window_scaling, int, 0644); 99 MODULE_PARM_DESC(enable_tcp_window_scaling, 100 "Enable tcp window scaling (default=1)"); 101 102 static int peer2peer = 1; 103 module_param(peer2peer, int, 0644); 104 MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)"); 105 106 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; 107 module_param(p2p_type, int, 0644); 108 MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " 109 "1=RDMA_READ 0=RDMA_WRITE (default 1)"); 110 111 static int ep_timeout_secs = 60; 112 module_param(ep_timeout_secs, int, 0644); 113 MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " 114 "in seconds (default=60)"); 115 116 static int mpa_rev = 2; 117 module_param(mpa_rev, int, 0644); 118 MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " 119 "1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft" 120 " compliant (default=2)"); 121 122 static int markers_enabled; 123 module_param(markers_enabled, int, 0644); 124 MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); 125 126 static int crc_enabled = 1; 127 module_param(crc_enabled, int, 0644); 128 MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); 129 130 static int rcv_win = 256 * 1024; 131 module_param(rcv_win, int, 0644); 132 MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); 133 134 static int snd_win = 128 * 1024; 135 module_param(snd_win, int, 0644); 136 MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)"); 137 138 static struct workqueue_struct *workq; 139 140 static struct sk_buff_head rxq; 141 142 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); 143 static void ep_timeout(struct timer_list *t); 144 static void connect_reply_upcall(struct c4iw_ep *ep, int status); 145 static int sched(struct c4iw_dev *dev, struct sk_buff *skb); 146 147 static LIST_HEAD(timeout_list); 148 static spinlock_t timeout_lock; 149 150 static void deref_cm_id(struct c4iw_ep_common *epc) 151 { 152 epc->cm_id->rem_ref(epc->cm_id); 153 epc->cm_id = NULL; 154 set_bit(CM_ID_DEREFED, &epc->history); 155 } 156 157 static void ref_cm_id(struct c4iw_ep_common *epc) 158 { 159 set_bit(CM_ID_REFED, &epc->history); 160 epc->cm_id->add_ref(epc->cm_id); 161 } 162 163 static void deref_qp(struct c4iw_ep *ep) 164 { 165 c4iw_qp_rem_ref(&ep->com.qp->ibqp); 166 clear_bit(QP_REFERENCED, &ep->com.flags); 167 set_bit(QP_DEREFED, &ep->com.history); 168 } 169 170 static void ref_qp(struct c4iw_ep *ep) 171 { 172 set_bit(QP_REFERENCED, &ep->com.flags); 173 set_bit(QP_REFED, &ep->com.history); 174 c4iw_qp_add_ref(&ep->com.qp->ibqp); 175 } 176 177 static void start_ep_timer(struct c4iw_ep *ep) 178 { 179 pr_debug("ep %p\n", ep); 180 if (timer_pending(&ep->timer)) { 181 pr_err("%s timer already started! ep %p\n", 182 __func__, ep); 183 return; 184 } 185 clear_bit(TIMEOUT, &ep->com.flags); 186 c4iw_get_ep(&ep->com); 187 ep->timer.expires = jiffies + ep_timeout_secs * HZ; 188 add_timer(&ep->timer); 189 } 190 191 static int stop_ep_timer(struct c4iw_ep *ep) 192 { 193 pr_debug("ep %p stopping\n", ep); 194 del_timer_sync(&ep->timer); 195 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 196 c4iw_put_ep(&ep->com); 197 return 0; 198 } 199 return 1; 200 } 201 202 static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, 203 struct l2t_entry *l2e) 204 { 205 int error = 0; 206 207 if (c4iw_fatal_error(rdev)) { 208 kfree_skb(skb); 209 pr_err("%s - device in error state - dropping\n", __func__); 210 return -EIO; 211 } 212 error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); 213 if (error < 0) 214 kfree_skb(skb); 215 else if (error == NET_XMIT_DROP) 216 return -ENOMEM; 217 return error < 0 ? error : 0; 218 } 219 220 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) 221 { 222 int error = 0; 223 224 if (c4iw_fatal_error(rdev)) { 225 kfree_skb(skb); 226 pr_err("%s - device in error state - dropping\n", __func__); 227 return -EIO; 228 } 229 error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); 230 if (error < 0) 231 kfree_skb(skb); 232 return error < 0 ? error : 0; 233 } 234 235 static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) 236 { 237 u32 len = roundup(sizeof(struct cpl_tid_release), 16); 238 239 skb = get_skb(skb, len, GFP_KERNEL); 240 if (!skb) 241 return; 242 243 cxgb_mk_tid_release(skb, len, hwtid, 0); 244 c4iw_ofld_send(rdev, skb); 245 return; 246 } 247 248 static void set_emss(struct c4iw_ep *ep, u16 opt) 249 { 250 ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] - 251 ((AF_INET == ep->com.remote_addr.ss_family) ? 252 sizeof(struct iphdr) : sizeof(struct ipv6hdr)) - 253 sizeof(struct tcphdr); 254 ep->mss = ep->emss; 255 if (TCPOPT_TSTAMP_G(opt)) 256 ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4); 257 if (ep->emss < 128) 258 ep->emss = 128; 259 if (ep->emss & 7) 260 pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n", 261 TCPOPT_MSS_G(opt), ep->mss, ep->emss); 262 pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss, 263 ep->emss); 264 } 265 266 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) 267 { 268 enum c4iw_ep_state state; 269 270 mutex_lock(&epc->mutex); 271 state = epc->state; 272 mutex_unlock(&epc->mutex); 273 return state; 274 } 275 276 static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 277 { 278 epc->state = new; 279 } 280 281 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 282 { 283 mutex_lock(&epc->mutex); 284 pr_debug("%s -> %s\n", states[epc->state], states[new]); 285 __state_set(epc, new); 286 mutex_unlock(&epc->mutex); 287 return; 288 } 289 290 static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size) 291 { 292 struct sk_buff *skb; 293 unsigned int i; 294 size_t len; 295 296 len = roundup(sizeof(union cpl_wr_size), 16); 297 for (i = 0; i < size; i++) { 298 skb = alloc_skb(len, GFP_KERNEL); 299 if (!skb) 300 goto fail; 301 skb_queue_tail(ep_skb_list, skb); 302 } 303 return 0; 304 fail: 305 skb_queue_purge(ep_skb_list); 306 return -ENOMEM; 307 } 308 309 static void *alloc_ep(int size, gfp_t gfp) 310 { 311 struct c4iw_ep_common *epc; 312 313 epc = kzalloc(size, gfp); 314 if (epc) { 315 epc->wr_waitp = c4iw_alloc_wr_wait(gfp); 316 if (!epc->wr_waitp) { 317 kfree(epc); 318 epc = NULL; 319 goto out; 320 } 321 kref_init(&epc->kref); 322 mutex_init(&epc->mutex); 323 c4iw_init_wr_wait(epc->wr_waitp); 324 } 325 pr_debug("alloc ep %p\n", epc); 326 out: 327 return epc; 328 } 329 330 static void remove_ep_tid(struct c4iw_ep *ep) 331 { 332 unsigned long flags; 333 334 spin_lock_irqsave(&ep->com.dev->lock, flags); 335 _remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0); 336 if (idr_is_empty(&ep->com.dev->hwtid_idr)) 337 wake_up(&ep->com.dev->wait); 338 spin_unlock_irqrestore(&ep->com.dev->lock, flags); 339 } 340 341 static void insert_ep_tid(struct c4iw_ep *ep) 342 { 343 unsigned long flags; 344 345 spin_lock_irqsave(&ep->com.dev->lock, flags); 346 _insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0); 347 spin_unlock_irqrestore(&ep->com.dev->lock, flags); 348 } 349 350 /* 351 * Atomically lookup the ep ptr given the tid and grab a reference on the ep. 352 */ 353 static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid) 354 { 355 struct c4iw_ep *ep; 356 unsigned long flags; 357 358 spin_lock_irqsave(&dev->lock, flags); 359 ep = idr_find(&dev->hwtid_idr, tid); 360 if (ep) 361 c4iw_get_ep(&ep->com); 362 spin_unlock_irqrestore(&dev->lock, flags); 363 return ep; 364 } 365 366 /* 367 * Atomically lookup the ep ptr given the stid and grab a reference on the ep. 368 */ 369 static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev, 370 unsigned int stid) 371 { 372 struct c4iw_listen_ep *ep; 373 unsigned long flags; 374 375 spin_lock_irqsave(&dev->lock, flags); 376 ep = idr_find(&dev->stid_idr, stid); 377 if (ep) 378 c4iw_get_ep(&ep->com); 379 spin_unlock_irqrestore(&dev->lock, flags); 380 return ep; 381 } 382 383 void _c4iw_free_ep(struct kref *kref) 384 { 385 struct c4iw_ep *ep; 386 387 ep = container_of(kref, struct c4iw_ep, com.kref); 388 pr_debug("ep %p state %s\n", ep, states[ep->com.state]); 389 if (test_bit(QP_REFERENCED, &ep->com.flags)) 390 deref_qp(ep); 391 if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { 392 if (ep->com.remote_addr.ss_family == AF_INET6) { 393 struct sockaddr_in6 *sin6 = 394 (struct sockaddr_in6 *) 395 &ep->com.local_addr; 396 397 cxgb4_clip_release( 398 ep->com.dev->rdev.lldi.ports[0], 399 (const u32 *)&sin6->sin6_addr.s6_addr, 400 1); 401 } 402 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid, 403 ep->com.local_addr.ss_family); 404 dst_release(ep->dst); 405 cxgb4_l2t_release(ep->l2t); 406 kfree_skb(ep->mpa_skb); 407 } 408 if (!skb_queue_empty(&ep->com.ep_skb_list)) 409 skb_queue_purge(&ep->com.ep_skb_list); 410 c4iw_put_wr_wait(ep->com.wr_waitp); 411 kfree(ep); 412 } 413 414 static void release_ep_resources(struct c4iw_ep *ep) 415 { 416 set_bit(RELEASE_RESOURCES, &ep->com.flags); 417 418 /* 419 * If we have a hwtid, then remove it from the idr table 420 * so lookups will no longer find this endpoint. Otherwise 421 * we have a race where one thread finds the ep ptr just 422 * before the other thread is freeing the ep memory. 423 */ 424 if (ep->hwtid != -1) 425 remove_ep_tid(ep); 426 c4iw_put_ep(&ep->com); 427 } 428 429 static int status2errno(int status) 430 { 431 switch (status) { 432 case CPL_ERR_NONE: 433 return 0; 434 case CPL_ERR_CONN_RESET: 435 return -ECONNRESET; 436 case CPL_ERR_ARP_MISS: 437 return -EHOSTUNREACH; 438 case CPL_ERR_CONN_TIMEDOUT: 439 return -ETIMEDOUT; 440 case CPL_ERR_TCAM_FULL: 441 return -ENOMEM; 442 case CPL_ERR_CONN_EXIST: 443 return -EADDRINUSE; 444 default: 445 return -EIO; 446 } 447 } 448 449 /* 450 * Try and reuse skbs already allocated... 451 */ 452 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) 453 { 454 if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { 455 skb_trim(skb, 0); 456 skb_get(skb); 457 skb_reset_transport_header(skb); 458 } else { 459 skb = alloc_skb(len, gfp); 460 } 461 t4_set_arp_err_handler(skb, NULL, NULL); 462 return skb; 463 } 464 465 static struct net_device *get_real_dev(struct net_device *egress_dev) 466 { 467 return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev; 468 } 469 470 static void arp_failure_discard(void *handle, struct sk_buff *skb) 471 { 472 pr_err("ARP failure\n"); 473 kfree_skb(skb); 474 } 475 476 static void mpa_start_arp_failure(void *handle, struct sk_buff *skb) 477 { 478 pr_err("ARP failure during MPA Negotiation - Closing Connection\n"); 479 } 480 481 enum { 482 NUM_FAKE_CPLS = 2, 483 FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0, 484 FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1, 485 }; 486 487 static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) 488 { 489 struct c4iw_ep *ep; 490 491 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); 492 release_ep_resources(ep); 493 kfree_skb(skb); 494 return 0; 495 } 496 497 static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) 498 { 499 struct c4iw_ep *ep; 500 501 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); 502 c4iw_put_ep(&ep->parent_ep->com); 503 release_ep_resources(ep); 504 kfree_skb(skb); 505 return 0; 506 } 507 508 /* 509 * Fake up a special CPL opcode and call sched() so process_work() will call 510 * _put_ep_safe() in a safe context to free the ep resources. This is needed 511 * because ARP error handlers are called in an ATOMIC context, and 512 * _c4iw_free_ep() needs to block. 513 */ 514 static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb, 515 int cpl) 516 { 517 struct cpl_act_establish *rpl = cplhdr(skb); 518 519 /* Set our special ARP_FAILURE opcode */ 520 rpl->ot.opcode = cpl; 521 522 /* 523 * Save ep in the skb->cb area, after where sched() will save the dev 524 * ptr. 525 */ 526 *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep; 527 sched(ep->com.dev, skb); 528 } 529 530 /* Handle an ARP failure for an accept */ 531 static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb) 532 { 533 struct c4iw_ep *ep = handle; 534 535 pr_err("ARP failure during accept - tid %u - dropping connection\n", 536 ep->hwtid); 537 538 __state_set(&ep->com, DEAD); 539 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE); 540 } 541 542 /* 543 * Handle an ARP failure for an active open. 544 */ 545 static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) 546 { 547 struct c4iw_ep *ep = handle; 548 549 pr_err("ARP failure during connect\n"); 550 connect_reply_upcall(ep, -EHOSTUNREACH); 551 __state_set(&ep->com, DEAD); 552 if (ep->com.remote_addr.ss_family == AF_INET6) { 553 struct sockaddr_in6 *sin6 = 554 (struct sockaddr_in6 *)&ep->com.local_addr; 555 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 556 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 557 } 558 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 559 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 560 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); 561 } 562 563 /* 564 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant 565 * and send it along. 566 */ 567 static void abort_arp_failure(void *handle, struct sk_buff *skb) 568 { 569 int ret; 570 struct c4iw_ep *ep = handle; 571 struct c4iw_rdev *rdev = &ep->com.dev->rdev; 572 struct cpl_abort_req *req = cplhdr(skb); 573 574 pr_debug("rdev %p\n", rdev); 575 req->cmd = CPL_ABORT_NO_RST; 576 skb_get(skb); 577 ret = c4iw_ofld_send(rdev, skb); 578 if (ret) { 579 __state_set(&ep->com, DEAD); 580 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); 581 } else 582 kfree_skb(skb); 583 } 584 585 static int send_flowc(struct c4iw_ep *ep) 586 { 587 struct fw_flowc_wr *flowc; 588 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list); 589 u16 vlan = ep->l2t->vlan; 590 int nparams; 591 int flowclen, flowclen16; 592 593 if (WARN_ON(!skb)) 594 return -ENOMEM; 595 596 if (vlan == CPL_L2T_VLAN_NONE) 597 nparams = 9; 598 else 599 nparams = 10; 600 601 flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]); 602 flowclen16 = DIV_ROUND_UP(flowclen, 16); 603 flowclen = flowclen16 * 16; 604 605 flowc = __skb_put(skb, flowclen); 606 memset(flowc, 0, flowclen); 607 608 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | 609 FW_FLOWC_WR_NPARAMS_V(nparams)); 610 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) | 611 FW_WR_FLOWID_V(ep->hwtid)); 612 613 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 614 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V 615 (ep->com.dev->rdev.lldi.pf)); 616 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 617 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); 618 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 619 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); 620 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 621 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); 622 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 623 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); 624 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 625 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); 626 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 627 flowc->mnemval[6].val = cpu_to_be32(ep->snd_win); 628 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 629 flowc->mnemval[7].val = cpu_to_be32(ep->emss); 630 flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_RCV_SCALE; 631 flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale); 632 if (nparams == 10) { 633 u16 pri; 634 pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 635 flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 636 flowc->mnemval[9].val = cpu_to_be32(pri); 637 } 638 639 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 640 return c4iw_ofld_send(&ep->com.dev->rdev, skb); 641 } 642 643 static int send_halfclose(struct c4iw_ep *ep) 644 { 645 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list); 646 u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16); 647 648 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 649 if (WARN_ON(!skb)) 650 return -ENOMEM; 651 652 cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx, 653 NULL, arp_failure_discard); 654 655 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 656 } 657 658 static int send_abort(struct c4iw_ep *ep) 659 { 660 u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16); 661 struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list); 662 663 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 664 if (WARN_ON(!req_skb)) 665 return -ENOMEM; 666 667 cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx, 668 ep, abort_arp_failure); 669 670 return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t); 671 } 672 673 static int send_connect(struct c4iw_ep *ep) 674 { 675 struct cpl_act_open_req *req = NULL; 676 struct cpl_t5_act_open_req *t5req = NULL; 677 struct cpl_t6_act_open_req *t6req = NULL; 678 struct cpl_act_open_req6 *req6 = NULL; 679 struct cpl_t5_act_open_req6 *t5req6 = NULL; 680 struct cpl_t6_act_open_req6 *t6req6 = NULL; 681 struct sk_buff *skb; 682 u64 opt0; 683 u32 opt2; 684 unsigned int mtu_idx; 685 u32 wscale; 686 int win, sizev4, sizev6, wrlen; 687 struct sockaddr_in *la = (struct sockaddr_in *) 688 &ep->com.local_addr; 689 struct sockaddr_in *ra = (struct sockaddr_in *) 690 &ep->com.remote_addr; 691 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *) 692 &ep->com.local_addr; 693 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *) 694 &ep->com.remote_addr; 695 int ret; 696 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; 697 u32 isn = (prandom_u32() & ~7UL) - 1; 698 struct net_device *netdev; 699 u64 params; 700 701 netdev = ep->com.dev->rdev.lldi.ports[0]; 702 703 switch (CHELSIO_CHIP_VERSION(adapter_type)) { 704 case CHELSIO_T4: 705 sizev4 = sizeof(struct cpl_act_open_req); 706 sizev6 = sizeof(struct cpl_act_open_req6); 707 break; 708 case CHELSIO_T5: 709 sizev4 = sizeof(struct cpl_t5_act_open_req); 710 sizev6 = sizeof(struct cpl_t5_act_open_req6); 711 break; 712 case CHELSIO_T6: 713 sizev4 = sizeof(struct cpl_t6_act_open_req); 714 sizev6 = sizeof(struct cpl_t6_act_open_req6); 715 break; 716 default: 717 pr_err("T%d Chip is not supported\n", 718 CHELSIO_CHIP_VERSION(adapter_type)); 719 return -EINVAL; 720 } 721 722 wrlen = (ep->com.remote_addr.ss_family == AF_INET) ? 723 roundup(sizev4, 16) : 724 roundup(sizev6, 16); 725 726 pr_debug("ep %p atid %u\n", ep, ep->atid); 727 728 skb = get_skb(NULL, wrlen, GFP_KERNEL); 729 if (!skb) { 730 pr_err("%s - failed to alloc skb\n", __func__); 731 return -ENOMEM; 732 } 733 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 734 735 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 736 enable_tcp_timestamps, 737 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); 738 wscale = cxgb_compute_wscale(rcv_win); 739 740 /* 741 * Specify the largest window that will fit in opt0. The 742 * remainder will be specified in the rx_data_ack. 743 */ 744 win = ep->rcv_win >> 10; 745 if (win > RCV_BUFSIZ_M) 746 win = RCV_BUFSIZ_M; 747 748 opt0 = (nocong ? NO_CONG_F : 0) | 749 KEEP_ALIVE_F | 750 DELACK_F | 751 WND_SCALE_V(wscale) | 752 MSS_IDX_V(mtu_idx) | 753 L2T_IDX_V(ep->l2t->idx) | 754 TX_CHAN_V(ep->tx_chan) | 755 SMAC_SEL_V(ep->smac_idx) | 756 DSCP_V(ep->tos >> 2) | 757 ULP_MODE_V(ULP_MODE_TCPDDP) | 758 RCV_BUFSIZ_V(win); 759 opt2 = RX_CHANNEL_V(0) | 760 CCTRL_ECN_V(enable_ecn) | 761 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); 762 if (enable_tcp_timestamps) 763 opt2 |= TSTAMPS_EN_F; 764 if (enable_tcp_sack) 765 opt2 |= SACK_EN_F; 766 if (wscale && enable_tcp_window_scaling) 767 opt2 |= WND_SCALE_EN_F; 768 if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) { 769 if (peer2peer) 770 isn += 4; 771 772 opt2 |= T5_OPT_2_VALID_F; 773 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); 774 opt2 |= T5_ISS_F; 775 } 776 777 params = cxgb4_select_ntuple(netdev, ep->l2t); 778 779 if (ep->com.remote_addr.ss_family == AF_INET6) 780 cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], 781 (const u32 *)&la6->sin6_addr.s6_addr, 1); 782 783 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); 784 785 if (ep->com.remote_addr.ss_family == AF_INET) { 786 switch (CHELSIO_CHIP_VERSION(adapter_type)) { 787 case CHELSIO_T4: 788 req = skb_put(skb, wrlen); 789 INIT_TP_WR(req, 0); 790 break; 791 case CHELSIO_T5: 792 t5req = skb_put(skb, wrlen); 793 INIT_TP_WR(t5req, 0); 794 req = (struct cpl_act_open_req *)t5req; 795 break; 796 case CHELSIO_T6: 797 t6req = skb_put(skb, wrlen); 798 INIT_TP_WR(t6req, 0); 799 req = (struct cpl_act_open_req *)t6req; 800 t5req = (struct cpl_t5_act_open_req *)t6req; 801 break; 802 default: 803 pr_err("T%d Chip is not supported\n", 804 CHELSIO_CHIP_VERSION(adapter_type)); 805 ret = -EINVAL; 806 goto clip_release; 807 } 808 809 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 810 ((ep->rss_qid<<14) | ep->atid))); 811 req->local_port = la->sin_port; 812 req->peer_port = ra->sin_port; 813 req->local_ip = la->sin_addr.s_addr; 814 req->peer_ip = ra->sin_addr.s_addr; 815 req->opt0 = cpu_to_be64(opt0); 816 817 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 818 req->params = cpu_to_be32(params); 819 req->opt2 = cpu_to_be32(opt2); 820 } else { 821 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 822 t5req->params = 823 cpu_to_be64(FILTER_TUPLE_V(params)); 824 t5req->rsvd = cpu_to_be32(isn); 825 pr_debug("snd_isn %u\n", t5req->rsvd); 826 t5req->opt2 = cpu_to_be32(opt2); 827 } else { 828 t6req->params = 829 cpu_to_be64(FILTER_TUPLE_V(params)); 830 t6req->rsvd = cpu_to_be32(isn); 831 pr_debug("snd_isn %u\n", t6req->rsvd); 832 t6req->opt2 = cpu_to_be32(opt2); 833 } 834 } 835 } else { 836 switch (CHELSIO_CHIP_VERSION(adapter_type)) { 837 case CHELSIO_T4: 838 req6 = skb_put(skb, wrlen); 839 INIT_TP_WR(req6, 0); 840 break; 841 case CHELSIO_T5: 842 t5req6 = skb_put(skb, wrlen); 843 INIT_TP_WR(t5req6, 0); 844 req6 = (struct cpl_act_open_req6 *)t5req6; 845 break; 846 case CHELSIO_T6: 847 t6req6 = skb_put(skb, wrlen); 848 INIT_TP_WR(t6req6, 0); 849 req6 = (struct cpl_act_open_req6 *)t6req6; 850 t5req6 = (struct cpl_t5_act_open_req6 *)t6req6; 851 break; 852 default: 853 pr_err("T%d Chip is not supported\n", 854 CHELSIO_CHIP_VERSION(adapter_type)); 855 ret = -EINVAL; 856 goto clip_release; 857 } 858 859 OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, 860 ((ep->rss_qid<<14)|ep->atid))); 861 req6->local_port = la6->sin6_port; 862 req6->peer_port = ra6->sin6_port; 863 req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr)); 864 req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8)); 865 req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr)); 866 req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8)); 867 req6->opt0 = cpu_to_be64(opt0); 868 869 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 870 req6->params = cpu_to_be32(cxgb4_select_ntuple(netdev, 871 ep->l2t)); 872 req6->opt2 = cpu_to_be32(opt2); 873 } else { 874 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 875 t5req6->params = 876 cpu_to_be64(FILTER_TUPLE_V(params)); 877 t5req6->rsvd = cpu_to_be32(isn); 878 pr_debug("snd_isn %u\n", t5req6->rsvd); 879 t5req6->opt2 = cpu_to_be32(opt2); 880 } else { 881 t6req6->params = 882 cpu_to_be64(FILTER_TUPLE_V(params)); 883 t6req6->rsvd = cpu_to_be32(isn); 884 pr_debug("snd_isn %u\n", t6req6->rsvd); 885 t6req6->opt2 = cpu_to_be32(opt2); 886 } 887 888 } 889 } 890 891 set_bit(ACT_OPEN_REQ, &ep->com.history); 892 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 893 clip_release: 894 if (ret && ep->com.remote_addr.ss_family == AF_INET6) 895 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 896 (const u32 *)&la6->sin6_addr.s6_addr, 1); 897 return ret; 898 } 899 900 static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, 901 u8 mpa_rev_to_use) 902 { 903 int mpalen, wrlen, ret; 904 struct fw_ofld_tx_data_wr *req; 905 struct mpa_message *mpa; 906 struct mpa_v2_conn_params mpa_v2_params; 907 908 pr_debug("ep %p tid %u pd_len %d\n", 909 ep, ep->hwtid, ep->plen); 910 911 mpalen = sizeof(*mpa) + ep->plen; 912 if (mpa_rev_to_use == 2) 913 mpalen += sizeof(struct mpa_v2_conn_params); 914 wrlen = roundup(mpalen + sizeof *req, 16); 915 skb = get_skb(skb, wrlen, GFP_KERNEL); 916 if (!skb) { 917 connect_reply_upcall(ep, -ENOMEM); 918 return -ENOMEM; 919 } 920 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 921 922 req = skb_put_zero(skb, wrlen); 923 req->op_to_immdlen = cpu_to_be32( 924 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 925 FW_WR_COMPL_F | 926 FW_WR_IMMDLEN_V(mpalen)); 927 req->flowid_len16 = cpu_to_be32( 928 FW_WR_FLOWID_V(ep->hwtid) | 929 FW_WR_LEN16_V(wrlen >> 4)); 930 req->plen = cpu_to_be32(mpalen); 931 req->tunnel_to_proxy = cpu_to_be32( 932 FW_OFLD_TX_DATA_WR_FLUSH_F | 933 FW_OFLD_TX_DATA_WR_SHOVE_F); 934 935 mpa = (struct mpa_message *)(req + 1); 936 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 937 938 mpa->flags = 0; 939 if (crc_enabled) 940 mpa->flags |= MPA_CRC; 941 if (markers_enabled) { 942 mpa->flags |= MPA_MARKERS; 943 ep->mpa_attr.recv_marker_enabled = 1; 944 } else { 945 ep->mpa_attr.recv_marker_enabled = 0; 946 } 947 if (mpa_rev_to_use == 2) 948 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 949 950 mpa->private_data_size = htons(ep->plen); 951 mpa->revision = mpa_rev_to_use; 952 if (mpa_rev_to_use == 1) { 953 ep->tried_with_mpa_v1 = 1; 954 ep->retry_with_mpa_v1 = 0; 955 } 956 957 if (mpa_rev_to_use == 2) { 958 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 959 sizeof (struct mpa_v2_conn_params)); 960 pr_debug("initiator ird %u ord %u\n", ep->ird, 961 ep->ord); 962 mpa_v2_params.ird = htons((u16)ep->ird); 963 mpa_v2_params.ord = htons((u16)ep->ord); 964 965 if (peer2peer) { 966 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 967 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 968 mpa_v2_params.ord |= 969 htons(MPA_V2_RDMA_WRITE_RTR); 970 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 971 mpa_v2_params.ord |= 972 htons(MPA_V2_RDMA_READ_RTR); 973 } 974 memcpy(mpa->private_data, &mpa_v2_params, 975 sizeof(struct mpa_v2_conn_params)); 976 977 if (ep->plen) 978 memcpy(mpa->private_data + 979 sizeof(struct mpa_v2_conn_params), 980 ep->mpa_pkt + sizeof(*mpa), ep->plen); 981 } else 982 if (ep->plen) 983 memcpy(mpa->private_data, 984 ep->mpa_pkt + sizeof(*mpa), ep->plen); 985 986 /* 987 * Reference the mpa skb. This ensures the data area 988 * will remain in memory until the hw acks the tx. 989 * Function fw4_ack() will deref it. 990 */ 991 skb_get(skb); 992 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 993 ep->mpa_skb = skb; 994 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 995 if (ret) 996 return ret; 997 start_ep_timer(ep); 998 __state_set(&ep->com, MPA_REQ_SENT); 999 ep->mpa_attr.initiator = 1; 1000 ep->snd_seq += mpalen; 1001 return ret; 1002 } 1003 1004 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 1005 { 1006 int mpalen, wrlen; 1007 struct fw_ofld_tx_data_wr *req; 1008 struct mpa_message *mpa; 1009 struct sk_buff *skb; 1010 struct mpa_v2_conn_params mpa_v2_params; 1011 1012 pr_debug("ep %p tid %u pd_len %d\n", 1013 ep, ep->hwtid, ep->plen); 1014 1015 mpalen = sizeof(*mpa) + plen; 1016 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) 1017 mpalen += sizeof(struct mpa_v2_conn_params); 1018 wrlen = roundup(mpalen + sizeof *req, 16); 1019 1020 skb = get_skb(NULL, wrlen, GFP_KERNEL); 1021 if (!skb) { 1022 pr_err("%s - cannot alloc skb!\n", __func__); 1023 return -ENOMEM; 1024 } 1025 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1026 1027 req = skb_put_zero(skb, wrlen); 1028 req->op_to_immdlen = cpu_to_be32( 1029 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 1030 FW_WR_COMPL_F | 1031 FW_WR_IMMDLEN_V(mpalen)); 1032 req->flowid_len16 = cpu_to_be32( 1033 FW_WR_FLOWID_V(ep->hwtid) | 1034 FW_WR_LEN16_V(wrlen >> 4)); 1035 req->plen = cpu_to_be32(mpalen); 1036 req->tunnel_to_proxy = cpu_to_be32( 1037 FW_OFLD_TX_DATA_WR_FLUSH_F | 1038 FW_OFLD_TX_DATA_WR_SHOVE_F); 1039 1040 mpa = (struct mpa_message *)(req + 1); 1041 memset(mpa, 0, sizeof(*mpa)); 1042 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1043 mpa->flags = MPA_REJECT; 1044 mpa->revision = ep->mpa_attr.version; 1045 mpa->private_data_size = htons(plen); 1046 1047 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1048 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1049 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1050 sizeof (struct mpa_v2_conn_params)); 1051 mpa_v2_params.ird = htons(((u16)ep->ird) | 1052 (peer2peer ? MPA_V2_PEER2PEER_MODEL : 1053 0)); 1054 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? 1055 (p2p_type == 1056 FW_RI_INIT_P2PTYPE_RDMA_WRITE ? 1057 MPA_V2_RDMA_WRITE_RTR : p2p_type == 1058 FW_RI_INIT_P2PTYPE_READ_REQ ? 1059 MPA_V2_RDMA_READ_RTR : 0) : 0)); 1060 memcpy(mpa->private_data, &mpa_v2_params, 1061 sizeof(struct mpa_v2_conn_params)); 1062 1063 if (ep->plen) 1064 memcpy(mpa->private_data + 1065 sizeof(struct mpa_v2_conn_params), pdata, plen); 1066 } else 1067 if (plen) 1068 memcpy(mpa->private_data, pdata, plen); 1069 1070 /* 1071 * Reference the mpa skb again. This ensures the data area 1072 * will remain in memory until the hw acks the tx. 1073 * Function fw4_ack() will deref it. 1074 */ 1075 skb_get(skb); 1076 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1077 t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure); 1078 ep->mpa_skb = skb; 1079 ep->snd_seq += mpalen; 1080 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1081 } 1082 1083 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 1084 { 1085 int mpalen, wrlen; 1086 struct fw_ofld_tx_data_wr *req; 1087 struct mpa_message *mpa; 1088 struct sk_buff *skb; 1089 struct mpa_v2_conn_params mpa_v2_params; 1090 1091 pr_debug("ep %p tid %u pd_len %d\n", 1092 ep, ep->hwtid, ep->plen); 1093 1094 mpalen = sizeof(*mpa) + plen; 1095 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) 1096 mpalen += sizeof(struct mpa_v2_conn_params); 1097 wrlen = roundup(mpalen + sizeof *req, 16); 1098 1099 skb = get_skb(NULL, wrlen, GFP_KERNEL); 1100 if (!skb) { 1101 pr_err("%s - cannot alloc skb!\n", __func__); 1102 return -ENOMEM; 1103 } 1104 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1105 1106 req = skb_put_zero(skb, wrlen); 1107 req->op_to_immdlen = cpu_to_be32( 1108 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 1109 FW_WR_COMPL_F | 1110 FW_WR_IMMDLEN_V(mpalen)); 1111 req->flowid_len16 = cpu_to_be32( 1112 FW_WR_FLOWID_V(ep->hwtid) | 1113 FW_WR_LEN16_V(wrlen >> 4)); 1114 req->plen = cpu_to_be32(mpalen); 1115 req->tunnel_to_proxy = cpu_to_be32( 1116 FW_OFLD_TX_DATA_WR_FLUSH_F | 1117 FW_OFLD_TX_DATA_WR_SHOVE_F); 1118 1119 mpa = (struct mpa_message *)(req + 1); 1120 memset(mpa, 0, sizeof(*mpa)); 1121 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1122 mpa->flags = 0; 1123 if (ep->mpa_attr.crc_enabled) 1124 mpa->flags |= MPA_CRC; 1125 if (ep->mpa_attr.recv_marker_enabled) 1126 mpa->flags |= MPA_MARKERS; 1127 mpa->revision = ep->mpa_attr.version; 1128 mpa->private_data_size = htons(plen); 1129 1130 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1131 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1132 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1133 sizeof (struct mpa_v2_conn_params)); 1134 mpa_v2_params.ird = htons((u16)ep->ird); 1135 mpa_v2_params.ord = htons((u16)ep->ord); 1136 if (peer2peer && (ep->mpa_attr.p2p_type != 1137 FW_RI_INIT_P2PTYPE_DISABLED)) { 1138 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1139 1140 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 1141 mpa_v2_params.ord |= 1142 htons(MPA_V2_RDMA_WRITE_RTR); 1143 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 1144 mpa_v2_params.ord |= 1145 htons(MPA_V2_RDMA_READ_RTR); 1146 } 1147 1148 memcpy(mpa->private_data, &mpa_v2_params, 1149 sizeof(struct mpa_v2_conn_params)); 1150 1151 if (ep->plen) 1152 memcpy(mpa->private_data + 1153 sizeof(struct mpa_v2_conn_params), pdata, plen); 1154 } else 1155 if (plen) 1156 memcpy(mpa->private_data, pdata, plen); 1157 1158 /* 1159 * Reference the mpa skb. This ensures the data area 1160 * will remain in memory until the hw acks the tx. 1161 * Function fw4_ack() will deref it. 1162 */ 1163 skb_get(skb); 1164 t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure); 1165 ep->mpa_skb = skb; 1166 __state_set(&ep->com, MPA_REP_SENT); 1167 ep->snd_seq += mpalen; 1168 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1169 } 1170 1171 static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) 1172 { 1173 struct c4iw_ep *ep; 1174 struct cpl_act_establish *req = cplhdr(skb); 1175 unsigned short tcp_opt = ntohs(req->tcp_opt); 1176 unsigned int tid = GET_TID(req); 1177 unsigned int atid = TID_TID_G(ntohl(req->tos_atid)); 1178 struct tid_info *t = dev->rdev.lldi.tids; 1179 int ret; 1180 1181 ep = lookup_atid(t, atid); 1182 1183 pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid, 1184 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); 1185 1186 mutex_lock(&ep->com.mutex); 1187 dst_confirm(ep->dst); 1188 1189 /* setup the hwtid for this connection */ 1190 ep->hwtid = tid; 1191 cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family); 1192 insert_ep_tid(ep); 1193 1194 ep->snd_seq = be32_to_cpu(req->snd_isn); 1195 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 1196 ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); 1197 1198 set_emss(ep, tcp_opt); 1199 1200 /* dealloc the atid */ 1201 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 1202 cxgb4_free_atid(t, atid); 1203 set_bit(ACT_ESTAB, &ep->com.history); 1204 1205 /* start MPA negotiation */ 1206 ret = send_flowc(ep); 1207 if (ret) 1208 goto err; 1209 if (ep->retry_with_mpa_v1) 1210 ret = send_mpa_req(ep, skb, 1); 1211 else 1212 ret = send_mpa_req(ep, skb, mpa_rev); 1213 if (ret) 1214 goto err; 1215 mutex_unlock(&ep->com.mutex); 1216 return 0; 1217 err: 1218 mutex_unlock(&ep->com.mutex); 1219 connect_reply_upcall(ep, -ENOMEM); 1220 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1221 return 0; 1222 } 1223 1224 static void close_complete_upcall(struct c4iw_ep *ep, int status) 1225 { 1226 struct iw_cm_event event; 1227 1228 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 1229 memset(&event, 0, sizeof(event)); 1230 event.event = IW_CM_EVENT_CLOSE; 1231 event.status = status; 1232 if (ep->com.cm_id) { 1233 pr_debug("close complete delivered ep %p cm_id %p tid %u\n", 1234 ep, ep->com.cm_id, ep->hwtid); 1235 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1236 deref_cm_id(&ep->com); 1237 set_bit(CLOSE_UPCALL, &ep->com.history); 1238 } 1239 } 1240 1241 static void peer_close_upcall(struct c4iw_ep *ep) 1242 { 1243 struct iw_cm_event event; 1244 1245 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 1246 memset(&event, 0, sizeof(event)); 1247 event.event = IW_CM_EVENT_DISCONNECT; 1248 if (ep->com.cm_id) { 1249 pr_debug("peer close delivered ep %p cm_id %p tid %u\n", 1250 ep, ep->com.cm_id, ep->hwtid); 1251 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1252 set_bit(DISCONN_UPCALL, &ep->com.history); 1253 } 1254 } 1255 1256 static void peer_abort_upcall(struct c4iw_ep *ep) 1257 { 1258 struct iw_cm_event event; 1259 1260 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 1261 memset(&event, 0, sizeof(event)); 1262 event.event = IW_CM_EVENT_CLOSE; 1263 event.status = -ECONNRESET; 1264 if (ep->com.cm_id) { 1265 pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep, 1266 ep->com.cm_id, ep->hwtid); 1267 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1268 deref_cm_id(&ep->com); 1269 set_bit(ABORT_UPCALL, &ep->com.history); 1270 } 1271 } 1272 1273 static void connect_reply_upcall(struct c4iw_ep *ep, int status) 1274 { 1275 struct iw_cm_event event; 1276 1277 pr_debug("ep %p tid %u status %d\n", 1278 ep, ep->hwtid, status); 1279 memset(&event, 0, sizeof(event)); 1280 event.event = IW_CM_EVENT_CONNECT_REPLY; 1281 event.status = status; 1282 memcpy(&event.local_addr, &ep->com.local_addr, 1283 sizeof(ep->com.local_addr)); 1284 memcpy(&event.remote_addr, &ep->com.remote_addr, 1285 sizeof(ep->com.remote_addr)); 1286 1287 if ((status == 0) || (status == -ECONNREFUSED)) { 1288 if (!ep->tried_with_mpa_v1) { 1289 /* this means MPA_v2 is used */ 1290 event.ord = ep->ird; 1291 event.ird = ep->ord; 1292 event.private_data_len = ep->plen - 1293 sizeof(struct mpa_v2_conn_params); 1294 event.private_data = ep->mpa_pkt + 1295 sizeof(struct mpa_message) + 1296 sizeof(struct mpa_v2_conn_params); 1297 } else { 1298 /* this means MPA_v1 is used */ 1299 event.ord = cur_max_read_depth(ep->com.dev); 1300 event.ird = cur_max_read_depth(ep->com.dev); 1301 event.private_data_len = ep->plen; 1302 event.private_data = ep->mpa_pkt + 1303 sizeof(struct mpa_message); 1304 } 1305 } 1306 1307 pr_debug("ep %p tid %u status %d\n", ep, 1308 ep->hwtid, status); 1309 set_bit(CONN_RPL_UPCALL, &ep->com.history); 1310 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1311 1312 if (status < 0) 1313 deref_cm_id(&ep->com); 1314 } 1315 1316 static int connect_request_upcall(struct c4iw_ep *ep) 1317 { 1318 struct iw_cm_event event; 1319 int ret; 1320 1321 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 1322 memset(&event, 0, sizeof(event)); 1323 event.event = IW_CM_EVENT_CONNECT_REQUEST; 1324 memcpy(&event.local_addr, &ep->com.local_addr, 1325 sizeof(ep->com.local_addr)); 1326 memcpy(&event.remote_addr, &ep->com.remote_addr, 1327 sizeof(ep->com.remote_addr)); 1328 event.provider_data = ep; 1329 if (!ep->tried_with_mpa_v1) { 1330 /* this means MPA_v2 is used */ 1331 event.ord = ep->ord; 1332 event.ird = ep->ird; 1333 event.private_data_len = ep->plen - 1334 sizeof(struct mpa_v2_conn_params); 1335 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + 1336 sizeof(struct mpa_v2_conn_params); 1337 } else { 1338 /* this means MPA_v1 is used. Send max supported */ 1339 event.ord = cur_max_read_depth(ep->com.dev); 1340 event.ird = cur_max_read_depth(ep->com.dev); 1341 event.private_data_len = ep->plen; 1342 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 1343 } 1344 c4iw_get_ep(&ep->com); 1345 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, 1346 &event); 1347 if (ret) 1348 c4iw_put_ep(&ep->com); 1349 set_bit(CONNREQ_UPCALL, &ep->com.history); 1350 c4iw_put_ep(&ep->parent_ep->com); 1351 return ret; 1352 } 1353 1354 static void established_upcall(struct c4iw_ep *ep) 1355 { 1356 struct iw_cm_event event; 1357 1358 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 1359 memset(&event, 0, sizeof(event)); 1360 event.event = IW_CM_EVENT_ESTABLISHED; 1361 event.ird = ep->ord; 1362 event.ord = ep->ird; 1363 if (ep->com.cm_id) { 1364 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 1365 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1366 set_bit(ESTAB_UPCALL, &ep->com.history); 1367 } 1368 } 1369 1370 static int update_rx_credits(struct c4iw_ep *ep, u32 credits) 1371 { 1372 struct sk_buff *skb; 1373 u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16); 1374 u32 credit_dack; 1375 1376 pr_debug("ep %p tid %u credits %u\n", 1377 ep, ep->hwtid, credits); 1378 skb = get_skb(NULL, wrlen, GFP_KERNEL); 1379 if (!skb) { 1380 pr_err("update_rx_credits - cannot alloc skb!\n"); 1381 return 0; 1382 } 1383 1384 /* 1385 * If we couldn't specify the entire rcv window at connection setup 1386 * due to the limit in the number of bits in the RCV_BUFSIZ field, 1387 * then add the overage in to the credits returned. 1388 */ 1389 if (ep->rcv_win > RCV_BUFSIZ_M * 1024) 1390 credits += ep->rcv_win - RCV_BUFSIZ_M * 1024; 1391 1392 credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F | 1393 RX_DACK_MODE_V(dack_mode); 1394 1395 cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx, 1396 credit_dack); 1397 1398 c4iw_ofld_send(&ep->com.dev->rdev, skb); 1399 return credits; 1400 } 1401 1402 #define RELAXED_IRD_NEGOTIATION 1 1403 1404 /* 1405 * process_mpa_reply - process streaming mode MPA reply 1406 * 1407 * Returns: 1408 * 1409 * 0 upon success indicating a connect request was delivered to the ULP 1410 * or the mpa request is incomplete but valid so far. 1411 * 1412 * 1 if a failure requires the caller to close the connection. 1413 * 1414 * 2 if a failure requires the caller to abort the connection. 1415 */ 1416 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) 1417 { 1418 struct mpa_message *mpa; 1419 struct mpa_v2_conn_params *mpa_v2_params; 1420 u16 plen; 1421 u16 resp_ird, resp_ord; 1422 u8 rtr_mismatch = 0, insuff_ird = 0; 1423 struct c4iw_qp_attributes attrs; 1424 enum c4iw_qp_attr_mask mask; 1425 int err; 1426 int disconnect = 0; 1427 1428 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 1429 1430 /* 1431 * If we get more than the supported amount of private data 1432 * then we must fail this connection. 1433 */ 1434 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 1435 err = -EINVAL; 1436 goto err_stop_timer; 1437 } 1438 1439 /* 1440 * copy the new data into our accumulation buffer. 1441 */ 1442 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1443 skb->len); 1444 ep->mpa_pkt_len += skb->len; 1445 1446 /* 1447 * if we don't even have the mpa message, then bail. 1448 */ 1449 if (ep->mpa_pkt_len < sizeof(*mpa)) 1450 return 0; 1451 mpa = (struct mpa_message *) ep->mpa_pkt; 1452 1453 /* Validate MPA header. */ 1454 if (mpa->revision > mpa_rev) { 1455 pr_err("%s MPA version mismatch. Local = %d, Received = %d\n", 1456 __func__, mpa_rev, mpa->revision); 1457 err = -EPROTO; 1458 goto err_stop_timer; 1459 } 1460 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 1461 err = -EPROTO; 1462 goto err_stop_timer; 1463 } 1464 1465 plen = ntohs(mpa->private_data_size); 1466 1467 /* 1468 * Fail if there's too much private data. 1469 */ 1470 if (plen > MPA_MAX_PRIVATE_DATA) { 1471 err = -EPROTO; 1472 goto err_stop_timer; 1473 } 1474 1475 /* 1476 * If plen does not account for pkt size 1477 */ 1478 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1479 err = -EPROTO; 1480 goto err_stop_timer; 1481 } 1482 1483 ep->plen = (u8) plen; 1484 1485 /* 1486 * If we don't have all the pdata yet, then bail. 1487 * We'll continue process when more data arrives. 1488 */ 1489 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1490 return 0; 1491 1492 if (mpa->flags & MPA_REJECT) { 1493 err = -ECONNREFUSED; 1494 goto err_stop_timer; 1495 } 1496 1497 /* 1498 * Stop mpa timer. If it expired, then 1499 * we ignore the MPA reply. process_timeout() 1500 * will abort the connection. 1501 */ 1502 if (stop_ep_timer(ep)) 1503 return 0; 1504 1505 /* 1506 * If we get here we have accumulated the entire mpa 1507 * start reply message including private data. And 1508 * the MPA header is valid. 1509 */ 1510 __state_set(&ep->com, FPDU_MODE); 1511 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1512 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1513 ep->mpa_attr.version = mpa->revision; 1514 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1515 1516 if (mpa->revision == 2) { 1517 ep->mpa_attr.enhanced_rdma_conn = 1518 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1519 if (ep->mpa_attr.enhanced_rdma_conn) { 1520 mpa_v2_params = (struct mpa_v2_conn_params *) 1521 (ep->mpa_pkt + sizeof(*mpa)); 1522 resp_ird = ntohs(mpa_v2_params->ird) & 1523 MPA_V2_IRD_ORD_MASK; 1524 resp_ord = ntohs(mpa_v2_params->ord) & 1525 MPA_V2_IRD_ORD_MASK; 1526 pr_debug("responder ird %u ord %u ep ird %u ord %u\n", 1527 resp_ird, resp_ord, ep->ird, ep->ord); 1528 1529 /* 1530 * This is a double-check. Ideally, below checks are 1531 * not required since ird/ord stuff has been taken 1532 * care of in c4iw_accept_cr 1533 */ 1534 if (ep->ird < resp_ord) { 1535 if (RELAXED_IRD_NEGOTIATION && resp_ord <= 1536 ep->com.dev->rdev.lldi.max_ordird_qp) 1537 ep->ird = resp_ord; 1538 else 1539 insuff_ird = 1; 1540 } else if (ep->ird > resp_ord) { 1541 ep->ird = resp_ord; 1542 } 1543 if (ep->ord > resp_ird) { 1544 if (RELAXED_IRD_NEGOTIATION) 1545 ep->ord = resp_ird; 1546 else 1547 insuff_ird = 1; 1548 } 1549 if (insuff_ird) { 1550 err = -ENOMEM; 1551 ep->ird = resp_ord; 1552 ep->ord = resp_ird; 1553 } 1554 1555 if (ntohs(mpa_v2_params->ird) & 1556 MPA_V2_PEER2PEER_MODEL) { 1557 if (ntohs(mpa_v2_params->ord) & 1558 MPA_V2_RDMA_WRITE_RTR) 1559 ep->mpa_attr.p2p_type = 1560 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1561 else if (ntohs(mpa_v2_params->ord) & 1562 MPA_V2_RDMA_READ_RTR) 1563 ep->mpa_attr.p2p_type = 1564 FW_RI_INIT_P2PTYPE_READ_REQ; 1565 } 1566 } 1567 } else if (mpa->revision == 1) 1568 if (peer2peer) 1569 ep->mpa_attr.p2p_type = p2p_type; 1570 1571 pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n", 1572 ep->mpa_attr.crc_enabled, 1573 ep->mpa_attr.recv_marker_enabled, 1574 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1575 ep->mpa_attr.p2p_type, p2p_type); 1576 1577 /* 1578 * If responder's RTR does not match with that of initiator, assign 1579 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not 1580 * generated when moving QP to RTS state. 1581 * A TERM message will be sent after QP has moved to RTS state 1582 */ 1583 if ((ep->mpa_attr.version == 2) && peer2peer && 1584 (ep->mpa_attr.p2p_type != p2p_type)) { 1585 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1586 rtr_mismatch = 1; 1587 } 1588 1589 attrs.mpa_attr = ep->mpa_attr; 1590 attrs.max_ird = ep->ird; 1591 attrs.max_ord = ep->ord; 1592 attrs.llp_stream_handle = ep; 1593 attrs.next_state = C4IW_QP_STATE_RTS; 1594 1595 mask = C4IW_QP_ATTR_NEXT_STATE | 1596 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 1597 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 1598 1599 /* bind QP and TID with INIT_WR */ 1600 err = c4iw_modify_qp(ep->com.qp->rhp, 1601 ep->com.qp, mask, &attrs, 1); 1602 if (err) 1603 goto err; 1604 1605 /* 1606 * If responder's RTR requirement did not match with what initiator 1607 * supports, generate TERM message 1608 */ 1609 if (rtr_mismatch) { 1610 pr_err("%s: RTR mismatch, sending TERM\n", __func__); 1611 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1612 attrs.ecode = MPA_NOMATCH_RTR; 1613 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1614 attrs.send_term = 1; 1615 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1616 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1617 err = -ENOMEM; 1618 disconnect = 1; 1619 goto out; 1620 } 1621 1622 /* 1623 * Generate TERM if initiator IRD is not sufficient for responder 1624 * provided ORD. Currently, we do the same behaviour even when 1625 * responder provided IRD is also not sufficient as regards to 1626 * initiator ORD. 1627 */ 1628 if (insuff_ird) { 1629 pr_err("%s: Insufficient IRD, sending TERM\n", __func__); 1630 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1631 attrs.ecode = MPA_INSUFF_IRD; 1632 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1633 attrs.send_term = 1; 1634 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1635 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1636 err = -ENOMEM; 1637 disconnect = 1; 1638 goto out; 1639 } 1640 goto out; 1641 err_stop_timer: 1642 stop_ep_timer(ep); 1643 err: 1644 disconnect = 2; 1645 out: 1646 connect_reply_upcall(ep, err); 1647 return disconnect; 1648 } 1649 1650 /* 1651 * process_mpa_request - process streaming mode MPA request 1652 * 1653 * Returns: 1654 * 1655 * 0 upon success indicating a connect request was delivered to the ULP 1656 * or the mpa request is incomplete but valid so far. 1657 * 1658 * 1 if a failure requires the caller to close the connection. 1659 * 1660 * 2 if a failure requires the caller to abort the connection. 1661 */ 1662 static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) 1663 { 1664 struct mpa_message *mpa; 1665 struct mpa_v2_conn_params *mpa_v2_params; 1666 u16 plen; 1667 1668 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 1669 1670 /* 1671 * If we get more than the supported amount of private data 1672 * then we must fail this connection. 1673 */ 1674 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) 1675 goto err_stop_timer; 1676 1677 pr_debug("enter (%s line %u)\n", __FILE__, __LINE__); 1678 1679 /* 1680 * Copy the new data into our accumulation buffer. 1681 */ 1682 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1683 skb->len); 1684 ep->mpa_pkt_len += skb->len; 1685 1686 /* 1687 * If we don't even have the mpa message, then bail. 1688 * We'll continue process when more data arrives. 1689 */ 1690 if (ep->mpa_pkt_len < sizeof(*mpa)) 1691 return 0; 1692 1693 pr_debug("enter (%s line %u)\n", __FILE__, __LINE__); 1694 mpa = (struct mpa_message *) ep->mpa_pkt; 1695 1696 /* 1697 * Validate MPA Header. 1698 */ 1699 if (mpa->revision > mpa_rev) { 1700 pr_err("%s MPA version mismatch. Local = %d, Received = %d\n", 1701 __func__, mpa_rev, mpa->revision); 1702 goto err_stop_timer; 1703 } 1704 1705 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) 1706 goto err_stop_timer; 1707 1708 plen = ntohs(mpa->private_data_size); 1709 1710 /* 1711 * Fail if there's too much private data. 1712 */ 1713 if (plen > MPA_MAX_PRIVATE_DATA) 1714 goto err_stop_timer; 1715 1716 /* 1717 * If plen does not account for pkt size 1718 */ 1719 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) 1720 goto err_stop_timer; 1721 ep->plen = (u8) plen; 1722 1723 /* 1724 * If we don't have all the pdata yet, then bail. 1725 */ 1726 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1727 return 0; 1728 1729 /* 1730 * If we get here we have accumulated the entire mpa 1731 * start reply message including private data. 1732 */ 1733 ep->mpa_attr.initiator = 0; 1734 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1735 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1736 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1737 ep->mpa_attr.version = mpa->revision; 1738 if (mpa->revision == 1) 1739 ep->tried_with_mpa_v1 = 1; 1740 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1741 1742 if (mpa->revision == 2) { 1743 ep->mpa_attr.enhanced_rdma_conn = 1744 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1745 if (ep->mpa_attr.enhanced_rdma_conn) { 1746 mpa_v2_params = (struct mpa_v2_conn_params *) 1747 (ep->mpa_pkt + sizeof(*mpa)); 1748 ep->ird = ntohs(mpa_v2_params->ird) & 1749 MPA_V2_IRD_ORD_MASK; 1750 ep->ird = min_t(u32, ep->ird, 1751 cur_max_read_depth(ep->com.dev)); 1752 ep->ord = ntohs(mpa_v2_params->ord) & 1753 MPA_V2_IRD_ORD_MASK; 1754 ep->ord = min_t(u32, ep->ord, 1755 cur_max_read_depth(ep->com.dev)); 1756 pr_debug("initiator ird %u ord %u\n", 1757 ep->ird, ep->ord); 1758 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) 1759 if (peer2peer) { 1760 if (ntohs(mpa_v2_params->ord) & 1761 MPA_V2_RDMA_WRITE_RTR) 1762 ep->mpa_attr.p2p_type = 1763 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1764 else if (ntohs(mpa_v2_params->ord) & 1765 MPA_V2_RDMA_READ_RTR) 1766 ep->mpa_attr.p2p_type = 1767 FW_RI_INIT_P2PTYPE_READ_REQ; 1768 } 1769 } 1770 } else if (mpa->revision == 1) 1771 if (peer2peer) 1772 ep->mpa_attr.p2p_type = p2p_type; 1773 1774 pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n", 1775 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1776 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1777 ep->mpa_attr.p2p_type); 1778 1779 __state_set(&ep->com, MPA_REQ_RCVD); 1780 1781 /* drive upcall */ 1782 mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING); 1783 if (ep->parent_ep->com.state != DEAD) { 1784 if (connect_request_upcall(ep)) 1785 goto err_unlock_parent; 1786 } else { 1787 goto err_unlock_parent; 1788 } 1789 mutex_unlock(&ep->parent_ep->com.mutex); 1790 return 0; 1791 1792 err_unlock_parent: 1793 mutex_unlock(&ep->parent_ep->com.mutex); 1794 goto err_out; 1795 err_stop_timer: 1796 (void)stop_ep_timer(ep); 1797 err_out: 1798 return 2; 1799 } 1800 1801 static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) 1802 { 1803 struct c4iw_ep *ep; 1804 struct cpl_rx_data *hdr = cplhdr(skb); 1805 unsigned int dlen = ntohs(hdr->len); 1806 unsigned int tid = GET_TID(hdr); 1807 __u8 status = hdr->status; 1808 int disconnect = 0; 1809 1810 ep = get_ep_from_tid(dev, tid); 1811 if (!ep) 1812 return 0; 1813 pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen); 1814 skb_pull(skb, sizeof(*hdr)); 1815 skb_trim(skb, dlen); 1816 mutex_lock(&ep->com.mutex); 1817 1818 switch (ep->com.state) { 1819 case MPA_REQ_SENT: 1820 update_rx_credits(ep, dlen); 1821 ep->rcv_seq += dlen; 1822 disconnect = process_mpa_reply(ep, skb); 1823 break; 1824 case MPA_REQ_WAIT: 1825 update_rx_credits(ep, dlen); 1826 ep->rcv_seq += dlen; 1827 disconnect = process_mpa_request(ep, skb); 1828 break; 1829 case FPDU_MODE: { 1830 struct c4iw_qp_attributes attrs; 1831 1832 update_rx_credits(ep, dlen); 1833 if (status) 1834 pr_err("%s Unexpected streaming data." \ 1835 " qpid %u ep %p state %d tid %u status %d\n", 1836 __func__, ep->com.qp->wq.sq.qid, ep, 1837 ep->com.state, ep->hwtid, status); 1838 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1839 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1840 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1841 disconnect = 1; 1842 break; 1843 } 1844 default: 1845 break; 1846 } 1847 mutex_unlock(&ep->com.mutex); 1848 if (disconnect) 1849 c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL); 1850 c4iw_put_ep(&ep->com); 1851 return 0; 1852 } 1853 1854 static void complete_cached_srq_buffers(struct c4iw_ep *ep, 1855 __be32 srqidx_status) 1856 { 1857 enum chip_type adapter_type; 1858 u32 srqidx; 1859 1860 adapter_type = ep->com.dev->rdev.lldi.adapter_type; 1861 srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(srqidx_status)); 1862 1863 /* 1864 * If this TCB had a srq buffer cached, then we must complete 1865 * it. For user mode, that means saving the srqidx in the 1866 * user/kernel status page for this qp. For kernel mode, just 1867 * synthesize the CQE now. 1868 */ 1869 if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T5 && srqidx) { 1870 if (ep->com.qp->ibqp.uobject) 1871 t4_set_wq_in_error(&ep->com.qp->wq, srqidx); 1872 else 1873 c4iw_flush_srqidx(ep->com.qp, srqidx); 1874 } 1875 } 1876 1877 static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1878 { 1879 struct c4iw_ep *ep; 1880 struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb); 1881 int release = 0; 1882 unsigned int tid = GET_TID(rpl); 1883 1884 ep = get_ep_from_tid(dev, tid); 1885 if (!ep) { 1886 pr_warn("Abort rpl to freed endpoint\n"); 1887 return 0; 1888 } 1889 1890 complete_cached_srq_buffers(ep, rpl->srqidx_status); 1891 1892 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 1893 mutex_lock(&ep->com.mutex); 1894 switch (ep->com.state) { 1895 case ABORTING: 1896 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); 1897 __state_set(&ep->com, DEAD); 1898 release = 1; 1899 break; 1900 default: 1901 pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state); 1902 break; 1903 } 1904 mutex_unlock(&ep->com.mutex); 1905 1906 if (release) 1907 release_ep_resources(ep); 1908 c4iw_put_ep(&ep->com); 1909 return 0; 1910 } 1911 1912 static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) 1913 { 1914 struct sk_buff *skb; 1915 struct fw_ofld_connection_wr *req; 1916 unsigned int mtu_idx; 1917 u32 wscale; 1918 struct sockaddr_in *sin; 1919 int win; 1920 1921 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1922 req = __skb_put_zero(skb, sizeof(*req)); 1923 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); 1924 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); 1925 req->le.filter = cpu_to_be32(cxgb4_select_ntuple( 1926 ep->com.dev->rdev.lldi.ports[0], 1927 ep->l2t)); 1928 sin = (struct sockaddr_in *)&ep->com.local_addr; 1929 req->le.lport = sin->sin_port; 1930 req->le.u.ipv4.lip = sin->sin_addr.s_addr; 1931 sin = (struct sockaddr_in *)&ep->com.remote_addr; 1932 req->le.pport = sin->sin_port; 1933 req->le.u.ipv4.pip = sin->sin_addr.s_addr; 1934 req->tcb.t_state_to_astid = 1935 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) | 1936 FW_OFLD_CONNECTION_WR_ASTID_V(atid)); 1937 req->tcb.cplrxdataack_cplpassacceptrpl = 1938 htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F); 1939 req->tcb.tx_max = (__force __be32) jiffies; 1940 req->tcb.rcv_adv = htons(1); 1941 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 1942 enable_tcp_timestamps, 1943 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); 1944 wscale = cxgb_compute_wscale(rcv_win); 1945 1946 /* 1947 * Specify the largest window that will fit in opt0. The 1948 * remainder will be specified in the rx_data_ack. 1949 */ 1950 win = ep->rcv_win >> 10; 1951 if (win > RCV_BUFSIZ_M) 1952 win = RCV_BUFSIZ_M; 1953 1954 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F | 1955 (nocong ? NO_CONG_F : 0) | 1956 KEEP_ALIVE_F | 1957 DELACK_F | 1958 WND_SCALE_V(wscale) | 1959 MSS_IDX_V(mtu_idx) | 1960 L2T_IDX_V(ep->l2t->idx) | 1961 TX_CHAN_V(ep->tx_chan) | 1962 SMAC_SEL_V(ep->smac_idx) | 1963 DSCP_V(ep->tos >> 2) | 1964 ULP_MODE_V(ULP_MODE_TCPDDP) | 1965 RCV_BUFSIZ_V(win)); 1966 req->tcb.opt2 = (__force __be32) (PACE_V(1) | 1967 TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | 1968 RX_CHANNEL_V(0) | 1969 CCTRL_ECN_V(enable_ecn) | 1970 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid)); 1971 if (enable_tcp_timestamps) 1972 req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F; 1973 if (enable_tcp_sack) 1974 req->tcb.opt2 |= (__force __be32)SACK_EN_F; 1975 if (wscale && enable_tcp_window_scaling) 1976 req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F; 1977 req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0); 1978 req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2); 1979 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); 1980 set_bit(ACT_OFLD_CONN, &ep->com.history); 1981 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1982 } 1983 1984 /* 1985 * Some of the error codes above implicitly indicate that there is no TID 1986 * allocated with the result of an ACT_OPEN. We use this predicate to make 1987 * that explicit. 1988 */ 1989 static inline int act_open_has_tid(int status) 1990 { 1991 return (status != CPL_ERR_TCAM_PARITY && 1992 status != CPL_ERR_TCAM_MISS && 1993 status != CPL_ERR_TCAM_FULL && 1994 status != CPL_ERR_CONN_EXIST_SYNRECV && 1995 status != CPL_ERR_CONN_EXIST); 1996 } 1997 1998 static char *neg_adv_str(unsigned int status) 1999 { 2000 switch (status) { 2001 case CPL_ERR_RTX_NEG_ADVICE: 2002 return "Retransmit timeout"; 2003 case CPL_ERR_PERSIST_NEG_ADVICE: 2004 return "Persist timeout"; 2005 case CPL_ERR_KEEPALV_NEG_ADVICE: 2006 return "Keepalive timeout"; 2007 default: 2008 return "Unknown"; 2009 } 2010 } 2011 2012 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi) 2013 { 2014 ep->snd_win = snd_win; 2015 ep->rcv_win = rcv_win; 2016 pr_debug("snd_win %d rcv_win %d\n", 2017 ep->snd_win, ep->rcv_win); 2018 } 2019 2020 #define ACT_OPEN_RETRY_COUNT 2 2021 2022 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, 2023 struct dst_entry *dst, struct c4iw_dev *cdev, 2024 bool clear_mpa_v1, enum chip_type adapter_type, u8 tos) 2025 { 2026 struct neighbour *n; 2027 int err, step; 2028 struct net_device *pdev; 2029 2030 n = dst_neigh_lookup(dst, peer_ip); 2031 if (!n) 2032 return -ENODEV; 2033 2034 rcu_read_lock(); 2035 err = -ENOMEM; 2036 if (n->dev->flags & IFF_LOOPBACK) { 2037 if (iptype == 4) 2038 pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip); 2039 else if (IS_ENABLED(CONFIG_IPV6)) 2040 for_each_netdev(&init_net, pdev) { 2041 if (ipv6_chk_addr(&init_net, 2042 (struct in6_addr *)peer_ip, 2043 pdev, 1)) 2044 break; 2045 } 2046 else 2047 pdev = NULL; 2048 2049 if (!pdev) { 2050 err = -ENODEV; 2051 goto out; 2052 } 2053 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 2054 n, pdev, rt_tos2priority(tos)); 2055 if (!ep->l2t) { 2056 dev_put(pdev); 2057 goto out; 2058 } 2059 ep->mtu = pdev->mtu; 2060 ep->tx_chan = cxgb4_port_chan(pdev); 2061 ep->smac_idx = cxgb4_tp_smt_idx(adapter_type, 2062 cxgb4_port_viid(pdev)); 2063 step = cdev->rdev.lldi.ntxq / 2064 cdev->rdev.lldi.nchan; 2065 ep->txq_idx = cxgb4_port_idx(pdev) * step; 2066 step = cdev->rdev.lldi.nrxq / 2067 cdev->rdev.lldi.nchan; 2068 ep->ctrlq_idx = cxgb4_port_idx(pdev); 2069 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 2070 cxgb4_port_idx(pdev) * step]; 2071 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); 2072 dev_put(pdev); 2073 } else { 2074 pdev = get_real_dev(n->dev); 2075 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 2076 n, pdev, 0); 2077 if (!ep->l2t) 2078 goto out; 2079 ep->mtu = dst_mtu(dst); 2080 ep->tx_chan = cxgb4_port_chan(pdev); 2081 ep->smac_idx = cxgb4_tp_smt_idx(adapter_type, 2082 cxgb4_port_viid(pdev)); 2083 step = cdev->rdev.lldi.ntxq / 2084 cdev->rdev.lldi.nchan; 2085 ep->txq_idx = cxgb4_port_idx(pdev) * step; 2086 ep->ctrlq_idx = cxgb4_port_idx(pdev); 2087 step = cdev->rdev.lldi.nrxq / 2088 cdev->rdev.lldi.nchan; 2089 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 2090 cxgb4_port_idx(pdev) * step]; 2091 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); 2092 2093 if (clear_mpa_v1) { 2094 ep->retry_with_mpa_v1 = 0; 2095 ep->tried_with_mpa_v1 = 0; 2096 } 2097 } 2098 err = 0; 2099 out: 2100 rcu_read_unlock(); 2101 2102 neigh_release(n); 2103 2104 return err; 2105 } 2106 2107 static int c4iw_reconnect(struct c4iw_ep *ep) 2108 { 2109 int err = 0; 2110 int size = 0; 2111 struct sockaddr_in *laddr = (struct sockaddr_in *) 2112 &ep->com.cm_id->m_local_addr; 2113 struct sockaddr_in *raddr = (struct sockaddr_in *) 2114 &ep->com.cm_id->m_remote_addr; 2115 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *) 2116 &ep->com.cm_id->m_local_addr; 2117 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *) 2118 &ep->com.cm_id->m_remote_addr; 2119 int iptype; 2120 __u8 *ra; 2121 2122 pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id); 2123 c4iw_init_wr_wait(ep->com.wr_waitp); 2124 2125 /* When MPA revision is different on nodes, the node with MPA_rev=2 2126 * tries to reconnect with MPA_rev 1 for the same EP through 2127 * c4iw_reconnect(), where the same EP is assigned with new tid for 2128 * further connection establishment. As we are using the same EP pointer 2129 * for reconnect, few skbs are used during the previous c4iw_connect(), 2130 * which leaves the EP with inadequate skbs for further 2131 * c4iw_reconnect(), Further causing a crash due to an empty 2132 * skb_list() during peer_abort(). Allocate skbs which is already used. 2133 */ 2134 size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list)); 2135 if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) { 2136 err = -ENOMEM; 2137 goto fail1; 2138 } 2139 2140 /* 2141 * Allocate an active TID to initiate a TCP connection. 2142 */ 2143 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); 2144 if (ep->atid == -1) { 2145 pr_err("%s - cannot alloc atid\n", __func__); 2146 err = -ENOMEM; 2147 goto fail2; 2148 } 2149 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid); 2150 2151 /* find a route */ 2152 if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) { 2153 ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev, 2154 laddr->sin_addr.s_addr, 2155 raddr->sin_addr.s_addr, 2156 laddr->sin_port, 2157 raddr->sin_port, ep->com.cm_id->tos); 2158 iptype = 4; 2159 ra = (__u8 *)&raddr->sin_addr; 2160 } else { 2161 ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi, 2162 get_real_dev, 2163 laddr6->sin6_addr.s6_addr, 2164 raddr6->sin6_addr.s6_addr, 2165 laddr6->sin6_port, 2166 raddr6->sin6_port, 0, 2167 raddr6->sin6_scope_id); 2168 iptype = 6; 2169 ra = (__u8 *)&raddr6->sin6_addr; 2170 } 2171 if (!ep->dst) { 2172 pr_err("%s - cannot find route\n", __func__); 2173 err = -EHOSTUNREACH; 2174 goto fail3; 2175 } 2176 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false, 2177 ep->com.dev->rdev.lldi.adapter_type, 2178 ep->com.cm_id->tos); 2179 if (err) { 2180 pr_err("%s - cannot alloc l2e\n", __func__); 2181 goto fail4; 2182 } 2183 2184 pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 2185 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 2186 ep->l2t->idx); 2187 2188 state_set(&ep->com, CONNECTING); 2189 ep->tos = ep->com.cm_id->tos; 2190 2191 /* send connect request to rnic */ 2192 err = send_connect(ep); 2193 if (!err) 2194 goto out; 2195 2196 cxgb4_l2t_release(ep->l2t); 2197 fail4: 2198 dst_release(ep->dst); 2199 fail3: 2200 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 2201 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 2202 fail2: 2203 /* 2204 * remember to send notification to upper layer. 2205 * We are in here so the upper layer is not aware that this is 2206 * re-connect attempt and so, upper layer is still waiting for 2207 * response of 1st connect request. 2208 */ 2209 connect_reply_upcall(ep, -ECONNRESET); 2210 fail1: 2211 c4iw_put_ep(&ep->com); 2212 out: 2213 return err; 2214 } 2215 2216 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2217 { 2218 struct c4iw_ep *ep; 2219 struct cpl_act_open_rpl *rpl = cplhdr(skb); 2220 unsigned int atid = TID_TID_G(AOPEN_ATID_G( 2221 ntohl(rpl->atid_status))); 2222 struct tid_info *t = dev->rdev.lldi.tids; 2223 int status = AOPEN_STATUS_G(ntohl(rpl->atid_status)); 2224 struct sockaddr_in *la; 2225 struct sockaddr_in *ra; 2226 struct sockaddr_in6 *la6; 2227 struct sockaddr_in6 *ra6; 2228 int ret = 0; 2229 2230 ep = lookup_atid(t, atid); 2231 la = (struct sockaddr_in *)&ep->com.local_addr; 2232 ra = (struct sockaddr_in *)&ep->com.remote_addr; 2233 la6 = (struct sockaddr_in6 *)&ep->com.local_addr; 2234 ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr; 2235 2236 pr_debug("ep %p atid %u status %u errno %d\n", ep, atid, 2237 status, status2errno(status)); 2238 2239 if (cxgb_is_neg_adv(status)) { 2240 pr_debug("Connection problems for atid %u status %u (%s)\n", 2241 atid, status, neg_adv_str(status)); 2242 ep->stats.connect_neg_adv++; 2243 mutex_lock(&dev->rdev.stats.lock); 2244 dev->rdev.stats.neg_adv++; 2245 mutex_unlock(&dev->rdev.stats.lock); 2246 return 0; 2247 } 2248 2249 set_bit(ACT_OPEN_RPL, &ep->com.history); 2250 2251 /* 2252 * Log interesting failures. 2253 */ 2254 switch (status) { 2255 case CPL_ERR_CONN_RESET: 2256 case CPL_ERR_CONN_TIMEDOUT: 2257 break; 2258 case CPL_ERR_TCAM_FULL: 2259 mutex_lock(&dev->rdev.stats.lock); 2260 dev->rdev.stats.tcam_full++; 2261 mutex_unlock(&dev->rdev.stats.lock); 2262 if (ep->com.local_addr.ss_family == AF_INET && 2263 dev->rdev.lldi.enable_fw_ofld_conn) { 2264 ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G( 2265 ntohl(rpl->atid_status)))); 2266 if (ret) 2267 goto fail; 2268 return 0; 2269 } 2270 break; 2271 case CPL_ERR_CONN_EXIST: 2272 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2273 set_bit(ACT_RETRY_INUSE, &ep->com.history); 2274 if (ep->com.remote_addr.ss_family == AF_INET6) { 2275 struct sockaddr_in6 *sin6 = 2276 (struct sockaddr_in6 *) 2277 &ep->com.local_addr; 2278 cxgb4_clip_release( 2279 ep->com.dev->rdev.lldi.ports[0], 2280 (const u32 *) 2281 &sin6->sin6_addr.s6_addr, 1); 2282 } 2283 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, 2284 atid); 2285 cxgb4_free_atid(t, atid); 2286 dst_release(ep->dst); 2287 cxgb4_l2t_release(ep->l2t); 2288 c4iw_reconnect(ep); 2289 return 0; 2290 } 2291 break; 2292 default: 2293 if (ep->com.local_addr.ss_family == AF_INET) { 2294 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n", 2295 atid, status, status2errno(status), 2296 &la->sin_addr.s_addr, ntohs(la->sin_port), 2297 &ra->sin_addr.s_addr, ntohs(ra->sin_port)); 2298 } else { 2299 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n", 2300 atid, status, status2errno(status), 2301 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port), 2302 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port)); 2303 } 2304 break; 2305 } 2306 2307 fail: 2308 connect_reply_upcall(ep, status2errno(status)); 2309 state_set(&ep->com, DEAD); 2310 2311 if (ep->com.remote_addr.ss_family == AF_INET6) { 2312 struct sockaddr_in6 *sin6 = 2313 (struct sockaddr_in6 *)&ep->com.local_addr; 2314 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 2315 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 2316 } 2317 if (status && act_open_has_tid(status)) 2318 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl), 2319 ep->com.local_addr.ss_family); 2320 2321 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 2322 cxgb4_free_atid(t, atid); 2323 dst_release(ep->dst); 2324 cxgb4_l2t_release(ep->l2t); 2325 c4iw_put_ep(&ep->com); 2326 2327 return 0; 2328 } 2329 2330 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2331 { 2332 struct cpl_pass_open_rpl *rpl = cplhdr(skb); 2333 unsigned int stid = GET_TID(rpl); 2334 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); 2335 2336 if (!ep) { 2337 pr_warn("%s stid %d lookup failure!\n", __func__, stid); 2338 goto out; 2339 } 2340 pr_debug("ep %p status %d error %d\n", ep, 2341 rpl->status, status2errno(rpl->status)); 2342 c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status)); 2343 c4iw_put_ep(&ep->com); 2344 out: 2345 return 0; 2346 } 2347 2348 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2349 { 2350 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); 2351 unsigned int stid = GET_TID(rpl); 2352 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); 2353 2354 if (!ep) { 2355 pr_warn("%s stid %d lookup failure!\n", __func__, stid); 2356 goto out; 2357 } 2358 pr_debug("ep %p\n", ep); 2359 c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status)); 2360 c4iw_put_ep(&ep->com); 2361 out: 2362 return 0; 2363 } 2364 2365 static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, 2366 struct cpl_pass_accept_req *req) 2367 { 2368 struct cpl_pass_accept_rpl *rpl; 2369 unsigned int mtu_idx; 2370 u64 opt0; 2371 u32 opt2; 2372 u32 wscale; 2373 struct cpl_t5_pass_accept_rpl *rpl5 = NULL; 2374 int win; 2375 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; 2376 2377 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 2378 2379 skb_get(skb); 2380 rpl = cplhdr(skb); 2381 if (!is_t4(adapter_type)) { 2382 skb_trim(skb, roundup(sizeof(*rpl5), 16)); 2383 rpl5 = (void *)rpl; 2384 INIT_TP_WR(rpl5, ep->hwtid); 2385 } else { 2386 skb_trim(skb, sizeof(*rpl)); 2387 INIT_TP_WR(rpl, ep->hwtid); 2388 } 2389 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 2390 ep->hwtid)); 2391 2392 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 2393 enable_tcp_timestamps && req->tcpopt.tstamp, 2394 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); 2395 wscale = cxgb_compute_wscale(rcv_win); 2396 2397 /* 2398 * Specify the largest window that will fit in opt0. The 2399 * remainder will be specified in the rx_data_ack. 2400 */ 2401 win = ep->rcv_win >> 10; 2402 if (win > RCV_BUFSIZ_M) 2403 win = RCV_BUFSIZ_M; 2404 opt0 = (nocong ? NO_CONG_F : 0) | 2405 KEEP_ALIVE_F | 2406 DELACK_F | 2407 WND_SCALE_V(wscale) | 2408 MSS_IDX_V(mtu_idx) | 2409 L2T_IDX_V(ep->l2t->idx) | 2410 TX_CHAN_V(ep->tx_chan) | 2411 SMAC_SEL_V(ep->smac_idx) | 2412 DSCP_V(ep->tos >> 2) | 2413 ULP_MODE_V(ULP_MODE_TCPDDP) | 2414 RCV_BUFSIZ_V(win); 2415 opt2 = RX_CHANNEL_V(0) | 2416 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); 2417 2418 if (enable_tcp_timestamps && req->tcpopt.tstamp) 2419 opt2 |= TSTAMPS_EN_F; 2420 if (enable_tcp_sack && req->tcpopt.sack) 2421 opt2 |= SACK_EN_F; 2422 if (wscale && enable_tcp_window_scaling) 2423 opt2 |= WND_SCALE_EN_F; 2424 if (enable_ecn) { 2425 const struct tcphdr *tcph; 2426 u32 hlen = ntohl(req->hdr_len); 2427 2428 if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5) 2429 tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) + 2430 IP_HDR_LEN_G(hlen); 2431 else 2432 tcph = (const void *)(req + 1) + 2433 T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen); 2434 if (tcph->ece && tcph->cwr) 2435 opt2 |= CCTRL_ECN_V(1); 2436 } 2437 if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) { 2438 u32 isn = (prandom_u32() & ~7UL) - 1; 2439 opt2 |= T5_OPT_2_VALID_F; 2440 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); 2441 opt2 |= T5_ISS_F; 2442 rpl5 = (void *)rpl; 2443 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); 2444 if (peer2peer) 2445 isn += 4; 2446 rpl5->iss = cpu_to_be32(isn); 2447 pr_debug("iss %u\n", be32_to_cpu(rpl5->iss)); 2448 } 2449 2450 rpl->opt0 = cpu_to_be64(opt0); 2451 rpl->opt2 = cpu_to_be32(opt2); 2452 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 2453 t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure); 2454 2455 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 2456 } 2457 2458 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb) 2459 { 2460 pr_debug("c4iw_dev %p tid %u\n", dev, hwtid); 2461 skb_trim(skb, sizeof(struct cpl_tid_release)); 2462 release_tid(&dev->rdev, hwtid, skb); 2463 return; 2464 } 2465 2466 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) 2467 { 2468 struct c4iw_ep *child_ep = NULL, *parent_ep; 2469 struct cpl_pass_accept_req *req = cplhdr(skb); 2470 unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); 2471 struct tid_info *t = dev->rdev.lldi.tids; 2472 unsigned int hwtid = GET_TID(req); 2473 struct dst_entry *dst; 2474 __u8 local_ip[16], peer_ip[16]; 2475 __be16 local_port, peer_port; 2476 struct sockaddr_in6 *sin6; 2477 int err; 2478 u16 peer_mss = ntohs(req->tcpopt.mss); 2479 int iptype; 2480 unsigned short hdrs; 2481 u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); 2482 2483 parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid); 2484 if (!parent_ep) { 2485 pr_err("%s connect request on invalid stid %d\n", 2486 __func__, stid); 2487 goto reject; 2488 } 2489 2490 if (state_read(&parent_ep->com) != LISTEN) { 2491 pr_err("%s - listening ep not in LISTEN\n", __func__); 2492 goto reject; 2493 } 2494 2495 cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, 2496 &iptype, local_ip, peer_ip, &local_port, &peer_port); 2497 2498 /* Find output route */ 2499 if (iptype == 4) { 2500 pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n" 2501 , parent_ep, hwtid, 2502 local_ip, peer_ip, ntohs(local_port), 2503 ntohs(peer_port), peer_mss); 2504 dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, 2505 *(__be32 *)local_ip, *(__be32 *)peer_ip, 2506 local_port, peer_port, tos); 2507 } else { 2508 pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" 2509 , parent_ep, hwtid, 2510 local_ip, peer_ip, ntohs(local_port), 2511 ntohs(peer_port), peer_mss); 2512 dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev, 2513 local_ip, peer_ip, local_port, peer_port, 2514 PASS_OPEN_TOS_G(ntohl(req->tos_stid)), 2515 ((struct sockaddr_in6 *) 2516 &parent_ep->com.local_addr)->sin6_scope_id); 2517 } 2518 if (!dst) { 2519 pr_err("%s - failed to find dst entry!\n", __func__); 2520 goto reject; 2521 } 2522 2523 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 2524 if (!child_ep) { 2525 pr_err("%s - failed to allocate ep entry!\n", __func__); 2526 dst_release(dst); 2527 goto reject; 2528 } 2529 2530 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false, 2531 parent_ep->com.dev->rdev.lldi.adapter_type, tos); 2532 if (err) { 2533 pr_err("%s - failed to allocate l2t entry!\n", __func__); 2534 dst_release(dst); 2535 kfree(child_ep); 2536 goto reject; 2537 } 2538 2539 hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + 2540 sizeof(struct tcphdr) + 2541 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0); 2542 if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) 2543 child_ep->mtu = peer_mss + hdrs; 2544 2545 skb_queue_head_init(&child_ep->com.ep_skb_list); 2546 if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF)) 2547 goto fail; 2548 2549 state_set(&child_ep->com, CONNECTING); 2550 child_ep->com.dev = dev; 2551 child_ep->com.cm_id = NULL; 2552 2553 if (iptype == 4) { 2554 struct sockaddr_in *sin = (struct sockaddr_in *) 2555 &child_ep->com.local_addr; 2556 2557 sin->sin_family = AF_INET; 2558 sin->sin_port = local_port; 2559 sin->sin_addr.s_addr = *(__be32 *)local_ip; 2560 2561 sin = (struct sockaddr_in *)&child_ep->com.local_addr; 2562 sin->sin_family = AF_INET; 2563 sin->sin_port = ((struct sockaddr_in *) 2564 &parent_ep->com.local_addr)->sin_port; 2565 sin->sin_addr.s_addr = *(__be32 *)local_ip; 2566 2567 sin = (struct sockaddr_in *)&child_ep->com.remote_addr; 2568 sin->sin_family = AF_INET; 2569 sin->sin_port = peer_port; 2570 sin->sin_addr.s_addr = *(__be32 *)peer_ip; 2571 } else { 2572 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; 2573 sin6->sin6_family = PF_INET6; 2574 sin6->sin6_port = local_port; 2575 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); 2576 2577 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; 2578 sin6->sin6_family = PF_INET6; 2579 sin6->sin6_port = ((struct sockaddr_in6 *) 2580 &parent_ep->com.local_addr)->sin6_port; 2581 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); 2582 2583 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr; 2584 sin6->sin6_family = PF_INET6; 2585 sin6->sin6_port = peer_port; 2586 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); 2587 } 2588 2589 c4iw_get_ep(&parent_ep->com); 2590 child_ep->parent_ep = parent_ep; 2591 child_ep->tos = tos; 2592 child_ep->dst = dst; 2593 child_ep->hwtid = hwtid; 2594 2595 pr_debug("tx_chan %u smac_idx %u rss_qid %u\n", 2596 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); 2597 2598 timer_setup(&child_ep->timer, ep_timeout, 0); 2599 cxgb4_insert_tid(t, child_ep, hwtid, 2600 child_ep->com.local_addr.ss_family); 2601 insert_ep_tid(child_ep); 2602 if (accept_cr(child_ep, skb, req)) { 2603 c4iw_put_ep(&parent_ep->com); 2604 release_ep_resources(child_ep); 2605 } else { 2606 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history); 2607 } 2608 if (iptype == 6) { 2609 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; 2610 cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0], 2611 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 2612 } 2613 goto out; 2614 fail: 2615 c4iw_put_ep(&child_ep->com); 2616 reject: 2617 reject_cr(dev, hwtid, skb); 2618 out: 2619 if (parent_ep) 2620 c4iw_put_ep(&parent_ep->com); 2621 return 0; 2622 } 2623 2624 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) 2625 { 2626 struct c4iw_ep *ep; 2627 struct cpl_pass_establish *req = cplhdr(skb); 2628 unsigned int tid = GET_TID(req); 2629 int ret; 2630 u16 tcp_opt = ntohs(req->tcp_opt); 2631 2632 ep = get_ep_from_tid(dev, tid); 2633 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 2634 ep->snd_seq = be32_to_cpu(req->snd_isn); 2635 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 2636 ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); 2637 2638 pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt); 2639 2640 set_emss(ep, tcp_opt); 2641 2642 dst_confirm(ep->dst); 2643 mutex_lock(&ep->com.mutex); 2644 ep->com.state = MPA_REQ_WAIT; 2645 start_ep_timer(ep); 2646 set_bit(PASS_ESTAB, &ep->com.history); 2647 ret = send_flowc(ep); 2648 mutex_unlock(&ep->com.mutex); 2649 if (ret) 2650 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 2651 c4iw_put_ep(&ep->com); 2652 2653 return 0; 2654 } 2655 2656 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) 2657 { 2658 struct cpl_peer_close *hdr = cplhdr(skb); 2659 struct c4iw_ep *ep; 2660 struct c4iw_qp_attributes attrs; 2661 int disconnect = 1; 2662 int release = 0; 2663 unsigned int tid = GET_TID(hdr); 2664 int ret; 2665 2666 ep = get_ep_from_tid(dev, tid); 2667 if (!ep) 2668 return 0; 2669 2670 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 2671 dst_confirm(ep->dst); 2672 2673 set_bit(PEER_CLOSE, &ep->com.history); 2674 mutex_lock(&ep->com.mutex); 2675 switch (ep->com.state) { 2676 case MPA_REQ_WAIT: 2677 __state_set(&ep->com, CLOSING); 2678 break; 2679 case MPA_REQ_SENT: 2680 __state_set(&ep->com, CLOSING); 2681 connect_reply_upcall(ep, -ECONNRESET); 2682 break; 2683 case MPA_REQ_RCVD: 2684 2685 /* 2686 * We're gonna mark this puppy DEAD, but keep 2687 * the reference on it until the ULP accepts or 2688 * rejects the CR. Also wake up anyone waiting 2689 * in rdma connection migration (see c4iw_accept_cr()). 2690 */ 2691 __state_set(&ep->com, CLOSING); 2692 pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid); 2693 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); 2694 break; 2695 case MPA_REP_SENT: 2696 __state_set(&ep->com, CLOSING); 2697 pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid); 2698 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); 2699 break; 2700 case FPDU_MODE: 2701 start_ep_timer(ep); 2702 __state_set(&ep->com, CLOSING); 2703 attrs.next_state = C4IW_QP_STATE_CLOSING; 2704 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2705 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2706 if (ret != -ECONNRESET) { 2707 peer_close_upcall(ep); 2708 disconnect = 1; 2709 } 2710 break; 2711 case ABORTING: 2712 disconnect = 0; 2713 break; 2714 case CLOSING: 2715 __state_set(&ep->com, MORIBUND); 2716 disconnect = 0; 2717 break; 2718 case MORIBUND: 2719 (void)stop_ep_timer(ep); 2720 if (ep->com.cm_id && ep->com.qp) { 2721 attrs.next_state = C4IW_QP_STATE_IDLE; 2722 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2723 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2724 } 2725 close_complete_upcall(ep, 0); 2726 __state_set(&ep->com, DEAD); 2727 release = 1; 2728 disconnect = 0; 2729 break; 2730 case DEAD: 2731 disconnect = 0; 2732 break; 2733 default: 2734 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); 2735 } 2736 mutex_unlock(&ep->com.mutex); 2737 if (disconnect) 2738 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2739 if (release) 2740 release_ep_resources(ep); 2741 c4iw_put_ep(&ep->com); 2742 return 0; 2743 } 2744 2745 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 2746 { 2747 struct cpl_abort_req_rss6 *req = cplhdr(skb); 2748 struct c4iw_ep *ep; 2749 struct sk_buff *rpl_skb; 2750 struct c4iw_qp_attributes attrs; 2751 int ret; 2752 int release = 0; 2753 unsigned int tid = GET_TID(req); 2754 u8 status; 2755 2756 u32 len = roundup(sizeof(struct cpl_abort_rpl), 16); 2757 2758 ep = get_ep_from_tid(dev, tid); 2759 if (!ep) 2760 return 0; 2761 2762 status = ABORT_RSS_STATUS_G(be32_to_cpu(req->srqidx_status)); 2763 2764 if (cxgb_is_neg_adv(status)) { 2765 pr_debug("Negative advice on abort- tid %u status %d (%s)\n", 2766 ep->hwtid, status, neg_adv_str(status)); 2767 ep->stats.abort_neg_adv++; 2768 mutex_lock(&dev->rdev.stats.lock); 2769 dev->rdev.stats.neg_adv++; 2770 mutex_unlock(&dev->rdev.stats.lock); 2771 goto deref_ep; 2772 } 2773 2774 complete_cached_srq_buffers(ep, req->srqidx_status); 2775 2776 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, 2777 ep->com.state); 2778 set_bit(PEER_ABORT, &ep->com.history); 2779 2780 /* 2781 * Wake up any threads in rdma_init() or rdma_fini(). 2782 * However, this is not needed if com state is just 2783 * MPA_REQ_SENT 2784 */ 2785 if (ep->com.state != MPA_REQ_SENT) 2786 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); 2787 2788 mutex_lock(&ep->com.mutex); 2789 switch (ep->com.state) { 2790 case CONNECTING: 2791 c4iw_put_ep(&ep->parent_ep->com); 2792 break; 2793 case MPA_REQ_WAIT: 2794 (void)stop_ep_timer(ep); 2795 break; 2796 case MPA_REQ_SENT: 2797 (void)stop_ep_timer(ep); 2798 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) 2799 connect_reply_upcall(ep, -ECONNRESET); 2800 else { 2801 /* 2802 * we just don't send notification upwards because we 2803 * want to retry with mpa_v1 without upper layers even 2804 * knowing it. 2805 * 2806 * do some housekeeping so as to re-initiate the 2807 * connection 2808 */ 2809 pr_info("%s: mpa_rev=%d. Retrying with mpav1\n", 2810 __func__, mpa_rev); 2811 ep->retry_with_mpa_v1 = 1; 2812 } 2813 break; 2814 case MPA_REP_SENT: 2815 break; 2816 case MPA_REQ_RCVD: 2817 break; 2818 case MORIBUND: 2819 case CLOSING: 2820 stop_ep_timer(ep); 2821 /*FALLTHROUGH*/ 2822 case FPDU_MODE: 2823 if (ep->com.cm_id && ep->com.qp) { 2824 attrs.next_state = C4IW_QP_STATE_ERROR; 2825 ret = c4iw_modify_qp(ep->com.qp->rhp, 2826 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 2827 &attrs, 1); 2828 if (ret) 2829 pr_err("%s - qp <- error failed!\n", __func__); 2830 } 2831 peer_abort_upcall(ep); 2832 break; 2833 case ABORTING: 2834 break; 2835 case DEAD: 2836 pr_warn("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); 2837 mutex_unlock(&ep->com.mutex); 2838 goto deref_ep; 2839 default: 2840 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); 2841 break; 2842 } 2843 dst_confirm(ep->dst); 2844 if (ep->com.state != ABORTING) { 2845 __state_set(&ep->com, DEAD); 2846 /* we don't release if we want to retry with mpa_v1 */ 2847 if (!ep->retry_with_mpa_v1) 2848 release = 1; 2849 } 2850 mutex_unlock(&ep->com.mutex); 2851 2852 rpl_skb = skb_dequeue(&ep->com.ep_skb_list); 2853 if (WARN_ON(!rpl_skb)) { 2854 release = 1; 2855 goto out; 2856 } 2857 2858 cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx); 2859 2860 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); 2861 out: 2862 if (release) 2863 release_ep_resources(ep); 2864 else if (ep->retry_with_mpa_v1) { 2865 if (ep->com.remote_addr.ss_family == AF_INET6) { 2866 struct sockaddr_in6 *sin6 = 2867 (struct sockaddr_in6 *) 2868 &ep->com.local_addr; 2869 cxgb4_clip_release( 2870 ep->com.dev->rdev.lldi.ports[0], 2871 (const u32 *)&sin6->sin6_addr.s6_addr, 2872 1); 2873 } 2874 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); 2875 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid, 2876 ep->com.local_addr.ss_family); 2877 dst_release(ep->dst); 2878 cxgb4_l2t_release(ep->l2t); 2879 c4iw_reconnect(ep); 2880 } 2881 2882 deref_ep: 2883 c4iw_put_ep(&ep->com); 2884 /* Dereferencing ep, referenced in peer_abort_intr() */ 2885 c4iw_put_ep(&ep->com); 2886 return 0; 2887 } 2888 2889 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2890 { 2891 struct c4iw_ep *ep; 2892 struct c4iw_qp_attributes attrs; 2893 struct cpl_close_con_rpl *rpl = cplhdr(skb); 2894 int release = 0; 2895 unsigned int tid = GET_TID(rpl); 2896 2897 ep = get_ep_from_tid(dev, tid); 2898 if (!ep) 2899 return 0; 2900 2901 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 2902 2903 /* The cm_id may be null if we failed to connect */ 2904 mutex_lock(&ep->com.mutex); 2905 set_bit(CLOSE_CON_RPL, &ep->com.history); 2906 switch (ep->com.state) { 2907 case CLOSING: 2908 __state_set(&ep->com, MORIBUND); 2909 break; 2910 case MORIBUND: 2911 (void)stop_ep_timer(ep); 2912 if ((ep->com.cm_id) && (ep->com.qp)) { 2913 attrs.next_state = C4IW_QP_STATE_IDLE; 2914 c4iw_modify_qp(ep->com.qp->rhp, 2915 ep->com.qp, 2916 C4IW_QP_ATTR_NEXT_STATE, 2917 &attrs, 1); 2918 } 2919 close_complete_upcall(ep, 0); 2920 __state_set(&ep->com, DEAD); 2921 release = 1; 2922 break; 2923 case ABORTING: 2924 case DEAD: 2925 break; 2926 default: 2927 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); 2928 break; 2929 } 2930 mutex_unlock(&ep->com.mutex); 2931 if (release) 2932 release_ep_resources(ep); 2933 c4iw_put_ep(&ep->com); 2934 return 0; 2935 } 2936 2937 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) 2938 { 2939 struct cpl_rdma_terminate *rpl = cplhdr(skb); 2940 unsigned int tid = GET_TID(rpl); 2941 struct c4iw_ep *ep; 2942 struct c4iw_qp_attributes attrs; 2943 2944 ep = get_ep_from_tid(dev, tid); 2945 2946 if (ep && ep->com.qp) { 2947 pr_warn("TERM received tid %u qpid %u\n", 2948 tid, ep->com.qp->wq.sq.qid); 2949 attrs.next_state = C4IW_QP_STATE_TERMINATE; 2950 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2951 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2952 } else 2953 pr_warn("TERM received tid %u no ep/qp\n", tid); 2954 c4iw_put_ep(&ep->com); 2955 2956 return 0; 2957 } 2958 2959 /* 2960 * Upcall from the adapter indicating data has been transmitted. 2961 * For us its just the single MPA request or reply. We can now free 2962 * the skb holding the mpa message. 2963 */ 2964 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) 2965 { 2966 struct c4iw_ep *ep; 2967 struct cpl_fw4_ack *hdr = cplhdr(skb); 2968 u8 credits = hdr->credits; 2969 unsigned int tid = GET_TID(hdr); 2970 2971 2972 ep = get_ep_from_tid(dev, tid); 2973 if (!ep) 2974 return 0; 2975 pr_debug("ep %p tid %u credits %u\n", 2976 ep, ep->hwtid, credits); 2977 if (credits == 0) { 2978 pr_debug("0 credit ack ep %p tid %u state %u\n", 2979 ep, ep->hwtid, state_read(&ep->com)); 2980 goto out; 2981 } 2982 2983 dst_confirm(ep->dst); 2984 if (ep->mpa_skb) { 2985 pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n", 2986 ep, ep->hwtid, state_read(&ep->com), 2987 ep->mpa_attr.initiator ? 1 : 0); 2988 mutex_lock(&ep->com.mutex); 2989 kfree_skb(ep->mpa_skb); 2990 ep->mpa_skb = NULL; 2991 if (test_bit(STOP_MPA_TIMER, &ep->com.flags)) 2992 stop_ep_timer(ep); 2993 mutex_unlock(&ep->com.mutex); 2994 } 2995 out: 2996 c4iw_put_ep(&ep->com); 2997 return 0; 2998 } 2999 3000 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 3001 { 3002 int abort; 3003 struct c4iw_ep *ep = to_ep(cm_id); 3004 3005 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 3006 3007 mutex_lock(&ep->com.mutex); 3008 if (ep->com.state != MPA_REQ_RCVD) { 3009 mutex_unlock(&ep->com.mutex); 3010 c4iw_put_ep(&ep->com); 3011 return -ECONNRESET; 3012 } 3013 set_bit(ULP_REJECT, &ep->com.history); 3014 if (mpa_rev == 0) 3015 abort = 1; 3016 else 3017 abort = send_mpa_reject(ep, pdata, pdata_len); 3018 mutex_unlock(&ep->com.mutex); 3019 3020 stop_ep_timer(ep); 3021 c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL); 3022 c4iw_put_ep(&ep->com); 3023 return 0; 3024 } 3025 3026 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 3027 { 3028 int err; 3029 struct c4iw_qp_attributes attrs; 3030 enum c4iw_qp_attr_mask mask; 3031 struct c4iw_ep *ep = to_ep(cm_id); 3032 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 3033 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 3034 int abort = 0; 3035 3036 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 3037 3038 mutex_lock(&ep->com.mutex); 3039 if (ep->com.state != MPA_REQ_RCVD) { 3040 err = -ECONNRESET; 3041 goto err_out; 3042 } 3043 3044 if (!qp) { 3045 err = -EINVAL; 3046 goto err_out; 3047 } 3048 3049 set_bit(ULP_ACCEPT, &ep->com.history); 3050 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) || 3051 (conn_param->ird > cur_max_read_depth(ep->com.dev))) { 3052 err = -EINVAL; 3053 goto err_abort; 3054 } 3055 3056 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 3057 if (conn_param->ord > ep->ird) { 3058 if (RELAXED_IRD_NEGOTIATION) { 3059 conn_param->ord = ep->ird; 3060 } else { 3061 ep->ird = conn_param->ird; 3062 ep->ord = conn_param->ord; 3063 send_mpa_reject(ep, conn_param->private_data, 3064 conn_param->private_data_len); 3065 err = -ENOMEM; 3066 goto err_abort; 3067 } 3068 } 3069 if (conn_param->ird < ep->ord) { 3070 if (RELAXED_IRD_NEGOTIATION && 3071 ep->ord <= h->rdev.lldi.max_ordird_qp) { 3072 conn_param->ird = ep->ord; 3073 } else { 3074 err = -ENOMEM; 3075 goto err_abort; 3076 } 3077 } 3078 } 3079 ep->ird = conn_param->ird; 3080 ep->ord = conn_param->ord; 3081 3082 if (ep->mpa_attr.version == 1) { 3083 if (peer2peer && ep->ird == 0) 3084 ep->ird = 1; 3085 } else { 3086 if (peer2peer && 3087 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) && 3088 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0) 3089 ep->ird = 1; 3090 } 3091 3092 pr_debug("ird %d ord %d\n", ep->ird, ep->ord); 3093 3094 ep->com.cm_id = cm_id; 3095 ref_cm_id(&ep->com); 3096 ep->com.qp = qp; 3097 ref_qp(ep); 3098 3099 /* bind QP to EP and move to RTS */ 3100 attrs.mpa_attr = ep->mpa_attr; 3101 attrs.max_ird = ep->ird; 3102 attrs.max_ord = ep->ord; 3103 attrs.llp_stream_handle = ep; 3104 attrs.next_state = C4IW_QP_STATE_RTS; 3105 3106 /* bind QP and TID with INIT_WR */ 3107 mask = C4IW_QP_ATTR_NEXT_STATE | 3108 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 3109 C4IW_QP_ATTR_MPA_ATTR | 3110 C4IW_QP_ATTR_MAX_IRD | 3111 C4IW_QP_ATTR_MAX_ORD; 3112 3113 err = c4iw_modify_qp(ep->com.qp->rhp, 3114 ep->com.qp, mask, &attrs, 1); 3115 if (err) 3116 goto err_deref_cm_id; 3117 3118 set_bit(STOP_MPA_TIMER, &ep->com.flags); 3119 err = send_mpa_reply(ep, conn_param->private_data, 3120 conn_param->private_data_len); 3121 if (err) 3122 goto err_deref_cm_id; 3123 3124 __state_set(&ep->com, FPDU_MODE); 3125 established_upcall(ep); 3126 mutex_unlock(&ep->com.mutex); 3127 c4iw_put_ep(&ep->com); 3128 return 0; 3129 err_deref_cm_id: 3130 deref_cm_id(&ep->com); 3131 err_abort: 3132 abort = 1; 3133 err_out: 3134 mutex_unlock(&ep->com.mutex); 3135 if (abort) 3136 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 3137 c4iw_put_ep(&ep->com); 3138 return err; 3139 } 3140 3141 static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 3142 { 3143 struct in_device *ind; 3144 int found = 0; 3145 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr; 3146 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; 3147 3148 ind = in_dev_get(dev->rdev.lldi.ports[0]); 3149 if (!ind) 3150 return -EADDRNOTAVAIL; 3151 for_primary_ifa(ind) { 3152 laddr->sin_addr.s_addr = ifa->ifa_address; 3153 raddr->sin_addr.s_addr = ifa->ifa_address; 3154 found = 1; 3155 break; 3156 } 3157 endfor_ifa(ind); 3158 in_dev_put(ind); 3159 return found ? 0 : -EADDRNOTAVAIL; 3160 } 3161 3162 static int get_lladdr(struct net_device *dev, struct in6_addr *addr, 3163 unsigned char banned_flags) 3164 { 3165 struct inet6_dev *idev; 3166 int err = -EADDRNOTAVAIL; 3167 3168 rcu_read_lock(); 3169 idev = __in6_dev_get(dev); 3170 if (idev != NULL) { 3171 struct inet6_ifaddr *ifp; 3172 3173 read_lock_bh(&idev->lock); 3174 list_for_each_entry(ifp, &idev->addr_list, if_list) { 3175 if (ifp->scope == IFA_LINK && 3176 !(ifp->flags & banned_flags)) { 3177 memcpy(addr, &ifp->addr, 16); 3178 err = 0; 3179 break; 3180 } 3181 } 3182 read_unlock_bh(&idev->lock); 3183 } 3184 rcu_read_unlock(); 3185 return err; 3186 } 3187 3188 static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 3189 { 3190 struct in6_addr uninitialized_var(addr); 3191 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; 3192 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; 3193 3194 if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) { 3195 memcpy(la6->sin6_addr.s6_addr, &addr, 16); 3196 memcpy(ra6->sin6_addr.s6_addr, &addr, 16); 3197 return 0; 3198 } 3199 return -EADDRNOTAVAIL; 3200 } 3201 3202 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 3203 { 3204 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 3205 struct c4iw_ep *ep; 3206 int err = 0; 3207 struct sockaddr_in *laddr; 3208 struct sockaddr_in *raddr; 3209 struct sockaddr_in6 *laddr6; 3210 struct sockaddr_in6 *raddr6; 3211 __u8 *ra; 3212 int iptype; 3213 3214 if ((conn_param->ord > cur_max_read_depth(dev)) || 3215 (conn_param->ird > cur_max_read_depth(dev))) { 3216 err = -EINVAL; 3217 goto out; 3218 } 3219 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 3220 if (!ep) { 3221 pr_err("%s - cannot alloc ep\n", __func__); 3222 err = -ENOMEM; 3223 goto out; 3224 } 3225 3226 skb_queue_head_init(&ep->com.ep_skb_list); 3227 if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) { 3228 err = -ENOMEM; 3229 goto fail1; 3230 } 3231 3232 timer_setup(&ep->timer, ep_timeout, 0); 3233 ep->plen = conn_param->private_data_len; 3234 if (ep->plen) 3235 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 3236 conn_param->private_data, ep->plen); 3237 ep->ird = conn_param->ird; 3238 ep->ord = conn_param->ord; 3239 3240 if (peer2peer && ep->ord == 0) 3241 ep->ord = 1; 3242 3243 ep->com.cm_id = cm_id; 3244 ref_cm_id(&ep->com); 3245 cm_id->provider_data = ep; 3246 ep->com.dev = dev; 3247 ep->com.qp = get_qhp(dev, conn_param->qpn); 3248 if (!ep->com.qp) { 3249 pr_warn("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); 3250 err = -EINVAL; 3251 goto fail2; 3252 } 3253 ref_qp(ep); 3254 pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn, 3255 ep->com.qp, cm_id); 3256 3257 /* 3258 * Allocate an active TID to initiate a TCP connection. 3259 */ 3260 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); 3261 if (ep->atid == -1) { 3262 pr_err("%s - cannot alloc atid\n", __func__); 3263 err = -ENOMEM; 3264 goto fail2; 3265 } 3266 insert_handle(dev, &dev->atid_idr, ep, ep->atid); 3267 3268 memcpy(&ep->com.local_addr, &cm_id->m_local_addr, 3269 sizeof(ep->com.local_addr)); 3270 memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr, 3271 sizeof(ep->com.remote_addr)); 3272 3273 laddr = (struct sockaddr_in *)&ep->com.local_addr; 3274 raddr = (struct sockaddr_in *)&ep->com.remote_addr; 3275 laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr; 3276 raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr; 3277 3278 if (cm_id->m_remote_addr.ss_family == AF_INET) { 3279 iptype = 4; 3280 ra = (__u8 *)&raddr->sin_addr; 3281 3282 /* 3283 * Handle loopback requests to INADDR_ANY. 3284 */ 3285 if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { 3286 err = pick_local_ipaddrs(dev, cm_id); 3287 if (err) 3288 goto fail2; 3289 } 3290 3291 /* find a route */ 3292 pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n", 3293 &laddr->sin_addr, ntohs(laddr->sin_port), 3294 ra, ntohs(raddr->sin_port)); 3295 ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, 3296 laddr->sin_addr.s_addr, 3297 raddr->sin_addr.s_addr, 3298 laddr->sin_port, 3299 raddr->sin_port, cm_id->tos); 3300 } else { 3301 iptype = 6; 3302 ra = (__u8 *)&raddr6->sin6_addr; 3303 3304 /* 3305 * Handle loopback requests to INADDR_ANY. 3306 */ 3307 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { 3308 err = pick_local_ip6addrs(dev, cm_id); 3309 if (err) 3310 goto fail2; 3311 } 3312 3313 /* find a route */ 3314 pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n", 3315 laddr6->sin6_addr.s6_addr, 3316 ntohs(laddr6->sin6_port), 3317 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port)); 3318 ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev, 3319 laddr6->sin6_addr.s6_addr, 3320 raddr6->sin6_addr.s6_addr, 3321 laddr6->sin6_port, 3322 raddr6->sin6_port, 0, 3323 raddr6->sin6_scope_id); 3324 } 3325 if (!ep->dst) { 3326 pr_err("%s - cannot find route\n", __func__); 3327 err = -EHOSTUNREACH; 3328 goto fail3; 3329 } 3330 3331 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true, 3332 ep->com.dev->rdev.lldi.adapter_type, cm_id->tos); 3333 if (err) { 3334 pr_err("%s - cannot alloc l2e\n", __func__); 3335 goto fail4; 3336 } 3337 3338 pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 3339 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 3340 ep->l2t->idx); 3341 3342 state_set(&ep->com, CONNECTING); 3343 ep->tos = cm_id->tos; 3344 3345 /* send connect request to rnic */ 3346 err = send_connect(ep); 3347 if (!err) 3348 goto out; 3349 3350 cxgb4_l2t_release(ep->l2t); 3351 fail4: 3352 dst_release(ep->dst); 3353 fail3: 3354 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 3355 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 3356 fail2: 3357 skb_queue_purge(&ep->com.ep_skb_list); 3358 deref_cm_id(&ep->com); 3359 fail1: 3360 c4iw_put_ep(&ep->com); 3361 out: 3362 return err; 3363 } 3364 3365 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3366 { 3367 int err; 3368 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) 3369 &ep->com.local_addr; 3370 3371 if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) { 3372 err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], 3373 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3374 if (err) 3375 return err; 3376 } 3377 c4iw_init_wr_wait(ep->com.wr_waitp); 3378 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0], 3379 ep->stid, &sin6->sin6_addr, 3380 sin6->sin6_port, 3381 ep->com.dev->rdev.lldi.rxq_ids[0]); 3382 if (!err) 3383 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3384 ep->com.wr_waitp, 3385 0, 0, __func__); 3386 else if (err > 0) 3387 err = net_xmit_errno(err); 3388 if (err) { 3389 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 3390 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3391 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n", 3392 err, ep->stid, 3393 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port)); 3394 } 3395 return err; 3396 } 3397 3398 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3399 { 3400 int err; 3401 struct sockaddr_in *sin = (struct sockaddr_in *) 3402 &ep->com.local_addr; 3403 3404 if (dev->rdev.lldi.enable_fw_ofld_conn) { 3405 do { 3406 err = cxgb4_create_server_filter( 3407 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3408 sin->sin_addr.s_addr, sin->sin_port, 0, 3409 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0); 3410 if (err == -EBUSY) { 3411 if (c4iw_fatal_error(&ep->com.dev->rdev)) { 3412 err = -EIO; 3413 break; 3414 } 3415 set_current_state(TASK_UNINTERRUPTIBLE); 3416 schedule_timeout(usecs_to_jiffies(100)); 3417 } 3418 } while (err == -EBUSY); 3419 } else { 3420 c4iw_init_wr_wait(ep->com.wr_waitp); 3421 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], 3422 ep->stid, sin->sin_addr.s_addr, sin->sin_port, 3423 0, ep->com.dev->rdev.lldi.rxq_ids[0]); 3424 if (!err) 3425 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3426 ep->com.wr_waitp, 3427 0, 0, __func__); 3428 else if (err > 0) 3429 err = net_xmit_errno(err); 3430 } 3431 if (err) 3432 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n" 3433 , err, ep->stid, 3434 &sin->sin_addr, ntohs(sin->sin_port)); 3435 return err; 3436 } 3437 3438 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 3439 { 3440 int err = 0; 3441 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 3442 struct c4iw_listen_ep *ep; 3443 3444 might_sleep(); 3445 3446 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 3447 if (!ep) { 3448 pr_err("%s - cannot alloc ep\n", __func__); 3449 err = -ENOMEM; 3450 goto fail1; 3451 } 3452 skb_queue_head_init(&ep->com.ep_skb_list); 3453 pr_debug("ep %p\n", ep); 3454 ep->com.cm_id = cm_id; 3455 ref_cm_id(&ep->com); 3456 ep->com.dev = dev; 3457 ep->backlog = backlog; 3458 memcpy(&ep->com.local_addr, &cm_id->m_local_addr, 3459 sizeof(ep->com.local_addr)); 3460 3461 /* 3462 * Allocate a server TID. 3463 */ 3464 if (dev->rdev.lldi.enable_fw_ofld_conn && 3465 ep->com.local_addr.ss_family == AF_INET) 3466 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, 3467 cm_id->m_local_addr.ss_family, ep); 3468 else 3469 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, 3470 cm_id->m_local_addr.ss_family, ep); 3471 3472 if (ep->stid == -1) { 3473 pr_err("%s - cannot alloc stid\n", __func__); 3474 err = -ENOMEM; 3475 goto fail2; 3476 } 3477 insert_handle(dev, &dev->stid_idr, ep, ep->stid); 3478 3479 state_set(&ep->com, LISTEN); 3480 if (ep->com.local_addr.ss_family == AF_INET) 3481 err = create_server4(dev, ep); 3482 else 3483 err = create_server6(dev, ep); 3484 if (!err) { 3485 cm_id->provider_data = ep; 3486 goto out; 3487 } 3488 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); 3489 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3490 ep->com.local_addr.ss_family); 3491 fail2: 3492 deref_cm_id(&ep->com); 3493 c4iw_put_ep(&ep->com); 3494 fail1: 3495 out: 3496 return err; 3497 } 3498 3499 int c4iw_destroy_listen(struct iw_cm_id *cm_id) 3500 { 3501 int err; 3502 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 3503 3504 pr_debug("ep %p\n", ep); 3505 3506 might_sleep(); 3507 state_set(&ep->com, DEAD); 3508 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn && 3509 ep->com.local_addr.ss_family == AF_INET) { 3510 err = cxgb4_remove_server_filter( 3511 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3512 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3513 } else { 3514 struct sockaddr_in6 *sin6; 3515 c4iw_init_wr_wait(ep->com.wr_waitp); 3516 err = cxgb4_remove_server( 3517 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3518 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3519 if (err) 3520 goto done; 3521 err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp, 3522 0, 0, __func__); 3523 sin6 = (struct sockaddr_in6 *)&ep->com.local_addr; 3524 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 3525 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3526 } 3527 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); 3528 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3529 ep->com.local_addr.ss_family); 3530 done: 3531 deref_cm_id(&ep->com); 3532 c4iw_put_ep(&ep->com); 3533 return err; 3534 } 3535 3536 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 3537 { 3538 int ret = 0; 3539 int close = 0; 3540 int fatal = 0; 3541 struct c4iw_rdev *rdev; 3542 3543 mutex_lock(&ep->com.mutex); 3544 3545 pr_debug("ep %p state %s, abrupt %d\n", ep, 3546 states[ep->com.state], abrupt); 3547 3548 /* 3549 * Ref the ep here in case we have fatal errors causing the 3550 * ep to be released and freed. 3551 */ 3552 c4iw_get_ep(&ep->com); 3553 3554 rdev = &ep->com.dev->rdev; 3555 if (c4iw_fatal_error(rdev)) { 3556 fatal = 1; 3557 close_complete_upcall(ep, -EIO); 3558 ep->com.state = DEAD; 3559 } 3560 switch (ep->com.state) { 3561 case MPA_REQ_WAIT: 3562 case MPA_REQ_SENT: 3563 case MPA_REQ_RCVD: 3564 case MPA_REP_SENT: 3565 case FPDU_MODE: 3566 case CONNECTING: 3567 close = 1; 3568 if (abrupt) 3569 ep->com.state = ABORTING; 3570 else { 3571 ep->com.state = CLOSING; 3572 3573 /* 3574 * if we close before we see the fw4_ack() then we fix 3575 * up the timer state since we're reusing it. 3576 */ 3577 if (ep->mpa_skb && 3578 test_bit(STOP_MPA_TIMER, &ep->com.flags)) { 3579 clear_bit(STOP_MPA_TIMER, &ep->com.flags); 3580 stop_ep_timer(ep); 3581 } 3582 start_ep_timer(ep); 3583 } 3584 set_bit(CLOSE_SENT, &ep->com.flags); 3585 break; 3586 case CLOSING: 3587 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 3588 close = 1; 3589 if (abrupt) { 3590 (void)stop_ep_timer(ep); 3591 ep->com.state = ABORTING; 3592 } else 3593 ep->com.state = MORIBUND; 3594 } 3595 break; 3596 case MORIBUND: 3597 case ABORTING: 3598 case DEAD: 3599 pr_debug("ignoring disconnect ep %p state %u\n", 3600 ep, ep->com.state); 3601 break; 3602 default: 3603 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); 3604 break; 3605 } 3606 3607 if (close) { 3608 if (abrupt) { 3609 set_bit(EP_DISC_ABORT, &ep->com.history); 3610 close_complete_upcall(ep, -ECONNRESET); 3611 ret = send_abort(ep); 3612 } else { 3613 set_bit(EP_DISC_CLOSE, &ep->com.history); 3614 ret = send_halfclose(ep); 3615 } 3616 if (ret) { 3617 set_bit(EP_DISC_FAIL, &ep->com.history); 3618 if (!abrupt) { 3619 stop_ep_timer(ep); 3620 close_complete_upcall(ep, -EIO); 3621 } 3622 if (ep->com.qp) { 3623 struct c4iw_qp_attributes attrs; 3624 3625 attrs.next_state = C4IW_QP_STATE_ERROR; 3626 ret = c4iw_modify_qp(ep->com.qp->rhp, 3627 ep->com.qp, 3628 C4IW_QP_ATTR_NEXT_STATE, 3629 &attrs, 1); 3630 if (ret) 3631 pr_err("%s - qp <- error failed!\n", 3632 __func__); 3633 } 3634 fatal = 1; 3635 } 3636 } 3637 mutex_unlock(&ep->com.mutex); 3638 c4iw_put_ep(&ep->com); 3639 if (fatal) 3640 release_ep_resources(ep); 3641 return ret; 3642 } 3643 3644 static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3645 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3646 { 3647 struct c4iw_ep *ep; 3648 int atid = be32_to_cpu(req->tid); 3649 3650 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, 3651 (__force u32) req->tid); 3652 if (!ep) 3653 return; 3654 3655 switch (req->retval) { 3656 case FW_ENOMEM: 3657 set_bit(ACT_RETRY_NOMEM, &ep->com.history); 3658 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3659 send_fw_act_open_req(ep, atid); 3660 return; 3661 } 3662 /* fall through */ 3663 case FW_EADDRINUSE: 3664 set_bit(ACT_RETRY_INUSE, &ep->com.history); 3665 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3666 send_fw_act_open_req(ep, atid); 3667 return; 3668 } 3669 break; 3670 default: 3671 pr_info("%s unexpected ofld conn wr retval %d\n", 3672 __func__, req->retval); 3673 break; 3674 } 3675 pr_err("active ofld_connect_wr failure %d atid %d\n", 3676 req->retval, atid); 3677 mutex_lock(&dev->rdev.stats.lock); 3678 dev->rdev.stats.act_ofld_conn_fails++; 3679 mutex_unlock(&dev->rdev.stats.lock); 3680 connect_reply_upcall(ep, status2errno(req->retval)); 3681 state_set(&ep->com, DEAD); 3682 if (ep->com.remote_addr.ss_family == AF_INET6) { 3683 struct sockaddr_in6 *sin6 = 3684 (struct sockaddr_in6 *)&ep->com.local_addr; 3685 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 3686 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3687 } 3688 remove_handle(dev, &dev->atid_idr, atid); 3689 cxgb4_free_atid(dev->rdev.lldi.tids, atid); 3690 dst_release(ep->dst); 3691 cxgb4_l2t_release(ep->l2t); 3692 c4iw_put_ep(&ep->com); 3693 } 3694 3695 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3696 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3697 { 3698 struct sk_buff *rpl_skb; 3699 struct cpl_pass_accept_req *cpl; 3700 int ret; 3701 3702 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie; 3703 if (req->retval) { 3704 pr_err("%s passive open failure %d\n", __func__, req->retval); 3705 mutex_lock(&dev->rdev.stats.lock); 3706 dev->rdev.stats.pas_ofld_conn_fails++; 3707 mutex_unlock(&dev->rdev.stats.lock); 3708 kfree_skb(rpl_skb); 3709 } else { 3710 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb); 3711 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 3712 (__force u32) htonl( 3713 (__force u32) req->tid))); 3714 ret = pass_accept_req(dev, rpl_skb); 3715 if (!ret) 3716 kfree_skb(rpl_skb); 3717 } 3718 return; 3719 } 3720 3721 static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 3722 { 3723 struct cpl_fw6_msg *rpl = cplhdr(skb); 3724 struct cpl_fw6_msg_ofld_connection_wr_rpl *req; 3725 3726 switch (rpl->type) { 3727 case FW6_TYPE_CQE: 3728 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); 3729 break; 3730 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 3731 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; 3732 switch (req->t_state) { 3733 case TCP_SYN_SENT: 3734 active_ofld_conn_reply(dev, skb, req); 3735 break; 3736 case TCP_SYN_RECV: 3737 passive_ofld_conn_reply(dev, skb, req); 3738 break; 3739 default: 3740 pr_err("%s unexpected ofld conn wr state %d\n", 3741 __func__, req->t_state); 3742 break; 3743 } 3744 break; 3745 } 3746 return 0; 3747 } 3748 3749 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) 3750 { 3751 __be32 l2info; 3752 __be16 hdr_len, vlantag, len; 3753 u16 eth_hdr_len; 3754 int tcp_hdr_len, ip_hdr_len; 3755 u8 intf; 3756 struct cpl_rx_pkt *cpl = cplhdr(skb); 3757 struct cpl_pass_accept_req *req; 3758 struct tcp_options_received tmp_opt; 3759 struct c4iw_dev *dev; 3760 enum chip_type type; 3761 3762 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 3763 /* Store values from cpl_rx_pkt in temporary location. */ 3764 vlantag = cpl->vlan; 3765 len = cpl->len; 3766 l2info = cpl->l2info; 3767 hdr_len = cpl->hdr_len; 3768 intf = cpl->iff; 3769 3770 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header)); 3771 3772 /* 3773 * We need to parse the TCP options from SYN packet. 3774 * to generate cpl_pass_accept_req. 3775 */ 3776 memset(&tmp_opt, 0, sizeof(tmp_opt)); 3777 tcp_clear_options(&tmp_opt); 3778 tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL); 3779 3780 req = __skb_push(skb, sizeof(*req)); 3781 memset(req, 0, sizeof(*req)); 3782 req->l2info = cpu_to_be16(SYN_INTF_V(intf) | 3783 SYN_MAC_IDX_V(RX_MACIDX_G( 3784 be32_to_cpu(l2info))) | 3785 SYN_XACT_MATCH_F); 3786 type = dev->rdev.lldi.adapter_type; 3787 tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len)); 3788 ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len)); 3789 req->hdr_len = 3790 cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info)))); 3791 if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) { 3792 eth_hdr_len = is_t4(type) ? 3793 RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) : 3794 RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info)); 3795 req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) | 3796 IP_HDR_LEN_V(ip_hdr_len) | 3797 ETH_HDR_LEN_V(eth_hdr_len)); 3798 } else { /* T6 and later */ 3799 eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info)); 3800 req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) | 3801 T6_IP_HDR_LEN_V(ip_hdr_len) | 3802 T6_ETH_HDR_LEN_V(eth_hdr_len)); 3803 } 3804 req->vlan = vlantag; 3805 req->len = len; 3806 req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) | 3807 PASS_OPEN_TOS_V(tos)); 3808 req->tcpopt.mss = htons(tmp_opt.mss_clamp); 3809 if (tmp_opt.wscale_ok) 3810 req->tcpopt.wsf = tmp_opt.snd_wscale; 3811 req->tcpopt.tstamp = tmp_opt.saw_tstamp; 3812 if (tmp_opt.sack_ok) 3813 req->tcpopt.sack = 1; 3814 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0)); 3815 return; 3816 } 3817 3818 static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, 3819 __be32 laddr, __be16 lport, 3820 __be32 raddr, __be16 rport, 3821 u32 rcv_isn, u32 filter, u16 window, 3822 u32 rss_qid, u8 port_id) 3823 { 3824 struct sk_buff *req_skb; 3825 struct fw_ofld_connection_wr *req; 3826 struct cpl_pass_accept_req *cpl = cplhdr(skb); 3827 int ret; 3828 3829 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); 3830 if (!req_skb) 3831 return; 3832 req = __skb_put_zero(req_skb, sizeof(*req)); 3833 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F); 3834 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); 3835 req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F); 3836 req->le.filter = (__force __be32) filter; 3837 req->le.lport = lport; 3838 req->le.pport = rport; 3839 req->le.u.ipv4.lip = laddr; 3840 req->le.u.ipv4.pip = raddr; 3841 req->tcb.rcv_nxt = htonl(rcv_isn + 1); 3842 req->tcb.rcv_adv = htons(window); 3843 req->tcb.t_state_to_astid = 3844 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) | 3845 FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) | 3846 FW_OFLD_CONNECTION_WR_ASTID_V( 3847 PASS_OPEN_TID_G(ntohl(cpl->tos_stid)))); 3848 3849 /* 3850 * We store the qid in opt2 which will be used by the firmware 3851 * to send us the wr response. 3852 */ 3853 req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid)); 3854 3855 /* 3856 * We initialize the MSS index in TCB to 0xF. 3857 * So that when driver sends cpl_pass_accept_rpl 3858 * TCB picks up the correct value. If this was 0 3859 * TP will ignore any value > 0 for MSS index. 3860 */ 3861 req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF)); 3862 req->cookie = (uintptr_t)skb; 3863 3864 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); 3865 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); 3866 if (ret < 0) { 3867 pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__, 3868 ret); 3869 kfree_skb(skb); 3870 kfree_skb(req_skb); 3871 } 3872 } 3873 3874 /* 3875 * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt 3876 * messages when a filter is being used instead of server to 3877 * redirect a syn packet. When packets hit filter they are redirected 3878 * to the offload queue and driver tries to establish the connection 3879 * using firmware work request. 3880 */ 3881 static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) 3882 { 3883 int stid; 3884 unsigned int filter; 3885 struct ethhdr *eh = NULL; 3886 struct vlan_ethhdr *vlan_eh = NULL; 3887 struct iphdr *iph; 3888 struct tcphdr *tcph; 3889 struct rss_header *rss = (void *)skb->data; 3890 struct cpl_rx_pkt *cpl = (void *)skb->data; 3891 struct cpl_pass_accept_req *req = (void *)(rss + 1); 3892 struct l2t_entry *e; 3893 struct dst_entry *dst; 3894 struct c4iw_ep *lep = NULL; 3895 u16 window; 3896 struct port_info *pi; 3897 struct net_device *pdev; 3898 u16 rss_qid, eth_hdr_len; 3899 int step; 3900 struct neighbour *neigh; 3901 3902 /* Drop all non-SYN packets */ 3903 if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F))) 3904 goto reject; 3905 3906 /* 3907 * Drop all packets which did not hit the filter. 3908 * Unlikely to happen. 3909 */ 3910 if (!(rss->filter_hit && rss->filter_tid)) 3911 goto reject; 3912 3913 /* 3914 * Calculate the server tid from filter hit index from cpl_rx_pkt. 3915 */ 3916 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val); 3917 3918 lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid); 3919 if (!lep) { 3920 pr_warn("%s connect request on invalid stid %d\n", 3921 __func__, stid); 3922 goto reject; 3923 } 3924 3925 switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) { 3926 case CHELSIO_T4: 3927 eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info)); 3928 break; 3929 case CHELSIO_T5: 3930 eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info)); 3931 break; 3932 case CHELSIO_T6: 3933 eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info)); 3934 break; 3935 default: 3936 pr_err("T%d Chip is not supported\n", 3937 CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)); 3938 goto reject; 3939 } 3940 3941 if (eth_hdr_len == ETH_HLEN) { 3942 eh = (struct ethhdr *)(req + 1); 3943 iph = (struct iphdr *)(eh + 1); 3944 } else { 3945 vlan_eh = (struct vlan_ethhdr *)(req + 1); 3946 iph = (struct iphdr *)(vlan_eh + 1); 3947 skb->vlan_tci = ntohs(cpl->vlan); 3948 } 3949 3950 if (iph->version != 0x4) 3951 goto reject; 3952 3953 tcph = (struct tcphdr *)(iph + 1); 3954 skb_set_network_header(skb, (void *)iph - (void *)rss); 3955 skb_set_transport_header(skb, (void *)tcph - (void *)rss); 3956 skb_get(skb); 3957 3958 pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n", 3959 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr), 3960 ntohs(tcph->source), iph->tos); 3961 3962 dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, 3963 iph->daddr, iph->saddr, tcph->dest, 3964 tcph->source, iph->tos); 3965 if (!dst) { 3966 pr_err("%s - failed to find dst entry!\n", __func__); 3967 goto reject; 3968 } 3969 neigh = dst_neigh_lookup_skb(dst, skb); 3970 3971 if (!neigh) { 3972 pr_err("%s - failed to allocate neigh!\n", __func__); 3973 goto free_dst; 3974 } 3975 3976 if (neigh->dev->flags & IFF_LOOPBACK) { 3977 pdev = ip_dev_find(&init_net, iph->daddr); 3978 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 3979 pdev, 0); 3980 pi = (struct port_info *)netdev_priv(pdev); 3981 dev_put(pdev); 3982 } else { 3983 pdev = get_real_dev(neigh->dev); 3984 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 3985 pdev, 0); 3986 pi = (struct port_info *)netdev_priv(pdev); 3987 } 3988 neigh_release(neigh); 3989 if (!e) { 3990 pr_err("%s - failed to allocate l2t entry!\n", 3991 __func__); 3992 goto free_dst; 3993 } 3994 3995 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 3996 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; 3997 window = (__force u16) htons((__force u16)tcph->window); 3998 3999 /* Calcuate filter portion for LE region. */ 4000 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple( 4001 dev->rdev.lldi.ports[0], 4002 e)); 4003 4004 /* 4005 * Synthesize the cpl_pass_accept_req. We have everything except the 4006 * TID. Once firmware sends a reply with TID we update the TID field 4007 * in cpl and pass it through the regular cpl_pass_accept_req path. 4008 */ 4009 build_cpl_pass_accept_req(skb, stid, iph->tos); 4010 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr, 4011 tcph->source, ntohl(tcph->seq), filter, window, 4012 rss_qid, pi->port_id); 4013 cxgb4_l2t_release(e); 4014 free_dst: 4015 dst_release(dst); 4016 reject: 4017 if (lep) 4018 c4iw_put_ep(&lep->com); 4019 return 0; 4020 } 4021 4022 /* 4023 * These are the real handlers that are called from a 4024 * work queue. 4025 */ 4026 static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = { 4027 [CPL_ACT_ESTABLISH] = act_establish, 4028 [CPL_ACT_OPEN_RPL] = act_open_rpl, 4029 [CPL_RX_DATA] = rx_data, 4030 [CPL_ABORT_RPL_RSS] = abort_rpl, 4031 [CPL_ABORT_RPL] = abort_rpl, 4032 [CPL_PASS_OPEN_RPL] = pass_open_rpl, 4033 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, 4034 [CPL_PASS_ACCEPT_REQ] = pass_accept_req, 4035 [CPL_PASS_ESTABLISH] = pass_establish, 4036 [CPL_PEER_CLOSE] = peer_close, 4037 [CPL_ABORT_REQ_RSS] = peer_abort, 4038 [CPL_CLOSE_CON_RPL] = close_con_rpl, 4039 [CPL_RDMA_TERMINATE] = terminate, 4040 [CPL_FW4_ACK] = fw4_ack, 4041 [CPL_FW6_MSG] = deferred_fw6_msg, 4042 [CPL_RX_PKT] = rx_pkt, 4043 [FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe, 4044 [FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe 4045 }; 4046 4047 static void process_timeout(struct c4iw_ep *ep) 4048 { 4049 struct c4iw_qp_attributes attrs; 4050 int abort = 1; 4051 4052 mutex_lock(&ep->com.mutex); 4053 pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state); 4054 set_bit(TIMEDOUT, &ep->com.history); 4055 switch (ep->com.state) { 4056 case MPA_REQ_SENT: 4057 connect_reply_upcall(ep, -ETIMEDOUT); 4058 break; 4059 case MPA_REQ_WAIT: 4060 case MPA_REQ_RCVD: 4061 case MPA_REP_SENT: 4062 case FPDU_MODE: 4063 break; 4064 case CLOSING: 4065 case MORIBUND: 4066 if (ep->com.cm_id && ep->com.qp) { 4067 attrs.next_state = C4IW_QP_STATE_ERROR; 4068 c4iw_modify_qp(ep->com.qp->rhp, 4069 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 4070 &attrs, 1); 4071 } 4072 close_complete_upcall(ep, -ETIMEDOUT); 4073 break; 4074 case ABORTING: 4075 case DEAD: 4076 4077 /* 4078 * These states are expected if the ep timed out at the same 4079 * time as another thread was calling stop_ep_timer(). 4080 * So we silently do nothing for these states. 4081 */ 4082 abort = 0; 4083 break; 4084 default: 4085 WARN(1, "%s unexpected state ep %p tid %u state %u\n", 4086 __func__, ep, ep->hwtid, ep->com.state); 4087 abort = 0; 4088 } 4089 mutex_unlock(&ep->com.mutex); 4090 if (abort) 4091 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 4092 c4iw_put_ep(&ep->com); 4093 } 4094 4095 static void process_timedout_eps(void) 4096 { 4097 struct c4iw_ep *ep; 4098 4099 spin_lock_irq(&timeout_lock); 4100 while (!list_empty(&timeout_list)) { 4101 struct list_head *tmp; 4102 4103 tmp = timeout_list.next; 4104 list_del(tmp); 4105 tmp->next = NULL; 4106 tmp->prev = NULL; 4107 spin_unlock_irq(&timeout_lock); 4108 ep = list_entry(tmp, struct c4iw_ep, entry); 4109 process_timeout(ep); 4110 spin_lock_irq(&timeout_lock); 4111 } 4112 spin_unlock_irq(&timeout_lock); 4113 } 4114 4115 static void process_work(struct work_struct *work) 4116 { 4117 struct sk_buff *skb = NULL; 4118 struct c4iw_dev *dev; 4119 struct cpl_act_establish *rpl; 4120 unsigned int opcode; 4121 int ret; 4122 4123 process_timedout_eps(); 4124 while ((skb = skb_dequeue(&rxq))) { 4125 rpl = cplhdr(skb); 4126 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 4127 opcode = rpl->ot.opcode; 4128 4129 if (opcode >= ARRAY_SIZE(work_handlers) || 4130 !work_handlers[opcode]) { 4131 pr_err("No handler for opcode 0x%x.\n", opcode); 4132 kfree_skb(skb); 4133 } else { 4134 ret = work_handlers[opcode](dev, skb); 4135 if (!ret) 4136 kfree_skb(skb); 4137 } 4138 process_timedout_eps(); 4139 } 4140 } 4141 4142 static DECLARE_WORK(skb_work, process_work); 4143 4144 static void ep_timeout(struct timer_list *t) 4145 { 4146 struct c4iw_ep *ep = from_timer(ep, t, timer); 4147 int kickit = 0; 4148 4149 spin_lock(&timeout_lock); 4150 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 4151 /* 4152 * Only insert if it is not already on the list. 4153 */ 4154 if (!ep->entry.next) { 4155 list_add_tail(&ep->entry, &timeout_list); 4156 kickit = 1; 4157 } 4158 } 4159 spin_unlock(&timeout_lock); 4160 if (kickit) 4161 queue_work(workq, &skb_work); 4162 } 4163 4164 /* 4165 * All the CM events are handled on a work queue to have a safe context. 4166 */ 4167 static int sched(struct c4iw_dev *dev, struct sk_buff *skb) 4168 { 4169 4170 /* 4171 * Save dev in the skb->cb area. 4172 */ 4173 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; 4174 4175 /* 4176 * Queue the skb and schedule the worker thread. 4177 */ 4178 skb_queue_tail(&rxq, skb); 4179 queue_work(workq, &skb_work); 4180 return 0; 4181 } 4182 4183 static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 4184 { 4185 struct cpl_set_tcb_rpl *rpl = cplhdr(skb); 4186 4187 if (rpl->status != CPL_ERR_NONE) { 4188 pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n", 4189 rpl->status, GET_TID(rpl)); 4190 } 4191 kfree_skb(skb); 4192 return 0; 4193 } 4194 4195 static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 4196 { 4197 struct cpl_fw6_msg *rpl = cplhdr(skb); 4198 struct c4iw_wr_wait *wr_waitp; 4199 int ret; 4200 4201 pr_debug("type %u\n", rpl->type); 4202 4203 switch (rpl->type) { 4204 case FW6_TYPE_WR_RPL: 4205 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); 4206 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; 4207 pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret); 4208 if (wr_waitp) 4209 c4iw_wake_up_deref(wr_waitp, ret ? -ret : 0); 4210 kfree_skb(skb); 4211 break; 4212 case FW6_TYPE_CQE: 4213 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 4214 sched(dev, skb); 4215 break; 4216 default: 4217 pr_err("%s unexpected fw6 msg type %u\n", 4218 __func__, rpl->type); 4219 kfree_skb(skb); 4220 break; 4221 } 4222 return 0; 4223 } 4224 4225 static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) 4226 { 4227 struct cpl_abort_req_rss *req = cplhdr(skb); 4228 struct c4iw_ep *ep; 4229 unsigned int tid = GET_TID(req); 4230 4231 ep = get_ep_from_tid(dev, tid); 4232 /* This EP will be dereferenced in peer_abort() */ 4233 if (!ep) { 4234 pr_warn("Abort on non-existent endpoint, tid %d\n", tid); 4235 kfree_skb(skb); 4236 return 0; 4237 } 4238 if (cxgb_is_neg_adv(req->status)) { 4239 pr_debug("Negative advice on abort- tid %u status %d (%s)\n", 4240 ep->hwtid, req->status, 4241 neg_adv_str(req->status)); 4242 goto out; 4243 } 4244 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state); 4245 4246 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); 4247 out: 4248 sched(dev, skb); 4249 return 0; 4250 } 4251 4252 /* 4253 * Most upcalls from the T4 Core go to sched() to 4254 * schedule the processing on a work queue. 4255 */ 4256 c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { 4257 [CPL_ACT_ESTABLISH] = sched, 4258 [CPL_ACT_OPEN_RPL] = sched, 4259 [CPL_RX_DATA] = sched, 4260 [CPL_ABORT_RPL_RSS] = sched, 4261 [CPL_ABORT_RPL] = sched, 4262 [CPL_PASS_OPEN_RPL] = sched, 4263 [CPL_CLOSE_LISTSRV_RPL] = sched, 4264 [CPL_PASS_ACCEPT_REQ] = sched, 4265 [CPL_PASS_ESTABLISH] = sched, 4266 [CPL_PEER_CLOSE] = sched, 4267 [CPL_CLOSE_CON_RPL] = sched, 4268 [CPL_ABORT_REQ_RSS] = peer_abort_intr, 4269 [CPL_RDMA_TERMINATE] = sched, 4270 [CPL_FW4_ACK] = sched, 4271 [CPL_SET_TCB_RPL] = set_tcb_rpl, 4272 [CPL_FW6_MSG] = fw6_msg, 4273 [CPL_RX_PKT] = sched 4274 }; 4275 4276 int __init c4iw_cm_init(void) 4277 { 4278 spin_lock_init(&timeout_lock); 4279 skb_queue_head_init(&rxq); 4280 4281 workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM); 4282 if (!workq) 4283 return -ENOMEM; 4284 4285 return 0; 4286 } 4287 4288 void c4iw_cm_term(void) 4289 { 4290 WARN_ON(!list_empty(&timeout_list)); 4291 flush_workqueue(workq); 4292 destroy_workqueue(workq); 4293 } 4294