/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include <libcxgb_cm.h>
#include "iw_cxgb4.h"
#include "clip_tbl.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(struct timer_list *t);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static DEFINE_SPINLOCK(timeout_lock);

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void start_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("ep %p\n", ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("ep %p stopping\n", ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_err("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_err("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);

	skb = get_skb(skb, len, GFP_KERNEL);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, hwtid, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			 TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	pr_debug("mss_idx %u mss %u emss=%u\n", TCPOPT_MSS_G(opt), ep->mss,
		 ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	pr_debug("%s -> %s\n", states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
{
	struct sk_buff *skb;
	unsigned int i;
	size_t len;

	len = roundup(sizeof(union cpl_wr_size), 16);
	for (i = 0; i < size; i++) {
		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			goto fail;
		skb_queue_tail(ep_skb_list, skb);
	}
	return 0;
fail:
	skb_queue_purge(ep_skb_list);
	return -ENOMEM;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		epc->wr_waitp = c4iw_alloc_wr_wait(gfp);
		if (!epc->wr_waitp) {
			kfree(epc);
			epc = NULL;
			goto out;
		}
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(epc->wr_waitp);
	}
	pr_debug("alloc ep %p\n", epc);
out:
	return epc;
}

static void remove_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	xa_lock_irqsave(&ep->com.dev->hwtids, flags);
	__xa_erase(&ep->com.dev->hwtids, ep->hwtid);
	if (xa_empty(&ep->com.dev->hwtids))
		wake_up(&ep->com.dev->wait);
	xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);
}

static int insert_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;
	int err;

	xa_lock_irqsave(&ep->com.dev->hwtids, flags);
	err = __xa_insert(&ep->com.dev->hwtids, ep->hwtid, ep, GFP_KERNEL);
	xa_unlock_irqrestore(&ep->com.dev->hwtids, flags);

	return err;
}

/*
 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{
	struct c4iw_ep *ep;
	unsigned long flags;

	xa_lock_irqsave(&dev->hwtids, flags);
	ep = xa_load(&dev->hwtids, tid);
	if (ep)
		c4iw_get_ep(&ep->com);
	xa_unlock_irqrestore(&dev->hwtids, flags);
	return ep;
}

/*
 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
 */
static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
					       unsigned int stid)
{
	struct c4iw_listen_ep *ep;
	unsigned long flags;

	xa_lock_irqsave(&dev->stids, flags);
	ep = xa_load(&dev->stids, stid);
	if (ep)
		c4iw_get_ep(&ep->com);
	xa_unlock_irqrestore(&dev->stids, flags);
	return ep;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	pr_debug("ep %p state %s\n", ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid,
				 ep->com.local_addr.ss_family);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		kfree_skb(ep->mpa_skb);
	}
	if (!skb_queue_empty(&ep->com.ep_skb_list))
		skb_queue_purge(&ep->com.ep_skb_list);
	c4iw_put_wr_wait(ep->com.wr_waitp);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);

	/*
	 * If we have a hwtid, then remove it from the idr table
	 * so lookups will no longer find this endpoint.  Otherwise
	 * we have a race where one thread finds the ep ptr just
	 * before the other thread is freeing the ep memory.
	 */
	if (ep->hwtid != -1)
		remove_ep_tid(ep);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (!skb)
			return NULL;
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure\n");
	kfree_skb(skb);
}

static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
}

enum {
	NUM_FAKE_CPLS = 2,
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
};

static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	return 0;
}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	c4iw_put_ep(&ep->parent_ep->com);
	release_ep_resources(ep);
	return 0;
}

/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources.  This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = cpl;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
	 */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);
}

/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during accept - tid %u - dropping connection\n",
	       ep->hwtid);

	__state_set(&ep->com, DEAD);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during connect\n");
	connect_reply_upcall(ep, -EHOSTUNREACH);
	__state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	xa_erase_irq(&ep->com.dev->atids, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	int ret;
	struct c4iw_ep *ep = handle;
	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("rdev %p\n", rdev);
	req->cmd = CPL_ABORT_NO_RST;
	skb_get(skb);
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		__state_set(&ep->com, DEAD);
		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
	} else
		kfree_skb(skb);
}

static int send_flowc(struct c4iw_ep *ep)
{
	struct fw_flowc_wr *flowc;
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	u16 vlan = ep->l2t->vlan;
	int nparams;
	int flowclen, flowclen16;

	if (WARN_ON(!skb))
		return -ENOMEM;

	if (vlan == CPL_L2T_VLAN_NONE)
		nparams = 9;
	else
		nparams = 10;

	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;

	flowc = __skb_put(skb, flowclen);
	memset(flowc, 0, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
	flowc->mnemval[8].val = cpu_to_be32(ep->snd_wscale);
	if (nparams == 10) {
		u16 pri;

		pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		flowc->mnemval[9].val = cpu_to_be32(pri);
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep)
{
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	if (WARN_ON(!skb))
		return -ENOMEM;

	cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
			      NULL, arp_failure_discard);

	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static void read_tcb(struct c4iw_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_get_tcb *req;
	int wrlen = roundup(sizeof(*req), 16);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (WARN_ON(!skb))
		return;

	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	req = (struct cpl_get_tcb *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_GET_TCB, ep->hwtid));
	req->reply_ctrl = htons(REPLY_CHAN_V(0) | QUEUENO_V(ep->rss_qid));

	/*
	 * keep a ref on the ep so the tcb is not unlocked before this
	 * cpl completes. The ref is released in read_tcb_rpl().
	 */
	c4iw_get_ep(&ep->com);
	if (WARN_ON(c4iw_ofld_send(&ep->com.dev->rdev, skb)))
		c4iw_put_ep(&ep->com);
}

static int send_abort_req(struct c4iw_ep *ep)
{
	u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	if (WARN_ON(!req_skb))
		return -ENOMEM;

	cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
			  ep, abort_arp_failure);

	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep)
{
	if (!ep->com.qp || !ep->com.qp->srq) {
		send_abort_req(ep);
		return 0;
	}
	set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags);
	read_tcb(ep);
	return 0;
}

static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req = NULL;
	struct cpl_t5_act_open_req *t5req = NULL;
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req6 *req6 = NULL;
	struct cpl_t5_act_open_req6 *t5req6 = NULL;
	struct cpl_t6_act_open_req6 *t6req6 = NULL;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	u32 wscale;
	int win, sizev4, sizev6, wrlen;
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.remote_addr;
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (get_random_u32() & ~7UL) - 1;
	struct net_device *netdev;
	u64 params;

	netdev = ep->com.dev->rdev.lldi.ports[0];

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
		sizev4 = sizeof(struct cpl_act_open_req);
		sizev6 = sizeof(struct cpl_act_open_req6);
		break;
	case CHELSIO_T5:
		sizev4 = sizeof(struct cpl_t5_act_open_req);
		sizev6 = sizeof(struct cpl_t5_act_open_req6);
		break;
	case CHELSIO_T6:
		sizev4 = sizeof(struct cpl_t6_act_open_req);
		sizev6 = sizeof(struct cpl_t6_act_open_req6);
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(adapter_type));
		return -EINVAL;
	}

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	pr_debug("ep %p atid %u\n", ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		      enable_tcp_timestamps,
		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		if (peer2peer)
			isn += 4;

		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
	}

	params = cxgb4_select_ntuple(netdev, ep->l2t);

	if (ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&la6->sin6_addr.s6_addr, 1);

	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (ep->com.remote_addr.ss_family == AF_INET) {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req = skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			break;
		case CHELSIO_T5:
			t5req = skb_put(skb, wrlen);
			INIT_TP_WR(t5req, 0);
			req = (struct cpl_act_open_req *)t5req;
			break;
		case CHELSIO_T6:
			t6req = skb_put(skb, wrlen);
			INIT_TP_WR(t6req, 0);
			req = (struct cpl_act_open_req *)t6req;
			t5req = (struct cpl_t5_act_open_req *)t6req;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
		req->local_port = la->sin_port;
		req->peer_port = ra->sin_port;
		req->local_ip = la->sin_addr.s_addr;
		req->peer_ip = ra->sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req->params = cpu_to_be32(params);
			req->opt2 = cpu_to_be32(opt2);
		} else {
			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
				t5req->params =
					  cpu_to_be64(FILTER_TUPLE_V(params));
				t5req->rsvd = cpu_to_be32(isn);
				pr_debug("snd_isn %u\n", t5req->rsvd);
				t5req->opt2 = cpu_to_be32(opt2);
			} else {
				t6req->params =
					  cpu_to_be64(FILTER_TUPLE_V(params));
				t6req->rsvd = cpu_to_be32(isn);
				pr_debug("snd_isn %u\n", t6req->rsvd);
				t6req->opt2 = cpu_to_be32(opt2);
			}
		}
	} else {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req6 = skb_put(skb, wrlen);
			INIT_TP_WR(req6, 0);
			break;
		case CHELSIO_T5:
			t5req6 = skb_put(skb, wrlen);
			INIT_TP_WR(t5req6, 0);
			req6 = (struct cpl_act_open_req6 *)t5req6;
			break;
		case CHELSIO_T6:
			t6req6 = skb_put(skb, wrlen);
			INIT_TP_WR(t6req6, 0);
			req6 = (struct cpl_act_open_req6 *)t6req6;
			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid << 14) | ep->atid)));
		req6->local_port = la6->sin6_port;
		req6->peer_port = ra6->sin6_port;
		req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
		req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
		req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
		req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
		req6->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req6->params = cpu_to_be32(cxgb4_select_ntuple(netdev,
								      ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		} else {
			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
				t5req6->params =
					    cpu_to_be64(FILTER_TUPLE_V(params));
				t5req6->rsvd = cpu_to_be32(isn);
				pr_debug("snd_isn %u\n", t5req6->rsvd);
				t5req6->opt2 = cpu_to_be32(opt2);
			} else {
				t6req6->params =
					    cpu_to_be64(FILTER_TUPLE_V(params));
				t6req6->rsvd = cpu_to_be32(isn);
				pr_debug("snd_isn %u\n", t6req6->rsvd);
				t6req6->opt2 = cpu_to_be32(opt2);
			}

		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
clip_release:
	if (ret && ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&la6->sin6_addr.s6_addr, 1);
	return ret;
}

static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			u8 mpa_rev_to_use)
{
	int mpalen, wrlen, ret;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("ep %p tid %u pd_len %d\n",
		 ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof(*req), 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = skb_put_zero(skb, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));

	mpa->flags = 0;
	if (crc_enabled)
		mpa->flags |= MPA_CRC;
	if (markers_enabled) {
		mpa->flags |= MPA_MARKERS;
		ep->mpa_attr.recv_marker_enabled = 1;
	} else {
		ep->mpa_attr.recv_marker_enabled = 0;
	}
	if (mpa_rev_to_use == 2)
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;

	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size =
			htons(ntohs(mpa->private_data_size) +
			      sizeof(struct mpa_v2_conn_params));
		pr_debug("initiator ird %u ord %u\n", ep->ird,
			 ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	if (ret)
		return ret;
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return ret;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("ep %p tid %u pd_len %d\n",
		 ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof(*req), 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = skb_put_zero(skb, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size =
			htons(ntohs(mpa->private_data_size) +
			      sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("ep %p tid %u pd_len %d\n",
		 ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof(*req), 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = skb_put_zero(skb, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = 0;
	if (ep->mpa_attr.crc_enabled)
		mpa->flags |= MPA_CRC;
	if (ep->mpa_attr.recv_marker_enabled)
		mpa->flags |= MPA_MARKERS;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size =
			htons(ntohs(mpa->private_data_size) +
			      sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
					FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned short tcp_opt = ntohs(req->tcp_opt);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;
	int ret;

	ep = lookup_atid(t, atid);
	if (!ep)
		return -EINVAL;

	pr_debug("ep %p tid %u snd_isn %u rcv_isn %u\n", ep, tid,
		 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid, ep->com.local_addr.ss_family);
	insert_ep_tid(ep);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
	ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);

	set_emss(ep, tcp_opt);

	/* dealloc the atid */
	xa_erase_irq(&ep->com.dev->atids, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	ret = send_flowc(ep);
	if (ret)
		goto err;
	if (ep->retry_with_mpa_v1)
		ret = send_mpa_req(ep, skb, 1);
	else
		ret = send_mpa_req(ep, skb, mpa_rev);
	if (ret)
		goto err;
	mutex_unlock(&ep->com.mutex);
	return 0;
err:
	mutex_unlock(&ep->com.mutex);
	connect_reply_upcall(ep, -ENOMEM);
	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		pr_debug("close complete delivered ep %p cm_id %p tid %u\n",
			 ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		pr_debug("peer close delivered ep %p cm_id %p tid %u\n",
			 ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep,
			 ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u status %d\n",
		 ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.ord = cur_max_read_depth(ep->com.dev);
			event.ird = cur_max_read_depth(ep->com.dev);
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	pr_debug("ep %p tid %u status %d\n", ep,
		 ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0)
		deref_cm_id(&ep->com);
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;
	if (ep->com.cm_id) {
		pr_debug("ep %p tid %u\n", ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}

static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct sk_buff *skb;
	u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
	u32 credit_dack;

	pr_debug("ep %p tid %u credits %u\n",
		 ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;

	credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F |
		      RX_DACK_MODE_V(dack_mode);

	cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
			    credit_dack);

	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

#define RELAXED_IRD_NEGOTIATION 1

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err_stop_timer;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
		       __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err_stop_timer;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err_stop_timer;
	}

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			pr_debug("responder ird %u ord %u ep ird %u ord %u\n",
				 resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}
			if (insuff_ird) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
		 ep->mpa_attr.crc_enabled,
		 ep->mpa_attr.recv_marker_enabled,
		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
		 ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		pr_err("%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		pr_err("%s: Insufficient IRD, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err_stop_timer:
	stop_ep_timer(ep);
err:
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}

/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
		goto err_stop_timer;

	pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;

	pr_debug("enter (%s line %u)\n", __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
		       __func__, mpa_rev, mpa->revision);
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto err_stop_timer;

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto err_stop_timer;

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto err_stop_timer;
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ird = min_t(u32, ep->ird,
					cur_max_read_depth(ep->com.dev));
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = min_t(u32, ep->ord,
					cur_max_read_depth(ep->com.dev));
			pr_debug("initiator ird %u ord %u\n",
				 ep->ird, ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
					    MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
						 MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	pr_debug("crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
		 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
		 ep->mpa_attr.p2p_type);

	__state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
	if (ep->parent_ep->com.state != DEAD) {
		if (connect_request_upcall(ep))
			goto err_unlock_parent;
	} else {
		goto err_unlock_parent;
	}
	mutex_unlock(&ep->parent_ep->com.mutex);
	return 0;

err_unlock_parent:
	mutex_unlock(&ep->parent_ep->com.mutex);
	goto err_out;
err_stop_timer:
	(void)stop_ep_timer(ep);
err_out:
	return 2;
}

static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	pr_debug("ep %p tid %u dlen %u\n", ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		update_rx_credits(ep, dlen);
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		update_rx_credits(ep, dlen);
		ep->rcv_seq += dlen;
		disconnect = process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;

		update_rx_credits(ep, dlen);
		if (status)
			pr_err("%s Unexpected streaming data. qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}

static void complete_cached_srq_buffers(struct c4iw_ep *ep, u32 srqidx)
{
	enum chip_type adapter_type;

	adapter_type = ep->com.dev->rdev.lldi.adapter_type;

	/*
	 * If this TCB had a srq buffer cached, then we must complete
	 * it. For user mode, that means saving the srqidx in the
	 * user/kernel status page for this qp.  For kernel mode, just
	 * synthesize the CQE now.
	 */
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T5 && srqidx) {
		if (ep->com.qp->ibqp.uobject)
			t4_set_wq_in_error(&ep->com.qp->wq, srqidx);
		else
			c4iw_flush_srqidx(ep->com.qp, srqidx);
	}
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	u32 srqidx;
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss6 *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);

	ep = get_ep_from_tid(dev, tid);
	if (!ep) {
		pr_warn("Abort rpl to freed endpoint\n");
		return 0;
	}

	if (ep->com.qp && ep->com.qp->srq) {
		srqidx = ABORT_RSS_SRQIDX_G(be32_to_cpu(rpl->srqidx_status));
		complete_cached_srq_buffers(ep, srqidx ? srqidx : ep->srqe_idx);
	}

	pr_debug("ep %p tid %u\n", ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release) {
		close_complete_upcall(ep, -ECONNRESET);
		release_ep_resources(ep);
	}
	c4iw_put_ep(&ep->com);
	return 0;
}

static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	u32 wscale;
	struct sockaddr_in *sin;
	int win;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put_zero(skb, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
			FW_OFLD_CONNECTION_WR_ASTID_V(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		      enable_tcp_timestamps,
		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
		(nocong ? NO_CONG_F : 0) |
		KEEP_ALIVE_F |
		DELACK_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(ep->l2t->idx) |
		TX_CHAN_V(ep->tx_chan) |
		SMAC_SEL_V(ep->smac_idx) |
		DSCP_V(ep->tos >> 2) |
		ULP_MODE_V(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ_V(win));
	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL_V(0) |
		CCTRL_ECN_V(enable_ecn) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * Some of the error codes above implicitly indicate that there is no TID
 * allocated with the result of an ACT_OPEN.  We use this predicate to make
 * that explicit.
 */
static inline int act_open_has_tid(int status)
{
	return (status != CPL_ERR_TCAM_PARITY &&
		status != CPL_ERR_TCAM_MISS &&
		status != CPL_ERR_TCAM_FULL &&
		status != CPL_ERR_CONN_EXIST_SYNRECV &&
		status != CPL_ERR_CONN_EXIST);
}

static char *neg_adv_str(unsigned int status)
{
	switch (status) {
	case CPL_ERR_RTX_NEG_ADVICE:
		return "Retransmit timeout";
	case CPL_ERR_PERSIST_NEG_ADVICE:
		return "Persist timeout";
	case CPL_ERR_KEEPALV_NEG_ADVICE:
		return "Keepalive timeout";
	default:
		return "Unknown";
	}
}

static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
	ep->snd_win = snd_win;
	ep->rcv_win = rcv_win;
	pr_debug("snd_win %d rcv_win %d\n",
		 ep->snd_win, ep->rcv_win);
}

#define ACT_OPEN_RETRY_COUNT 2

static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, rt_tos2priority(tos));
		if (!ep->l2t) {
			dev_put(pdev);
			goto out;
		}
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info
*)netdev_priv(pdev)); 2122 dev_put(pdev); 2123 } else { 2124 pdev = get_real_dev(n->dev); 2125 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 2126 n, pdev, rt_tos2priority(tos)); 2127 if (!ep->l2t) 2128 goto out; 2129 ep->mtu = dst_mtu(dst); 2130 ep->tx_chan = cxgb4_port_chan(pdev); 2131 ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx; 2132 step = cdev->rdev.lldi.ntxq / 2133 cdev->rdev.lldi.nchan; 2134 ep->txq_idx = cxgb4_port_idx(pdev) * step; 2135 ep->ctrlq_idx = cxgb4_port_idx(pdev); 2136 step = cdev->rdev.lldi.nrxq / 2137 cdev->rdev.lldi.nchan; 2138 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 2139 cxgb4_port_idx(pdev) * step]; 2140 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); 2141 2142 if (clear_mpa_v1) { 2143 ep->retry_with_mpa_v1 = 0; 2144 ep->tried_with_mpa_v1 = 0; 2145 } 2146 } 2147 err = 0; 2148 out: 2149 rcu_read_unlock(); 2150 2151 neigh_release(n); 2152 2153 return err; 2154 } 2155 2156 static int c4iw_reconnect(struct c4iw_ep *ep) 2157 { 2158 int err = 0; 2159 int size = 0; 2160 struct sockaddr_in *laddr = (struct sockaddr_in *) 2161 &ep->com.cm_id->m_local_addr; 2162 struct sockaddr_in *raddr = (struct sockaddr_in *) 2163 &ep->com.cm_id->m_remote_addr; 2164 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *) 2165 &ep->com.cm_id->m_local_addr; 2166 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *) 2167 &ep->com.cm_id->m_remote_addr; 2168 int iptype; 2169 __u8 *ra; 2170 2171 pr_debug("qp %p cm_id %p\n", ep->com.qp, ep->com.cm_id); 2172 c4iw_init_wr_wait(ep->com.wr_waitp); 2173 2174 /* When MPA revision is different on nodes, the node with MPA_rev=2 2175 * tries to reconnect with MPA_rev 1 for the same EP through 2176 * c4iw_reconnect(), where the same EP is assigned with new tid for 2177 * further connection establishment. As we are using the same EP pointer 2178 * for reconnect, few skbs are used during the previous c4iw_connect(), 2179 * which leaves the EP with inadequate skbs for further 2180 * c4iw_reconnect(), Further causing a crash due to an empty 2181 * skb_list() during peer_abort(). Allocate skbs which is already used. 2182 */ 2183 size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list)); 2184 if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) { 2185 err = -ENOMEM; 2186 goto fail1; 2187 } 2188 2189 /* 2190 * Allocate an active TID to initiate a TCP connection. 
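	 * cxgb4_alloc_atid() returns -1 when no atid is available,
	 * which is mapped to -ENOMEM below.  The new atid is also
	 * tracked in the device's atids xarray; both are released
	 * again on the failure paths at the end of this function.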
2191 */ 2192 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); 2193 if (ep->atid == -1) { 2194 pr_err("%s - cannot alloc atid\n", __func__); 2195 err = -ENOMEM; 2196 goto fail2; 2197 } 2198 err = xa_insert_irq(&ep->com.dev->atids, ep->atid, ep, GFP_KERNEL); 2199 if (err) 2200 goto fail2a; 2201 2202 /* find a route */ 2203 if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) { 2204 ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev, 2205 laddr->sin_addr.s_addr, 2206 raddr->sin_addr.s_addr, 2207 laddr->sin_port, 2208 raddr->sin_port, ep->com.cm_id->tos); 2209 iptype = 4; 2210 ra = (__u8 *)&raddr->sin_addr; 2211 } else { 2212 ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi, 2213 get_real_dev, 2214 laddr6->sin6_addr.s6_addr, 2215 raddr6->sin6_addr.s6_addr, 2216 laddr6->sin6_port, 2217 raddr6->sin6_port, 2218 ep->com.cm_id->tos, 2219 raddr6->sin6_scope_id); 2220 iptype = 6; 2221 ra = (__u8 *)&raddr6->sin6_addr; 2222 } 2223 if (!ep->dst) { 2224 pr_err("%s - cannot find route\n", __func__); 2225 err = -EHOSTUNREACH; 2226 goto fail3; 2227 } 2228 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false, 2229 ep->com.dev->rdev.lldi.adapter_type, 2230 ep->com.cm_id->tos); 2231 if (err) { 2232 pr_err("%s - cannot alloc l2e\n", __func__); 2233 goto fail4; 2234 } 2235 2236 pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 2237 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 2238 ep->l2t->idx); 2239 2240 state_set(&ep->com, CONNECTING); 2241 ep->tos = ep->com.cm_id->tos; 2242 2243 /* send connect request to rnic */ 2244 err = send_connect(ep); 2245 if (!err) 2246 goto out; 2247 2248 cxgb4_l2t_release(ep->l2t); 2249 fail4: 2250 dst_release(ep->dst); 2251 fail3: 2252 xa_erase_irq(&ep->com.dev->atids, ep->atid); 2253 fail2a: 2254 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 2255 fail2: 2256 /* 2257 * remember to send notification to upper layer. 2258 * We are in here so the upper layer is not aware that this is 2259 * re-connect attempt and so, upper layer is still waiting for 2260 * response of 1st connect request. 2261 */ 2262 connect_reply_upcall(ep, -ECONNRESET); 2263 fail1: 2264 c4iw_put_ep(&ep->com); 2265 out: 2266 return err; 2267 } 2268 2269 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2270 { 2271 struct c4iw_ep *ep; 2272 struct cpl_act_open_rpl *rpl = cplhdr(skb); 2273 unsigned int atid = TID_TID_G(AOPEN_ATID_G( 2274 ntohl(rpl->atid_status))); 2275 struct tid_info *t = dev->rdev.lldi.tids; 2276 int status = AOPEN_STATUS_G(ntohl(rpl->atid_status)); 2277 struct sockaddr_in *la; 2278 struct sockaddr_in *ra; 2279 struct sockaddr_in6 *la6; 2280 struct sockaddr_in6 *ra6; 2281 int ret = 0; 2282 2283 ep = lookup_atid(t, atid); 2284 if (!ep) 2285 return -EINVAL; 2286 2287 la = (struct sockaddr_in *)&ep->com.local_addr; 2288 ra = (struct sockaddr_in *)&ep->com.remote_addr; 2289 la6 = (struct sockaddr_in6 *)&ep->com.local_addr; 2290 ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr; 2291 2292 pr_debug("ep %p atid %u status %u errno %d\n", ep, atid, 2293 status, status2errno(status)); 2294 2295 if (cxgb_is_neg_adv(status)) { 2296 pr_debug("Connection problems for atid %u status %u (%s)\n", 2297 atid, status, neg_adv_str(status)); 2298 ep->stats.connect_neg_adv++; 2299 mutex_lock(&dev->rdev.stats.lock); 2300 dev->rdev.stats.neg_adv++; 2301 mutex_unlock(&dev->rdev.stats.lock); 2302 return 0; 2303 } 2304 2305 set_bit(ACT_OPEN_RPL, &ep->com.history); 2306 2307 /* 2308 * Log interesting failures. 
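	 * Connection resets and timeouts are expected and not logged.
	 * CPL_ERR_TCAM_FULL is retried through a firmware offload
	 * connection work request (IPv4 only, when fw_ofld_conn is
	 * enabled), and CPL_ERR_CONN_EXIST is retried up to
	 * ACT_OPEN_RETRY_COUNT times via c4iw_reconnect().  Anything
	 * that is not retried falls through to the common teardown
	 * below.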
2309 */ 2310 switch (status) { 2311 case CPL_ERR_CONN_RESET: 2312 case CPL_ERR_CONN_TIMEDOUT: 2313 break; 2314 case CPL_ERR_TCAM_FULL: 2315 mutex_lock(&dev->rdev.stats.lock); 2316 dev->rdev.stats.tcam_full++; 2317 mutex_unlock(&dev->rdev.stats.lock); 2318 if (ep->com.local_addr.ss_family == AF_INET && 2319 dev->rdev.lldi.enable_fw_ofld_conn) { 2320 ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G( 2321 ntohl(rpl->atid_status)))); 2322 if (ret) 2323 goto fail; 2324 return 0; 2325 } 2326 break; 2327 case CPL_ERR_CONN_EXIST: 2328 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2329 set_bit(ACT_RETRY_INUSE, &ep->com.history); 2330 if (ep->com.remote_addr.ss_family == AF_INET6) { 2331 struct sockaddr_in6 *sin6 = 2332 (struct sockaddr_in6 *) 2333 &ep->com.local_addr; 2334 cxgb4_clip_release( 2335 ep->com.dev->rdev.lldi.ports[0], 2336 (const u32 *) 2337 &sin6->sin6_addr.s6_addr, 1); 2338 } 2339 xa_erase_irq(&ep->com.dev->atids, atid); 2340 cxgb4_free_atid(t, atid); 2341 dst_release(ep->dst); 2342 cxgb4_l2t_release(ep->l2t); 2343 c4iw_reconnect(ep); 2344 return 0; 2345 } 2346 break; 2347 default: 2348 if (ep->com.local_addr.ss_family == AF_INET) { 2349 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n", 2350 atid, status, status2errno(status), 2351 &la->sin_addr.s_addr, ntohs(la->sin_port), 2352 &ra->sin_addr.s_addr, ntohs(ra->sin_port)); 2353 } else { 2354 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n", 2355 atid, status, status2errno(status), 2356 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port), 2357 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port)); 2358 } 2359 break; 2360 } 2361 2362 fail: 2363 connect_reply_upcall(ep, status2errno(status)); 2364 state_set(&ep->com, DEAD); 2365 2366 if (ep->com.remote_addr.ss_family == AF_INET6) { 2367 struct sockaddr_in6 *sin6 = 2368 (struct sockaddr_in6 *)&ep->com.local_addr; 2369 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 2370 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 2371 } 2372 if (status && act_open_has_tid(status)) 2373 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl), 2374 ep->com.local_addr.ss_family); 2375 2376 xa_erase_irq(&ep->com.dev->atids, atid); 2377 cxgb4_free_atid(t, atid); 2378 dst_release(ep->dst); 2379 cxgb4_l2t_release(ep->l2t); 2380 c4iw_put_ep(&ep->com); 2381 2382 return 0; 2383 } 2384 2385 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2386 { 2387 struct cpl_pass_open_rpl *rpl = cplhdr(skb); 2388 unsigned int stid = GET_TID(rpl); 2389 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); 2390 2391 if (!ep) { 2392 pr_warn("%s stid %d lookup failure!\n", __func__, stid); 2393 goto out; 2394 } 2395 pr_debug("ep %p status %d error %d\n", ep, 2396 rpl->status, status2errno(rpl->status)); 2397 c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status)); 2398 c4iw_put_ep(&ep->com); 2399 out: 2400 return 0; 2401 } 2402 2403 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2404 { 2405 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); 2406 unsigned int stid = GET_TID(rpl); 2407 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); 2408 2409 if (!ep) { 2410 pr_warn("%s stid %d lookup failure!\n", __func__, stid); 2411 goto out; 2412 } 2413 pr_debug("ep %p\n", ep); 2414 c4iw_wake_up_noref(ep->com.wr_waitp, status2errno(rpl->status)); 2415 c4iw_put_ep(&ep->com); 2416 out: 2417 return 0; 2418 } 2419 2420 static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, 2421 struct cpl_pass_accept_req *req) 2422 { 2423 
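	/*
	 * Build and send the CPL_PASS_ACCEPT_RPL for this connection:
	 * pick an MTU index, clamp the advertised receive window to
	 * what fits in opt0, mirror the peer's TCP options (timestamps,
	 * SACK, window scaling, ECN only if the SYN carried ECE+CWR)
	 * and, on T5 and later adapters, supply our own initial send
	 * sequence number.
	 */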
struct cpl_pass_accept_rpl *rpl; 2424 unsigned int mtu_idx; 2425 u64 opt0; 2426 u32 opt2; 2427 u32 wscale; 2428 struct cpl_t5_pass_accept_rpl *rpl5 = NULL; 2429 int win; 2430 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; 2431 2432 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 2433 cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 2434 enable_tcp_timestamps && req->tcpopt.tstamp, 2435 (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); 2436 wscale = cxgb_compute_wscale(rcv_win); 2437 2438 /* 2439 * Specify the largest window that will fit in opt0. The 2440 * remainder will be specified in the rx_data_ack. 2441 */ 2442 win = ep->rcv_win >> 10; 2443 if (win > RCV_BUFSIZ_M) 2444 win = RCV_BUFSIZ_M; 2445 opt0 = (nocong ? NO_CONG_F : 0) | 2446 KEEP_ALIVE_F | 2447 DELACK_F | 2448 WND_SCALE_V(wscale) | 2449 MSS_IDX_V(mtu_idx) | 2450 L2T_IDX_V(ep->l2t->idx) | 2451 TX_CHAN_V(ep->tx_chan) | 2452 SMAC_SEL_V(ep->smac_idx) | 2453 DSCP_V(ep->tos >> 2) | 2454 ULP_MODE_V(ULP_MODE_TCPDDP) | 2455 RCV_BUFSIZ_V(win); 2456 opt2 = RX_CHANNEL_V(0) | 2457 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); 2458 2459 if (enable_tcp_timestamps && req->tcpopt.tstamp) 2460 opt2 |= TSTAMPS_EN_F; 2461 if (enable_tcp_sack && req->tcpopt.sack) 2462 opt2 |= SACK_EN_F; 2463 if (wscale && enable_tcp_window_scaling) 2464 opt2 |= WND_SCALE_EN_F; 2465 if (enable_ecn) { 2466 const struct tcphdr *tcph; 2467 u32 hlen = ntohl(req->hdr_len); 2468 2469 if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5) 2470 tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) + 2471 IP_HDR_LEN_G(hlen); 2472 else 2473 tcph = (const void *)(req + 1) + 2474 T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen); 2475 if (tcph->ece && tcph->cwr) 2476 opt2 |= CCTRL_ECN_V(1); 2477 } 2478 2479 if (!is_t4(adapter_type)) { 2480 u32 isn = (get_random_u32() & ~7UL) - 1; 2481 2482 skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL); 2483 rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16)); 2484 rpl = (void *)rpl5; 2485 INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid); 2486 opt2 |= T5_OPT_2_VALID_F; 2487 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); 2488 opt2 |= T5_ISS_F; 2489 if (peer2peer) 2490 isn += 4; 2491 rpl5->iss = cpu_to_be32(isn); 2492 pr_debug("iss %u\n", be32_to_cpu(rpl5->iss)); 2493 } else { 2494 skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 2495 rpl = __skb_put_zero(skb, sizeof(*rpl)); 2496 INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid); 2497 } 2498 2499 rpl->opt0 = cpu_to_be64(opt0); 2500 rpl->opt2 = cpu_to_be32(opt2); 2501 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 2502 t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure); 2503 2504 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 2505 } 2506 2507 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb) 2508 { 2509 pr_debug("c4iw_dev %p tid %u\n", dev, hwtid); 2510 skb_trim(skb, sizeof(struct cpl_tid_release)); 2511 release_tid(&dev->rdev, hwtid, skb); 2512 return; 2513 } 2514 2515 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) 2516 { 2517 struct c4iw_ep *child_ep = NULL, *parent_ep; 2518 struct cpl_pass_accept_req *req = cplhdr(skb); 2519 unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); 2520 struct tid_info *t = dev->rdev.lldi.tids; 2521 unsigned int hwtid = GET_TID(req); 2522 struct dst_entry *dst; 2523 __u8 local_ip[16], peer_ip[16]; 2524 __be16 local_port, peer_port; 2525 struct sockaddr_in6 *sin6; 2526 int err; 2527 u16 peer_mss = ntohs(req->tcpopt.mss); 2528 int iptype; 2529 unsigned 
short hdrs; 2530 u8 tos; 2531 2532 parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid); 2533 if (!parent_ep) { 2534 pr_err("%s connect request on invalid stid %d\n", 2535 __func__, stid); 2536 goto reject; 2537 } 2538 2539 if (state_read(&parent_ep->com) != LISTEN) { 2540 pr_err("%s - listening ep not in LISTEN\n", __func__); 2541 goto reject; 2542 } 2543 2544 if (parent_ep->com.cm_id->tos_set) 2545 tos = parent_ep->com.cm_id->tos; 2546 else 2547 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); 2548 2549 cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, 2550 &iptype, local_ip, peer_ip, &local_port, &peer_port); 2551 2552 /* Find output route */ 2553 if (iptype == 4) { 2554 pr_debug("parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n" 2555 , parent_ep, hwtid, 2556 local_ip, peer_ip, ntohs(local_port), 2557 ntohs(peer_port), peer_mss); 2558 dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, 2559 *(__be32 *)local_ip, *(__be32 *)peer_ip, 2560 local_port, peer_port, tos); 2561 } else { 2562 pr_debug("parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" 2563 , parent_ep, hwtid, 2564 local_ip, peer_ip, ntohs(local_port), 2565 ntohs(peer_port), peer_mss); 2566 dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev, 2567 local_ip, peer_ip, local_port, peer_port, 2568 tos, 2569 ((struct sockaddr_in6 *) 2570 &parent_ep->com.local_addr)->sin6_scope_id); 2571 } 2572 if (!dst) { 2573 pr_err("%s - failed to find dst entry!\n", __func__); 2574 goto reject; 2575 } 2576 2577 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 2578 if (!child_ep) { 2579 pr_err("%s - failed to allocate ep entry!\n", __func__); 2580 dst_release(dst); 2581 goto reject; 2582 } 2583 2584 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false, 2585 parent_ep->com.dev->rdev.lldi.adapter_type, tos); 2586 if (err) { 2587 pr_err("%s - failed to allocate l2t entry!\n", __func__); 2588 dst_release(dst); 2589 kfree(child_ep); 2590 goto reject; 2591 } 2592 2593 hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + 2594 sizeof(struct tcphdr) + 2595 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 
12 : 0); 2596 if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) 2597 child_ep->mtu = peer_mss + hdrs; 2598 2599 skb_queue_head_init(&child_ep->com.ep_skb_list); 2600 if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF)) 2601 goto fail; 2602 2603 state_set(&child_ep->com, CONNECTING); 2604 child_ep->com.dev = dev; 2605 child_ep->com.cm_id = NULL; 2606 2607 if (iptype == 4) { 2608 struct sockaddr_in *sin = (struct sockaddr_in *) 2609 &child_ep->com.local_addr; 2610 2611 sin->sin_family = AF_INET; 2612 sin->sin_port = local_port; 2613 sin->sin_addr.s_addr = *(__be32 *)local_ip; 2614 2615 sin = (struct sockaddr_in *)&child_ep->com.local_addr; 2616 sin->sin_family = AF_INET; 2617 sin->sin_port = ((struct sockaddr_in *) 2618 &parent_ep->com.local_addr)->sin_port; 2619 sin->sin_addr.s_addr = *(__be32 *)local_ip; 2620 2621 sin = (struct sockaddr_in *)&child_ep->com.remote_addr; 2622 sin->sin_family = AF_INET; 2623 sin->sin_port = peer_port; 2624 sin->sin_addr.s_addr = *(__be32 *)peer_ip; 2625 } else { 2626 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; 2627 sin6->sin6_family = PF_INET6; 2628 sin6->sin6_port = local_port; 2629 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); 2630 2631 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; 2632 sin6->sin6_family = PF_INET6; 2633 sin6->sin6_port = ((struct sockaddr_in6 *) 2634 &parent_ep->com.local_addr)->sin6_port; 2635 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); 2636 2637 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr; 2638 sin6->sin6_family = PF_INET6; 2639 sin6->sin6_port = peer_port; 2640 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); 2641 } 2642 2643 c4iw_get_ep(&parent_ep->com); 2644 child_ep->parent_ep = parent_ep; 2645 child_ep->tos = tos; 2646 child_ep->dst = dst; 2647 child_ep->hwtid = hwtid; 2648 2649 pr_debug("tx_chan %u smac_idx %u rss_qid %u\n", 2650 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); 2651 2652 timer_setup(&child_ep->timer, ep_timeout, 0); 2653 cxgb4_insert_tid(t, child_ep, hwtid, 2654 child_ep->com.local_addr.ss_family); 2655 insert_ep_tid(child_ep); 2656 if (accept_cr(child_ep, skb, req)) { 2657 c4iw_put_ep(&parent_ep->com); 2658 release_ep_resources(child_ep); 2659 } else { 2660 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history); 2661 } 2662 if (iptype == 6) { 2663 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; 2664 cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0], 2665 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 2666 } 2667 goto out; 2668 fail: 2669 c4iw_put_ep(&child_ep->com); 2670 reject: 2671 reject_cr(dev, hwtid, skb); 2672 out: 2673 if (parent_ep) 2674 c4iw_put_ep(&parent_ep->com); 2675 return 0; 2676 } 2677 2678 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) 2679 { 2680 struct c4iw_ep *ep; 2681 struct cpl_pass_establish *req = cplhdr(skb); 2682 unsigned int tid = GET_TID(req); 2683 int ret; 2684 u16 tcp_opt = ntohs(req->tcp_opt); 2685 2686 ep = get_ep_from_tid(dev, tid); 2687 if (!ep) 2688 return 0; 2689 2690 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 2691 ep->snd_seq = be32_to_cpu(req->snd_isn); 2692 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 2693 ep->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt); 2694 2695 pr_debug("ep %p hwtid %u tcp_opt 0x%02x\n", ep, tid, tcp_opt); 2696 2697 set_emss(ep, tcp_opt); 2698 2699 dst_confirm(ep->dst); 2700 mutex_lock(&ep->com.mutex); 2701 ep->com.state = MPA_REQ_WAIT; 2702 start_ep_timer(ep); 2703 set_bit(PASS_ESTAB, &ep->com.history); 2704 ret = send_flowc(ep); 2705 mutex_unlock(&ep->com.mutex); 2706 if 
(ret) 2707 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 2708 c4iw_put_ep(&ep->com); 2709 2710 return 0; 2711 } 2712 2713 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) 2714 { 2715 struct cpl_peer_close *hdr = cplhdr(skb); 2716 struct c4iw_ep *ep; 2717 struct c4iw_qp_attributes attrs; 2718 int disconnect = 1; 2719 int release = 0; 2720 unsigned int tid = GET_TID(hdr); 2721 int ret; 2722 2723 ep = get_ep_from_tid(dev, tid); 2724 if (!ep) 2725 return 0; 2726 2727 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 2728 dst_confirm(ep->dst); 2729 2730 set_bit(PEER_CLOSE, &ep->com.history); 2731 mutex_lock(&ep->com.mutex); 2732 switch (ep->com.state) { 2733 case MPA_REQ_WAIT: 2734 __state_set(&ep->com, CLOSING); 2735 break; 2736 case MPA_REQ_SENT: 2737 __state_set(&ep->com, CLOSING); 2738 connect_reply_upcall(ep, -ECONNRESET); 2739 break; 2740 case MPA_REQ_RCVD: 2741 2742 /* 2743 * We're gonna mark this puppy DEAD, but keep 2744 * the reference on it until the ULP accepts or 2745 * rejects the CR. Also wake up anyone waiting 2746 * in rdma connection migration (see c4iw_accept_cr()). 2747 */ 2748 __state_set(&ep->com, CLOSING); 2749 pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid); 2750 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); 2751 break; 2752 case MPA_REP_SENT: 2753 __state_set(&ep->com, CLOSING); 2754 pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid); 2755 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); 2756 break; 2757 case FPDU_MODE: 2758 start_ep_timer(ep); 2759 __state_set(&ep->com, CLOSING); 2760 attrs.next_state = C4IW_QP_STATE_CLOSING; 2761 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2762 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2763 if (ret != -ECONNRESET) { 2764 peer_close_upcall(ep); 2765 disconnect = 1; 2766 } 2767 break; 2768 case ABORTING: 2769 disconnect = 0; 2770 break; 2771 case CLOSING: 2772 __state_set(&ep->com, MORIBUND); 2773 disconnect = 0; 2774 break; 2775 case MORIBUND: 2776 (void)stop_ep_timer(ep); 2777 if (ep->com.cm_id && ep->com.qp) { 2778 attrs.next_state = C4IW_QP_STATE_IDLE; 2779 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2780 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2781 } 2782 close_complete_upcall(ep, 0); 2783 __state_set(&ep->com, DEAD); 2784 release = 1; 2785 disconnect = 0; 2786 break; 2787 case DEAD: 2788 disconnect = 0; 2789 break; 2790 default: 2791 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); 2792 } 2793 mutex_unlock(&ep->com.mutex); 2794 if (disconnect) 2795 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2796 if (release) 2797 release_ep_resources(ep); 2798 c4iw_put_ep(&ep->com); 2799 return 0; 2800 } 2801 2802 static void finish_peer_abort(struct c4iw_dev *dev, struct c4iw_ep *ep) 2803 { 2804 complete_cached_srq_buffers(ep, ep->srqe_idx); 2805 if (ep->com.cm_id && ep->com.qp) { 2806 struct c4iw_qp_attributes attrs; 2807 2808 attrs.next_state = C4IW_QP_STATE_ERROR; 2809 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2810 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2811 } 2812 peer_abort_upcall(ep); 2813 release_ep_resources(ep); 2814 c4iw_put_ep(&ep->com); 2815 } 2816 2817 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 2818 { 2819 struct cpl_abort_req_rss6 *req = cplhdr(skb); 2820 struct c4iw_ep *ep; 2821 struct sk_buff *rpl_skb; 2822 struct c4iw_qp_attributes attrs; 2823 int ret; 2824 int release = 0; 2825 unsigned int tid = GET_TID(req); 2826 u8 status; 2827 u32 srqidx; 2828 2829 u32 len = roundup(sizeof(struct cpl_abort_rpl), 16); 2830 2831 ep = get_ep_from_tid(dev, tid); 2832 if (!ep) 2833 return 0; 2834 2835 status = 
ABORT_RSS_STATUS_G(be32_to_cpu(req->srqidx_status)); 2836 2837 if (cxgb_is_neg_adv(status)) { 2838 pr_debug("Negative advice on abort- tid %u status %d (%s)\n", 2839 ep->hwtid, status, neg_adv_str(status)); 2840 ep->stats.abort_neg_adv++; 2841 mutex_lock(&dev->rdev.stats.lock); 2842 dev->rdev.stats.neg_adv++; 2843 mutex_unlock(&dev->rdev.stats.lock); 2844 goto deref_ep; 2845 } 2846 2847 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, 2848 ep->com.state); 2849 set_bit(PEER_ABORT, &ep->com.history); 2850 2851 /* 2852 * Wake up any threads in rdma_init() or rdma_fini(). 2853 * However, this is not needed if com state is just 2854 * MPA_REQ_SENT 2855 */ 2856 if (ep->com.state != MPA_REQ_SENT) 2857 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); 2858 2859 mutex_lock(&ep->com.mutex); 2860 switch (ep->com.state) { 2861 case CONNECTING: 2862 c4iw_put_ep(&ep->parent_ep->com); 2863 break; 2864 case MPA_REQ_WAIT: 2865 (void)stop_ep_timer(ep); 2866 break; 2867 case MPA_REQ_SENT: 2868 (void)stop_ep_timer(ep); 2869 if (status != CPL_ERR_CONN_RESET || mpa_rev == 1 || 2870 (mpa_rev == 2 && ep->tried_with_mpa_v1)) 2871 connect_reply_upcall(ep, -ECONNRESET); 2872 else { 2873 /* 2874 * we just don't send notification upwards because we 2875 * want to retry with mpa_v1 without upper layers even 2876 * knowing it. 2877 * 2878 * do some housekeeping so as to re-initiate the 2879 * connection 2880 */ 2881 pr_info("%s: mpa_rev=%d. Retrying with mpav1\n", 2882 __func__, mpa_rev); 2883 ep->retry_with_mpa_v1 = 1; 2884 } 2885 break; 2886 case MPA_REP_SENT: 2887 break; 2888 case MPA_REQ_RCVD: 2889 break; 2890 case MORIBUND: 2891 case CLOSING: 2892 stop_ep_timer(ep); 2893 fallthrough; 2894 case FPDU_MODE: 2895 if (ep->com.qp && ep->com.qp->srq) { 2896 srqidx = ABORT_RSS_SRQIDX_G( 2897 be32_to_cpu(req->srqidx_status)); 2898 if (srqidx) { 2899 complete_cached_srq_buffers(ep, srqidx); 2900 } else { 2901 /* Hold ep ref until finish_peer_abort() */ 2902 c4iw_get_ep(&ep->com); 2903 __state_set(&ep->com, ABORTING); 2904 set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags); 2905 read_tcb(ep); 2906 break; 2907 2908 } 2909 } 2910 2911 if (ep->com.cm_id && ep->com.qp) { 2912 attrs.next_state = C4IW_QP_STATE_ERROR; 2913 ret = c4iw_modify_qp(ep->com.qp->rhp, 2914 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 2915 &attrs, 1); 2916 if (ret) 2917 pr_err("%s - qp <- error failed!\n", __func__); 2918 } 2919 peer_abort_upcall(ep); 2920 break; 2921 case ABORTING: 2922 break; 2923 case DEAD: 2924 pr_warn("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); 2925 mutex_unlock(&ep->com.mutex); 2926 goto deref_ep; 2927 default: 2928 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); 2929 break; 2930 } 2931 dst_confirm(ep->dst); 2932 if (ep->com.state != ABORTING) { 2933 __state_set(&ep->com, DEAD); 2934 /* we don't release if we want to retry with mpa_v1 */ 2935 if (!ep->retry_with_mpa_v1) 2936 release = 1; 2937 } 2938 mutex_unlock(&ep->com.mutex); 2939 2940 rpl_skb = skb_dequeue(&ep->com.ep_skb_list); 2941 if (WARN_ON(!rpl_skb)) { 2942 release = 1; 2943 goto out; 2944 } 2945 2946 cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx); 2947 2948 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); 2949 out: 2950 if (release) 2951 release_ep_resources(ep); 2952 else if (ep->retry_with_mpa_v1) { 2953 if (ep->com.remote_addr.ss_family == AF_INET6) { 2954 struct sockaddr_in6 *sin6 = 2955 (struct sockaddr_in6 *) 2956 &ep->com.local_addr; 2957 cxgb4_clip_release( 2958 ep->com.dev->rdev.lldi.ports[0], 2959 (const u32 *)&sin6->sin6_addr.s6_addr, 2960 1); 2961 } 2962 
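		/*
		 * Tear down the hardware TID, route and L2T entry that
		 * belonged to the aborted MPA v2 attempt before
		 * re-driving the connection with MPA v1.
		 */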
xa_erase_irq(&ep->com.dev->hwtids, ep->hwtid); 2963 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid, 2964 ep->com.local_addr.ss_family); 2965 dst_release(ep->dst); 2966 cxgb4_l2t_release(ep->l2t); 2967 c4iw_reconnect(ep); 2968 } 2969 2970 deref_ep: 2971 c4iw_put_ep(&ep->com); 2972 /* Dereferencing ep, referenced in peer_abort_intr() */ 2973 c4iw_put_ep(&ep->com); 2974 return 0; 2975 } 2976 2977 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2978 { 2979 struct c4iw_ep *ep; 2980 struct c4iw_qp_attributes attrs; 2981 struct cpl_close_con_rpl *rpl = cplhdr(skb); 2982 int release = 0; 2983 unsigned int tid = GET_TID(rpl); 2984 2985 ep = get_ep_from_tid(dev, tid); 2986 if (!ep) 2987 return 0; 2988 2989 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 2990 2991 /* The cm_id may be null if we failed to connect */ 2992 mutex_lock(&ep->com.mutex); 2993 set_bit(CLOSE_CON_RPL, &ep->com.history); 2994 switch (ep->com.state) { 2995 case CLOSING: 2996 __state_set(&ep->com, MORIBUND); 2997 break; 2998 case MORIBUND: 2999 (void)stop_ep_timer(ep); 3000 if ((ep->com.cm_id) && (ep->com.qp)) { 3001 attrs.next_state = C4IW_QP_STATE_IDLE; 3002 c4iw_modify_qp(ep->com.qp->rhp, 3003 ep->com.qp, 3004 C4IW_QP_ATTR_NEXT_STATE, 3005 &attrs, 1); 3006 } 3007 close_complete_upcall(ep, 0); 3008 __state_set(&ep->com, DEAD); 3009 release = 1; 3010 break; 3011 case ABORTING: 3012 case DEAD: 3013 break; 3014 default: 3015 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); 3016 break; 3017 } 3018 mutex_unlock(&ep->com.mutex); 3019 if (release) 3020 release_ep_resources(ep); 3021 c4iw_put_ep(&ep->com); 3022 return 0; 3023 } 3024 3025 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) 3026 { 3027 struct cpl_rdma_terminate *rpl = cplhdr(skb); 3028 unsigned int tid = GET_TID(rpl); 3029 struct c4iw_ep *ep; 3030 struct c4iw_qp_attributes attrs; 3031 3032 ep = get_ep_from_tid(dev, tid); 3033 3034 if (ep) { 3035 if (ep->com.qp) { 3036 pr_warn("TERM received tid %u qpid %u\n", tid, 3037 ep->com.qp->wq.sq.qid); 3038 attrs.next_state = C4IW_QP_STATE_TERMINATE; 3039 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 3040 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 3041 } 3042 3043 /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3, 3044 * when entering the TERM state the RNIC MUST initiate a CLOSE. 3045 */ 3046 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 3047 c4iw_put_ep(&ep->com); 3048 } else 3049 pr_warn("TERM received tid %u no ep/qp\n", tid); 3050 3051 return 0; 3052 } 3053 3054 /* 3055 * Upcall from the adapter indicating data has been transmitted. 3056 * For us its just the single MPA request or reply. We can now free 3057 * the skb holding the mpa message. 3058 */ 3059 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) 3060 { 3061 struct c4iw_ep *ep; 3062 struct cpl_fw4_ack *hdr = cplhdr(skb); 3063 u8 credits = hdr->credits; 3064 unsigned int tid = GET_TID(hdr); 3065 3066 3067 ep = get_ep_from_tid(dev, tid); 3068 if (!ep) 3069 return 0; 3070 pr_debug("ep %p tid %u credits %u\n", 3071 ep, ep->hwtid, credits); 3072 if (credits == 0) { 3073 pr_debug("0 credit ack ep %p tid %u state %u\n", 3074 ep, ep->hwtid, state_read(&ep->com)); 3075 goto out; 3076 } 3077 3078 dst_confirm(ep->dst); 3079 if (ep->mpa_skb) { 3080 pr_debug("last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n", 3081 ep, ep->hwtid, state_read(&ep->com), 3082 ep->mpa_attr.initiator ? 
1 : 0); 3083 mutex_lock(&ep->com.mutex); 3084 kfree_skb(ep->mpa_skb); 3085 ep->mpa_skb = NULL; 3086 if (test_bit(STOP_MPA_TIMER, &ep->com.flags)) 3087 stop_ep_timer(ep); 3088 mutex_unlock(&ep->com.mutex); 3089 } 3090 out: 3091 c4iw_put_ep(&ep->com); 3092 return 0; 3093 } 3094 3095 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 3096 { 3097 int abort; 3098 struct c4iw_ep *ep = to_ep(cm_id); 3099 3100 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 3101 3102 mutex_lock(&ep->com.mutex); 3103 if (ep->com.state != MPA_REQ_RCVD) { 3104 mutex_unlock(&ep->com.mutex); 3105 c4iw_put_ep(&ep->com); 3106 return -ECONNRESET; 3107 } 3108 set_bit(ULP_REJECT, &ep->com.history); 3109 if (mpa_rev == 0) 3110 abort = 1; 3111 else 3112 abort = send_mpa_reject(ep, pdata, pdata_len); 3113 mutex_unlock(&ep->com.mutex); 3114 3115 stop_ep_timer(ep); 3116 c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL); 3117 c4iw_put_ep(&ep->com); 3118 return 0; 3119 } 3120 3121 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 3122 { 3123 int err; 3124 struct c4iw_qp_attributes attrs; 3125 enum c4iw_qp_attr_mask mask; 3126 struct c4iw_ep *ep = to_ep(cm_id); 3127 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 3128 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 3129 int abort = 0; 3130 3131 pr_debug("ep %p tid %u\n", ep, ep->hwtid); 3132 3133 mutex_lock(&ep->com.mutex); 3134 if (ep->com.state != MPA_REQ_RCVD) { 3135 err = -ECONNRESET; 3136 goto err_out; 3137 } 3138 3139 if (!qp) { 3140 err = -EINVAL; 3141 goto err_out; 3142 } 3143 3144 set_bit(ULP_ACCEPT, &ep->com.history); 3145 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) || 3146 (conn_param->ird > cur_max_read_depth(ep->com.dev))) { 3147 err = -EINVAL; 3148 goto err_abort; 3149 } 3150 3151 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 3152 if (conn_param->ord > ep->ird) { 3153 if (RELAXED_IRD_NEGOTIATION) { 3154 conn_param->ord = ep->ird; 3155 } else { 3156 ep->ird = conn_param->ird; 3157 ep->ord = conn_param->ord; 3158 send_mpa_reject(ep, conn_param->private_data, 3159 conn_param->private_data_len); 3160 err = -ENOMEM; 3161 goto err_abort; 3162 } 3163 } 3164 if (conn_param->ird < ep->ord) { 3165 if (RELAXED_IRD_NEGOTIATION && 3166 ep->ord <= h->rdev.lldi.max_ordird_qp) { 3167 conn_param->ird = ep->ord; 3168 } else { 3169 err = -ENOMEM; 3170 goto err_abort; 3171 } 3172 } 3173 } 3174 ep->ird = conn_param->ird; 3175 ep->ord = conn_param->ord; 3176 3177 if (ep->mpa_attr.version == 1) { 3178 if (peer2peer && ep->ird == 0) 3179 ep->ird = 1; 3180 } else { 3181 if (peer2peer && 3182 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) && 3183 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0) 3184 ep->ird = 1; 3185 } 3186 3187 pr_debug("ird %d ord %d\n", ep->ird, ep->ord); 3188 3189 ep->com.cm_id = cm_id; 3190 ref_cm_id(&ep->com); 3191 ep->com.qp = qp; 3192 ref_qp(ep); 3193 3194 /* bind QP to EP and move to RTS */ 3195 attrs.mpa_attr = ep->mpa_attr; 3196 attrs.max_ird = ep->ird; 3197 attrs.max_ord = ep->ord; 3198 attrs.llp_stream_handle = ep; 3199 attrs.next_state = C4IW_QP_STATE_RTS; 3200 3201 /* bind QP and TID with INIT_WR */ 3202 mask = C4IW_QP_ATTR_NEXT_STATE | 3203 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 3204 C4IW_QP_ATTR_MPA_ATTR | 3205 C4IW_QP_ATTR_MAX_IRD | 3206 C4IW_QP_ATTR_MAX_ORD; 3207 3208 err = c4iw_modify_qp(ep->com.qp->rhp, 3209 ep->com.qp, mask, &attrs, 1); 3210 if (err) 3211 goto err_deref_cm_id; 3212 3213 set_bit(STOP_MPA_TIMER, &ep->com.flags); 3214 err = send_mpa_reply(ep, 
conn_param->private_data, 3215 conn_param->private_data_len); 3216 if (err) 3217 goto err_deref_cm_id; 3218 3219 __state_set(&ep->com, FPDU_MODE); 3220 established_upcall(ep); 3221 mutex_unlock(&ep->com.mutex); 3222 c4iw_put_ep(&ep->com); 3223 return 0; 3224 err_deref_cm_id: 3225 deref_cm_id(&ep->com); 3226 err_abort: 3227 abort = 1; 3228 err_out: 3229 mutex_unlock(&ep->com.mutex); 3230 if (abort) 3231 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 3232 c4iw_put_ep(&ep->com); 3233 return err; 3234 } 3235 3236 static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 3237 { 3238 struct in_device *ind; 3239 int found = 0; 3240 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr; 3241 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; 3242 const struct in_ifaddr *ifa; 3243 3244 ind = in_dev_get(dev->rdev.lldi.ports[0]); 3245 if (!ind) 3246 return -EADDRNOTAVAIL; 3247 rcu_read_lock(); 3248 in_dev_for_each_ifa_rcu(ifa, ind) { 3249 if (ifa->ifa_flags & IFA_F_SECONDARY) 3250 continue; 3251 laddr->sin_addr.s_addr = ifa->ifa_address; 3252 raddr->sin_addr.s_addr = ifa->ifa_address; 3253 found = 1; 3254 break; 3255 } 3256 rcu_read_unlock(); 3257 3258 in_dev_put(ind); 3259 return found ? 0 : -EADDRNOTAVAIL; 3260 } 3261 3262 static int get_lladdr(struct net_device *dev, struct in6_addr *addr, 3263 unsigned char banned_flags) 3264 { 3265 struct inet6_dev *idev; 3266 int err = -EADDRNOTAVAIL; 3267 3268 rcu_read_lock(); 3269 idev = __in6_dev_get(dev); 3270 if (idev != NULL) { 3271 struct inet6_ifaddr *ifp; 3272 3273 read_lock_bh(&idev->lock); 3274 list_for_each_entry(ifp, &idev->addr_list, if_list) { 3275 if (ifp->scope == IFA_LINK && 3276 !(ifp->flags & banned_flags)) { 3277 memcpy(addr, &ifp->addr, 16); 3278 err = 0; 3279 break; 3280 } 3281 } 3282 read_unlock_bh(&idev->lock); 3283 } 3284 rcu_read_unlock(); 3285 return err; 3286 } 3287 3288 static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 3289 { 3290 struct in6_addr addr; 3291 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; 3292 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; 3293 3294 if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) { 3295 memcpy(la6->sin6_addr.s6_addr, &addr, 16); 3296 memcpy(ra6->sin6_addr.s6_addr, &addr, 16); 3297 return 0; 3298 } 3299 return -EADDRNOTAVAIL; 3300 } 3301 3302 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 3303 { 3304 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 3305 struct c4iw_ep *ep; 3306 int err = 0; 3307 struct sockaddr_in *laddr; 3308 struct sockaddr_in *raddr; 3309 struct sockaddr_in6 *laddr6; 3310 struct sockaddr_in6 *raddr6; 3311 __u8 *ra; 3312 int iptype; 3313 3314 if ((conn_param->ord > cur_max_read_depth(dev)) || 3315 (conn_param->ird > cur_max_read_depth(dev))) { 3316 err = -EINVAL; 3317 goto out; 3318 } 3319 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 3320 if (!ep) { 3321 pr_err("%s - cannot alloc ep\n", __func__); 3322 err = -ENOMEM; 3323 goto out; 3324 } 3325 3326 skb_queue_head_init(&ep->com.ep_skb_list); 3327 if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) { 3328 err = -ENOMEM; 3329 goto fail1; 3330 } 3331 3332 timer_setup(&ep->timer, ep_timeout, 0); 3333 ep->plen = conn_param->private_data_len; 3334 if (ep->plen) 3335 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 3336 conn_param->private_data, ep->plen); 3337 ep->ird = conn_param->ird; 3338 ep->ord = conn_param->ord; 3339 3340 if (peer2peer && ep->ord == 0) 
3341 ep->ord = 1; 3342 3343 ep->com.cm_id = cm_id; 3344 ref_cm_id(&ep->com); 3345 cm_id->provider_data = ep; 3346 ep->com.dev = dev; 3347 ep->com.qp = get_qhp(dev, conn_param->qpn); 3348 if (!ep->com.qp) { 3349 pr_warn("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); 3350 err = -EINVAL; 3351 goto fail2; 3352 } 3353 ref_qp(ep); 3354 pr_debug("qpn 0x%x qp %p cm_id %p\n", conn_param->qpn, 3355 ep->com.qp, cm_id); 3356 3357 /* 3358 * Allocate an active TID to initiate a TCP connection. 3359 */ 3360 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); 3361 if (ep->atid == -1) { 3362 pr_err("%s - cannot alloc atid\n", __func__); 3363 err = -ENOMEM; 3364 goto fail2; 3365 } 3366 err = xa_insert_irq(&dev->atids, ep->atid, ep, GFP_KERNEL); 3367 if (err) 3368 goto fail5; 3369 3370 memcpy(&ep->com.local_addr, &cm_id->m_local_addr, 3371 sizeof(ep->com.local_addr)); 3372 memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr, 3373 sizeof(ep->com.remote_addr)); 3374 3375 laddr = (struct sockaddr_in *)&ep->com.local_addr; 3376 raddr = (struct sockaddr_in *)&ep->com.remote_addr; 3377 laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr; 3378 raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr; 3379 3380 if (cm_id->m_remote_addr.ss_family == AF_INET) { 3381 iptype = 4; 3382 ra = (__u8 *)&raddr->sin_addr; 3383 3384 /* 3385 * Handle loopback requests to INADDR_ANY. 3386 */ 3387 if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { 3388 err = pick_local_ipaddrs(dev, cm_id); 3389 if (err) 3390 goto fail3; 3391 } 3392 3393 /* find a route */ 3394 pr_debug("saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n", 3395 &laddr->sin_addr, ntohs(laddr->sin_port), 3396 ra, ntohs(raddr->sin_port)); 3397 ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, 3398 laddr->sin_addr.s_addr, 3399 raddr->sin_addr.s_addr, 3400 laddr->sin_port, 3401 raddr->sin_port, cm_id->tos); 3402 } else { 3403 iptype = 6; 3404 ra = (__u8 *)&raddr6->sin6_addr; 3405 3406 /* 3407 * Handle loopback requests to INADDR_ANY. 
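	 * For IPv6 this is the unspecified address (::); in that case
	 * pick_local_ip6addrs() copies one of the port's link-local
	 * addresses into both the local and remote address before the
	 * route lookup.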
3408 */ 3409 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { 3410 err = pick_local_ip6addrs(dev, cm_id); 3411 if (err) 3412 goto fail3; 3413 } 3414 3415 /* find a route */ 3416 pr_debug("saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n", 3417 laddr6->sin6_addr.s6_addr, 3418 ntohs(laddr6->sin6_port), 3419 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port)); 3420 ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev, 3421 laddr6->sin6_addr.s6_addr, 3422 raddr6->sin6_addr.s6_addr, 3423 laddr6->sin6_port, 3424 raddr6->sin6_port, cm_id->tos, 3425 raddr6->sin6_scope_id); 3426 } 3427 if (!ep->dst) { 3428 pr_err("%s - cannot find route\n", __func__); 3429 err = -EHOSTUNREACH; 3430 goto fail3; 3431 } 3432 3433 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true, 3434 ep->com.dev->rdev.lldi.adapter_type, cm_id->tos); 3435 if (err) { 3436 pr_err("%s - cannot alloc l2e\n", __func__); 3437 goto fail4; 3438 } 3439 3440 pr_debug("txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 3441 ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 3442 ep->l2t->idx); 3443 3444 state_set(&ep->com, CONNECTING); 3445 ep->tos = cm_id->tos; 3446 3447 /* send connect request to rnic */ 3448 err = send_connect(ep); 3449 if (!err) 3450 goto out; 3451 3452 cxgb4_l2t_release(ep->l2t); 3453 fail4: 3454 dst_release(ep->dst); 3455 fail3: 3456 xa_erase_irq(&ep->com.dev->atids, ep->atid); 3457 fail5: 3458 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 3459 fail2: 3460 skb_queue_purge(&ep->com.ep_skb_list); 3461 deref_cm_id(&ep->com); 3462 fail1: 3463 c4iw_put_ep(&ep->com); 3464 out: 3465 return err; 3466 } 3467 3468 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3469 { 3470 int err; 3471 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) 3472 &ep->com.local_addr; 3473 3474 if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) { 3475 err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], 3476 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3477 if (err) 3478 return err; 3479 } 3480 c4iw_init_wr_wait(ep->com.wr_waitp); 3481 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0], 3482 ep->stid, &sin6->sin6_addr, 3483 sin6->sin6_port, 3484 ep->com.dev->rdev.lldi.rxq_ids[0]); 3485 if (!err) 3486 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3487 ep->com.wr_waitp, 3488 0, 0, __func__); 3489 else if (err > 0) 3490 err = net_xmit_errno(err); 3491 if (err) { 3492 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 3493 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3494 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n", 3495 err, ep->stid, 3496 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port)); 3497 } 3498 return err; 3499 } 3500 3501 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3502 { 3503 int err; 3504 struct sockaddr_in *sin = (struct sockaddr_in *) 3505 &ep->com.local_addr; 3506 3507 if (dev->rdev.lldi.enable_fw_ofld_conn) { 3508 do { 3509 err = cxgb4_create_server_filter( 3510 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3511 sin->sin_addr.s_addr, sin->sin_port, 0, 3512 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0); 3513 if (err == -EBUSY) { 3514 if (c4iw_fatal_error(&ep->com.dev->rdev)) { 3515 err = -EIO; 3516 break; 3517 } 3518 set_current_state(TASK_UNINTERRUPTIBLE); 3519 schedule_timeout(usecs_to_jiffies(100)); 3520 } 3521 } while (err == -EBUSY); 3522 } else { 3523 c4iw_init_wr_wait(ep->com.wr_waitp); 3524 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], 3525 ep->stid, sin->sin_addr.s_addr, sin->sin_port, 
3526 0, ep->com.dev->rdev.lldi.rxq_ids[0]); 3527 if (!err) 3528 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3529 ep->com.wr_waitp, 3530 0, 0, __func__); 3531 else if (err > 0) 3532 err = net_xmit_errno(err); 3533 } 3534 if (err) 3535 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n" 3536 , err, ep->stid, 3537 &sin->sin_addr, ntohs(sin->sin_port)); 3538 return err; 3539 } 3540 3541 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 3542 { 3543 int err = 0; 3544 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 3545 struct c4iw_listen_ep *ep; 3546 3547 might_sleep(); 3548 3549 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 3550 if (!ep) { 3551 pr_err("%s - cannot alloc ep\n", __func__); 3552 err = -ENOMEM; 3553 goto fail1; 3554 } 3555 skb_queue_head_init(&ep->com.ep_skb_list); 3556 pr_debug("ep %p\n", ep); 3557 ep->com.cm_id = cm_id; 3558 ref_cm_id(&ep->com); 3559 ep->com.dev = dev; 3560 ep->backlog = backlog; 3561 memcpy(&ep->com.local_addr, &cm_id->m_local_addr, 3562 sizeof(ep->com.local_addr)); 3563 3564 /* 3565 * Allocate a server TID. 3566 */ 3567 if (dev->rdev.lldi.enable_fw_ofld_conn && 3568 ep->com.local_addr.ss_family == AF_INET) 3569 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, 3570 cm_id->m_local_addr.ss_family, ep); 3571 else 3572 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, 3573 cm_id->m_local_addr.ss_family, ep); 3574 3575 if (ep->stid == -1) { 3576 pr_err("%s - cannot alloc stid\n", __func__); 3577 err = -ENOMEM; 3578 goto fail2; 3579 } 3580 err = xa_insert_irq(&dev->stids, ep->stid, ep, GFP_KERNEL); 3581 if (err) 3582 goto fail3; 3583 3584 state_set(&ep->com, LISTEN); 3585 if (ep->com.local_addr.ss_family == AF_INET) 3586 err = create_server4(dev, ep); 3587 else 3588 err = create_server6(dev, ep); 3589 if (!err) { 3590 cm_id->provider_data = ep; 3591 goto out; 3592 } 3593 xa_erase_irq(&ep->com.dev->stids, ep->stid); 3594 fail3: 3595 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3596 ep->com.local_addr.ss_family); 3597 fail2: 3598 deref_cm_id(&ep->com); 3599 c4iw_put_ep(&ep->com); 3600 fail1: 3601 out: 3602 return err; 3603 } 3604 3605 int c4iw_destroy_listen(struct iw_cm_id *cm_id) 3606 { 3607 int err; 3608 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 3609 3610 pr_debug("ep %p\n", ep); 3611 3612 might_sleep(); 3613 state_set(&ep->com, DEAD); 3614 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn && 3615 ep->com.local_addr.ss_family == AF_INET) { 3616 err = cxgb4_remove_server_filter( 3617 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3618 ep->com.dev->rdev.lldi.rxq_ids[0], false); 3619 } else { 3620 struct sockaddr_in6 *sin6; 3621 c4iw_init_wr_wait(ep->com.wr_waitp); 3622 err = cxgb4_remove_server( 3623 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3624 ep->com.dev->rdev.lldi.rxq_ids[0], 3625 ep->com.local_addr.ss_family == AF_INET6); 3626 if (err) 3627 goto done; 3628 err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp, 3629 0, 0, __func__); 3630 sin6 = (struct sockaddr_in6 *)&ep->com.local_addr; 3631 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 3632 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3633 } 3634 xa_erase_irq(&ep->com.dev->stids, ep->stid); 3635 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3636 ep->com.local_addr.ss_family); 3637 done: 3638 deref_cm_id(&ep->com); 3639 c4iw_put_ep(&ep->com); 3640 return err; 3641 } 3642 3643 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 3644 { 3645 int ret = 0; 3646 int close = 0; 3647 int fatal = 0; 3648 struct c4iw_rdev *rdev; 3649 3650 
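	/*
	 * Everything below runs under the ep mutex: decide from the
	 * current state whether a half-close or an abort is needed,
	 * arm the ep timer for the graceful case, and fall back to an
	 * abortive teardown (moving the QP to ERROR) if the close
	 * message itself cannot be sent.
	 */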
mutex_lock(&ep->com.mutex); 3651 3652 pr_debug("ep %p state %s, abrupt %d\n", ep, 3653 states[ep->com.state], abrupt); 3654 3655 /* 3656 * Ref the ep here in case we have fatal errors causing the 3657 * ep to be released and freed. 3658 */ 3659 c4iw_get_ep(&ep->com); 3660 3661 rdev = &ep->com.dev->rdev; 3662 if (c4iw_fatal_error(rdev)) { 3663 fatal = 1; 3664 close_complete_upcall(ep, -EIO); 3665 ep->com.state = DEAD; 3666 } 3667 switch (ep->com.state) { 3668 case MPA_REQ_WAIT: 3669 case MPA_REQ_SENT: 3670 case MPA_REQ_RCVD: 3671 case MPA_REP_SENT: 3672 case FPDU_MODE: 3673 case CONNECTING: 3674 close = 1; 3675 if (abrupt) 3676 ep->com.state = ABORTING; 3677 else { 3678 ep->com.state = CLOSING; 3679 3680 /* 3681 * if we close before we see the fw4_ack() then we fix 3682 * up the timer state since we're reusing it. 3683 */ 3684 if (ep->mpa_skb && 3685 test_bit(STOP_MPA_TIMER, &ep->com.flags)) { 3686 clear_bit(STOP_MPA_TIMER, &ep->com.flags); 3687 stop_ep_timer(ep); 3688 } 3689 start_ep_timer(ep); 3690 } 3691 set_bit(CLOSE_SENT, &ep->com.flags); 3692 break; 3693 case CLOSING: 3694 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 3695 close = 1; 3696 if (abrupt) { 3697 (void)stop_ep_timer(ep); 3698 ep->com.state = ABORTING; 3699 } else 3700 ep->com.state = MORIBUND; 3701 } 3702 break; 3703 case MORIBUND: 3704 case ABORTING: 3705 case DEAD: 3706 pr_debug("ignoring disconnect ep %p state %u\n", 3707 ep, ep->com.state); 3708 break; 3709 default: 3710 WARN_ONCE(1, "Bad endpoint state %u\n", ep->com.state); 3711 break; 3712 } 3713 3714 if (close) { 3715 if (abrupt) { 3716 set_bit(EP_DISC_ABORT, &ep->com.history); 3717 ret = send_abort(ep); 3718 } else { 3719 set_bit(EP_DISC_CLOSE, &ep->com.history); 3720 ret = send_halfclose(ep); 3721 } 3722 if (ret) { 3723 set_bit(EP_DISC_FAIL, &ep->com.history); 3724 if (!abrupt) { 3725 stop_ep_timer(ep); 3726 close_complete_upcall(ep, -EIO); 3727 } 3728 if (ep->com.qp) { 3729 struct c4iw_qp_attributes attrs; 3730 3731 attrs.next_state = C4IW_QP_STATE_ERROR; 3732 ret = c4iw_modify_qp(ep->com.qp->rhp, 3733 ep->com.qp, 3734 C4IW_QP_ATTR_NEXT_STATE, 3735 &attrs, 1); 3736 if (ret) 3737 pr_err("%s - qp <- error failed!\n", 3738 __func__); 3739 } 3740 fatal = 1; 3741 } 3742 } 3743 mutex_unlock(&ep->com.mutex); 3744 c4iw_put_ep(&ep->com); 3745 if (fatal) 3746 release_ep_resources(ep); 3747 return ret; 3748 } 3749 3750 static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3751 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3752 { 3753 struct c4iw_ep *ep; 3754 int atid = be32_to_cpu(req->tid); 3755 3756 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, 3757 (__force u32) req->tid); 3758 if (!ep) 3759 return; 3760 3761 switch (req->retval) { 3762 case FW_ENOMEM: 3763 set_bit(ACT_RETRY_NOMEM, &ep->com.history); 3764 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3765 send_fw_act_open_req(ep, atid); 3766 return; 3767 } 3768 fallthrough; 3769 case FW_EADDRINUSE: 3770 set_bit(ACT_RETRY_INUSE, &ep->com.history); 3771 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3772 send_fw_act_open_req(ep, atid); 3773 return; 3774 } 3775 break; 3776 default: 3777 pr_info("%s unexpected ofld conn wr retval %d\n", 3778 __func__, req->retval); 3779 break; 3780 } 3781 pr_err("active ofld_connect_wr failure %d atid %d\n", 3782 req->retval, atid); 3783 mutex_lock(&dev->rdev.stats.lock); 3784 dev->rdev.stats.act_ofld_conn_fails++; 3785 mutex_unlock(&dev->rdev.stats.lock); 3786 connect_reply_upcall(ep, status2errno(req->retval)); 3787 state_set(&ep->com, DEAD); 
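	/*
	 * The failure has been reported upward; release the CLIP entry
	 * (IPv6 only), the atid, the route and the L2T reference held
	 * by this active open attempt.
	 */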
3788 if (ep->com.remote_addr.ss_family == AF_INET6) { 3789 struct sockaddr_in6 *sin6 = 3790 (struct sockaddr_in6 *)&ep->com.local_addr; 3791 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 3792 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3793 } 3794 xa_erase_irq(&dev->atids, atid); 3795 cxgb4_free_atid(dev->rdev.lldi.tids, atid); 3796 dst_release(ep->dst); 3797 cxgb4_l2t_release(ep->l2t); 3798 c4iw_put_ep(&ep->com); 3799 } 3800 3801 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3802 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3803 { 3804 struct sk_buff *rpl_skb; 3805 struct cpl_pass_accept_req *cpl; 3806 int ret; 3807 3808 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie; 3809 if (req->retval) { 3810 pr_err("%s passive open failure %d\n", __func__, req->retval); 3811 mutex_lock(&dev->rdev.stats.lock); 3812 dev->rdev.stats.pas_ofld_conn_fails++; 3813 mutex_unlock(&dev->rdev.stats.lock); 3814 kfree_skb(rpl_skb); 3815 } else { 3816 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb); 3817 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 3818 (__force u32) htonl( 3819 (__force u32) req->tid))); 3820 ret = pass_accept_req(dev, rpl_skb); 3821 if (!ret) 3822 kfree_skb(rpl_skb); 3823 } 3824 return; 3825 } 3826 3827 static inline u64 t4_tcb_get_field64(__be64 *tcb, u16 word) 3828 { 3829 u64 tlo = be64_to_cpu(tcb[((31 - word) / 2)]); 3830 u64 thi = be64_to_cpu(tcb[((31 - word) / 2) - 1]); 3831 u64 t; 3832 u32 shift = 32; 3833 3834 t = (thi << shift) | (tlo >> shift); 3835 3836 return t; 3837 } 3838 3839 static inline u32 t4_tcb_get_field32(__be64 *tcb, u16 word, u32 mask, u32 shift) 3840 { 3841 u32 v; 3842 u64 t = be64_to_cpu(tcb[(31 - word) / 2]); 3843 3844 if (word & 0x1) 3845 shift += 32; 3846 v = (t >> shift) & mask; 3847 return v; 3848 } 3849 3850 static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 3851 { 3852 struct cpl_get_tcb_rpl *rpl = cplhdr(skb); 3853 __be64 *tcb = (__be64 *)(rpl + 1); 3854 unsigned int tid = GET_TID(rpl); 3855 struct c4iw_ep *ep; 3856 u64 t_flags_64; 3857 u32 rx_pdu_out; 3858 3859 ep = get_ep_from_tid(dev, tid); 3860 if (!ep) 3861 return 0; 3862 /* Examine the TF_RX_PDU_OUT (bit 49 of the t_flags) in order to 3863 * determine if there's a rx PDU feedback event pending. 3864 * 3865 * If that bit is set, it means we'll need to re-read the TCB's 3866 * rq_start value. The final value is the one present in a TCB 3867 * with the TF_RX_PDU_OUT bit cleared. 
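	 * The re-read is bounded by rx_pdu_out_cnt: if a second reply
	 * still has TF_RX_PDU_OUT set we warn once and proceed with
	 * the cleanup instead of re-reading again.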
3868 */
3869
3870 t_flags_64 = t4_tcb_get_field64(tcb, TCB_T_FLAGS_W);
3871 rx_pdu_out = (t_flags_64 & TF_RX_PDU_OUT_V(1)) >> TF_RX_PDU_OUT_S;
3872
3873 c4iw_put_ep(&ep->com); /* from get_ep_from_tid() */
3874 c4iw_put_ep(&ep->com); /* from read_tcb() */
3875
3876 /* If TF_RX_PDU_OUT bit is set, re-read the TCB */
3877 if (rx_pdu_out) {
3878 if (++ep->rx_pdu_out_cnt >= 2) {
3879 WARN_ONCE(1, "tcb re-read() reached the guard limit, finishing the cleanup\n");
3880 goto cleanup;
3881 }
3882 read_tcb(ep);
3883 return 0;
3884 }
3885
3886 ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
3887 TCB_RQ_START_S);
3888 cleanup:
3889 pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
3890
3891 if (test_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags))
3892 finish_peer_abort(dev, ep);
3893 else if (test_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags))
3894 send_abort_req(ep);
3895 else
3896 WARN_ONCE(1, "unexpected state!");
3897
3898 return 0;
3899 }
3900
3901 static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
3902 {
3903 struct cpl_fw6_msg *rpl = cplhdr(skb);
3904 struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
3905
3906 switch (rpl->type) {
3907 case FW6_TYPE_CQE:
3908 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
3909 break;
3910 case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
3911 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
3912 switch (req->t_state) {
3913 case TCP_SYN_SENT:
3914 active_ofld_conn_reply(dev, skb, req);
3915 break;
3916 case TCP_SYN_RECV:
3917 passive_ofld_conn_reply(dev, skb, req);
3918 break;
3919 default:
3920 pr_err("%s unexpected ofld conn wr state %d\n",
3921 __func__, req->t_state);
3922 break;
3923 }
3924 break;
3925 }
3926 return 0;
3927 }
3928
3929 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
3930 {
3931 __be32 l2info;
3932 __be16 hdr_len, vlantag, len;
3933 u16 eth_hdr_len;
3934 int tcp_hdr_len, ip_hdr_len;
3935 u8 intf;
3936 struct cpl_rx_pkt *cpl = cplhdr(skb);
3937 struct cpl_pass_accept_req *req;
3938 struct tcp_options_received tmp_opt;
3939 struct c4iw_dev *dev;
3940 enum chip_type type;
3941
3942 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
3943 /* Store values from cpl_rx_pkt in temporary location. */
3944 vlantag = cpl->vlan;
3945 len = cpl->len;
3946 l2info = cpl->l2info;
3947 hdr_len = cpl->hdr_len;
3948 intf = cpl->iff;
3949
3950 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
3951
3952 /*
3953 * We need to parse the TCP options from the SYN packet
3954 * in order to generate the cpl_pass_accept_req.
3955 */
3956 memset(&tmp_opt, 0, sizeof(tmp_opt));
3957 tcp_clear_options(&tmp_opt);
3958 tcp_parse_options(&init_net, skb, &tmp_opt, 0, NULL);
3959
3960 req = __skb_push(skb, sizeof(*req));
3961 memset(req, 0, sizeof(*req));
3962 req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
3963 SYN_MAC_IDX_V(RX_MACIDX_G(
3964 be32_to_cpu(l2info))) |
3965 SYN_XACT_MATCH_F);
3966 type = dev->rdev.lldi.adapter_type;
3967 tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
3968 ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
3969 req->hdr_len =
3970 cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
3971 if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
3972 eth_hdr_len = is_t4(type) ?
3973 RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
3974 RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
3975 req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
3976 IP_HDR_LEN_V(ip_hdr_len) |
3977 ETH_HDR_LEN_V(eth_hdr_len));
3978 } else { /* T6 and later */
3979 eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
3980 req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
3981 T6_IP_HDR_LEN_V(ip_hdr_len) |
3982 T6_ETH_HDR_LEN_V(eth_hdr_len));
3983 }
3984 req->vlan = vlantag;
3985 req->len = len;
3986 req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
3987 PASS_OPEN_TOS_V(tos));
3988 req->tcpopt.mss = htons(tmp_opt.mss_clamp);
3989 if (tmp_opt.wscale_ok)
3990 req->tcpopt.wsf = tmp_opt.snd_wscale;
3991 req->tcpopt.tstamp = tmp_opt.saw_tstamp;
3992 if (tmp_opt.sack_ok)
3993 req->tcpopt.sack = 1;
3994 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
3995 return;
3996 }
3997
3998 static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3999 __be32 laddr, __be16 lport,
4000 __be32 raddr, __be16 rport,
4001 u32 rcv_isn, u32 filter, u16 window,
4002 u32 rss_qid, u8 port_id)
4003 {
4004 struct sk_buff *req_skb;
4005 struct fw_ofld_connection_wr *req;
4006 struct cpl_pass_accept_req *cpl = cplhdr(skb);
4007 int ret;
4008
4009 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
4010 if (!req_skb)
4011 return;
4012 req = __skb_put_zero(req_skb, sizeof(*req));
4013 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
4014 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
4015 req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
4016 req->le.filter = (__force __be32) filter;
4017 req->le.lport = lport;
4018 req->le.pport = rport;
4019 req->le.u.ipv4.lip = laddr;
4020 req->le.u.ipv4.pip = raddr;
4021 req->tcb.rcv_nxt = htonl(rcv_isn + 1);
4022 req->tcb.rcv_adv = htons(window);
4023 req->tcb.t_state_to_astid =
4024 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
4025 FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
4026 FW_OFLD_CONNECTION_WR_ASTID_V(
4027 PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
4028
4029 /*
4030 * We store the qid in opt2, which the firmware will use
4031 * to send us the wr response.
4032 */
4033 req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
4034
4035 /*
4036 * We initialize the MSS index in the TCB to 0xF so that,
4037 * when the driver sends cpl_pass_accept_rpl, the TCB picks up
4038 * the correct value. If this were 0, TP would ignore any
4039 * value > 0 for the MSS index.
4040 */
4041 req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
4042 req->cookie = (uintptr_t)skb;
4043
4044 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
4045 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
4046 if (ret < 0) {
4047 pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
4048 ret);
4049 kfree_skb(skb);
4050 kfree_skb(req_skb);
4051 }
4052 }
4053
4054 /*
4055 * Handler for CPL_RX_PKT messages. These are received when a filter,
4056 * rather than a server entry, is used to redirect a SYN packet.
4057 * Packets that hit the filter are redirected to the offload queue,
4058 * and the driver then tries to establish the connection using a
4059 * firmware work request.
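 * The skb is stashed in the work request's cookie; when the firmware
 * replies with FW6_TYPE_OFLD_CONNECTION_WR_RPL, passive_ofld_conn_reply()
 * fills in the real TID on the saved skb and passes it to
 * pass_accept_req().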
4060 */ 4061 static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) 4062 { 4063 int stid; 4064 unsigned int filter; 4065 struct ethhdr *eh = NULL; 4066 struct vlan_ethhdr *vlan_eh = NULL; 4067 struct iphdr *iph; 4068 struct tcphdr *tcph; 4069 struct rss_header *rss = (void *)skb->data; 4070 struct cpl_rx_pkt *cpl = (void *)skb->data; 4071 struct cpl_pass_accept_req *req = (void *)(rss + 1); 4072 struct l2t_entry *e; 4073 struct dst_entry *dst; 4074 struct c4iw_ep *lep = NULL; 4075 u16 window; 4076 struct port_info *pi; 4077 struct net_device *pdev; 4078 u16 rss_qid, eth_hdr_len; 4079 int step; 4080 struct neighbour *neigh; 4081 4082 /* Drop all non-SYN packets */ 4083 if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F))) 4084 goto reject; 4085 4086 /* 4087 * Drop all packets which did not hit the filter. 4088 * Unlikely to happen. 4089 */ 4090 if (!(rss->filter_hit && rss->filter_tid)) 4091 goto reject; 4092 4093 /* 4094 * Calculate the server tid from filter hit index from cpl_rx_pkt. 4095 */ 4096 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val); 4097 4098 lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid); 4099 if (!lep) { 4100 pr_warn("%s connect request on invalid stid %d\n", 4101 __func__, stid); 4102 goto reject; 4103 } 4104 4105 switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) { 4106 case CHELSIO_T4: 4107 eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info)); 4108 break; 4109 case CHELSIO_T5: 4110 eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info)); 4111 break; 4112 case CHELSIO_T6: 4113 eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info)); 4114 break; 4115 default: 4116 pr_err("T%d Chip is not supported\n", 4117 CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)); 4118 goto reject; 4119 } 4120 4121 if (eth_hdr_len == ETH_HLEN) { 4122 eh = (struct ethhdr *)(req + 1); 4123 iph = (struct iphdr *)(eh + 1); 4124 } else { 4125 vlan_eh = (struct vlan_ethhdr *)(req + 1); 4126 iph = (struct iphdr *)(vlan_eh + 1); 4127 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan)); 4128 } 4129 4130 if (iph->version != 0x4) 4131 goto reject; 4132 4133 tcph = (struct tcphdr *)(iph + 1); 4134 skb_set_network_header(skb, (void *)iph - (void *)rss); 4135 skb_set_transport_header(skb, (void *)tcph - (void *)rss); 4136 skb_get(skb); 4137 4138 pr_debug("lip 0x%x lport %u pip 0x%x pport %u tos %d\n", 4139 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr), 4140 ntohs(tcph->source), iph->tos); 4141 4142 dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, 4143 iph->daddr, iph->saddr, tcph->dest, 4144 tcph->source, iph->tos); 4145 if (!dst) { 4146 pr_err("%s - failed to find dst entry!\n", __func__); 4147 goto reject; 4148 } 4149 neigh = dst_neigh_lookup_skb(dst, skb); 4150 4151 if (!neigh) { 4152 pr_err("%s - failed to allocate neigh!\n", __func__); 4153 goto free_dst; 4154 } 4155 4156 if (neigh->dev->flags & IFF_LOOPBACK) { 4157 pdev = ip_dev_find(&init_net, iph->daddr); 4158 if (!pdev) { 4159 pr_err("%s - failed to find device!\n", __func__); 4160 goto free_dst; 4161 } 4162 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 4163 pdev, 0); 4164 pi = (struct port_info *)netdev_priv(pdev); 4165 dev_put(pdev); 4166 } else { 4167 pdev = get_real_dev(neigh->dev); 4168 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 4169 pdev, 0); 4170 pi = (struct port_info *)netdev_priv(pdev); 4171 } 4172 neigh_release(neigh); 4173 if (!e) { 4174 pr_err("%s - failed to allocate l2t entry!\n", 4175 __func__); 4176 goto free_dst; 4177 } 4178 4179 step = dev->rdev.lldi.nrxq / 
dev->rdev.lldi.nchan;
4180 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
4181 window = (__force u16) htons((__force u16)tcph->window);
4182
4183 /* Calculate the filter portion for the LE region. */
4184 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
4185 dev->rdev.lldi.ports[0],
4186 e));
4187
4188 /*
4189 * Synthesize the cpl_pass_accept_req. We have everything except the
4190 * TID. Once the firmware sends a reply with the TID, we update the TID
4191 * field in the cpl and pass it through the regular cpl_pass_accept_req path.
4192 */
4193 build_cpl_pass_accept_req(skb, stid, iph->tos);
4194 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
4195 tcph->source, ntohl(tcph->seq), filter, window,
4196 rss_qid, pi->port_id);
4197 cxgb4_l2t_release(e);
4198 free_dst:
4199 dst_release(dst);
4200 reject:
4201 if (lep)
4202 c4iw_put_ep(&lep->com);
4203 return 0;
4204 }
4205
4206 /*
4207 * These are the real handlers that are called from a
4208 * work queue.
4209 */
4210 static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
4211 [CPL_ACT_ESTABLISH] = act_establish,
4212 [CPL_ACT_OPEN_RPL] = act_open_rpl,
4213 [CPL_RX_DATA] = rx_data,
4214 [CPL_ABORT_RPL_RSS] = abort_rpl,
4215 [CPL_ABORT_RPL] = abort_rpl,
4216 [CPL_PASS_OPEN_RPL] = pass_open_rpl,
4217 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
4218 [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
4219 [CPL_PASS_ESTABLISH] = pass_establish,
4220 [CPL_PEER_CLOSE] = peer_close,
4221 [CPL_ABORT_REQ_RSS] = peer_abort,
4222 [CPL_CLOSE_CON_RPL] = close_con_rpl,
4223 [CPL_RDMA_TERMINATE] = terminate,
4224 [CPL_FW4_ACK] = fw4_ack,
4225 [CPL_GET_TCB_RPL] = read_tcb_rpl,
4226 [CPL_FW6_MSG] = deferred_fw6_msg,
4227 [CPL_RX_PKT] = rx_pkt,
4228 [FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
4229 [FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
4230 };
4231
4232 static void process_timeout(struct c4iw_ep *ep)
4233 {
4234 struct c4iw_qp_attributes attrs;
4235 int abort = 1;
4236
4237 mutex_lock(&ep->com.mutex);
4238 pr_debug("ep %p tid %u state %d\n", ep, ep->hwtid, ep->com.state);
4239 set_bit(TIMEDOUT, &ep->com.history);
4240 switch (ep->com.state) {
4241 case MPA_REQ_SENT:
4242 connect_reply_upcall(ep, -ETIMEDOUT);
4243 break;
4244 case MPA_REQ_WAIT:
4245 case MPA_REQ_RCVD:
4246 case MPA_REP_SENT:
4247 case FPDU_MODE:
4248 break;
4249 case CLOSING:
4250 case MORIBUND:
4251 if (ep->com.cm_id && ep->com.qp) {
4252 attrs.next_state = C4IW_QP_STATE_ERROR;
4253 c4iw_modify_qp(ep->com.qp->rhp,
4254 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
4255 &attrs, 1);
4256 }
4257 close_complete_upcall(ep, -ETIMEDOUT);
4258 break;
4259 case ABORTING:
4260 case DEAD:
4261
4262 /*
4263 * These states are expected if the ep timed out at the same
4264 * time as another thread was calling stop_ep_timer().
4265 * So we silently do nothing for these states.
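 * We skip the disconnect in that case and just drop the ep
 * reference below.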
4266 */ 4267 abort = 0; 4268 break; 4269 default: 4270 WARN(1, "%s unexpected state ep %p tid %u state %u\n", 4271 __func__, ep, ep->hwtid, ep->com.state); 4272 abort = 0; 4273 } 4274 mutex_unlock(&ep->com.mutex); 4275 if (abort) 4276 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 4277 c4iw_put_ep(&ep->com); 4278 } 4279 4280 static void process_timedout_eps(void) 4281 { 4282 struct c4iw_ep *ep; 4283 4284 spin_lock_irq(&timeout_lock); 4285 while (!list_empty(&timeout_list)) { 4286 struct list_head *tmp; 4287 4288 tmp = timeout_list.next; 4289 list_del(tmp); 4290 tmp->next = NULL; 4291 tmp->prev = NULL; 4292 spin_unlock_irq(&timeout_lock); 4293 ep = list_entry(tmp, struct c4iw_ep, entry); 4294 process_timeout(ep); 4295 spin_lock_irq(&timeout_lock); 4296 } 4297 spin_unlock_irq(&timeout_lock); 4298 } 4299 4300 static void process_work(struct work_struct *work) 4301 { 4302 struct sk_buff *skb = NULL; 4303 struct c4iw_dev *dev; 4304 struct cpl_act_establish *rpl; 4305 unsigned int opcode; 4306 int ret; 4307 4308 process_timedout_eps(); 4309 while ((skb = skb_dequeue(&rxq))) { 4310 rpl = cplhdr(skb); 4311 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 4312 opcode = rpl->ot.opcode; 4313 4314 if (opcode >= ARRAY_SIZE(work_handlers) || 4315 !work_handlers[opcode]) { 4316 pr_err("No handler for opcode 0x%x.\n", opcode); 4317 kfree_skb(skb); 4318 } else { 4319 ret = work_handlers[opcode](dev, skb); 4320 if (!ret) 4321 kfree_skb(skb); 4322 } 4323 process_timedout_eps(); 4324 } 4325 } 4326 4327 static DECLARE_WORK(skb_work, process_work); 4328 4329 static void ep_timeout(struct timer_list *t) 4330 { 4331 struct c4iw_ep *ep = from_timer(ep, t, timer); 4332 int kickit = 0; 4333 4334 spin_lock(&timeout_lock); 4335 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 4336 /* 4337 * Only insert if it is not already on the list. 4338 */ 4339 if (!ep->entry.next) { 4340 list_add_tail(&ep->entry, &timeout_list); 4341 kickit = 1; 4342 } 4343 } 4344 spin_unlock(&timeout_lock); 4345 if (kickit) 4346 queue_work(workq, &skb_work); 4347 } 4348 4349 /* 4350 * All the CM events are handled on a work queue to have a safe context. 4351 */ 4352 static int sched(struct c4iw_dev *dev, struct sk_buff *skb) 4353 { 4354 4355 /* 4356 * Save dev in the skb->cb area. 4357 */ 4358 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; 4359 4360 /* 4361 * Queue the skb and schedule the worker thread. 4362 */ 4363 skb_queue_tail(&rxq, skb); 4364 queue_work(workq, &skb_work); 4365 return 0; 4366 } 4367 4368 static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 4369 { 4370 struct cpl_set_tcb_rpl *rpl = cplhdr(skb); 4371 4372 if (rpl->status != CPL_ERR_NONE) { 4373 pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n", 4374 rpl->status, GET_TID(rpl)); 4375 } 4376 kfree_skb(skb); 4377 return 0; 4378 } 4379 4380 static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 4381 { 4382 struct cpl_fw6_msg *rpl = cplhdr(skb); 4383 struct c4iw_wr_wait *wr_waitp; 4384 int ret; 4385 4386 pr_debug("type %u\n", rpl->type); 4387 4388 switch (rpl->type) { 4389 case FW6_TYPE_WR_RPL: 4390 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); 4391 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; 4392 pr_debug("wr_waitp %p ret %u\n", wr_waitp, ret); 4393 if (wr_waitp) 4394 c4iw_wake_up_deref(wr_waitp, ret ? 
-ret : 0); 4395 kfree_skb(skb); 4396 break; 4397 case FW6_TYPE_CQE: 4398 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 4399 sched(dev, skb); 4400 break; 4401 default: 4402 pr_err("%s unexpected fw6 msg type %u\n", 4403 __func__, rpl->type); 4404 kfree_skb(skb); 4405 break; 4406 } 4407 return 0; 4408 } 4409 4410 static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) 4411 { 4412 struct cpl_abort_req_rss *req = cplhdr(skb); 4413 struct c4iw_ep *ep; 4414 unsigned int tid = GET_TID(req); 4415 4416 ep = get_ep_from_tid(dev, tid); 4417 /* This EP will be dereferenced in peer_abort() */ 4418 if (!ep) { 4419 pr_warn("Abort on non-existent endpoint, tid %d\n", tid); 4420 kfree_skb(skb); 4421 return 0; 4422 } 4423 if (cxgb_is_neg_adv(req->status)) { 4424 pr_debug("Negative advice on abort- tid %u status %d (%s)\n", 4425 ep->hwtid, req->status, 4426 neg_adv_str(req->status)); 4427 goto out; 4428 } 4429 pr_debug("ep %p tid %u state %u\n", ep, ep->hwtid, ep->com.state); 4430 4431 c4iw_wake_up_noref(ep->com.wr_waitp, -ECONNRESET); 4432 out: 4433 sched(dev, skb); 4434 return 0; 4435 } 4436 4437 /* 4438 * Most upcalls from the T4 Core go to sched() to 4439 * schedule the processing on a work queue. 4440 */ 4441 c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { 4442 [CPL_ACT_ESTABLISH] = sched, 4443 [CPL_ACT_OPEN_RPL] = sched, 4444 [CPL_RX_DATA] = sched, 4445 [CPL_ABORT_RPL_RSS] = sched, 4446 [CPL_ABORT_RPL] = sched, 4447 [CPL_PASS_OPEN_RPL] = sched, 4448 [CPL_CLOSE_LISTSRV_RPL] = sched, 4449 [CPL_PASS_ACCEPT_REQ] = sched, 4450 [CPL_PASS_ESTABLISH] = sched, 4451 [CPL_PEER_CLOSE] = sched, 4452 [CPL_CLOSE_CON_RPL] = sched, 4453 [CPL_ABORT_REQ_RSS] = peer_abort_intr, 4454 [CPL_RDMA_TERMINATE] = sched, 4455 [CPL_FW4_ACK] = sched, 4456 [CPL_SET_TCB_RPL] = set_tcb_rpl, 4457 [CPL_GET_TCB_RPL] = sched, 4458 [CPL_FW6_MSG] = fw6_msg, 4459 [CPL_RX_PKT] = sched 4460 }; 4461 4462 int __init c4iw_cm_init(void) 4463 { 4464 skb_queue_head_init(&rxq); 4465 4466 workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM); 4467 if (!workq) 4468 return -ENOMEM; 4469 4470 return 0; 4471 } 4472 4473 void c4iw_cm_term(void) 4474 { 4475 WARN_ON(!list_empty(&timeout_list)); 4476 destroy_workqueue(workq); 4477 } 4478