/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include "iw_cxgb4.h"
#include "clip_tbl.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
{
	struct sk_buff *skb;
	unsigned int i;
	size_t len;

	len = roundup(sizeof(union cpl_wr_size), 16);
	for (i = 0; i < size; i++) {
		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			goto fail;
		skb_queue_tail(ep_skb_list, skb);
	}
	return 0;
fail:
	skb_queue_purge(ep_skb_list);
	return -ENOMEM;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

static void remove_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

static void insert_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

/*
 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{
	struct c4iw_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->hwtid_idr, tid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

/*
 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
 */
static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
					       unsigned int stid)
{
	struct c4iw_listen_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->stid_idr, stid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		if (ep->mpa_skb)
			kfree_skb(ep->mpa_skb);
	}
	if (!skb_queue_empty(&ep->com.ep_skb_list))
		skb_queue_purge(&ep->com.ep_skb_list);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);

	/*
	 * If we have a hwtid, then remove it from the idr table
	 * so lookups will no longer find this endpoint. Otherwise
	 * we have a race where one thread finds the ep ptr just
	 * before the other thread is freeing the ep memory.
	 */
	if (ep->hwtid != -1)
		remove_ep_tid(ep);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (!skb)
			return NULL;
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}
static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}

static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}

out:
	return dst;
}

static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				    __be32 peer_ip, __be16 local_port,
				    __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n) {
		dst_release(&rt->dst);
		return NULL;
	}
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_err(MOD "ARP failure\n");
	kfree_skb(skb);
}

static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
}

enum {
	NUM_FAKE_CPLS = 2,
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
};

static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	return 0;
}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	c4iw_put_ep(&ep->parent_ep->com);
	release_ep_resources(ep);
	return 0;
}

/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources. This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = cpl;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
586 */ 587 *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep; 588 sched(ep->com.dev, skb); 589 } 590 591 /* Handle an ARP failure for an accept */ 592 static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb) 593 { 594 struct c4iw_ep *ep = handle; 595 596 pr_err(MOD "ARP failure during accept - tid %u -dropping connection\n", 597 ep->hwtid); 598 599 __state_set(&ep->com, DEAD); 600 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE); 601 } 602 603 /* 604 * Handle an ARP failure for an active open. 605 */ 606 static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) 607 { 608 struct c4iw_ep *ep = handle; 609 610 printk(KERN_ERR MOD "ARP failure during connect\n"); 611 connect_reply_upcall(ep, -EHOSTUNREACH); 612 __state_set(&ep->com, DEAD); 613 if (ep->com.remote_addr.ss_family == AF_INET6) { 614 struct sockaddr_in6 *sin6 = 615 (struct sockaddr_in6 *)&ep->com.local_addr; 616 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 617 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 618 } 619 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 620 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 621 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); 622 } 623 624 /* 625 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant 626 * and send it along. 627 */ 628 static void abort_arp_failure(void *handle, struct sk_buff *skb) 629 { 630 int ret; 631 struct c4iw_ep *ep = handle; 632 struct c4iw_rdev *rdev = &ep->com.dev->rdev; 633 struct cpl_abort_req *req = cplhdr(skb); 634 635 PDBG("%s rdev %p\n", __func__, rdev); 636 req->cmd = CPL_ABORT_NO_RST; 637 ret = c4iw_ofld_send(rdev, skb); 638 if (ret) { 639 __state_set(&ep->com, DEAD); 640 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); 641 } 642 } 643 644 static int send_flowc(struct c4iw_ep *ep) 645 { 646 struct fw_flowc_wr *flowc; 647 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list); 648 int i; 649 u16 vlan = ep->l2t->vlan; 650 int nparams; 651 652 if (WARN_ON(!skb)) 653 return -ENOMEM; 654 655 if (vlan == CPL_L2T_VLAN_NONE) 656 nparams = 8; 657 else 658 nparams = 9; 659 660 flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN); 661 662 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | 663 FW_FLOWC_WR_NPARAMS_V(nparams)); 664 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN, 665 16)) | FW_WR_FLOWID_V(ep->hwtid)); 666 667 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 668 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V 669 (ep->com.dev->rdev.lldi.pf)); 670 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 671 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); 672 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 673 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); 674 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 675 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); 676 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 677 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); 678 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 679 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); 680 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 681 flowc->mnemval[6].val = cpu_to_be32(ep->snd_win); 682 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 683 flowc->mnemval[7].val = cpu_to_be32(ep->emss); 684 if (nparams == 9) { 685 u16 pri; 686 687 pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 688 flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 689 flowc->mnemval[8].val = cpu_to_be32(pri); 690 } else { 691 /* Pad WR to 16 
byte boundary */ 692 flowc->mnemval[8].mnemonic = 0; 693 flowc->mnemval[8].val = 0; 694 } 695 for (i = 0; i < 9; i++) { 696 flowc->mnemval[i].r4[0] = 0; 697 flowc->mnemval[i].r4[1] = 0; 698 flowc->mnemval[i].r4[2] = 0; 699 } 700 701 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 702 return c4iw_ofld_send(&ep->com.dev->rdev, skb); 703 } 704 705 static int send_halfclose(struct c4iw_ep *ep) 706 { 707 struct cpl_close_con_req *req; 708 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list); 709 int wrlen = roundup(sizeof *req, 16); 710 711 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 712 if (WARN_ON(!skb)) 713 return -ENOMEM; 714 715 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 716 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 717 req = (struct cpl_close_con_req *) skb_put(skb, wrlen); 718 memset(req, 0, wrlen); 719 INIT_TP_WR(req, ep->hwtid); 720 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, 721 ep->hwtid)); 722 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 723 } 724 725 static int send_abort(struct c4iw_ep *ep) 726 { 727 struct cpl_abort_req *req; 728 int wrlen = roundup(sizeof *req, 16); 729 struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list); 730 731 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 732 if (WARN_ON(!req_skb)) 733 return -ENOMEM; 734 735 set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx); 736 t4_set_arp_err_handler(req_skb, ep, abort_arp_failure); 737 req = (struct cpl_abort_req *)skb_put(req_skb, wrlen); 738 memset(req, 0, wrlen); 739 INIT_TP_WR(req, ep->hwtid); 740 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); 741 req->cmd = CPL_ABORT_SEND_RST; 742 return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t); 743 } 744 745 static void best_mtu(const unsigned short *mtus, unsigned short mtu, 746 unsigned int *idx, int use_ts, int ipv6) 747 { 748 unsigned short hdr_size = (ipv6 ? 749 sizeof(struct ipv6hdr) : 750 sizeof(struct iphdr)) + 751 sizeof(struct tcphdr) + 752 (use_ts ? 
753 round_up(TCPOLEN_TIMESTAMP, 4) : 0); 754 unsigned short data_size = mtu - hdr_size; 755 756 cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx); 757 } 758 759 static int send_connect(struct c4iw_ep *ep) 760 { 761 struct cpl_act_open_req *req = NULL; 762 struct cpl_t5_act_open_req *t5req = NULL; 763 struct cpl_t6_act_open_req *t6req = NULL; 764 struct cpl_act_open_req6 *req6 = NULL; 765 struct cpl_t5_act_open_req6 *t5req6 = NULL; 766 struct cpl_t6_act_open_req6 *t6req6 = NULL; 767 struct sk_buff *skb; 768 u64 opt0; 769 u32 opt2; 770 unsigned int mtu_idx; 771 int wscale; 772 int win, sizev4, sizev6, wrlen; 773 struct sockaddr_in *la = (struct sockaddr_in *) 774 &ep->com.local_addr; 775 struct sockaddr_in *ra = (struct sockaddr_in *) 776 &ep->com.remote_addr; 777 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *) 778 &ep->com.local_addr; 779 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *) 780 &ep->com.remote_addr; 781 int ret; 782 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; 783 u32 isn = (prandom_u32() & ~7UL) - 1; 784 785 switch (CHELSIO_CHIP_VERSION(adapter_type)) { 786 case CHELSIO_T4: 787 sizev4 = sizeof(struct cpl_act_open_req); 788 sizev6 = sizeof(struct cpl_act_open_req6); 789 break; 790 case CHELSIO_T5: 791 sizev4 = sizeof(struct cpl_t5_act_open_req); 792 sizev6 = sizeof(struct cpl_t5_act_open_req6); 793 break; 794 case CHELSIO_T6: 795 sizev4 = sizeof(struct cpl_t6_act_open_req); 796 sizev6 = sizeof(struct cpl_t6_act_open_req6); 797 break; 798 default: 799 pr_err("T%d Chip is not supported\n", 800 CHELSIO_CHIP_VERSION(adapter_type)); 801 return -EINVAL; 802 } 803 804 wrlen = (ep->com.remote_addr.ss_family == AF_INET) ? 805 roundup(sizev4, 16) : 806 roundup(sizev6, 16); 807 808 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); 809 810 skb = get_skb(NULL, wrlen, GFP_KERNEL); 811 if (!skb) { 812 printk(KERN_ERR MOD "%s - failed to alloc skb.\n", 813 __func__); 814 return -ENOMEM; 815 } 816 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 817 818 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 819 enable_tcp_timestamps, 820 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); 821 wscale = compute_wscale(rcv_win); 822 823 /* 824 * Specify the largest window that will fit in opt0. The 825 * remainder will be specified in the rx_data_ack. 826 */ 827 win = ep->rcv_win >> 10; 828 if (win > RCV_BUFSIZ_M) 829 win = RCV_BUFSIZ_M; 830 831 opt0 = (nocong ? 
NO_CONG_F : 0) | 832 KEEP_ALIVE_F | 833 DELACK_F | 834 WND_SCALE_V(wscale) | 835 MSS_IDX_V(mtu_idx) | 836 L2T_IDX_V(ep->l2t->idx) | 837 TX_CHAN_V(ep->tx_chan) | 838 SMAC_SEL_V(ep->smac_idx) | 839 DSCP_V(ep->tos >> 2) | 840 ULP_MODE_V(ULP_MODE_TCPDDP) | 841 RCV_BUFSIZ_V(win); 842 opt2 = RX_CHANNEL_V(0) | 843 CCTRL_ECN_V(enable_ecn) | 844 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); 845 if (enable_tcp_timestamps) 846 opt2 |= TSTAMPS_EN_F; 847 if (enable_tcp_sack) 848 opt2 |= SACK_EN_F; 849 if (wscale && enable_tcp_window_scaling) 850 opt2 |= WND_SCALE_EN_F; 851 if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) { 852 if (peer2peer) 853 isn += 4; 854 855 opt2 |= T5_OPT_2_VALID_F; 856 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); 857 opt2 |= T5_ISS_F; 858 } 859 860 if (ep->com.remote_addr.ss_family == AF_INET6) 861 cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], 862 (const u32 *)&la6->sin6_addr.s6_addr, 1); 863 864 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); 865 866 if (ep->com.remote_addr.ss_family == AF_INET) { 867 switch (CHELSIO_CHIP_VERSION(adapter_type)) { 868 case CHELSIO_T4: 869 req = (struct cpl_act_open_req *)skb_put(skb, wrlen); 870 INIT_TP_WR(req, 0); 871 break; 872 case CHELSIO_T5: 873 t5req = (struct cpl_t5_act_open_req *)skb_put(skb, 874 wrlen); 875 INIT_TP_WR(t5req, 0); 876 req = (struct cpl_act_open_req *)t5req; 877 break; 878 case CHELSIO_T6: 879 t6req = (struct cpl_t6_act_open_req *)skb_put(skb, 880 wrlen); 881 INIT_TP_WR(t6req, 0); 882 req = (struct cpl_act_open_req *)t6req; 883 t5req = (struct cpl_t5_act_open_req *)t6req; 884 break; 885 default: 886 pr_err("T%d Chip is not supported\n", 887 CHELSIO_CHIP_VERSION(adapter_type)); 888 ret = -EINVAL; 889 goto clip_release; 890 } 891 892 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 893 ((ep->rss_qid<<14) | ep->atid))); 894 req->local_port = la->sin_port; 895 req->peer_port = ra->sin_port; 896 req->local_ip = la->sin_addr.s_addr; 897 req->peer_ip = ra->sin_addr.s_addr; 898 req->opt0 = cpu_to_be64(opt0); 899 900 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 901 req->params = cpu_to_be32(cxgb4_select_ntuple( 902 ep->com.dev->rdev.lldi.ports[0], 903 ep->l2t)); 904 req->opt2 = cpu_to_be32(opt2); 905 } else { 906 t5req->params = cpu_to_be64(FILTER_TUPLE_V( 907 cxgb4_select_ntuple( 908 ep->com.dev->rdev.lldi.ports[0], 909 ep->l2t))); 910 t5req->rsvd = cpu_to_be32(isn); 911 PDBG("%s snd_isn %u\n", __func__, t5req->rsvd); 912 t5req->opt2 = cpu_to_be32(opt2); 913 } 914 } else { 915 switch (CHELSIO_CHIP_VERSION(adapter_type)) { 916 case CHELSIO_T4: 917 req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); 918 INIT_TP_WR(req6, 0); 919 break; 920 case CHELSIO_T5: 921 t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb, 922 wrlen); 923 INIT_TP_WR(t5req6, 0); 924 req6 = (struct cpl_act_open_req6 *)t5req6; 925 break; 926 case CHELSIO_T6: 927 t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb, 928 wrlen); 929 INIT_TP_WR(t6req6, 0); 930 req6 = (struct cpl_act_open_req6 *)t6req6; 931 t5req6 = (struct cpl_t5_act_open_req6 *)t6req6; 932 break; 933 default: 934 pr_err("T%d Chip is not supported\n", 935 CHELSIO_CHIP_VERSION(adapter_type)); 936 ret = -EINVAL; 937 goto clip_release; 938 } 939 940 OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, 941 ((ep->rss_qid<<14)|ep->atid))); 942 req6->local_port = la6->sin6_port; 943 req6->peer_port = ra6->sin6_port; 944 req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr)); 945 req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8)); 946 req6->peer_ip_hi = 
*((__be64 *)(ra6->sin6_addr.s6_addr)); 947 req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8)); 948 req6->opt0 = cpu_to_be64(opt0); 949 950 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 951 req6->params = cpu_to_be32(cxgb4_select_ntuple( 952 ep->com.dev->rdev.lldi.ports[0], 953 ep->l2t)); 954 req6->opt2 = cpu_to_be32(opt2); 955 } else { 956 t5req6->params = cpu_to_be64(FILTER_TUPLE_V( 957 cxgb4_select_ntuple( 958 ep->com.dev->rdev.lldi.ports[0], 959 ep->l2t))); 960 t5req6->rsvd = cpu_to_be32(isn); 961 PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd); 962 t5req6->opt2 = cpu_to_be32(opt2); 963 } 964 } 965 966 set_bit(ACT_OPEN_REQ, &ep->com.history); 967 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 968 clip_release: 969 if (ret && ep->com.remote_addr.ss_family == AF_INET6) 970 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 971 (const u32 *)&la6->sin6_addr.s6_addr, 1); 972 return ret; 973 } 974 975 static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, 976 u8 mpa_rev_to_use) 977 { 978 int mpalen, wrlen, ret; 979 struct fw_ofld_tx_data_wr *req; 980 struct mpa_message *mpa; 981 struct mpa_v2_conn_params mpa_v2_params; 982 983 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 984 985 BUG_ON(skb_cloned(skb)); 986 987 mpalen = sizeof(*mpa) + ep->plen; 988 if (mpa_rev_to_use == 2) 989 mpalen += sizeof(struct mpa_v2_conn_params); 990 wrlen = roundup(mpalen + sizeof *req, 16); 991 skb = get_skb(skb, wrlen, GFP_KERNEL); 992 if (!skb) { 993 connect_reply_upcall(ep, -ENOMEM); 994 return -ENOMEM; 995 } 996 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 997 998 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 999 memset(req, 0, wrlen); 1000 req->op_to_immdlen = cpu_to_be32( 1001 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 1002 FW_WR_COMPL_F | 1003 FW_WR_IMMDLEN_V(mpalen)); 1004 req->flowid_len16 = cpu_to_be32( 1005 FW_WR_FLOWID_V(ep->hwtid) | 1006 FW_WR_LEN16_V(wrlen >> 4)); 1007 req->plen = cpu_to_be32(mpalen); 1008 req->tunnel_to_proxy = cpu_to_be32( 1009 FW_OFLD_TX_DATA_WR_FLUSH_F | 1010 FW_OFLD_TX_DATA_WR_SHOVE_F); 1011 1012 mpa = (struct mpa_message *)(req + 1); 1013 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 1014 1015 mpa->flags = 0; 1016 if (crc_enabled) 1017 mpa->flags |= MPA_CRC; 1018 if (markers_enabled) { 1019 mpa->flags |= MPA_MARKERS; 1020 ep->mpa_attr.recv_marker_enabled = 1; 1021 } else { 1022 ep->mpa_attr.recv_marker_enabled = 0; 1023 } 1024 if (mpa_rev_to_use == 2) 1025 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1026 1027 mpa->private_data_size = htons(ep->plen); 1028 mpa->revision = mpa_rev_to_use; 1029 if (mpa_rev_to_use == 1) { 1030 ep->tried_with_mpa_v1 = 1; 1031 ep->retry_with_mpa_v1 = 0; 1032 } 1033 1034 if (mpa_rev_to_use == 2) { 1035 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1036 sizeof (struct mpa_v2_conn_params)); 1037 PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, 1038 ep->ord); 1039 mpa_v2_params.ird = htons((u16)ep->ird); 1040 mpa_v2_params.ord = htons((u16)ep->ord); 1041 1042 if (peer2peer) { 1043 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1044 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 1045 mpa_v2_params.ord |= 1046 htons(MPA_V2_RDMA_WRITE_RTR); 1047 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 1048 mpa_v2_params.ord |= 1049 htons(MPA_V2_RDMA_READ_RTR); 1050 } 1051 memcpy(mpa->private_data, &mpa_v2_params, 1052 sizeof(struct mpa_v2_conn_params)); 1053 1054 if (ep->plen) 1055 memcpy(mpa->private_data + 1056 sizeof(struct mpa_v2_conn_params), 1057 ep->mpa_pkt + 
sizeof(*mpa), ep->plen); 1058 } else 1059 if (ep->plen) 1060 memcpy(mpa->private_data, 1061 ep->mpa_pkt + sizeof(*mpa), ep->plen); 1062 1063 /* 1064 * Reference the mpa skb. This ensures the data area 1065 * will remain in memory until the hw acks the tx. 1066 * Function fw4_ack() will deref it. 1067 */ 1068 skb_get(skb); 1069 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 1070 BUG_ON(ep->mpa_skb); 1071 ep->mpa_skb = skb; 1072 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1073 if (ret) 1074 return ret; 1075 start_ep_timer(ep); 1076 __state_set(&ep->com, MPA_REQ_SENT); 1077 ep->mpa_attr.initiator = 1; 1078 ep->snd_seq += mpalen; 1079 return ret; 1080 } 1081 1082 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 1083 { 1084 int mpalen, wrlen; 1085 struct fw_ofld_tx_data_wr *req; 1086 struct mpa_message *mpa; 1087 struct sk_buff *skb; 1088 struct mpa_v2_conn_params mpa_v2_params; 1089 1090 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 1091 1092 mpalen = sizeof(*mpa) + plen; 1093 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) 1094 mpalen += sizeof(struct mpa_v2_conn_params); 1095 wrlen = roundup(mpalen + sizeof *req, 16); 1096 1097 skb = get_skb(NULL, wrlen, GFP_KERNEL); 1098 if (!skb) { 1099 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 1100 return -ENOMEM; 1101 } 1102 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1103 1104 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 1105 memset(req, 0, wrlen); 1106 req->op_to_immdlen = cpu_to_be32( 1107 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 1108 FW_WR_COMPL_F | 1109 FW_WR_IMMDLEN_V(mpalen)); 1110 req->flowid_len16 = cpu_to_be32( 1111 FW_WR_FLOWID_V(ep->hwtid) | 1112 FW_WR_LEN16_V(wrlen >> 4)); 1113 req->plen = cpu_to_be32(mpalen); 1114 req->tunnel_to_proxy = cpu_to_be32( 1115 FW_OFLD_TX_DATA_WR_FLUSH_F | 1116 FW_OFLD_TX_DATA_WR_SHOVE_F); 1117 1118 mpa = (struct mpa_message *)(req + 1); 1119 memset(mpa, 0, sizeof(*mpa)); 1120 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1121 mpa->flags = MPA_REJECT; 1122 mpa->revision = ep->mpa_attr.version; 1123 mpa->private_data_size = htons(plen); 1124 1125 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1126 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1127 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1128 sizeof (struct mpa_v2_conn_params)); 1129 mpa_v2_params.ird = htons(((u16)ep->ird) | 1130 (peer2peer ? MPA_V2_PEER2PEER_MODEL : 1131 0)); 1132 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? 1133 (p2p_type == 1134 FW_RI_INIT_P2PTYPE_RDMA_WRITE ? 1135 MPA_V2_RDMA_WRITE_RTR : p2p_type == 1136 FW_RI_INIT_P2PTYPE_READ_REQ ? 1137 MPA_V2_RDMA_READ_RTR : 0) : 0)); 1138 memcpy(mpa->private_data, &mpa_v2_params, 1139 sizeof(struct mpa_v2_conn_params)); 1140 1141 if (ep->plen) 1142 memcpy(mpa->private_data + 1143 sizeof(struct mpa_v2_conn_params), pdata, plen); 1144 } else 1145 if (plen) 1146 memcpy(mpa->private_data, pdata, plen); 1147 1148 /* 1149 * Reference the mpa skb again. This ensures the data area 1150 * will remain in memory until the hw acks the tx. 1151 * Function fw4_ack() will deref it. 
1152 */ 1153 skb_get(skb); 1154 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1155 t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure); 1156 BUG_ON(ep->mpa_skb); 1157 ep->mpa_skb = skb; 1158 ep->snd_seq += mpalen; 1159 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1160 } 1161 1162 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 1163 { 1164 int mpalen, wrlen; 1165 struct fw_ofld_tx_data_wr *req; 1166 struct mpa_message *mpa; 1167 struct sk_buff *skb; 1168 struct mpa_v2_conn_params mpa_v2_params; 1169 1170 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 1171 1172 mpalen = sizeof(*mpa) + plen; 1173 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) 1174 mpalen += sizeof(struct mpa_v2_conn_params); 1175 wrlen = roundup(mpalen + sizeof *req, 16); 1176 1177 skb = get_skb(NULL, wrlen, GFP_KERNEL); 1178 if (!skb) { 1179 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 1180 return -ENOMEM; 1181 } 1182 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1183 1184 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); 1185 memset(req, 0, wrlen); 1186 req->op_to_immdlen = cpu_to_be32( 1187 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 1188 FW_WR_COMPL_F | 1189 FW_WR_IMMDLEN_V(mpalen)); 1190 req->flowid_len16 = cpu_to_be32( 1191 FW_WR_FLOWID_V(ep->hwtid) | 1192 FW_WR_LEN16_V(wrlen >> 4)); 1193 req->plen = cpu_to_be32(mpalen); 1194 req->tunnel_to_proxy = cpu_to_be32( 1195 FW_OFLD_TX_DATA_WR_FLUSH_F | 1196 FW_OFLD_TX_DATA_WR_SHOVE_F); 1197 1198 mpa = (struct mpa_message *)(req + 1); 1199 memset(mpa, 0, sizeof(*mpa)); 1200 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1201 mpa->flags = 0; 1202 if (ep->mpa_attr.crc_enabled) 1203 mpa->flags |= MPA_CRC; 1204 if (ep->mpa_attr.recv_marker_enabled) 1205 mpa->flags |= MPA_MARKERS; 1206 mpa->revision = ep->mpa_attr.version; 1207 mpa->private_data_size = htons(plen); 1208 1209 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1210 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1211 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1212 sizeof (struct mpa_v2_conn_params)); 1213 mpa_v2_params.ird = htons((u16)ep->ird); 1214 mpa_v2_params.ord = htons((u16)ep->ord); 1215 if (peer2peer && (ep->mpa_attr.p2p_type != 1216 FW_RI_INIT_P2PTYPE_DISABLED)) { 1217 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1218 1219 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 1220 mpa_v2_params.ord |= 1221 htons(MPA_V2_RDMA_WRITE_RTR); 1222 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 1223 mpa_v2_params.ord |= 1224 htons(MPA_V2_RDMA_READ_RTR); 1225 } 1226 1227 memcpy(mpa->private_data, &mpa_v2_params, 1228 sizeof(struct mpa_v2_conn_params)); 1229 1230 if (ep->plen) 1231 memcpy(mpa->private_data + 1232 sizeof(struct mpa_v2_conn_params), pdata, plen); 1233 } else 1234 if (plen) 1235 memcpy(mpa->private_data, pdata, plen); 1236 1237 /* 1238 * Reference the mpa skb. This ensures the data area 1239 * will remain in memory until the hw acks the tx. 1240 * Function fw4_ack() will deref it. 
1241 */ 1242 skb_get(skb); 1243 t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure); 1244 ep->mpa_skb = skb; 1245 __state_set(&ep->com, MPA_REP_SENT); 1246 ep->snd_seq += mpalen; 1247 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1248 } 1249 1250 static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) 1251 { 1252 struct c4iw_ep *ep; 1253 struct cpl_act_establish *req = cplhdr(skb); 1254 unsigned int tid = GET_TID(req); 1255 unsigned int atid = TID_TID_G(ntohl(req->tos_atid)); 1256 struct tid_info *t = dev->rdev.lldi.tids; 1257 int ret; 1258 1259 ep = lookup_atid(t, atid); 1260 1261 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, 1262 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); 1263 1264 mutex_lock(&ep->com.mutex); 1265 dst_confirm(ep->dst); 1266 1267 /* setup the hwtid for this connection */ 1268 ep->hwtid = tid; 1269 cxgb4_insert_tid(t, ep, tid); 1270 insert_ep_tid(ep); 1271 1272 ep->snd_seq = be32_to_cpu(req->snd_isn); 1273 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 1274 1275 set_emss(ep, ntohs(req->tcp_opt)); 1276 1277 /* dealloc the atid */ 1278 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 1279 cxgb4_free_atid(t, atid); 1280 set_bit(ACT_ESTAB, &ep->com.history); 1281 1282 /* start MPA negotiation */ 1283 ret = send_flowc(ep); 1284 if (ret) 1285 goto err; 1286 if (ep->retry_with_mpa_v1) 1287 ret = send_mpa_req(ep, skb, 1); 1288 else 1289 ret = send_mpa_req(ep, skb, mpa_rev); 1290 if (ret) 1291 goto err; 1292 mutex_unlock(&ep->com.mutex); 1293 return 0; 1294 err: 1295 mutex_unlock(&ep->com.mutex); 1296 connect_reply_upcall(ep, -ENOMEM); 1297 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1298 return 0; 1299 } 1300 1301 static void close_complete_upcall(struct c4iw_ep *ep, int status) 1302 { 1303 struct iw_cm_event event; 1304 1305 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1306 memset(&event, 0, sizeof(event)); 1307 event.event = IW_CM_EVENT_CLOSE; 1308 event.status = status; 1309 if (ep->com.cm_id) { 1310 PDBG("close complete delivered ep %p cm_id %p tid %u\n", 1311 ep, ep->com.cm_id, ep->hwtid); 1312 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1313 deref_cm_id(&ep->com); 1314 set_bit(CLOSE_UPCALL, &ep->com.history); 1315 } 1316 } 1317 1318 static void peer_close_upcall(struct c4iw_ep *ep) 1319 { 1320 struct iw_cm_event event; 1321 1322 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1323 memset(&event, 0, sizeof(event)); 1324 event.event = IW_CM_EVENT_DISCONNECT; 1325 if (ep->com.cm_id) { 1326 PDBG("peer close delivered ep %p cm_id %p tid %u\n", 1327 ep, ep->com.cm_id, ep->hwtid); 1328 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1329 set_bit(DISCONN_UPCALL, &ep->com.history); 1330 } 1331 } 1332 1333 static void peer_abort_upcall(struct c4iw_ep *ep) 1334 { 1335 struct iw_cm_event event; 1336 1337 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1338 memset(&event, 0, sizeof(event)); 1339 event.event = IW_CM_EVENT_CLOSE; 1340 event.status = -ECONNRESET; 1341 if (ep->com.cm_id) { 1342 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, 1343 ep->com.cm_id, ep->hwtid); 1344 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1345 deref_cm_id(&ep->com); 1346 set_bit(ABORT_UPCALL, &ep->com.history); 1347 } 1348 } 1349 1350 static void connect_reply_upcall(struct c4iw_ep *ep, int status) 1351 { 1352 struct iw_cm_event event; 1353 1354 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); 1355 memset(&event, 0, sizeof(event)); 1356 event.event = IW_CM_EVENT_CONNECT_REPLY; 1357 
event.status = status; 1358 memcpy(&event.local_addr, &ep->com.local_addr, 1359 sizeof(ep->com.local_addr)); 1360 memcpy(&event.remote_addr, &ep->com.remote_addr, 1361 sizeof(ep->com.remote_addr)); 1362 1363 if ((status == 0) || (status == -ECONNREFUSED)) { 1364 if (!ep->tried_with_mpa_v1) { 1365 /* this means MPA_v2 is used */ 1366 event.ord = ep->ird; 1367 event.ird = ep->ord; 1368 event.private_data_len = ep->plen - 1369 sizeof(struct mpa_v2_conn_params); 1370 event.private_data = ep->mpa_pkt + 1371 sizeof(struct mpa_message) + 1372 sizeof(struct mpa_v2_conn_params); 1373 } else { 1374 /* this means MPA_v1 is used */ 1375 event.ord = cur_max_read_depth(ep->com.dev); 1376 event.ird = cur_max_read_depth(ep->com.dev); 1377 event.private_data_len = ep->plen; 1378 event.private_data = ep->mpa_pkt + 1379 sizeof(struct mpa_message); 1380 } 1381 } 1382 1383 PDBG("%s ep %p tid %u status %d\n", __func__, ep, 1384 ep->hwtid, status); 1385 set_bit(CONN_RPL_UPCALL, &ep->com.history); 1386 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1387 1388 if (status < 0) 1389 deref_cm_id(&ep->com); 1390 } 1391 1392 static int connect_request_upcall(struct c4iw_ep *ep) 1393 { 1394 struct iw_cm_event event; 1395 int ret; 1396 1397 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1398 memset(&event, 0, sizeof(event)); 1399 event.event = IW_CM_EVENT_CONNECT_REQUEST; 1400 memcpy(&event.local_addr, &ep->com.local_addr, 1401 sizeof(ep->com.local_addr)); 1402 memcpy(&event.remote_addr, &ep->com.remote_addr, 1403 sizeof(ep->com.remote_addr)); 1404 event.provider_data = ep; 1405 if (!ep->tried_with_mpa_v1) { 1406 /* this means MPA_v2 is used */ 1407 event.ord = ep->ord; 1408 event.ird = ep->ird; 1409 event.private_data_len = ep->plen - 1410 sizeof(struct mpa_v2_conn_params); 1411 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + 1412 sizeof(struct mpa_v2_conn_params); 1413 } else { 1414 /* this means MPA_v1 is used. 
Send max supported */ 1415 event.ord = cur_max_read_depth(ep->com.dev); 1416 event.ird = cur_max_read_depth(ep->com.dev); 1417 event.private_data_len = ep->plen; 1418 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 1419 } 1420 c4iw_get_ep(&ep->com); 1421 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, 1422 &event); 1423 if (ret) 1424 c4iw_put_ep(&ep->com); 1425 set_bit(CONNREQ_UPCALL, &ep->com.history); 1426 c4iw_put_ep(&ep->parent_ep->com); 1427 return ret; 1428 } 1429 1430 static void established_upcall(struct c4iw_ep *ep) 1431 { 1432 struct iw_cm_event event; 1433 1434 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1435 memset(&event, 0, sizeof(event)); 1436 event.event = IW_CM_EVENT_ESTABLISHED; 1437 event.ird = ep->ord; 1438 event.ord = ep->ird; 1439 if (ep->com.cm_id) { 1440 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1441 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1442 set_bit(ESTAB_UPCALL, &ep->com.history); 1443 } 1444 } 1445 1446 static int update_rx_credits(struct c4iw_ep *ep, u32 credits) 1447 { 1448 struct cpl_rx_data_ack *req; 1449 struct sk_buff *skb; 1450 int wrlen = roundup(sizeof *req, 16); 1451 1452 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 1453 skb = get_skb(NULL, wrlen, GFP_KERNEL); 1454 if (!skb) { 1455 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); 1456 return 0; 1457 } 1458 1459 /* 1460 * If we couldn't specify the entire rcv window at connection setup 1461 * due to the limit in the number of bits in the RCV_BUFSIZ field, 1462 * then add the overage in to the credits returned. 1463 */ 1464 if (ep->rcv_win > RCV_BUFSIZ_M * 1024) 1465 credits += ep->rcv_win - RCV_BUFSIZ_M * 1024; 1466 1467 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); 1468 memset(req, 0, wrlen); 1469 INIT_TP_WR(req, ep->hwtid); 1470 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, 1471 ep->hwtid)); 1472 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F | 1473 RX_DACK_CHANGE_F | 1474 RX_DACK_MODE_V(dack_mode)); 1475 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); 1476 c4iw_ofld_send(&ep->com.dev->rdev, skb); 1477 return credits; 1478 } 1479 1480 #define RELAXED_IRD_NEGOTIATION 1 1481 1482 /* 1483 * process_mpa_reply - process streaming mode MPA reply 1484 * 1485 * Returns: 1486 * 1487 * 0 upon success indicating a connect request was delivered to the ULP 1488 * or the mpa request is incomplete but valid so far. 1489 * 1490 * 1 if a failure requires the caller to close the connection. 1491 * 1492 * 2 if a failure requires the caller to abort the connection. 1493 */ 1494 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) 1495 { 1496 struct mpa_message *mpa; 1497 struct mpa_v2_conn_params *mpa_v2_params; 1498 u16 plen; 1499 u16 resp_ird, resp_ord; 1500 u8 rtr_mismatch = 0, insuff_ird = 0; 1501 struct c4iw_qp_attributes attrs; 1502 enum c4iw_qp_attr_mask mask; 1503 int err; 1504 int disconnect = 0; 1505 1506 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1507 1508 /* 1509 * If we get more than the supported amount of private data 1510 * then we must fail this connection. 1511 */ 1512 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 1513 err = -EINVAL; 1514 goto err_stop_timer; 1515 } 1516 1517 /* 1518 * copy the new data into our accumulation buffer. 
1519 */ 1520 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1521 skb->len); 1522 ep->mpa_pkt_len += skb->len; 1523 1524 /* 1525 * if we don't even have the mpa message, then bail. 1526 */ 1527 if (ep->mpa_pkt_len < sizeof(*mpa)) 1528 return 0; 1529 mpa = (struct mpa_message *) ep->mpa_pkt; 1530 1531 /* Validate MPA header. */ 1532 if (mpa->revision > mpa_rev) { 1533 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," 1534 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1535 err = -EPROTO; 1536 goto err_stop_timer; 1537 } 1538 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 1539 err = -EPROTO; 1540 goto err_stop_timer; 1541 } 1542 1543 plen = ntohs(mpa->private_data_size); 1544 1545 /* 1546 * Fail if there's too much private data. 1547 */ 1548 if (plen > MPA_MAX_PRIVATE_DATA) { 1549 err = -EPROTO; 1550 goto err_stop_timer; 1551 } 1552 1553 /* 1554 * If plen does not account for pkt size 1555 */ 1556 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1557 err = -EPROTO; 1558 goto err_stop_timer; 1559 } 1560 1561 ep->plen = (u8) plen; 1562 1563 /* 1564 * If we don't have all the pdata yet, then bail. 1565 * We'll continue process when more data arrives. 1566 */ 1567 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1568 return 0; 1569 1570 if (mpa->flags & MPA_REJECT) { 1571 err = -ECONNREFUSED; 1572 goto err_stop_timer; 1573 } 1574 1575 /* 1576 * Stop mpa timer. If it expired, then 1577 * we ignore the MPA reply. process_timeout() 1578 * will abort the connection. 1579 */ 1580 if (stop_ep_timer(ep)) 1581 return 0; 1582 1583 /* 1584 * If we get here we have accumulated the entire mpa 1585 * start reply message including private data. And 1586 * the MPA header is valid. 1587 */ 1588 __state_set(&ep->com, FPDU_MODE); 1589 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1590 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1591 ep->mpa_attr.version = mpa->revision; 1592 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1593 1594 if (mpa->revision == 2) { 1595 ep->mpa_attr.enhanced_rdma_conn = 1596 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1597 if (ep->mpa_attr.enhanced_rdma_conn) { 1598 mpa_v2_params = (struct mpa_v2_conn_params *) 1599 (ep->mpa_pkt + sizeof(*mpa)); 1600 resp_ird = ntohs(mpa_v2_params->ird) & 1601 MPA_V2_IRD_ORD_MASK; 1602 resp_ord = ntohs(mpa_v2_params->ord) & 1603 MPA_V2_IRD_ORD_MASK; 1604 PDBG("%s responder ird %u ord %u ep ird %u ord %u\n", 1605 __func__, resp_ird, resp_ord, ep->ird, ep->ord); 1606 1607 /* 1608 * This is a double-check. 
Ideally, below checks are 1609 * not required since ird/ord stuff has been taken 1610 * care of in c4iw_accept_cr 1611 */ 1612 if (ep->ird < resp_ord) { 1613 if (RELAXED_IRD_NEGOTIATION && resp_ord <= 1614 ep->com.dev->rdev.lldi.max_ordird_qp) 1615 ep->ird = resp_ord; 1616 else 1617 insuff_ird = 1; 1618 } else if (ep->ird > resp_ord) { 1619 ep->ird = resp_ord; 1620 } 1621 if (ep->ord > resp_ird) { 1622 if (RELAXED_IRD_NEGOTIATION) 1623 ep->ord = resp_ird; 1624 else 1625 insuff_ird = 1; 1626 } 1627 if (insuff_ird) { 1628 err = -ENOMEM; 1629 ep->ird = resp_ord; 1630 ep->ord = resp_ird; 1631 } 1632 1633 if (ntohs(mpa_v2_params->ird) & 1634 MPA_V2_PEER2PEER_MODEL) { 1635 if (ntohs(mpa_v2_params->ord) & 1636 MPA_V2_RDMA_WRITE_RTR) 1637 ep->mpa_attr.p2p_type = 1638 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1639 else if (ntohs(mpa_v2_params->ord) & 1640 MPA_V2_RDMA_READ_RTR) 1641 ep->mpa_attr.p2p_type = 1642 FW_RI_INIT_P2PTYPE_READ_REQ; 1643 } 1644 } 1645 } else if (mpa->revision == 1) 1646 if (peer2peer) 1647 ep->mpa_attr.p2p_type = p2p_type; 1648 1649 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1650 "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = " 1651 "%d\n", __func__, ep->mpa_attr.crc_enabled, 1652 ep->mpa_attr.recv_marker_enabled, 1653 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1654 ep->mpa_attr.p2p_type, p2p_type); 1655 1656 /* 1657 * If responder's RTR does not match with that of initiator, assign 1658 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not 1659 * generated when moving QP to RTS state. 1660 * A TERM message will be sent after QP has moved to RTS state 1661 */ 1662 if ((ep->mpa_attr.version == 2) && peer2peer && 1663 (ep->mpa_attr.p2p_type != p2p_type)) { 1664 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1665 rtr_mismatch = 1; 1666 } 1667 1668 attrs.mpa_attr = ep->mpa_attr; 1669 attrs.max_ird = ep->ird; 1670 attrs.max_ord = ep->ord; 1671 attrs.llp_stream_handle = ep; 1672 attrs.next_state = C4IW_QP_STATE_RTS; 1673 1674 mask = C4IW_QP_ATTR_NEXT_STATE | 1675 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 1676 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 1677 1678 /* bind QP and TID with INIT_WR */ 1679 err = c4iw_modify_qp(ep->com.qp->rhp, 1680 ep->com.qp, mask, &attrs, 1); 1681 if (err) 1682 goto err; 1683 1684 /* 1685 * If responder's RTR requirement did not match with what initiator 1686 * supports, generate TERM message 1687 */ 1688 if (rtr_mismatch) { 1689 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); 1690 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1691 attrs.ecode = MPA_NOMATCH_RTR; 1692 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1693 attrs.send_term = 1; 1694 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1695 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1696 err = -ENOMEM; 1697 disconnect = 1; 1698 goto out; 1699 } 1700 1701 /* 1702 * Generate TERM if initiator IRD is not sufficient for responder 1703 * provided ORD. Currently, we do the same behaviour even when 1704 * responder provided IRD is also not sufficient as regards to 1705 * initiator ORD. 
1706 */ 1707 if (insuff_ird) { 1708 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", 1709 __func__); 1710 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1711 attrs.ecode = MPA_INSUFF_IRD; 1712 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1713 attrs.send_term = 1; 1714 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1715 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1716 err = -ENOMEM; 1717 disconnect = 1; 1718 goto out; 1719 } 1720 goto out; 1721 err_stop_timer: 1722 stop_ep_timer(ep); 1723 err: 1724 disconnect = 2; 1725 out: 1726 connect_reply_upcall(ep, err); 1727 return disconnect; 1728 } 1729 1730 /* 1731 * process_mpa_request - process streaming mode MPA request 1732 * 1733 * Returns: 1734 * 1735 * 0 upon success indicating a connect request was delivered to the ULP 1736 * or the mpa request is incomplete but valid so far. 1737 * 1738 * 1 if a failure requires the caller to close the connection. 1739 * 1740 * 2 if a failure requires the caller to abort the connection. 1741 */ 1742 static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) 1743 { 1744 struct mpa_message *mpa; 1745 struct mpa_v2_conn_params *mpa_v2_params; 1746 u16 plen; 1747 1748 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1749 1750 /* 1751 * If we get more than the supported amount of private data 1752 * then we must fail this connection. 1753 */ 1754 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) 1755 goto err_stop_timer; 1756 1757 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1758 1759 /* 1760 * Copy the new data into our accumulation buffer. 1761 */ 1762 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1763 skb->len); 1764 ep->mpa_pkt_len += skb->len; 1765 1766 /* 1767 * If we don't even have the mpa message, then bail. 1768 * We'll continue process when more data arrives. 1769 */ 1770 if (ep->mpa_pkt_len < sizeof(*mpa)) 1771 return 0; 1772 1773 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1774 mpa = (struct mpa_message *) ep->mpa_pkt; 1775 1776 /* 1777 * Validate MPA Header. 1778 */ 1779 if (mpa->revision > mpa_rev) { 1780 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," 1781 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1782 goto err_stop_timer; 1783 } 1784 1785 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) 1786 goto err_stop_timer; 1787 1788 plen = ntohs(mpa->private_data_size); 1789 1790 /* 1791 * Fail if there's too much private data. 1792 */ 1793 if (plen > MPA_MAX_PRIVATE_DATA) 1794 goto err_stop_timer; 1795 1796 /* 1797 * If plen does not account for pkt size 1798 */ 1799 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) 1800 goto err_stop_timer; 1801 ep->plen = (u8) plen; 1802 1803 /* 1804 * If we don't have all the pdata yet, then bail. 1805 */ 1806 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1807 return 0; 1808 1809 /* 1810 * If we get here we have accumulated the entire mpa 1811 * start reply message including private data. 1812 */ 1813 ep->mpa_attr.initiator = 0; 1814 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1815 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1816 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1817 ep->mpa_attr.version = mpa->revision; 1818 if (mpa->revision == 1) 1819 ep->tried_with_mpa_v1 = 1; 1820 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1821 1822 if (mpa->revision == 2) { 1823 ep->mpa_attr.enhanced_rdma_conn = 1824 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 
1 : 0; 1825 if (ep->mpa_attr.enhanced_rdma_conn) { 1826 mpa_v2_params = (struct mpa_v2_conn_params *) 1827 (ep->mpa_pkt + sizeof(*mpa)); 1828 ep->ird = ntohs(mpa_v2_params->ird) & 1829 MPA_V2_IRD_ORD_MASK; 1830 ep->ird = min_t(u32, ep->ird, 1831 cur_max_read_depth(ep->com.dev)); 1832 ep->ord = ntohs(mpa_v2_params->ord) & 1833 MPA_V2_IRD_ORD_MASK; 1834 ep->ord = min_t(u32, ep->ord, 1835 cur_max_read_depth(ep->com.dev)); 1836 PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, 1837 ep->ord); 1838 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) 1839 if (peer2peer) { 1840 if (ntohs(mpa_v2_params->ord) & 1841 MPA_V2_RDMA_WRITE_RTR) 1842 ep->mpa_attr.p2p_type = 1843 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1844 else if (ntohs(mpa_v2_params->ord) & 1845 MPA_V2_RDMA_READ_RTR) 1846 ep->mpa_attr.p2p_type = 1847 FW_RI_INIT_P2PTYPE_READ_REQ; 1848 } 1849 } 1850 } else if (mpa->revision == 1) 1851 if (peer2peer) 1852 ep->mpa_attr.p2p_type = p2p_type; 1853 1854 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1855 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, 1856 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1857 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1858 ep->mpa_attr.p2p_type); 1859 1860 __state_set(&ep->com, MPA_REQ_RCVD); 1861 1862 /* drive upcall */ 1863 mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING); 1864 if (ep->parent_ep->com.state != DEAD) { 1865 if (connect_request_upcall(ep)) 1866 goto err_unlock_parent; 1867 } else { 1868 goto err_unlock_parent; 1869 } 1870 mutex_unlock(&ep->parent_ep->com.mutex); 1871 return 0; 1872 1873 err_unlock_parent: 1874 mutex_unlock(&ep->parent_ep->com.mutex); 1875 goto err_out; 1876 err_stop_timer: 1877 (void)stop_ep_timer(ep); 1878 err_out: 1879 return 2; 1880 } 1881 1882 static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) 1883 { 1884 struct c4iw_ep *ep; 1885 struct cpl_rx_data *hdr = cplhdr(skb); 1886 unsigned int dlen = ntohs(hdr->len); 1887 unsigned int tid = GET_TID(hdr); 1888 __u8 status = hdr->status; 1889 int disconnect = 0; 1890 1891 ep = get_ep_from_tid(dev, tid); 1892 if (!ep) 1893 return 0; 1894 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); 1895 skb_pull(skb, sizeof(*hdr)); 1896 skb_trim(skb, dlen); 1897 mutex_lock(&ep->com.mutex); 1898 1899 /* update RX credits */ 1900 update_rx_credits(ep, dlen); 1901 1902 switch (ep->com.state) { 1903 case MPA_REQ_SENT: 1904 ep->rcv_seq += dlen; 1905 disconnect = process_mpa_reply(ep, skb); 1906 break; 1907 case MPA_REQ_WAIT: 1908 ep->rcv_seq += dlen; 1909 disconnect = process_mpa_request(ep, skb); 1910 break; 1911 case FPDU_MODE: { 1912 struct c4iw_qp_attributes attrs; 1913 BUG_ON(!ep->com.qp); 1914 if (status) 1915 pr_err("%s Unexpected streaming data." 
\ 1916 " qpid %u ep %p state %d tid %u status %d\n", 1917 __func__, ep->com.qp->wq.sq.qid, ep, 1918 ep->com.state, ep->hwtid, status); 1919 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1920 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1921 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1922 disconnect = 1; 1923 break; 1924 } 1925 default: 1926 break; 1927 } 1928 mutex_unlock(&ep->com.mutex); 1929 if (disconnect) 1930 c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL); 1931 c4iw_put_ep(&ep->com); 1932 return 0; 1933 } 1934 1935 static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1936 { 1937 struct c4iw_ep *ep; 1938 struct cpl_abort_rpl_rss *rpl = cplhdr(skb); 1939 int release = 0; 1940 unsigned int tid = GET_TID(rpl); 1941 1942 ep = get_ep_from_tid(dev, tid); 1943 if (!ep) { 1944 printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n"); 1945 return 0; 1946 } 1947 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1948 mutex_lock(&ep->com.mutex); 1949 switch (ep->com.state) { 1950 case ABORTING: 1951 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 1952 __state_set(&ep->com, DEAD); 1953 release = 1; 1954 break; 1955 default: 1956 printk(KERN_ERR "%s ep %p state %d\n", 1957 __func__, ep, ep->com.state); 1958 break; 1959 } 1960 mutex_unlock(&ep->com.mutex); 1961 1962 if (release) 1963 release_ep_resources(ep); 1964 c4iw_put_ep(&ep->com); 1965 return 0; 1966 } 1967 1968 static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) 1969 { 1970 struct sk_buff *skb; 1971 struct fw_ofld_connection_wr *req; 1972 unsigned int mtu_idx; 1973 int wscale; 1974 struct sockaddr_in *sin; 1975 int win; 1976 1977 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1978 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); 1979 memset(req, 0, sizeof(*req)); 1980 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); 1981 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); 1982 req->le.filter = cpu_to_be32(cxgb4_select_ntuple( 1983 ep->com.dev->rdev.lldi.ports[0], 1984 ep->l2t)); 1985 sin = (struct sockaddr_in *)&ep->com.local_addr; 1986 req->le.lport = sin->sin_port; 1987 req->le.u.ipv4.lip = sin->sin_addr.s_addr; 1988 sin = (struct sockaddr_in *)&ep->com.remote_addr; 1989 req->le.pport = sin->sin_port; 1990 req->le.u.ipv4.pip = sin->sin_addr.s_addr; 1991 req->tcb.t_state_to_astid = 1992 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) | 1993 FW_OFLD_CONNECTION_WR_ASTID_V(atid)); 1994 req->tcb.cplrxdataack_cplpassacceptrpl = 1995 htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F); 1996 req->tcb.tx_max = (__force __be32) jiffies; 1997 req->tcb.rcv_adv = htons(1); 1998 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 1999 enable_tcp_timestamps, 2000 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); 2001 wscale = compute_wscale(rcv_win); 2002 2003 /* 2004 * Specify the largest window that will fit in opt0. The 2005 * remainder will be specified in the rx_data_ack. 2006 */ 2007 win = ep->rcv_win >> 10; 2008 if (win > RCV_BUFSIZ_M) 2009 win = RCV_BUFSIZ_M; 2010 2011 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F | 2012 (nocong ? 
NO_CONG_F : 0) | 2013 KEEP_ALIVE_F | 2014 DELACK_F | 2015 WND_SCALE_V(wscale) | 2016 MSS_IDX_V(mtu_idx) | 2017 L2T_IDX_V(ep->l2t->idx) | 2018 TX_CHAN_V(ep->tx_chan) | 2019 SMAC_SEL_V(ep->smac_idx) | 2020 DSCP_V(ep->tos >> 2) | 2021 ULP_MODE_V(ULP_MODE_TCPDDP) | 2022 RCV_BUFSIZ_V(win)); 2023 req->tcb.opt2 = (__force __be32) (PACE_V(1) | 2024 TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | 2025 RX_CHANNEL_V(0) | 2026 CCTRL_ECN_V(enable_ecn) | 2027 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid)); 2028 if (enable_tcp_timestamps) 2029 req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F; 2030 if (enable_tcp_sack) 2031 req->tcb.opt2 |= (__force __be32)SACK_EN_F; 2032 if (wscale && enable_tcp_window_scaling) 2033 req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F; 2034 req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0); 2035 req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2); 2036 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); 2037 set_bit(ACT_OFLD_CONN, &ep->com.history); 2038 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 2039 } 2040 2041 /* 2042 * Some of the error codes above implicitly indicate that there is no TID 2043 * allocated with the result of an ACT_OPEN. We use this predicate to make 2044 * that explicit. 2045 */ 2046 static inline int act_open_has_tid(int status) 2047 { 2048 return (status != CPL_ERR_TCAM_PARITY && 2049 status != CPL_ERR_TCAM_MISS && 2050 status != CPL_ERR_TCAM_FULL && 2051 status != CPL_ERR_CONN_EXIST_SYNRECV && 2052 status != CPL_ERR_CONN_EXIST); 2053 } 2054 2055 /* Returns whether a CPL status conveys negative advice. 2056 */ 2057 static int is_neg_adv(unsigned int status) 2058 { 2059 return status == CPL_ERR_RTX_NEG_ADVICE || 2060 status == CPL_ERR_PERSIST_NEG_ADVICE || 2061 status == CPL_ERR_KEEPALV_NEG_ADVICE; 2062 } 2063 2064 static char *neg_adv_str(unsigned int status) 2065 { 2066 switch (status) { 2067 case CPL_ERR_RTX_NEG_ADVICE: 2068 return "Retransmit timeout"; 2069 case CPL_ERR_PERSIST_NEG_ADVICE: 2070 return "Persist timeout"; 2071 case CPL_ERR_KEEPALV_NEG_ADVICE: 2072 return "Keepalive timeout"; 2073 default: 2074 return "Unknown"; 2075 } 2076 } 2077 2078 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi) 2079 { 2080 ep->snd_win = snd_win; 2081 ep->rcv_win = rcv_win; 2082 PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win); 2083 } 2084 2085 #define ACT_OPEN_RETRY_COUNT 2 2086 2087 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, 2088 struct dst_entry *dst, struct c4iw_dev *cdev, 2089 bool clear_mpa_v1, enum chip_type adapter_type, u8 tos) 2090 { 2091 struct neighbour *n; 2092 int err, step; 2093 struct net_device *pdev; 2094 2095 n = dst_neigh_lookup(dst, peer_ip); 2096 if (!n) 2097 return -ENODEV; 2098 2099 rcu_read_lock(); 2100 err = -ENOMEM; 2101 if (n->dev->flags & IFF_LOOPBACK) { 2102 if (iptype == 4) 2103 pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip); 2104 else if (IS_ENABLED(CONFIG_IPV6)) 2105 for_each_netdev(&init_net, pdev) { 2106 if (ipv6_chk_addr(&init_net, 2107 (struct in6_addr *)peer_ip, 2108 pdev, 1)) 2109 break; 2110 } 2111 else 2112 pdev = NULL; 2113 2114 if (!pdev) { 2115 err = -ENODEV; 2116 goto out; 2117 } 2118 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 2119 n, pdev, rt_tos2priority(tos)); 2120 if (!ep->l2t) 2121 goto out; 2122 ep->mtu = pdev->mtu; 2123 ep->tx_chan = cxgb4_port_chan(pdev); 2124 ep->smac_idx = cxgb4_tp_smt_idx(adapter_type, 2125 cxgb4_port_viid(pdev)); 2126 step = cdev->rdev.lldi.ntxq / 2127 cdev->rdev.lldi.nchan; 2128 ep->txq_idx 
= cxgb4_port_idx(pdev) * step; 2129 step = cdev->rdev.lldi.nrxq / 2130 cdev->rdev.lldi.nchan; 2131 ep->ctrlq_idx = cxgb4_port_idx(pdev); 2132 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 2133 cxgb4_port_idx(pdev) * step]; 2134 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); 2135 dev_put(pdev); 2136 } else { 2137 pdev = get_real_dev(n->dev); 2138 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 2139 n, pdev, 0); 2140 if (!ep->l2t) 2141 goto out; 2142 ep->mtu = dst_mtu(dst); 2143 ep->tx_chan = cxgb4_port_chan(pdev); 2144 ep->smac_idx = cxgb4_tp_smt_idx(adapter_type, 2145 cxgb4_port_viid(pdev)); 2146 step = cdev->rdev.lldi.ntxq / 2147 cdev->rdev.lldi.nchan; 2148 ep->txq_idx = cxgb4_port_idx(pdev) * step; 2149 ep->ctrlq_idx = cxgb4_port_idx(pdev); 2150 step = cdev->rdev.lldi.nrxq / 2151 cdev->rdev.lldi.nchan; 2152 ep->rss_qid = cdev->rdev.lldi.rxq_ids[ 2153 cxgb4_port_idx(pdev) * step]; 2154 set_tcp_window(ep, (struct port_info *)netdev_priv(pdev)); 2155 2156 if (clear_mpa_v1) { 2157 ep->retry_with_mpa_v1 = 0; 2158 ep->tried_with_mpa_v1 = 0; 2159 } 2160 } 2161 err = 0; 2162 out: 2163 rcu_read_unlock(); 2164 2165 neigh_release(n); 2166 2167 return err; 2168 } 2169 2170 static int c4iw_reconnect(struct c4iw_ep *ep) 2171 { 2172 int err = 0; 2173 int size = 0; 2174 struct sockaddr_in *laddr = (struct sockaddr_in *) 2175 &ep->com.cm_id->m_local_addr; 2176 struct sockaddr_in *raddr = (struct sockaddr_in *) 2177 &ep->com.cm_id->m_remote_addr; 2178 struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *) 2179 &ep->com.cm_id->m_local_addr; 2180 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *) 2181 &ep->com.cm_id->m_remote_addr; 2182 int iptype; 2183 __u8 *ra; 2184 2185 PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); 2186 init_timer(&ep->timer); 2187 c4iw_init_wr_wait(&ep->com.wr_wait); 2188 2189 /* When MPA revision is different on nodes, the node with MPA_rev=2 2190 * tries to reconnect with MPA_rev 1 for the same EP through 2191 * c4iw_reconnect(), where the same EP is assigned with new tid for 2192 * further connection establishment. As we are using the same EP pointer 2193 * for reconnect, few skbs are used during the previous c4iw_connect(), 2194 * which leaves the EP with inadequate skbs for further 2195 * c4iw_reconnect(), Further causing an assert BUG_ON() due to empty 2196 * skb_list() during peer_abort(). Allocate skbs which is already used. 2197 */ 2198 size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list)); 2199 if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) { 2200 err = -ENOMEM; 2201 goto fail1; 2202 } 2203 2204 /* 2205 * Allocate an active TID to initiate a TCP connection. 
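 * The atid stands in for the hardware tid until the ACT_OPEN_RPL
 * arrives: cxgb4_alloc_atid() stores this ep as the atid's context so
 * act_open_rpl() can recover it with lookup_atid(), and the atid is
 * also tracked in the device's atid_idr until the attempt succeeds or
 * is torn down (cxgb4_free_atid() on the failure paths below).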
2206 */ 2207 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); 2208 if (ep->atid == -1) { 2209 pr_err("%s - cannot alloc atid.\n", __func__); 2210 err = -ENOMEM; 2211 goto fail2; 2212 } 2213 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid); 2214 2215 /* find a route */ 2216 if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) { 2217 ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr, 2218 raddr->sin_addr.s_addr, laddr->sin_port, 2219 raddr->sin_port, ep->com.cm_id->tos); 2220 iptype = 4; 2221 ra = (__u8 *)&raddr->sin_addr; 2222 } else { 2223 ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr, 2224 raddr6->sin6_addr.s6_addr, 2225 laddr6->sin6_port, raddr6->sin6_port, 0, 2226 raddr6->sin6_scope_id); 2227 iptype = 6; 2228 ra = (__u8 *)&raddr6->sin6_addr; 2229 } 2230 if (!ep->dst) { 2231 pr_err("%s - cannot find route.\n", __func__); 2232 err = -EHOSTUNREACH; 2233 goto fail3; 2234 } 2235 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false, 2236 ep->com.dev->rdev.lldi.adapter_type, 2237 ep->com.cm_id->tos); 2238 if (err) { 2239 pr_err("%s - cannot alloc l2e.\n", __func__); 2240 goto fail4; 2241 } 2242 2243 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 2244 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 2245 ep->l2t->idx); 2246 2247 state_set(&ep->com, CONNECTING); 2248 ep->tos = ep->com.cm_id->tos; 2249 2250 /* send connect request to rnic */ 2251 err = send_connect(ep); 2252 if (!err) 2253 goto out; 2254 2255 cxgb4_l2t_release(ep->l2t); 2256 fail4: 2257 dst_release(ep->dst); 2258 fail3: 2259 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 2260 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 2261 fail2: 2262 /* 2263 * remember to send notification to upper layer. 2264 * We are in here so the upper layer is not aware that this is 2265 * re-connect attempt and so, upper layer is still waiting for 2266 * response of 1st connect request. 2267 */ 2268 connect_reply_upcall(ep, -ECONNRESET); 2269 fail1: 2270 c4iw_put_ep(&ep->com); 2271 out: 2272 return err; 2273 } 2274 2275 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2276 { 2277 struct c4iw_ep *ep; 2278 struct cpl_act_open_rpl *rpl = cplhdr(skb); 2279 unsigned int atid = TID_TID_G(AOPEN_ATID_G( 2280 ntohl(rpl->atid_status))); 2281 struct tid_info *t = dev->rdev.lldi.tids; 2282 int status = AOPEN_STATUS_G(ntohl(rpl->atid_status)); 2283 struct sockaddr_in *la; 2284 struct sockaddr_in *ra; 2285 struct sockaddr_in6 *la6; 2286 struct sockaddr_in6 *ra6; 2287 int ret = 0; 2288 2289 ep = lookup_atid(t, atid); 2290 la = (struct sockaddr_in *)&ep->com.local_addr; 2291 ra = (struct sockaddr_in *)&ep->com.remote_addr; 2292 la6 = (struct sockaddr_in6 *)&ep->com.local_addr; 2293 ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr; 2294 2295 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, 2296 status, status2errno(status)); 2297 2298 if (is_neg_adv(status)) { 2299 PDBG("%s Connection problems for atid %u status %u (%s)\n", 2300 __func__, atid, status, neg_adv_str(status)); 2301 ep->stats.connect_neg_adv++; 2302 mutex_lock(&dev->rdev.stats.lock); 2303 dev->rdev.stats.neg_adv++; 2304 mutex_unlock(&dev->rdev.stats.lock); 2305 return 0; 2306 } 2307 2308 set_bit(ACT_OPEN_RPL, &ep->com.history); 2309 2310 /* 2311 * Log interesting failures. 
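 * Connection resets and timeouts are expected and not logged.
 * CPL_ERR_TCAM_FULL falls back to an FW_OFLD_CONNECTION_WR-based open
 * when fw_ofld_conn is enabled (IPv4 only), and CPL_ERR_CONN_EXIST
 * retries the connect up to ACT_OPEN_RETRY_COUNT times after releasing
 * the old atid, route and L2T state.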
2312 */ 2313 switch (status) { 2314 case CPL_ERR_CONN_RESET: 2315 case CPL_ERR_CONN_TIMEDOUT: 2316 break; 2317 case CPL_ERR_TCAM_FULL: 2318 mutex_lock(&dev->rdev.stats.lock); 2319 dev->rdev.stats.tcam_full++; 2320 mutex_unlock(&dev->rdev.stats.lock); 2321 if (ep->com.local_addr.ss_family == AF_INET && 2322 dev->rdev.lldi.enable_fw_ofld_conn) { 2323 ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G( 2324 ntohl(rpl->atid_status)))); 2325 if (ret) 2326 goto fail; 2327 return 0; 2328 } 2329 break; 2330 case CPL_ERR_CONN_EXIST: 2331 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2332 set_bit(ACT_RETRY_INUSE, &ep->com.history); 2333 if (ep->com.remote_addr.ss_family == AF_INET6) { 2334 struct sockaddr_in6 *sin6 = 2335 (struct sockaddr_in6 *) 2336 &ep->com.local_addr; 2337 cxgb4_clip_release( 2338 ep->com.dev->rdev.lldi.ports[0], 2339 (const u32 *) 2340 &sin6->sin6_addr.s6_addr, 1); 2341 } 2342 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, 2343 atid); 2344 cxgb4_free_atid(t, atid); 2345 dst_release(ep->dst); 2346 cxgb4_l2t_release(ep->l2t); 2347 c4iw_reconnect(ep); 2348 return 0; 2349 } 2350 break; 2351 default: 2352 if (ep->com.local_addr.ss_family == AF_INET) { 2353 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n", 2354 atid, status, status2errno(status), 2355 &la->sin_addr.s_addr, ntohs(la->sin_port), 2356 &ra->sin_addr.s_addr, ntohs(ra->sin_port)); 2357 } else { 2358 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n", 2359 atid, status, status2errno(status), 2360 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port), 2361 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port)); 2362 } 2363 break; 2364 } 2365 2366 fail: 2367 connect_reply_upcall(ep, status2errno(status)); 2368 state_set(&ep->com, DEAD); 2369 2370 if (ep->com.remote_addr.ss_family == AF_INET6) { 2371 struct sockaddr_in6 *sin6 = 2372 (struct sockaddr_in6 *)&ep->com.local_addr; 2373 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 2374 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 2375 } 2376 if (status && act_open_has_tid(status)) 2377 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); 2378 2379 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 2380 cxgb4_free_atid(t, atid); 2381 dst_release(ep->dst); 2382 cxgb4_l2t_release(ep->l2t); 2383 c4iw_put_ep(&ep->com); 2384 2385 return 0; 2386 } 2387 2388 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2389 { 2390 struct cpl_pass_open_rpl *rpl = cplhdr(skb); 2391 unsigned int stid = GET_TID(rpl); 2392 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); 2393 2394 if (!ep) { 2395 PDBG("%s stid %d lookup failure!\n", __func__, stid); 2396 goto out; 2397 } 2398 PDBG("%s ep %p status %d error %d\n", __func__, ep, 2399 rpl->status, status2errno(rpl->status)); 2400 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 2401 c4iw_put_ep(&ep->com); 2402 out: 2403 return 0; 2404 } 2405 2406 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2407 { 2408 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); 2409 unsigned int stid = GET_TID(rpl); 2410 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); 2411 2412 PDBG("%s ep %p\n", __func__, ep); 2413 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 2414 c4iw_put_ep(&ep->com); 2415 return 0; 2416 } 2417 2418 static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, 2419 struct cpl_pass_accept_req *req) 2420 { 2421 struct cpl_pass_accept_rpl *rpl; 2422 unsigned int mtu_idx; 2423 u64 opt0; 2424 u32 opt2; 2425 int wscale; 
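	/*
	 * Build the CPL_PASS_ACCEPT_RPL for this passive open: opt0/opt2
	 * encode the window scale, MSS index, L2T entry, TX channel and
	 * RSS queue for the tid.  The RCV_BUFSIZ field is in 1KB units
	 * (ep->rcv_win >> 10), so e.g. the default 256KB receive window
	 * becomes win = 256 before the RCV_BUFSIZ_M cap; any remainder
	 * beyond the cap is granted later via rx_data_ack.
	 */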
2426 struct cpl_t5_pass_accept_rpl *rpl5 = NULL; 2427 int win; 2428 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; 2429 2430 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2431 BUG_ON(skb_cloned(skb)); 2432 2433 skb_get(skb); 2434 rpl = cplhdr(skb); 2435 if (!is_t4(adapter_type)) { 2436 skb_trim(skb, roundup(sizeof(*rpl5), 16)); 2437 rpl5 = (void *)rpl; 2438 INIT_TP_WR(rpl5, ep->hwtid); 2439 } else { 2440 skb_trim(skb, sizeof(*rpl)); 2441 INIT_TP_WR(rpl, ep->hwtid); 2442 } 2443 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 2444 ep->hwtid)); 2445 2446 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 2447 enable_tcp_timestamps && req->tcpopt.tstamp, 2448 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); 2449 wscale = compute_wscale(rcv_win); 2450 2451 /* 2452 * Specify the largest window that will fit in opt0. The 2453 * remainder will be specified in the rx_data_ack. 2454 */ 2455 win = ep->rcv_win >> 10; 2456 if (win > RCV_BUFSIZ_M) 2457 win = RCV_BUFSIZ_M; 2458 opt0 = (nocong ? NO_CONG_F : 0) | 2459 KEEP_ALIVE_F | 2460 DELACK_F | 2461 WND_SCALE_V(wscale) | 2462 MSS_IDX_V(mtu_idx) | 2463 L2T_IDX_V(ep->l2t->idx) | 2464 TX_CHAN_V(ep->tx_chan) | 2465 SMAC_SEL_V(ep->smac_idx) | 2466 DSCP_V(ep->tos >> 2) | 2467 ULP_MODE_V(ULP_MODE_TCPDDP) | 2468 RCV_BUFSIZ_V(win); 2469 opt2 = RX_CHANNEL_V(0) | 2470 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); 2471 2472 if (enable_tcp_timestamps && req->tcpopt.tstamp) 2473 opt2 |= TSTAMPS_EN_F; 2474 if (enable_tcp_sack && req->tcpopt.sack) 2475 opt2 |= SACK_EN_F; 2476 if (wscale && enable_tcp_window_scaling) 2477 opt2 |= WND_SCALE_EN_F; 2478 if (enable_ecn) { 2479 const struct tcphdr *tcph; 2480 u32 hlen = ntohl(req->hdr_len); 2481 2482 if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5) 2483 tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) + 2484 IP_HDR_LEN_G(hlen); 2485 else 2486 tcph = (const void *)(req + 1) + 2487 T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen); 2488 if (tcph->ece && tcph->cwr) 2489 opt2 |= CCTRL_ECN_V(1); 2490 } 2491 if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) { 2492 u32 isn = (prandom_u32() & ~7UL) - 1; 2493 opt2 |= T5_OPT_2_VALID_F; 2494 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); 2495 opt2 |= T5_ISS_F; 2496 rpl5 = (void *)rpl; 2497 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); 2498 if (peer2peer) 2499 isn += 4; 2500 rpl5->iss = cpu_to_be32(isn); 2501 PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss)); 2502 } 2503 2504 rpl->opt0 = cpu_to_be64(opt0); 2505 rpl->opt2 = cpu_to_be32(opt2); 2506 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 2507 t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure); 2508 2509 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 2510 } 2511 2512 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb) 2513 { 2514 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid); 2515 BUG_ON(skb_cloned(skb)); 2516 skb_trim(skb, sizeof(struct cpl_tid_release)); 2517 release_tid(&dev->rdev, hwtid, skb); 2518 return; 2519 } 2520 2521 static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type, 2522 int *iptype, __u8 *local_ip, __u8 *peer_ip, 2523 __be16 *local_port, __be16 *peer_port) 2524 { 2525 int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ? 2526 ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) : 2527 T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)); 2528 int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ? 
2529 IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) : 2530 T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)); 2531 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); 2532 struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len); 2533 struct tcphdr *tcp = (struct tcphdr *) 2534 ((u8 *)(req + 1) + eth_len + ip_len); 2535 2536 if (ip->version == 4) { 2537 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, 2538 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), 2539 ntohs(tcp->dest)); 2540 *iptype = 4; 2541 memcpy(peer_ip, &ip->saddr, 4); 2542 memcpy(local_ip, &ip->daddr, 4); 2543 } else { 2544 PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__, 2545 ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source), 2546 ntohs(tcp->dest)); 2547 *iptype = 6; 2548 memcpy(peer_ip, ip6->saddr.s6_addr, 16); 2549 memcpy(local_ip, ip6->daddr.s6_addr, 16); 2550 } 2551 *peer_port = tcp->source; 2552 *local_port = tcp->dest; 2553 2554 return; 2555 } 2556 2557 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) 2558 { 2559 struct c4iw_ep *child_ep = NULL, *parent_ep; 2560 struct cpl_pass_accept_req *req = cplhdr(skb); 2561 unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); 2562 struct tid_info *t = dev->rdev.lldi.tids; 2563 unsigned int hwtid = GET_TID(req); 2564 struct dst_entry *dst; 2565 __u8 local_ip[16], peer_ip[16]; 2566 __be16 local_port, peer_port; 2567 struct sockaddr_in6 *sin6; 2568 int err; 2569 u16 peer_mss = ntohs(req->tcpopt.mss); 2570 int iptype; 2571 unsigned short hdrs; 2572 u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); 2573 2574 parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid); 2575 if (!parent_ep) { 2576 PDBG("%s connect request on invalid stid %d\n", __func__, stid); 2577 goto reject; 2578 } 2579 2580 if (state_read(&parent_ep->com) != LISTEN) { 2581 PDBG("%s - listening ep not in LISTEN\n", __func__); 2582 goto reject; 2583 } 2584 2585 get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype, 2586 local_ip, peer_ip, &local_port, &peer_port); 2587 2588 /* Find output route */ 2589 if (iptype == 4) { 2590 PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n" 2591 , __func__, parent_ep, hwtid, 2592 local_ip, peer_ip, ntohs(local_port), 2593 ntohs(peer_port), peer_mss); 2594 dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip, 2595 local_port, peer_port, 2596 tos); 2597 } else { 2598 PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" 2599 , __func__, parent_ep, hwtid, 2600 local_ip, peer_ip, ntohs(local_port), 2601 ntohs(peer_port), peer_mss); 2602 dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port, 2603 PASS_OPEN_TOS_G(ntohl(req->tos_stid)), 2604 ((struct sockaddr_in6 *) 2605 &parent_ep->com.local_addr)->sin6_scope_id); 2606 } 2607 if (!dst) { 2608 printk(KERN_ERR MOD "%s - failed to find dst entry!\n", 2609 __func__); 2610 goto reject; 2611 } 2612 2613 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 2614 if (!child_ep) { 2615 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 2616 __func__); 2617 dst_release(dst); 2618 goto reject; 2619 } 2620 2621 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false, 2622 parent_ep->com.dev->rdev.lldi.adapter_type, tos); 2623 if (err) { 2624 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 2625 __func__); 2626 dst_release(dst); 2627 kfree(child_ep); 2628 goto reject; 2629 } 2630 2631 hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) + 2632 
((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0); 2633 if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) 2634 child_ep->mtu = peer_mss + hdrs; 2635 2636 skb_queue_head_init(&child_ep->com.ep_skb_list); 2637 if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF)) 2638 goto fail; 2639 2640 state_set(&child_ep->com, CONNECTING); 2641 child_ep->com.dev = dev; 2642 child_ep->com.cm_id = NULL; 2643 2644 if (iptype == 4) { 2645 struct sockaddr_in *sin = (struct sockaddr_in *) 2646 &child_ep->com.local_addr; 2647 2648 sin->sin_family = PF_INET; 2649 sin->sin_port = local_port; 2650 sin->sin_addr.s_addr = *(__be32 *)local_ip; 2651 2652 sin = (struct sockaddr_in *)&child_ep->com.local_addr; 2653 sin->sin_family = PF_INET; 2654 sin->sin_port = ((struct sockaddr_in *) 2655 &parent_ep->com.local_addr)->sin_port; 2656 sin->sin_addr.s_addr = *(__be32 *)local_ip; 2657 2658 sin = (struct sockaddr_in *)&child_ep->com.remote_addr; 2659 sin->sin_family = PF_INET; 2660 sin->sin_port = peer_port; 2661 sin->sin_addr.s_addr = *(__be32 *)peer_ip; 2662 } else { 2663 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; 2664 sin6->sin6_family = PF_INET6; 2665 sin6->sin6_port = local_port; 2666 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); 2667 2668 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; 2669 sin6->sin6_family = PF_INET6; 2670 sin6->sin6_port = ((struct sockaddr_in6 *) 2671 &parent_ep->com.local_addr)->sin6_port; 2672 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); 2673 2674 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr; 2675 sin6->sin6_family = PF_INET6; 2676 sin6->sin6_port = peer_port; 2677 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); 2678 } 2679 2680 c4iw_get_ep(&parent_ep->com); 2681 child_ep->parent_ep = parent_ep; 2682 child_ep->tos = tos; 2683 child_ep->dst = dst; 2684 child_ep->hwtid = hwtid; 2685 2686 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, 2687 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); 2688 2689 init_timer(&child_ep->timer); 2690 cxgb4_insert_tid(t, child_ep, hwtid); 2691 insert_ep_tid(child_ep); 2692 if (accept_cr(child_ep, skb, req)) { 2693 c4iw_put_ep(&parent_ep->com); 2694 release_ep_resources(child_ep); 2695 } else { 2696 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history); 2697 } 2698 if (iptype == 6) { 2699 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; 2700 cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0], 2701 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 2702 } 2703 goto out; 2704 fail: 2705 c4iw_put_ep(&child_ep->com); 2706 reject: 2707 reject_cr(dev, hwtid, skb); 2708 if (parent_ep) 2709 c4iw_put_ep(&parent_ep->com); 2710 out: 2711 return 0; 2712 } 2713 2714 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) 2715 { 2716 struct c4iw_ep *ep; 2717 struct cpl_pass_establish *req = cplhdr(skb); 2718 unsigned int tid = GET_TID(req); 2719 int ret; 2720 2721 ep = get_ep_from_tid(dev, tid); 2722 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2723 ep->snd_seq = be32_to_cpu(req->snd_isn); 2724 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 2725 2726 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid, 2727 ntohs(req->tcp_opt)); 2728 2729 set_emss(ep, ntohs(req->tcp_opt)); 2730 2731 dst_confirm(ep->dst); 2732 mutex_lock(&ep->com.mutex); 2733 ep->com.state = MPA_REQ_WAIT; 2734 start_ep_timer(ep); 2735 set_bit(PASS_ESTAB, &ep->com.history); 2736 ret = send_flowc(ep); 2737 mutex_unlock(&ep->com.mutex); 2738 if (ret) 2739 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 2740 c4iw_put_ep(&ep->com); 
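	/*
	 * At this point the TCP connection is established: the ISNs and
	 * the peer's TCP options have been recorded, and the ep timer now
	 * bounds how long we wait in MPA_REQ_WAIT for the peer's MPA
	 * request.  If the flowc work request could not be sent, the
	 * connection was aborted above instead.
	 */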
2741 2742 return 0; 2743 } 2744 2745 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) 2746 { 2747 struct cpl_peer_close *hdr = cplhdr(skb); 2748 struct c4iw_ep *ep; 2749 struct c4iw_qp_attributes attrs; 2750 int disconnect = 1; 2751 int release = 0; 2752 unsigned int tid = GET_TID(hdr); 2753 int ret; 2754 2755 ep = get_ep_from_tid(dev, tid); 2756 if (!ep) 2757 return 0; 2758 2759 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2760 dst_confirm(ep->dst); 2761 2762 set_bit(PEER_CLOSE, &ep->com.history); 2763 mutex_lock(&ep->com.mutex); 2764 switch (ep->com.state) { 2765 case MPA_REQ_WAIT: 2766 __state_set(&ep->com, CLOSING); 2767 break; 2768 case MPA_REQ_SENT: 2769 __state_set(&ep->com, CLOSING); 2770 connect_reply_upcall(ep, -ECONNRESET); 2771 break; 2772 case MPA_REQ_RCVD: 2773 2774 /* 2775 * We're gonna mark this puppy DEAD, but keep 2776 * the reference on it until the ULP accepts or 2777 * rejects the CR. Also wake up anyone waiting 2778 * in rdma connection migration (see c4iw_accept_cr()). 2779 */ 2780 __state_set(&ep->com, CLOSING); 2781 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2782 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2783 break; 2784 case MPA_REP_SENT: 2785 __state_set(&ep->com, CLOSING); 2786 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2787 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2788 break; 2789 case FPDU_MODE: 2790 start_ep_timer(ep); 2791 __state_set(&ep->com, CLOSING); 2792 attrs.next_state = C4IW_QP_STATE_CLOSING; 2793 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2794 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2795 if (ret != -ECONNRESET) { 2796 peer_close_upcall(ep); 2797 disconnect = 1; 2798 } 2799 break; 2800 case ABORTING: 2801 disconnect = 0; 2802 break; 2803 case CLOSING: 2804 __state_set(&ep->com, MORIBUND); 2805 disconnect = 0; 2806 break; 2807 case MORIBUND: 2808 (void)stop_ep_timer(ep); 2809 if (ep->com.cm_id && ep->com.qp) { 2810 attrs.next_state = C4IW_QP_STATE_IDLE; 2811 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2812 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2813 } 2814 close_complete_upcall(ep, 0); 2815 __state_set(&ep->com, DEAD); 2816 release = 1; 2817 disconnect = 0; 2818 break; 2819 case DEAD: 2820 disconnect = 0; 2821 break; 2822 default: 2823 BUG_ON(1); 2824 } 2825 mutex_unlock(&ep->com.mutex); 2826 if (disconnect) 2827 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2828 if (release) 2829 release_ep_resources(ep); 2830 c4iw_put_ep(&ep->com); 2831 return 0; 2832 } 2833 2834 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 2835 { 2836 struct cpl_abort_req_rss *req = cplhdr(skb); 2837 struct c4iw_ep *ep; 2838 struct cpl_abort_rpl *rpl; 2839 struct sk_buff *rpl_skb; 2840 struct c4iw_qp_attributes attrs; 2841 int ret; 2842 int release = 0; 2843 unsigned int tid = GET_TID(req); 2844 2845 ep = get_ep_from_tid(dev, tid); 2846 if (!ep) 2847 return 0; 2848 2849 if (is_neg_adv(req->status)) { 2850 PDBG("%s Negative advice on abort- tid %u status %d (%s)\n", 2851 __func__, ep->hwtid, req->status, 2852 neg_adv_str(req->status)); 2853 ep->stats.abort_neg_adv++; 2854 mutex_lock(&dev->rdev.stats.lock); 2855 dev->rdev.stats.neg_adv++; 2856 mutex_unlock(&dev->rdev.stats.lock); 2857 goto deref_ep; 2858 } 2859 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 2860 ep->com.state); 2861 set_bit(PEER_ABORT, &ep->com.history); 2862 2863 /* 2864 * Wake up any threads in rdma_init() or rdma_fini(). 
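 * Those paths block on ep->com.wr_wait and will observe the
 * -ECONNRESET passed to c4iw_wake_up() below.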
2865 * However, this is not needed if com state is just 2866 * MPA_REQ_SENT 2867 */ 2868 if (ep->com.state != MPA_REQ_SENT) 2869 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2870 2871 mutex_lock(&ep->com.mutex); 2872 switch (ep->com.state) { 2873 case CONNECTING: 2874 c4iw_put_ep(&ep->parent_ep->com); 2875 break; 2876 case MPA_REQ_WAIT: 2877 (void)stop_ep_timer(ep); 2878 break; 2879 case MPA_REQ_SENT: 2880 (void)stop_ep_timer(ep); 2881 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) 2882 connect_reply_upcall(ep, -ECONNRESET); 2883 else { 2884 /* 2885 * we just don't send notification upwards because we 2886 * want to retry with mpa_v1 without upper layers even 2887 * knowing it. 2888 * 2889 * do some housekeeping so as to re-initiate the 2890 * connection 2891 */ 2892 PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__, 2893 mpa_rev); 2894 ep->retry_with_mpa_v1 = 1; 2895 } 2896 break; 2897 case MPA_REP_SENT: 2898 break; 2899 case MPA_REQ_RCVD: 2900 break; 2901 case MORIBUND: 2902 case CLOSING: 2903 stop_ep_timer(ep); 2904 /*FALLTHROUGH*/ 2905 case FPDU_MODE: 2906 if (ep->com.cm_id && ep->com.qp) { 2907 attrs.next_state = C4IW_QP_STATE_ERROR; 2908 ret = c4iw_modify_qp(ep->com.qp->rhp, 2909 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 2910 &attrs, 1); 2911 if (ret) 2912 printk(KERN_ERR MOD 2913 "%s - qp <- error failed!\n", 2914 __func__); 2915 } 2916 peer_abort_upcall(ep); 2917 break; 2918 case ABORTING: 2919 break; 2920 case DEAD: 2921 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); 2922 mutex_unlock(&ep->com.mutex); 2923 goto deref_ep; 2924 default: 2925 BUG_ON(1); 2926 break; 2927 } 2928 dst_confirm(ep->dst); 2929 if (ep->com.state != ABORTING) { 2930 __state_set(&ep->com, DEAD); 2931 /* we don't release if we want to retry with mpa_v1 */ 2932 if (!ep->retry_with_mpa_v1) 2933 release = 1; 2934 } 2935 mutex_unlock(&ep->com.mutex); 2936 2937 rpl_skb = skb_dequeue(&ep->com.ep_skb_list); 2938 if (WARN_ON(!rpl_skb)) { 2939 release = 1; 2940 goto out; 2941 } 2942 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 2943 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); 2944 INIT_TP_WR(rpl, ep->hwtid); 2945 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); 2946 rpl->cmd = CPL_ABORT_NO_RST; 2947 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); 2948 out: 2949 if (release) 2950 release_ep_resources(ep); 2951 else if (ep->retry_with_mpa_v1) { 2952 if (ep->com.remote_addr.ss_family == AF_INET6) { 2953 struct sockaddr_in6 *sin6 = 2954 (struct sockaddr_in6 *) 2955 &ep->com.local_addr; 2956 cxgb4_clip_release( 2957 ep->com.dev->rdev.lldi.ports[0], 2958 (const u32 *)&sin6->sin6_addr.s6_addr, 2959 1); 2960 } 2961 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); 2962 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 2963 dst_release(ep->dst); 2964 cxgb4_l2t_release(ep->l2t); 2965 c4iw_reconnect(ep); 2966 } 2967 2968 deref_ep: 2969 c4iw_put_ep(&ep->com); 2970 /* Dereferencing ep, referenced in peer_abort_intr() */ 2971 c4iw_put_ep(&ep->com); 2972 return 0; 2973 } 2974 2975 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2976 { 2977 struct c4iw_ep *ep; 2978 struct c4iw_qp_attributes attrs; 2979 struct cpl_close_con_rpl *rpl = cplhdr(skb); 2980 int release = 0; 2981 unsigned int tid = GET_TID(rpl); 2982 2983 ep = get_ep_from_tid(dev, tid); 2984 if (!ep) 2985 return 0; 2986 2987 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2988 BUG_ON(!ep); 2989 2990 /* The cm_id may be null if we failed to connect */ 2991 
mutex_lock(&ep->com.mutex); 2992 set_bit(CLOSE_CON_RPL, &ep->com.history); 2993 switch (ep->com.state) { 2994 case CLOSING: 2995 __state_set(&ep->com, MORIBUND); 2996 break; 2997 case MORIBUND: 2998 (void)stop_ep_timer(ep); 2999 if ((ep->com.cm_id) && (ep->com.qp)) { 3000 attrs.next_state = C4IW_QP_STATE_IDLE; 3001 c4iw_modify_qp(ep->com.qp->rhp, 3002 ep->com.qp, 3003 C4IW_QP_ATTR_NEXT_STATE, 3004 &attrs, 1); 3005 } 3006 close_complete_upcall(ep, 0); 3007 __state_set(&ep->com, DEAD); 3008 release = 1; 3009 break; 3010 case ABORTING: 3011 case DEAD: 3012 break; 3013 default: 3014 BUG_ON(1); 3015 break; 3016 } 3017 mutex_unlock(&ep->com.mutex); 3018 if (release) 3019 release_ep_resources(ep); 3020 c4iw_put_ep(&ep->com); 3021 return 0; 3022 } 3023 3024 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) 3025 { 3026 struct cpl_rdma_terminate *rpl = cplhdr(skb); 3027 unsigned int tid = GET_TID(rpl); 3028 struct c4iw_ep *ep; 3029 struct c4iw_qp_attributes attrs; 3030 3031 ep = get_ep_from_tid(dev, tid); 3032 BUG_ON(!ep); 3033 3034 if (ep && ep->com.qp) { 3035 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, 3036 ep->com.qp->wq.sq.qid); 3037 attrs.next_state = C4IW_QP_STATE_TERMINATE; 3038 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 3039 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 3040 } else 3041 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); 3042 c4iw_put_ep(&ep->com); 3043 3044 return 0; 3045 } 3046 3047 /* 3048 * Upcall from the adapter indicating data has been transmitted. 3049 * For us its just the single MPA request or reply. We can now free 3050 * the skb holding the mpa message. 3051 */ 3052 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) 3053 { 3054 struct c4iw_ep *ep; 3055 struct cpl_fw4_ack *hdr = cplhdr(skb); 3056 u8 credits = hdr->credits; 3057 unsigned int tid = GET_TID(hdr); 3058 3059 3060 ep = get_ep_from_tid(dev, tid); 3061 if (!ep) 3062 return 0; 3063 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 3064 if (credits == 0) { 3065 PDBG("%s 0 credit ack ep %p tid %u state %u\n", 3066 __func__, ep, ep->hwtid, state_read(&ep->com)); 3067 goto out; 3068 } 3069 3070 dst_confirm(ep->dst); 3071 if (ep->mpa_skb) { 3072 PDBG("%s last streaming msg ack ep %p tid %u state %u " 3073 "initiator %u freeing skb\n", __func__, ep, ep->hwtid, 3074 state_read(&ep->com), ep->mpa_attr.initiator ? 
1 : 0); 3075 mutex_lock(&ep->com.mutex); 3076 kfree_skb(ep->mpa_skb); 3077 ep->mpa_skb = NULL; 3078 if (test_bit(STOP_MPA_TIMER, &ep->com.flags)) 3079 stop_ep_timer(ep); 3080 mutex_unlock(&ep->com.mutex); 3081 } 3082 out: 3083 c4iw_put_ep(&ep->com); 3084 return 0; 3085 } 3086 3087 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 3088 { 3089 int abort; 3090 struct c4iw_ep *ep = to_ep(cm_id); 3091 3092 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 3093 3094 mutex_lock(&ep->com.mutex); 3095 if (ep->com.state != MPA_REQ_RCVD) { 3096 mutex_unlock(&ep->com.mutex); 3097 c4iw_put_ep(&ep->com); 3098 return -ECONNRESET; 3099 } 3100 set_bit(ULP_REJECT, &ep->com.history); 3101 if (mpa_rev == 0) 3102 abort = 1; 3103 else 3104 abort = send_mpa_reject(ep, pdata, pdata_len); 3105 mutex_unlock(&ep->com.mutex); 3106 3107 stop_ep_timer(ep); 3108 c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL); 3109 c4iw_put_ep(&ep->com); 3110 return 0; 3111 } 3112 3113 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 3114 { 3115 int err; 3116 struct c4iw_qp_attributes attrs; 3117 enum c4iw_qp_attr_mask mask; 3118 struct c4iw_ep *ep = to_ep(cm_id); 3119 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 3120 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 3121 int abort = 0; 3122 3123 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 3124 3125 mutex_lock(&ep->com.mutex); 3126 if (ep->com.state != MPA_REQ_RCVD) { 3127 err = -ECONNRESET; 3128 goto err_out; 3129 } 3130 3131 BUG_ON(!qp); 3132 3133 set_bit(ULP_ACCEPT, &ep->com.history); 3134 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) || 3135 (conn_param->ird > cur_max_read_depth(ep->com.dev))) { 3136 err = -EINVAL; 3137 goto err_abort; 3138 } 3139 3140 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 3141 if (conn_param->ord > ep->ird) { 3142 if (RELAXED_IRD_NEGOTIATION) { 3143 conn_param->ord = ep->ird; 3144 } else { 3145 ep->ird = conn_param->ird; 3146 ep->ord = conn_param->ord; 3147 send_mpa_reject(ep, conn_param->private_data, 3148 conn_param->private_data_len); 3149 err = -ENOMEM; 3150 goto err_abort; 3151 } 3152 } 3153 if (conn_param->ird < ep->ord) { 3154 if (RELAXED_IRD_NEGOTIATION && 3155 ep->ord <= h->rdev.lldi.max_ordird_qp) { 3156 conn_param->ird = ep->ord; 3157 } else { 3158 err = -ENOMEM; 3159 goto err_abort; 3160 } 3161 } 3162 } 3163 ep->ird = conn_param->ird; 3164 ep->ord = conn_param->ord; 3165 3166 if (ep->mpa_attr.version == 1) { 3167 if (peer2peer && ep->ird == 0) 3168 ep->ird = 1; 3169 } else { 3170 if (peer2peer && 3171 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) && 3172 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0) 3173 ep->ird = 1; 3174 } 3175 3176 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); 3177 3178 ep->com.cm_id = cm_id; 3179 ref_cm_id(&ep->com); 3180 ep->com.qp = qp; 3181 ref_qp(ep); 3182 3183 /* bind QP to EP and move to RTS */ 3184 attrs.mpa_attr = ep->mpa_attr; 3185 attrs.max_ird = ep->ird; 3186 attrs.max_ord = ep->ord; 3187 attrs.llp_stream_handle = ep; 3188 attrs.next_state = C4IW_QP_STATE_RTS; 3189 3190 /* bind QP and TID with INIT_WR */ 3191 mask = C4IW_QP_ATTR_NEXT_STATE | 3192 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 3193 C4IW_QP_ATTR_MPA_ATTR | 3194 C4IW_QP_ATTR_MAX_IRD | 3195 C4IW_QP_ATTR_MAX_ORD; 3196 3197 err = c4iw_modify_qp(ep->com.qp->rhp, 3198 ep->com.qp, mask, &attrs, 1); 3199 if (err) 3200 goto err_deref_cm_id; 3201 3202 set_bit(STOP_MPA_TIMER, &ep->com.flags); 3203 err = send_mpa_reply(ep, 
conn_param->private_data, 3204 conn_param->private_data_len); 3205 if (err) 3206 goto err_deref_cm_id; 3207 3208 __state_set(&ep->com, FPDU_MODE); 3209 established_upcall(ep); 3210 mutex_unlock(&ep->com.mutex); 3211 c4iw_put_ep(&ep->com); 3212 return 0; 3213 err_deref_cm_id: 3214 deref_cm_id(&ep->com); 3215 err_abort: 3216 abort = 1; 3217 err_out: 3218 mutex_unlock(&ep->com.mutex); 3219 if (abort) 3220 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 3221 c4iw_put_ep(&ep->com); 3222 return err; 3223 } 3224 3225 static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 3226 { 3227 struct in_device *ind; 3228 int found = 0; 3229 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr; 3230 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; 3231 3232 ind = in_dev_get(dev->rdev.lldi.ports[0]); 3233 if (!ind) 3234 return -EADDRNOTAVAIL; 3235 for_primary_ifa(ind) { 3236 laddr->sin_addr.s_addr = ifa->ifa_address; 3237 raddr->sin_addr.s_addr = ifa->ifa_address; 3238 found = 1; 3239 break; 3240 } 3241 endfor_ifa(ind); 3242 in_dev_put(ind); 3243 return found ? 0 : -EADDRNOTAVAIL; 3244 } 3245 3246 static int get_lladdr(struct net_device *dev, struct in6_addr *addr, 3247 unsigned char banned_flags) 3248 { 3249 struct inet6_dev *idev; 3250 int err = -EADDRNOTAVAIL; 3251 3252 rcu_read_lock(); 3253 idev = __in6_dev_get(dev); 3254 if (idev != NULL) { 3255 struct inet6_ifaddr *ifp; 3256 3257 read_lock_bh(&idev->lock); 3258 list_for_each_entry(ifp, &idev->addr_list, if_list) { 3259 if (ifp->scope == IFA_LINK && 3260 !(ifp->flags & banned_flags)) { 3261 memcpy(addr, &ifp->addr, 16); 3262 err = 0; 3263 break; 3264 } 3265 } 3266 read_unlock_bh(&idev->lock); 3267 } 3268 rcu_read_unlock(); 3269 return err; 3270 } 3271 3272 static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 3273 { 3274 struct in6_addr uninitialized_var(addr); 3275 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; 3276 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; 3277 3278 if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) { 3279 memcpy(la6->sin6_addr.s6_addr, &addr, 16); 3280 memcpy(ra6->sin6_addr.s6_addr, &addr, 16); 3281 return 0; 3282 } 3283 return -EADDRNOTAVAIL; 3284 } 3285 3286 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 3287 { 3288 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 3289 struct c4iw_ep *ep; 3290 int err = 0; 3291 struct sockaddr_in *laddr; 3292 struct sockaddr_in *raddr; 3293 struct sockaddr_in6 *laddr6; 3294 struct sockaddr_in6 *raddr6; 3295 __u8 *ra; 3296 int iptype; 3297 3298 if ((conn_param->ord > cur_max_read_depth(dev)) || 3299 (conn_param->ird > cur_max_read_depth(dev))) { 3300 err = -EINVAL; 3301 goto out; 3302 } 3303 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 3304 if (!ep) { 3305 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 3306 err = -ENOMEM; 3307 goto out; 3308 } 3309 3310 skb_queue_head_init(&ep->com.ep_skb_list); 3311 if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) { 3312 err = -ENOMEM; 3313 goto fail1; 3314 } 3315 3316 init_timer(&ep->timer); 3317 ep->plen = conn_param->private_data_len; 3318 if (ep->plen) 3319 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 3320 conn_param->private_data, ep->plen); 3321 ep->ird = conn_param->ird; 3322 ep->ord = conn_param->ord; 3323 3324 if (peer2peer && ep->ord == 0) 3325 ep->ord = 1; 3326 3327 ep->com.cm_id = cm_id; 3328 ref_cm_id(&ep->com); 3329 ep->com.dev = dev; 3330 ep->com.qp = 
get_qhp(dev, conn_param->qpn); 3331 if (!ep->com.qp) { 3332 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); 3333 err = -EINVAL; 3334 goto fail2; 3335 } 3336 ref_qp(ep); 3337 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, 3338 ep->com.qp, cm_id); 3339 3340 /* 3341 * Allocate an active TID to initiate a TCP connection. 3342 */ 3343 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); 3344 if (ep->atid == -1) { 3345 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); 3346 err = -ENOMEM; 3347 goto fail2; 3348 } 3349 insert_handle(dev, &dev->atid_idr, ep, ep->atid); 3350 3351 memcpy(&ep->com.local_addr, &cm_id->m_local_addr, 3352 sizeof(ep->com.local_addr)); 3353 memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr, 3354 sizeof(ep->com.remote_addr)); 3355 3356 laddr = (struct sockaddr_in *)&ep->com.local_addr; 3357 raddr = (struct sockaddr_in *)&ep->com.remote_addr; 3358 laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr; 3359 raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr; 3360 3361 if (cm_id->m_remote_addr.ss_family == AF_INET) { 3362 iptype = 4; 3363 ra = (__u8 *)&raddr->sin_addr; 3364 3365 /* 3366 * Handle loopback requests to INADDR_ANY. 3367 */ 3368 if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { 3369 err = pick_local_ipaddrs(dev, cm_id); 3370 if (err) 3371 goto fail2; 3372 } 3373 3374 /* find a route */ 3375 PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n", 3376 __func__, &laddr->sin_addr, ntohs(laddr->sin_port), 3377 ra, ntohs(raddr->sin_port)); 3378 ep->dst = find_route(dev, laddr->sin_addr.s_addr, 3379 raddr->sin_addr.s_addr, laddr->sin_port, 3380 raddr->sin_port, cm_id->tos); 3381 } else { 3382 iptype = 6; 3383 ra = (__u8 *)&raddr6->sin6_addr; 3384 3385 /* 3386 * Handle loopback requests to INADDR_ANY. 
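 * For IPv6 this is the unspecified address (::); pick_local_ip6addrs()
 * below substitutes a non-tentative link-local address of the first
 * adapter port for both the local and remote ends of the loopback
 * connection.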
3387 */ 3388 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { 3389 err = pick_local_ip6addrs(dev, cm_id); 3390 if (err) 3391 goto fail2; 3392 } 3393 3394 /* find a route */ 3395 PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n", 3396 __func__, laddr6->sin6_addr.s6_addr, 3397 ntohs(laddr6->sin6_port), 3398 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port)); 3399 ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr, 3400 raddr6->sin6_addr.s6_addr, 3401 laddr6->sin6_port, raddr6->sin6_port, 0, 3402 raddr6->sin6_scope_id); 3403 } 3404 if (!ep->dst) { 3405 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 3406 err = -EHOSTUNREACH; 3407 goto fail3; 3408 } 3409 3410 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true, 3411 ep->com.dev->rdev.lldi.adapter_type, cm_id->tos); 3412 if (err) { 3413 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 3414 goto fail4; 3415 } 3416 3417 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 3418 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 3419 ep->l2t->idx); 3420 3421 state_set(&ep->com, CONNECTING); 3422 ep->tos = cm_id->tos; 3423 3424 /* send connect request to rnic */ 3425 err = send_connect(ep); 3426 if (!err) 3427 goto out; 3428 3429 cxgb4_l2t_release(ep->l2t); 3430 fail4: 3431 dst_release(ep->dst); 3432 fail3: 3433 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 3434 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 3435 fail2: 3436 skb_queue_purge(&ep->com.ep_skb_list); 3437 deref_cm_id(&ep->com); 3438 fail1: 3439 c4iw_put_ep(&ep->com); 3440 out: 3441 return err; 3442 } 3443 3444 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3445 { 3446 int err; 3447 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) 3448 &ep->com.local_addr; 3449 3450 if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) { 3451 err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], 3452 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3453 if (err) 3454 return err; 3455 } 3456 c4iw_init_wr_wait(&ep->com.wr_wait); 3457 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0], 3458 ep->stid, &sin6->sin6_addr, 3459 sin6->sin6_port, 3460 ep->com.dev->rdev.lldi.rxq_ids[0]); 3461 if (!err) 3462 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3463 &ep->com.wr_wait, 3464 0, 0, __func__); 3465 else if (err > 0) 3466 err = net_xmit_errno(err); 3467 if (err) { 3468 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 3469 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3470 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n", 3471 err, ep->stid, 3472 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port)); 3473 } 3474 return err; 3475 } 3476 3477 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3478 { 3479 int err; 3480 struct sockaddr_in *sin = (struct sockaddr_in *) 3481 &ep->com.local_addr; 3482 3483 if (dev->rdev.lldi.enable_fw_ofld_conn) { 3484 do { 3485 err = cxgb4_create_server_filter( 3486 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3487 sin->sin_addr.s_addr, sin->sin_port, 0, 3488 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0); 3489 if (err == -EBUSY) { 3490 if (c4iw_fatal_error(&ep->com.dev->rdev)) { 3491 err = -EIO; 3492 break; 3493 } 3494 set_current_state(TASK_UNINTERRUPTIBLE); 3495 schedule_timeout(usecs_to_jiffies(100)); 3496 } 3497 } while (err == -EBUSY); 3498 } else { 3499 c4iw_init_wr_wait(&ep->com.wr_wait); 3500 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], 3501 ep->stid, sin->sin_addr.s_addr, sin->sin_port, 
3502 0, ep->com.dev->rdev.lldi.rxq_ids[0]); 3503 if (!err) 3504 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3505 &ep->com.wr_wait, 3506 0, 0, __func__); 3507 else if (err > 0) 3508 err = net_xmit_errno(err); 3509 } 3510 if (err) 3511 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n" 3512 , err, ep->stid, 3513 &sin->sin_addr, ntohs(sin->sin_port)); 3514 return err; 3515 } 3516 3517 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 3518 { 3519 int err = 0; 3520 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 3521 struct c4iw_listen_ep *ep; 3522 3523 might_sleep(); 3524 3525 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 3526 if (!ep) { 3527 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 3528 err = -ENOMEM; 3529 goto fail1; 3530 } 3531 skb_queue_head_init(&ep->com.ep_skb_list); 3532 PDBG("%s ep %p\n", __func__, ep); 3533 ep->com.cm_id = cm_id; 3534 ref_cm_id(&ep->com); 3535 ep->com.dev = dev; 3536 ep->backlog = backlog; 3537 memcpy(&ep->com.local_addr, &cm_id->m_local_addr, 3538 sizeof(ep->com.local_addr)); 3539 3540 /* 3541 * Allocate a server TID. 3542 */ 3543 if (dev->rdev.lldi.enable_fw_ofld_conn && 3544 ep->com.local_addr.ss_family == AF_INET) 3545 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, 3546 cm_id->m_local_addr.ss_family, ep); 3547 else 3548 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, 3549 cm_id->m_local_addr.ss_family, ep); 3550 3551 if (ep->stid == -1) { 3552 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); 3553 err = -ENOMEM; 3554 goto fail2; 3555 } 3556 insert_handle(dev, &dev->stid_idr, ep, ep->stid); 3557 3558 memcpy(&ep->com.local_addr, &cm_id->m_local_addr, 3559 sizeof(ep->com.local_addr)); 3560 3561 state_set(&ep->com, LISTEN); 3562 if (ep->com.local_addr.ss_family == AF_INET) 3563 err = create_server4(dev, ep); 3564 else 3565 err = create_server6(dev, ep); 3566 if (!err) { 3567 cm_id->provider_data = ep; 3568 goto out; 3569 } 3570 3571 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3572 ep->com.local_addr.ss_family); 3573 fail2: 3574 deref_cm_id(&ep->com); 3575 c4iw_put_ep(&ep->com); 3576 fail1: 3577 out: 3578 return err; 3579 } 3580 3581 int c4iw_destroy_listen(struct iw_cm_id *cm_id) 3582 { 3583 int err; 3584 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 3585 3586 PDBG("%s ep %p\n", __func__, ep); 3587 3588 might_sleep(); 3589 state_set(&ep->com, DEAD); 3590 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn && 3591 ep->com.local_addr.ss_family == AF_INET) { 3592 err = cxgb4_remove_server_filter( 3593 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3594 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3595 } else { 3596 struct sockaddr_in6 *sin6; 3597 c4iw_init_wr_wait(&ep->com.wr_wait); 3598 err = cxgb4_remove_server( 3599 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3600 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3601 if (err) 3602 goto done; 3603 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 3604 0, 0, __func__); 3605 sin6 = (struct sockaddr_in6 *)&ep->com.local_addr; 3606 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 3607 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3608 } 3609 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); 3610 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3611 ep->com.local_addr.ss_family); 3612 done: 3613 deref_cm_id(&ep->com); 3614 c4iw_put_ep(&ep->com); 3615 return err; 3616 } 3617 3618 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 3619 { 3620 int ret = 0; 3621 int close = 0; 3622 int fatal = 0; 3623 struct c4iw_rdev *rdev; 
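	/*
	 * Drive the connection down either abruptly (ABORTING + abort
	 * request) or gracefully (CLOSING + half-close with the ep timer
	 * armed).  A fatal adapter error, or a failure to send the
	 * close/abort, moves the QP to ERROR and releases the ep here.
	 */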
3624 3625 mutex_lock(&ep->com.mutex); 3626 3627 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, 3628 states[ep->com.state], abrupt); 3629 3630 /* 3631 * Ref the ep here in case we have fatal errors causing the 3632 * ep to be released and freed. 3633 */ 3634 c4iw_get_ep(&ep->com); 3635 3636 rdev = &ep->com.dev->rdev; 3637 if (c4iw_fatal_error(rdev)) { 3638 fatal = 1; 3639 close_complete_upcall(ep, -EIO); 3640 ep->com.state = DEAD; 3641 } 3642 switch (ep->com.state) { 3643 case MPA_REQ_WAIT: 3644 case MPA_REQ_SENT: 3645 case MPA_REQ_RCVD: 3646 case MPA_REP_SENT: 3647 case FPDU_MODE: 3648 case CONNECTING: 3649 close = 1; 3650 if (abrupt) 3651 ep->com.state = ABORTING; 3652 else { 3653 ep->com.state = CLOSING; 3654 3655 /* 3656 * if we close before we see the fw4_ack() then we fix 3657 * up the timer state since we're reusing it. 3658 */ 3659 if (ep->mpa_skb && 3660 test_bit(STOP_MPA_TIMER, &ep->com.flags)) { 3661 clear_bit(STOP_MPA_TIMER, &ep->com.flags); 3662 stop_ep_timer(ep); 3663 } 3664 start_ep_timer(ep); 3665 } 3666 set_bit(CLOSE_SENT, &ep->com.flags); 3667 break; 3668 case CLOSING: 3669 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 3670 close = 1; 3671 if (abrupt) { 3672 (void)stop_ep_timer(ep); 3673 ep->com.state = ABORTING; 3674 } else 3675 ep->com.state = MORIBUND; 3676 } 3677 break; 3678 case MORIBUND: 3679 case ABORTING: 3680 case DEAD: 3681 PDBG("%s ignoring disconnect ep %p state %u\n", 3682 __func__, ep, ep->com.state); 3683 break; 3684 default: 3685 BUG(); 3686 break; 3687 } 3688 3689 if (close) { 3690 if (abrupt) { 3691 set_bit(EP_DISC_ABORT, &ep->com.history); 3692 close_complete_upcall(ep, -ECONNRESET); 3693 ret = send_abort(ep); 3694 } else { 3695 set_bit(EP_DISC_CLOSE, &ep->com.history); 3696 ret = send_halfclose(ep); 3697 } 3698 if (ret) { 3699 set_bit(EP_DISC_FAIL, &ep->com.history); 3700 if (!abrupt) { 3701 stop_ep_timer(ep); 3702 close_complete_upcall(ep, -EIO); 3703 } 3704 if (ep->com.qp) { 3705 struct c4iw_qp_attributes attrs; 3706 3707 attrs.next_state = C4IW_QP_STATE_ERROR; 3708 ret = c4iw_modify_qp(ep->com.qp->rhp, 3709 ep->com.qp, 3710 C4IW_QP_ATTR_NEXT_STATE, 3711 &attrs, 1); 3712 if (ret) 3713 pr_err(MOD 3714 "%s - qp <- error failed!\n", 3715 __func__); 3716 } 3717 fatal = 1; 3718 } 3719 } 3720 mutex_unlock(&ep->com.mutex); 3721 c4iw_put_ep(&ep->com); 3722 if (fatal) 3723 release_ep_resources(ep); 3724 return ret; 3725 } 3726 3727 static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3728 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3729 { 3730 struct c4iw_ep *ep; 3731 int atid = be32_to_cpu(req->tid); 3732 3733 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, 3734 (__force u32) req->tid); 3735 if (!ep) 3736 return; 3737 3738 switch (req->retval) { 3739 case FW_ENOMEM: 3740 set_bit(ACT_RETRY_NOMEM, &ep->com.history); 3741 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3742 send_fw_act_open_req(ep, atid); 3743 return; 3744 } 3745 case FW_EADDRINUSE: 3746 set_bit(ACT_RETRY_INUSE, &ep->com.history); 3747 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3748 send_fw_act_open_req(ep, atid); 3749 return; 3750 } 3751 break; 3752 default: 3753 pr_info("%s unexpected ofld conn wr retval %d\n", 3754 __func__, req->retval); 3755 break; 3756 } 3757 pr_err("active ofld_connect_wr failure %d atid %d\n", 3758 req->retval, atid); 3759 mutex_lock(&dev->rdev.stats.lock); 3760 dev->rdev.stats.act_ofld_conn_fails++; 3761 mutex_unlock(&dev->rdev.stats.lock); 3762 connect_reply_upcall(ep, status2errno(req->retval)); 3763 
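	/*
	 * The failure has been reported to the ULP above; now tear the ep
	 * down: mark it DEAD, release the IPv6 CLIP entry if one was
	 * claimed, and free the atid, route and L2T entry taken for this
	 * attempt.
	 */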
state_set(&ep->com, DEAD); 3764 if (ep->com.remote_addr.ss_family == AF_INET6) { 3765 struct sockaddr_in6 *sin6 = 3766 (struct sockaddr_in6 *)&ep->com.local_addr; 3767 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 3768 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3769 } 3770 remove_handle(dev, &dev->atid_idr, atid); 3771 cxgb4_free_atid(dev->rdev.lldi.tids, atid); 3772 dst_release(ep->dst); 3773 cxgb4_l2t_release(ep->l2t); 3774 c4iw_put_ep(&ep->com); 3775 } 3776 3777 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3778 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3779 { 3780 struct sk_buff *rpl_skb; 3781 struct cpl_pass_accept_req *cpl; 3782 int ret; 3783 3784 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie; 3785 BUG_ON(!rpl_skb); 3786 if (req->retval) { 3787 PDBG("%s passive open failure %d\n", __func__, req->retval); 3788 mutex_lock(&dev->rdev.stats.lock); 3789 dev->rdev.stats.pas_ofld_conn_fails++; 3790 mutex_unlock(&dev->rdev.stats.lock); 3791 kfree_skb(rpl_skb); 3792 } else { 3793 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb); 3794 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 3795 (__force u32) htonl( 3796 (__force u32) req->tid))); 3797 ret = pass_accept_req(dev, rpl_skb); 3798 if (!ret) 3799 kfree_skb(rpl_skb); 3800 } 3801 return; 3802 } 3803 3804 static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 3805 { 3806 struct cpl_fw6_msg *rpl = cplhdr(skb); 3807 struct cpl_fw6_msg_ofld_connection_wr_rpl *req; 3808 3809 switch (rpl->type) { 3810 case FW6_TYPE_CQE: 3811 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); 3812 break; 3813 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 3814 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; 3815 switch (req->t_state) { 3816 case TCP_SYN_SENT: 3817 active_ofld_conn_reply(dev, skb, req); 3818 break; 3819 case TCP_SYN_RECV: 3820 passive_ofld_conn_reply(dev, skb, req); 3821 break; 3822 default: 3823 pr_err("%s unexpected ofld conn wr state %d\n", 3824 __func__, req->t_state); 3825 break; 3826 } 3827 break; 3828 } 3829 return 0; 3830 } 3831 3832 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) 3833 { 3834 __be32 l2info; 3835 __be16 hdr_len, vlantag, len; 3836 u16 eth_hdr_len; 3837 int tcp_hdr_len, ip_hdr_len; 3838 u8 intf; 3839 struct cpl_rx_pkt *cpl = cplhdr(skb); 3840 struct cpl_pass_accept_req *req; 3841 struct tcp_options_received tmp_opt; 3842 struct c4iw_dev *dev; 3843 enum chip_type type; 3844 3845 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 3846 /* Store values from cpl_rx_pkt in temporary location. */ 3847 vlantag = cpl->vlan; 3848 len = cpl->len; 3849 l2info = cpl->l2info; 3850 hdr_len = cpl->hdr_len; 3851 intf = cpl->iff; 3852 3853 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header)); 3854 3855 /* 3856 * We need to parse the TCP options from SYN packet. 3857 * to generate cpl_pass_accept_req. 
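 * The parsed MSS, window scale, timestamp and SACK options are copied
 * into req->tcpopt below so the synthesized request looks like a
 * hardware-generated CPL_PASS_ACCEPT_REQ when it is later handed to
 * pass_accept_req() by passive_ofld_conn_reply().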
	 */
	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_clear_options(&tmp_opt);
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
			 SYN_MAC_IDX_V(RX_MACIDX_G(
			 be32_to_cpu(l2info))) |
			 SYN_XACT_MATCH_F);
	type = dev->rdev.lldi.adapter_type;
	tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
	ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
	req->hdr_len =
		cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
	if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
		eth_hdr_len = is_t4(type) ?
			RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
			RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
					    IP_HDR_LEN_V(ip_hdr_len) |
					    ETH_HDR_LEN_V(eth_hdr_len));
	} else { /* T6 and later */
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
					    T6_IP_HDR_LEN_V(ip_hdr_len) |
					    T6_ETH_HDR_LEN_V(eth_hdr_len));
	}
	req->vlan = vlantag;
	req->len = len;
	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
				    PASS_OPEN_TOS_V(tos));
	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
	if (tmp_opt.wscale_ok)
		req->tcpopt.wsf = tmp_opt.snd_wscale;
	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
	if (tmp_opt.sack_ok)
		req->tcpopt.sack = 1;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
	return;
}

static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{
	struct sk_buff *req_skb;
	struct fw_ofld_connection_wr *req;
	struct cpl_pass_accept_req *cpl = cplhdr(skb);
	int ret;

	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
	if (!req_skb)
		return;
	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
	req->le.filter = (__force __be32) filter;
	req->le.lport = lport;
	req->le.pport = rport;
	req->le.u.ipv4.lip = laddr;
	req->le.u.ipv4.pip = raddr;
	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
	req->tcb.rcv_adv = htons(window);
	req->tcb.t_state_to_astid =
		htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
		      FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
		      FW_OFLD_CONNECTION_WR_ASTID_V(
		      PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));

	/*
	 * We store the qid in opt2 which will be used by the firmware
	 * to send us the wr response.
	 */
	req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));

	/*
	 * We initialize the MSS index in the TCB to 0xF so that when the
	 * driver sends cpl_pass_accept_rpl, the TCB picks up the correct
	 * value.  If this were 0, TP would ignore any value > 0 for the
	 * MSS index.
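	 * In other words, seeding the TCB with the largest MSS index keeps
	 * the index supplied later in cpl_pass_accept_rpl from being
	 * discarded.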
	 */
	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
	req->cookie = (uintptr_t)skb;

	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
	if (ret < 0) {
		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
		       ret);
		kfree_skb(skb);
		kfree_skb(req_skb);
	}
}

/*
 * Handler for CPL_RX_PKT message.  We need to handle cpl_rx_pkt
 * messages when a filter is being used instead of a server to
 * redirect a SYN packet.  When packets hit the filter they are redirected
 * to the offload queue and the driver tries to establish the connection
 * using a firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int stid;
	unsigned int filter;
	struct ethhdr *eh = NULL;
	struct vlan_ethhdr *vlan_eh = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct rss_header *rss = (void *)skb->data;
	struct cpl_rx_pkt *cpl = (void *)skb->data;
	struct cpl_pass_accept_req *req = (void *)(rss + 1);
	struct l2t_entry *e;
	struct dst_entry *dst;
	struct c4iw_ep *lep = NULL;
	u16 window;
	struct port_info *pi;
	struct net_device *pdev;
	u16 rss_qid, eth_hdr_len;
	int step;
	u32 tx_chan;
	struct neighbour *neigh;

	/* Drop all non-SYN packets */
	if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
		goto reject;

	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
	if (!(rss->filter_hit && rss->filter_tid))
		goto reject;

	/*
	 * Calculate the server tid from the filter hit index in cpl_rx_pkt.
	 */
	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);

	lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
	if (!lep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
	case CHELSIO_T4:
		eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T5:
		eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T6:
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
		goto reject;
	}

	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		skb->vlan_tci = ntohs(cpl->vlan);
	}

	if (iph->version != 0x4)
		goto reject;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)rss);
	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
	skb_get(skb);

	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
	     ntohs(tcph->source), iph->tos);

	dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
			 iph->tos);
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	neigh = dst_neigh_lookup_skb(dst, skb);

	if (!neigh) {
		pr_err("%s - failed to allocate neigh!\n",
		       __func__);
		goto free_dst;
	}

	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		if (!pdev) {
			pr_err("%s - failed to find device!\n", __func__);
			goto free_dst;
		}
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
	} else {
		pdev = get_real_dev(neigh->dev);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
	}
	neigh_release(neigh);
	if (!e) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		goto free_dst;
	}

	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
	window = (__force u16) htons((__force u16)tcph->window);

	/* Calculate the filter portion for the LE region. */
	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
						    dev->rdev.lldi.ports[0],
						    e));

	/*
	 * Synthesize the cpl_pass_accept_req.  We have everything except the
	 * TID.  Once firmware sends a reply with the TID we update the TID
	 * field in the cpl and pass it through the regular
	 * cpl_pass_accept_req path.
	 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	if (lep)
		c4iw_put_ep(&lep->com);
	return 0;
}

/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt,
	[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
	[FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
};

static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:

		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
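		 * The endpoint is already being torn down on that path, so
		 * kicking off another abort from the timeout handler would
		 * be redundant.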
		 */
		abort = 0;
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}

static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		tmp->next = NULL;
		tmp->prev = NULL;
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	process_timedout_eps();
	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
		process_timedout_eps();
	}
}

static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	spin_lock(&timeout_lock);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		/*
		 * Only insert if it is not already on the list.
		 */
		if (!ep->entry.next) {
			list_add_tail(&ep->entry, &timeout_list);
			kickit = 1;
		}
	}
	spin_unlock(&timeout_lock);
	if (kickit)
		queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{

	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n",
		       __func__, rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}

static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	unsigned int tid = GET_TID(req);

	ep = get_ep_from_tid(dev, tid);
	/* This EP will be dereferenced in peer_abort() */
	if (!ep) {
		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (is_neg_adv(req->status)) {
		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
		goto out;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
out:
	sched(dev, skb);
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};

int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}