/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include "iw_cxgb4.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		 " compliant (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}
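
/*
 * Return a hardware TID to the LLD by queueing a CPL_TID_RELEASE message
 * on the setup control queue.
 */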
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
		   sizeof(struct iphdr) - sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
		print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
		iwpm_remove_mapinfo(&ep->com.local_addr,
				    &ep->com.mapped_local_addr);
		iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (!skb)
			return NULL;
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}

static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
				     __u8 *peer_ip, __be16 local_port,
				     __be16 peer_port, u8 tos,
				     __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		if (!dst)
			goto out;
		if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
			dst_release(dst);
			dst = NULL;
		}
	}

out:
	return dst;
}

static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
				    __be32 peer_ip, __be16 local_port,
				    __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n) {
		dst_release(&rt->dst);
		return NULL;
	}
	if (!our_interface(dev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}
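
/*
 * Send a FW_FLOWC_WR work request to the firmware to program the per-flow
 * parameters (PCI function, channel, port, ingress queue, initial send and
 * receive sequence numbers, send buffer size and MSS) for this offloaded
 * TCP connection before any MPA data is transmitted.
 */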
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * c4iw_form_pm_msg - Form a port mapper message with mapping info
 */
static void c4iw_form_pm_msg(struct c4iw_ep *ep,
			     struct iwpm_sa_data *pm_msg)
{
	memcpy(&pm_msg->loc_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&pm_msg->rem_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
}

/*
 * c4iw_form_reg_msg - Form a port mapper message with dev info
 */
static void c4iw_form_reg_msg(struct c4iw_dev *dev,
			      struct iwpm_dev_data *pm_msg)
{
	memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE);
	memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name,
	       IWPM_IFNAME_SIZE);
}

static void c4iw_record_pm_msg(struct c4iw_ep *ep,
			       struct iwpm_sa_data *pm_msg)
{
	memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr,
	       sizeof(ep->com.mapped_local_addr));
	memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr,
	       sizeof(ep->com.mapped_remote_addr));
}

static void best_mtu(const unsigned short *mtus, unsigned short mtu,
		     unsigned int *idx, int use_ts)
{
	unsigned short hdr_size = sizeof(struct iphdr) +
				  sizeof(struct tcphdr) +
				  (use_ts ? 12 : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}

static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct cpl_t5_act_open_req *t5_req;
	struct cpl_act_open_req6 *req6;
	struct cpl_t5_act_open_req6 *t5_req6;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen;
	int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
			sizeof(struct cpl_act_open_req) :
			sizeof(struct cpl_t5_act_open_req);
	int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
			sizeof(struct cpl_act_open_req6) :
			sizeof(struct cpl_t5_act_open_req6);
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.mapped_local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.mapped_remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_remote_addr;
	int win;

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_MASK)
		win = RCV_BUFSIZ_MASK;

	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(win);
	opt2 = RX_CHANNEL(0) |
	       CCTRL_ECN(enable_ecn) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		opt2 |= T5_OPT_2_VALID;
		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
	}
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
		if (ep->com.remote_addr.ss_family == AF_INET) {
			req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			OPCODE_TID(req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			req->local_port = la->sin_port;
			req->peer_port = ra->sin_port;
			req->local_ip = la->sin_addr.s_addr;
			req->peer_ip = ra->sin_addr.s_addr;
			req->opt0 = cpu_to_be64(opt0);
			req->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req->opt2 = cpu_to_be32(opt2);
		} else {
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);

			INIT_TP_WR(req6, 0);
			OPCODE_TID(req6) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid<<14)|ep->atid)));
			req6->local_port = la6->sin6_port;
			req6->peer_port = ra6->sin6_port;
			req6->local_ip_hi = *((__be64 *)
					      (la6->sin6_addr.s6_addr));
			req6->local_ip_lo = *((__be64 *)
					      (la6->sin6_addr.s6_addr + 8));
			req6->peer_ip_hi = *((__be64 *)
					     (ra6->sin6_addr.s6_addr));
			req6->peer_ip_lo = *((__be64 *)
					     (ra6->sin6_addr.s6_addr + 8));
			req6->opt0 = cpu_to_be64(opt0);
			req6->params = cpu_to_be32(cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		}
	} else {
		u32 isn = (prandom_u32() & ~7UL) - 1;

		opt2 |= T5_OPT_2_VALID;
		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
		if (peer2peer)
			isn += 4;

		if (ep->com.remote_addr.ss_family == AF_INET) {
			t5_req = (struct cpl_t5_act_open_req *)
				 skb_put(skb, wrlen);
			INIT_TP_WR(t5_req, 0);
			OPCODE_TID(t5_req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			t5_req->local_port = la->sin_port;
			t5_req->peer_port = ra->sin_port;
			t5_req->local_ip = la->sin_addr.s_addr;
			t5_req->peer_ip = ra->sin_addr.s_addr;
			t5_req->opt0 = cpu_to_be64(opt0);
			t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
						cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t)));
			t5_req->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__,
			     be32_to_cpu(t5_req->rsvd));
			t5_req->opt2 = cpu_to_be32(opt2);
		} else {
			t5_req6 = (struct cpl_t5_act_open_req6 *)
				  skb_put(skb, wrlen);
			INIT_TP_WR(t5_req6, 0);
			OPCODE_TID(t5_req6) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid<<14)|ep->atid)));
			t5_req6->local_port = la6->sin6_port;
			t5_req6->peer_port = ra6->sin6_port;
			t5_req6->local_ip_hi = *((__be64 *)
						 (la6->sin6_addr.s6_addr));
			t5_req6->local_ip_lo = *((__be64 *)
						 (la6->sin6_addr.s6_addr + 8));
			t5_req6->peer_ip_hi = *((__be64 *)
						(ra6->sin6_addr.s6_addr));
			t5_req6->peer_ip_lo = *((__be64 *)
						(ra6->sin6_addr.s6_addr + 8));
			t5_req6->opt0 = cpu_to_be64(opt0);
			t5_req6->params = (__force __be64)cpu_to_be32(
						cxgb4_select_ntuple(
						ep->com.dev->rdev.lldi.ports[0],
						ep->l2t));
			t5_req6->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__,
			     be32_to_cpu(t5_req6->rsvd));
			t5_req6->opt2 = cpu_to_be32(opt2);
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			 u8 mpa_rev_to_use)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
					FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);
	mutex_unlock(&ep->com.mutex);
	return 0;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	__state_set(&ep->com, ABORTING);
	set_bit(ABORT_CONN, &ep->com.history);
	return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
	}
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}

static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
				insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
					MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		       __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err:
	__state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}

static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
							MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
							MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	/*
	 * If the endpoint timer already expired, then we ignore
	 * the start request.  process_timeout() will abort
	 * the connection.
	 */
	if (!stop_ep_timer(ep)) {
		__state_set(&ep->com, MPA_REQ_RCVD);

		/* drive upcall */
		mutex_lock(&ep->parent_ep->com.mutex);
		if (ep->parent_ep->com.state != DEAD) {
			if (connect_request_upcall(ep))
				abort_connection(ep, skb, GFP_KERNEL);
		} else {
			abort_connection(ep, skb, GFP_KERNEL);
		}
		mutex_unlock(&ep->parent_ep->com.mutex);
	}
	return;
}

static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = lookup_tid(t, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		ep->rcv_seq += dlen;
		process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;
		BUG_ON(!ep->com.qp);
		if (status)
			pr_err("%s Unexpected streaming data." \
			       " qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}

static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;
	struct sockaddr_in *sin;
	int win;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
			V_FW_OFLD_CONNECTION_WR_ASTID(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_MASK)
		win = RCV_BUFSIZ_MASK;

	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
		(nocong ? NO_CONG(1) : 0) |
		KEEP_ALIVE(1) |
		DELACK(1) |
		WND_SCALE(wscale) |
		MSS_IDX(mtu_idx) |
		L2T_IDX(ep->l2t->idx) |
		TX_CHAN(ep->tx_chan) |
		SMAC_SEL(ep->smac_idx) |
		DSCP(ep->tos) |
		ULP_MODE(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ(win));
	req->tcb.opt2 = (__force __be32) (PACE(1) |
		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL(0) |
		CCTRL_ECN(enable_ecn) |
		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32) SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
	req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

/* Returns whether a CPL status conveys negative advice.
 */
static int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
}

static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
	ep->snd_win = snd_win;
	ep->rcv_win = rcv_win;
	PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
}

#define ACT_OPEN_RETRY_COUNT 2
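
/*
 * Resolve the neighbour for the peer address and fill in the endpoint's
 * L2T entry, MTU, TX channel, queue indices and TCP windows from the
 * egress (or loopback) device.
 */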
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));

		if (clear_mpa_v1) {
			ep->retry_with_mpa_v1 = 0;
			ep->tried_with_mpa_v1 = 0;
		}
	}
	err = 0;
out:
	rcu_read_unlock();

	neigh_release(n);

	return err;
}

static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->remote_addr;
	int iptype;
	__u8 *ra;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
1901 */ 1902 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); 1903 if (ep->atid == -1) { 1904 pr_err("%s - cannot alloc atid.\n", __func__); 1905 err = -ENOMEM; 1906 goto fail2; 1907 } 1908 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid); 1909 1910 /* find a route */ 1911 if (ep->com.cm_id->local_addr.ss_family == AF_INET) { 1912 ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr, 1913 raddr->sin_addr.s_addr, laddr->sin_port, 1914 raddr->sin_port, 0); 1915 iptype = 4; 1916 ra = (__u8 *)&raddr->sin_addr; 1917 } else { 1918 ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr, 1919 raddr6->sin6_addr.s6_addr, 1920 laddr6->sin6_port, raddr6->sin6_port, 0, 1921 raddr6->sin6_scope_id); 1922 iptype = 6; 1923 ra = (__u8 *)&raddr6->sin6_addr; 1924 } 1925 if (!ep->dst) { 1926 pr_err("%s - cannot find route.\n", __func__); 1927 err = -EHOSTUNREACH; 1928 goto fail3; 1929 } 1930 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false); 1931 if (err) { 1932 pr_err("%s - cannot alloc l2e.\n", __func__); 1933 goto fail4; 1934 } 1935 1936 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 1937 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 1938 ep->l2t->idx); 1939 1940 state_set(&ep->com, CONNECTING); 1941 ep->tos = 0; 1942 1943 /* send connect request to rnic */ 1944 err = send_connect(ep); 1945 if (!err) 1946 goto out; 1947 1948 cxgb4_l2t_release(ep->l2t); 1949 fail4: 1950 dst_release(ep->dst); 1951 fail3: 1952 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 1953 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 1954 fail2: 1955 /* 1956 * Remember to send a notification to the upper layer. We got here 1957 * because the upper layer does not know that this is a re-connect 1958 * attempt, so it is still waiting for the response to its first 1959 * connect request. 1960 */ 1961 connect_reply_upcall(ep, -ECONNRESET); 1962 c4iw_put_ep(&ep->com); 1963 out: 1964 return err; 1965 } 1966 1967 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1968 { 1969 struct c4iw_ep *ep; 1970 struct cpl_act_open_rpl *rpl = cplhdr(skb); 1971 unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( 1972 ntohl(rpl->atid_status))); 1973 struct tid_info *t = dev->rdev.lldi.tids; 1974 int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); 1975 struct sockaddr_in *la; 1976 struct sockaddr_in *ra; 1977 struct sockaddr_in6 *la6; 1978 struct sockaddr_in6 *ra6; 1979 1980 ep = lookup_atid(t, atid); 1981 la = (struct sockaddr_in *)&ep->com.mapped_local_addr; 1982 ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr; 1983 la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; 1984 ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr; 1985 1986 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, 1987 status, status2errno(status)); 1988 1989 if (is_neg_adv(status)) { 1990 printk(KERN_WARNING MOD "Connection problems for atid %u\n", 1991 atid); 1992 return 0; 1993 } 1994 1995 set_bit(ACT_OPEN_RPL, &ep->com.history); 1996 1997 /* 1998 * Log interesting failures.
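 * CPL_ERR_TCAM_FULL can be retried through a firmware offload-connection work request when fw_ofld_conn is enabled, and CPL_ERR_CONN_EXIST is retried up to ACT_OPEN_RETRY_COUNT times via c4iw_reconnect(); all other failures fall through to the connect_reply_upcall() below.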
1999 */ 2000 switch (status) { 2001 case CPL_ERR_CONN_RESET: 2002 case CPL_ERR_CONN_TIMEDOUT: 2003 break; 2004 case CPL_ERR_TCAM_FULL: 2005 mutex_lock(&dev->rdev.stats.lock); 2006 dev->rdev.stats.tcam_full++; 2007 mutex_unlock(&dev->rdev.stats.lock); 2008 if (ep->com.local_addr.ss_family == AF_INET && 2009 dev->rdev.lldi.enable_fw_ofld_conn) { 2010 send_fw_act_open_req(ep, 2011 GET_TID_TID(GET_AOPEN_ATID( 2012 ntohl(rpl->atid_status)))); 2013 return 0; 2014 } 2015 break; 2016 case CPL_ERR_CONN_EXIST: 2017 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2018 set_bit(ACT_RETRY_INUSE, &ep->com.history); 2019 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, 2020 atid); 2021 cxgb4_free_atid(t, atid); 2022 dst_release(ep->dst); 2023 cxgb4_l2t_release(ep->l2t); 2024 c4iw_reconnect(ep); 2025 return 0; 2026 } 2027 break; 2028 default: 2029 if (ep->com.local_addr.ss_family == AF_INET) { 2030 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n", 2031 atid, status, status2errno(status), 2032 &la->sin_addr.s_addr, ntohs(la->sin_port), 2033 &ra->sin_addr.s_addr, ntohs(ra->sin_port)); 2034 } else { 2035 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n", 2036 atid, status, status2errno(status), 2037 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port), 2038 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port)); 2039 } 2040 break; 2041 } 2042 2043 connect_reply_upcall(ep, status2errno(status)); 2044 state_set(&ep->com, DEAD); 2045 2046 if (status && act_open_has_tid(status)) 2047 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); 2048 2049 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 2050 cxgb4_free_atid(t, atid); 2051 dst_release(ep->dst); 2052 cxgb4_l2t_release(ep->l2t); 2053 c4iw_put_ep(&ep->com); 2054 2055 return 0; 2056 } 2057 2058 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2059 { 2060 struct cpl_pass_open_rpl *rpl = cplhdr(skb); 2061 struct tid_info *t = dev->rdev.lldi.tids; 2062 unsigned int stid = GET_TID(rpl); 2063 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 2064 2065 if (!ep) { 2066 PDBG("%s stid %d lookup failure!\n", __func__, stid); 2067 goto out; 2068 } 2069 PDBG("%s ep %p status %d error %d\n", __func__, ep, 2070 rpl->status, status2errno(rpl->status)); 2071 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 2072 2073 out: 2074 return 0; 2075 } 2076 2077 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2078 { 2079 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); 2080 struct tid_info *t = dev->rdev.lldi.tids; 2081 unsigned int stid = GET_TID(rpl); 2082 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 2083 2084 PDBG("%s ep %p\n", __func__, ep); 2085 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 2086 return 0; 2087 } 2088 2089 static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, 2090 struct cpl_pass_accept_req *req) 2091 { 2092 struct cpl_pass_accept_rpl *rpl; 2093 unsigned int mtu_idx; 2094 u64 opt0; 2095 u32 opt2; 2096 int wscale; 2097 struct cpl_t5_pass_accept_rpl *rpl5 = NULL; 2098 int win; 2099 2100 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2101 BUG_ON(skb_cloned(skb)); 2102 2103 skb_get(skb); 2104 rpl = cplhdr(skb); 2105 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 2106 skb_trim(skb, roundup(sizeof(*rpl5), 16)); 2107 rpl5 = (void *)rpl; 2108 INIT_TP_WR(rpl5, ep->hwtid); 2109 } else { 2110 skb_trim(skb, sizeof(*rpl)); 2111 INIT_TP_WR(rpl, ep->hwtid); 2112 } 2113 OPCODE_TID(rpl) = 
cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 2114 ep->hwtid)); 2115 2116 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 2117 enable_tcp_timestamps && req->tcpopt.tstamp); 2118 wscale = compute_wscale(rcv_win); 2119 2120 /* 2121 * Specify the largest window that will fit in opt0. The 2122 * remainder will be specified in the rx_data_ack. 2123 */ 2124 win = ep->rcv_win >> 10; 2125 if (win > RCV_BUFSIZ_MASK) 2126 win = RCV_BUFSIZ_MASK; 2127 opt0 = (nocong ? NO_CONG(1) : 0) | 2128 KEEP_ALIVE(1) | 2129 DELACK(1) | 2130 WND_SCALE(wscale) | 2131 MSS_IDX(mtu_idx) | 2132 L2T_IDX(ep->l2t->idx) | 2133 TX_CHAN(ep->tx_chan) | 2134 SMAC_SEL(ep->smac_idx) | 2135 DSCP(ep->tos >> 2) | 2136 ULP_MODE(ULP_MODE_TCPDDP) | 2137 RCV_BUFSIZ(win); 2138 opt2 = RX_CHANNEL(0) | 2139 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 2140 2141 if (enable_tcp_timestamps && req->tcpopt.tstamp) 2142 opt2 |= TSTAMPS_EN(1); 2143 if (enable_tcp_sack && req->tcpopt.sack) 2144 opt2 |= SACK_EN(1); 2145 if (wscale && enable_tcp_window_scaling) 2146 opt2 |= WND_SCALE_EN(1); 2147 if (enable_ecn) { 2148 const struct tcphdr *tcph; 2149 u32 hlen = ntohl(req->hdr_len); 2150 2151 tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) + 2152 G_IP_HDR_LEN(hlen); 2153 if (tcph->ece && tcph->cwr) 2154 opt2 |= CCTRL_ECN(1); 2155 } 2156 if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { 2157 u32 isn = (prandom_u32() & ~7UL) - 1; 2158 opt2 |= T5_OPT_2_VALID; 2159 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 2160 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ 2161 rpl5 = (void *)rpl; 2162 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); 2163 if (peer2peer) 2164 isn += 4; 2165 rpl5->iss = cpu_to_be32(isn); 2166 PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss)); 2167 } 2168 2169 rpl->opt0 = cpu_to_be64(opt0); 2170 rpl->opt2 = cpu_to_be32(opt2); 2171 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 2172 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 2173 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 2174 2175 return; 2176 } 2177 2178 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb) 2179 { 2180 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid); 2181 BUG_ON(skb_cloned(skb)); 2182 skb_trim(skb, sizeof(struct cpl_tid_release)); 2183 skb_get(skb); 2184 release_tid(&dev->rdev, hwtid, skb); 2185 return; 2186 } 2187 2188 static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype, 2189 __u8 *local_ip, __u8 *peer_ip, 2190 __be16 *local_port, __be16 *peer_port) 2191 { 2192 int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); 2193 int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); 2194 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); 2195 struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len); 2196 struct tcphdr *tcp = (struct tcphdr *) 2197 ((u8 *)(req + 1) + eth_len + ip_len); 2198 2199 if (ip->version == 4) { 2200 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, 2201 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), 2202 ntohs(tcp->dest)); 2203 *iptype = 4; 2204 memcpy(peer_ip, &ip->saddr, 4); 2205 memcpy(local_ip, &ip->daddr, 4); 2206 } else { 2207 PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__, 2208 ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source), 2209 ntohs(tcp->dest)); 2210 *iptype = 6; 2211 memcpy(peer_ip, ip6->saddr.s6_addr, 16); 2212 memcpy(local_ip, ip6->daddr.s6_addr, 16); 2213 } 2214 *peer_port = tcp->source; 2215 *local_port = tcp->dest; 2216 2217 return; 2218 } 2219 2220 static int 
pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) 2221 { 2222 struct c4iw_ep *child_ep = NULL, *parent_ep; 2223 struct cpl_pass_accept_req *req = cplhdr(skb); 2224 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); 2225 struct tid_info *t = dev->rdev.lldi.tids; 2226 unsigned int hwtid = GET_TID(req); 2227 struct dst_entry *dst; 2228 __u8 local_ip[16], peer_ip[16]; 2229 __be16 local_port, peer_port; 2230 int err; 2231 u16 peer_mss = ntohs(req->tcpopt.mss); 2232 int iptype; 2233 unsigned short hdrs; 2234 2235 parent_ep = lookup_stid(t, stid); 2236 if (!parent_ep) { 2237 PDBG("%s connect request on invalid stid %d\n", __func__, stid); 2238 goto reject; 2239 } 2240 2241 if (state_read(&parent_ep->com) != LISTEN) { 2242 printk(KERN_ERR "%s - listening ep not in LISTEN\n", 2243 __func__); 2244 goto reject; 2245 } 2246 2247 get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port); 2248 2249 /* Find output route */ 2250 if (iptype == 4) { 2251 PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n" 2252 , __func__, parent_ep, hwtid, 2253 local_ip, peer_ip, ntohs(local_port), 2254 ntohs(peer_port), peer_mss); 2255 dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip, 2256 local_port, peer_port, 2257 GET_POPEN_TOS(ntohl(req->tos_stid))); 2258 } else { 2259 PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" 2260 , __func__, parent_ep, hwtid, 2261 local_ip, peer_ip, ntohs(local_port), 2262 ntohs(peer_port), peer_mss); 2263 dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port, 2264 PASS_OPEN_TOS(ntohl(req->tos_stid)), 2265 ((struct sockaddr_in6 *) 2266 &parent_ep->com.local_addr)->sin6_scope_id); 2267 } 2268 if (!dst) { 2269 printk(KERN_ERR MOD "%s - failed to find dst entry!\n", 2270 __func__); 2271 goto reject; 2272 } 2273 2274 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 2275 if (!child_ep) { 2276 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 2277 __func__); 2278 dst_release(dst); 2279 goto reject; 2280 } 2281 2282 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false); 2283 if (err) { 2284 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 2285 __func__); 2286 dst_release(dst); 2287 kfree(child_ep); 2288 goto reject; 2289 } 2290 2291 hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) + 2292 ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 
12 : 0); 2293 if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) 2294 child_ep->mtu = peer_mss + hdrs; 2295 2296 state_set(&child_ep->com, CONNECTING); 2297 child_ep->com.dev = dev; 2298 child_ep->com.cm_id = NULL; 2299 if (iptype == 4) { 2300 struct sockaddr_in *sin = (struct sockaddr_in *) 2301 &child_ep->com.local_addr; 2302 sin->sin_family = PF_INET; 2303 sin->sin_port = local_port; 2304 sin->sin_addr.s_addr = *(__be32 *)local_ip; 2305 sin = (struct sockaddr_in *)&child_ep->com.remote_addr; 2306 sin->sin_family = PF_INET; 2307 sin->sin_port = peer_port; 2308 sin->sin_addr.s_addr = *(__be32 *)peer_ip; 2309 } else { 2310 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) 2311 &child_ep->com.local_addr; 2312 sin6->sin6_family = PF_INET6; 2313 sin6->sin6_port = local_port; 2314 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); 2315 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr; 2316 sin6->sin6_family = PF_INET6; 2317 sin6->sin6_port = peer_port; 2318 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); 2319 } 2320 c4iw_get_ep(&parent_ep->com); 2321 child_ep->parent_ep = parent_ep; 2322 child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); 2323 child_ep->dst = dst; 2324 child_ep->hwtid = hwtid; 2325 2326 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, 2327 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); 2328 2329 init_timer(&child_ep->timer); 2330 cxgb4_insert_tid(t, child_ep, hwtid); 2331 insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid); 2332 accept_cr(child_ep, skb, req); 2333 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history); 2334 goto out; 2335 reject: 2336 reject_cr(dev, hwtid, skb); 2337 out: 2338 return 0; 2339 } 2340 2341 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) 2342 { 2343 struct c4iw_ep *ep; 2344 struct cpl_pass_establish *req = cplhdr(skb); 2345 struct tid_info *t = dev->rdev.lldi.tids; 2346 unsigned int tid = GET_TID(req); 2347 2348 ep = lookup_tid(t, tid); 2349 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2350 ep->snd_seq = be32_to_cpu(req->snd_isn); 2351 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 2352 2353 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid, 2354 ntohs(req->tcp_opt)); 2355 2356 set_emss(ep, ntohs(req->tcp_opt)); 2357 2358 dst_confirm(ep->dst); 2359 state_set(&ep->com, MPA_REQ_WAIT); 2360 start_ep_timer(ep); 2361 send_flowc(ep, skb); 2362 set_bit(PASS_ESTAB, &ep->com.history); 2363 2364 return 0; 2365 } 2366 2367 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) 2368 { 2369 struct cpl_peer_close *hdr = cplhdr(skb); 2370 struct c4iw_ep *ep; 2371 struct c4iw_qp_attributes attrs; 2372 int disconnect = 1; 2373 int release = 0; 2374 struct tid_info *t = dev->rdev.lldi.tids; 2375 unsigned int tid = GET_TID(hdr); 2376 int ret; 2377 2378 ep = lookup_tid(t, tid); 2379 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2380 dst_confirm(ep->dst); 2381 2382 set_bit(PEER_CLOSE, &ep->com.history); 2383 mutex_lock(&ep->com.mutex); 2384 switch (ep->com.state) { 2385 case MPA_REQ_WAIT: 2386 __state_set(&ep->com, CLOSING); 2387 break; 2388 case MPA_REQ_SENT: 2389 __state_set(&ep->com, CLOSING); 2390 connect_reply_upcall(ep, -ECONNRESET); 2391 break; 2392 case MPA_REQ_RCVD: 2393 2394 /* 2395 * We're gonna mark this puppy DEAD, but keep 2396 * the reference on it until the ULP accepts or 2397 * rejects the CR. Also wake up anyone waiting 2398 * in rdma connection migration (see c4iw_accept_cr()). 
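 * Note that the state actually moves to CLOSING here; the endpoint only becomes DEAD once the close or abort sequence completes.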
2399 */ 2400 __state_set(&ep->com, CLOSING); 2401 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2402 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2403 break; 2404 case MPA_REP_SENT: 2405 __state_set(&ep->com, CLOSING); 2406 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2407 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2408 break; 2409 case FPDU_MODE: 2410 start_ep_timer(ep); 2411 __state_set(&ep->com, CLOSING); 2412 attrs.next_state = C4IW_QP_STATE_CLOSING; 2413 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2414 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2415 if (ret != -ECONNRESET) { 2416 peer_close_upcall(ep); 2417 disconnect = 1; 2418 } 2419 break; 2420 case ABORTING: 2421 disconnect = 0; 2422 break; 2423 case CLOSING: 2424 __state_set(&ep->com, MORIBUND); 2425 disconnect = 0; 2426 break; 2427 case MORIBUND: 2428 (void)stop_ep_timer(ep); 2429 if (ep->com.cm_id && ep->com.qp) { 2430 attrs.next_state = C4IW_QP_STATE_IDLE; 2431 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2432 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2433 } 2434 close_complete_upcall(ep, 0); 2435 __state_set(&ep->com, DEAD); 2436 release = 1; 2437 disconnect = 0; 2438 break; 2439 case DEAD: 2440 disconnect = 0; 2441 break; 2442 default: 2443 BUG_ON(1); 2444 } 2445 mutex_unlock(&ep->com.mutex); 2446 if (disconnect) 2447 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2448 if (release) 2449 release_ep_resources(ep); 2450 return 0; 2451 } 2452 2453 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 2454 { 2455 struct cpl_abort_req_rss *req = cplhdr(skb); 2456 struct c4iw_ep *ep; 2457 struct cpl_abort_rpl *rpl; 2458 struct sk_buff *rpl_skb; 2459 struct c4iw_qp_attributes attrs; 2460 int ret; 2461 int release = 0; 2462 struct tid_info *t = dev->rdev.lldi.tids; 2463 unsigned int tid = GET_TID(req); 2464 2465 ep = lookup_tid(t, tid); 2466 if (is_neg_adv(req->status)) { 2467 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, 2468 ep->hwtid); 2469 return 0; 2470 } 2471 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 2472 ep->com.state); 2473 set_bit(PEER_ABORT, &ep->com.history); 2474 2475 /* 2476 * Wake up any threads in rdma_init() or rdma_fini(). 2477 * However, this is not needed if com state is just 2478 * MPA_REQ_SENT 2479 */ 2480 if (ep->com.state != MPA_REQ_SENT) 2481 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2482 2483 mutex_lock(&ep->com.mutex); 2484 switch (ep->com.state) { 2485 case CONNECTING: 2486 break; 2487 case MPA_REQ_WAIT: 2488 (void)stop_ep_timer(ep); 2489 break; 2490 case MPA_REQ_SENT: 2491 (void)stop_ep_timer(ep); 2492 if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1)) 2493 connect_reply_upcall(ep, -ECONNRESET); 2494 else { 2495 /* 2496 * we just don't send notification upwards because we 2497 * want to retry with mpa_v1 without upper layers even 2498 * knowing it. 2499 * 2500 * do some housekeeping so as to re-initiate the 2501 * connection 2502 */ 2503 PDBG("%s: mpa_rev=%d. 
Retrying with mpav1\n", __func__, 2504 mpa_rev); 2505 ep->retry_with_mpa_v1 = 1; 2506 } 2507 break; 2508 case MPA_REP_SENT: 2509 break; 2510 case MPA_REQ_RCVD: 2511 break; 2512 case MORIBUND: 2513 case CLOSING: 2514 stop_ep_timer(ep); 2515 /*FALLTHROUGH*/ 2516 case FPDU_MODE: 2517 if (ep->com.cm_id && ep->com.qp) { 2518 attrs.next_state = C4IW_QP_STATE_ERROR; 2519 ret = c4iw_modify_qp(ep->com.qp->rhp, 2520 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 2521 &attrs, 1); 2522 if (ret) 2523 printk(KERN_ERR MOD 2524 "%s - qp <- error failed!\n", 2525 __func__); 2526 } 2527 peer_abort_upcall(ep); 2528 break; 2529 case ABORTING: 2530 break; 2531 case DEAD: 2532 PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); 2533 mutex_unlock(&ep->com.mutex); 2534 return 0; 2535 default: 2536 BUG_ON(1); 2537 break; 2538 } 2539 dst_confirm(ep->dst); 2540 if (ep->com.state != ABORTING) { 2541 __state_set(&ep->com, DEAD); 2542 /* we don't release if we want to retry with mpa_v1 */ 2543 if (!ep->retry_with_mpa_v1) 2544 release = 1; 2545 } 2546 mutex_unlock(&ep->com.mutex); 2547 2548 rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); 2549 if (!rpl_skb) { 2550 printk(KERN_ERR MOD "%s - cannot allocate skb!\n", 2551 __func__); 2552 release = 1; 2553 goto out; 2554 } 2555 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 2556 rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); 2557 INIT_TP_WR(rpl, ep->hwtid); 2558 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); 2559 rpl->cmd = CPL_ABORT_NO_RST; 2560 c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); 2561 out: 2562 if (release) 2563 release_ep_resources(ep); 2564 else if (ep->retry_with_mpa_v1) { 2565 remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); 2566 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 2567 dst_release(ep->dst); 2568 cxgb4_l2t_release(ep->l2t); 2569 c4iw_reconnect(ep); 2570 } 2571 2572 return 0; 2573 } 2574 2575 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2576 { 2577 struct c4iw_ep *ep; 2578 struct c4iw_qp_attributes attrs; 2579 struct cpl_close_con_rpl *rpl = cplhdr(skb); 2580 int release = 0; 2581 struct tid_info *t = dev->rdev.lldi.tids; 2582 unsigned int tid = GET_TID(rpl); 2583 2584 ep = lookup_tid(t, tid); 2585 2586 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2587 BUG_ON(!ep); 2588 2589 /* The cm_id may be null if we failed to connect */ 2590 mutex_lock(&ep->com.mutex); 2591 switch (ep->com.state) { 2592 case CLOSING: 2593 __state_set(&ep->com, MORIBUND); 2594 break; 2595 case MORIBUND: 2596 (void)stop_ep_timer(ep); 2597 if ((ep->com.cm_id) && (ep->com.qp)) { 2598 attrs.next_state = C4IW_QP_STATE_IDLE; 2599 c4iw_modify_qp(ep->com.qp->rhp, 2600 ep->com.qp, 2601 C4IW_QP_ATTR_NEXT_STATE, 2602 &attrs, 1); 2603 } 2604 close_complete_upcall(ep, 0); 2605 __state_set(&ep->com, DEAD); 2606 release = 1; 2607 break; 2608 case ABORTING: 2609 case DEAD: 2610 break; 2611 default: 2612 BUG_ON(1); 2613 break; 2614 } 2615 mutex_unlock(&ep->com.mutex); 2616 if (release) 2617 release_ep_resources(ep); 2618 return 0; 2619 } 2620 2621 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) 2622 { 2623 struct cpl_rdma_terminate *rpl = cplhdr(skb); 2624 struct tid_info *t = dev->rdev.lldi.tids; 2625 unsigned int tid = GET_TID(rpl); 2626 struct c4iw_ep *ep; 2627 struct c4iw_qp_attributes attrs; 2628 2629 ep = lookup_tid(t, tid); 2630 BUG_ON(!ep); 2631 2632 if (ep && ep->com.qp) { 2633 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, 2634 ep->com.qp->wq.sq.qid); 2635 
attrs.next_state = C4IW_QP_STATE_TERMINATE; 2636 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2637 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2638 } else 2639 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); 2640 2641 return 0; 2642 } 2643 2644 /* 2645 * Upcall from the adapter indicating data has been transmitted. 2646 * For us its just the single MPA request or reply. We can now free 2647 * the skb holding the mpa message. 2648 */ 2649 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) 2650 { 2651 struct c4iw_ep *ep; 2652 struct cpl_fw4_ack *hdr = cplhdr(skb); 2653 u8 credits = hdr->credits; 2654 unsigned int tid = GET_TID(hdr); 2655 struct tid_info *t = dev->rdev.lldi.tids; 2656 2657 2658 ep = lookup_tid(t, tid); 2659 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 2660 if (credits == 0) { 2661 PDBG("%s 0 credit ack ep %p tid %u state %u\n", 2662 __func__, ep, ep->hwtid, state_read(&ep->com)); 2663 return 0; 2664 } 2665 2666 dst_confirm(ep->dst); 2667 if (ep->mpa_skb) { 2668 PDBG("%s last streaming msg ack ep %p tid %u state %u " 2669 "initiator %u freeing skb\n", __func__, ep, ep->hwtid, 2670 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); 2671 kfree_skb(ep->mpa_skb); 2672 ep->mpa_skb = NULL; 2673 } 2674 return 0; 2675 } 2676 2677 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 2678 { 2679 int err = 0; 2680 int disconnect = 0; 2681 struct c4iw_ep *ep = to_ep(cm_id); 2682 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2683 2684 mutex_lock(&ep->com.mutex); 2685 if (ep->com.state == DEAD) { 2686 mutex_unlock(&ep->com.mutex); 2687 c4iw_put_ep(&ep->com); 2688 return -ECONNRESET; 2689 } 2690 set_bit(ULP_REJECT, &ep->com.history); 2691 BUG_ON(ep->com.state != MPA_REQ_RCVD); 2692 if (mpa_rev == 0) 2693 abort_connection(ep, NULL, GFP_KERNEL); 2694 else { 2695 err = send_mpa_reject(ep, pdata, pdata_len); 2696 disconnect = 1; 2697 } 2698 mutex_unlock(&ep->com.mutex); 2699 if (disconnect) 2700 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2701 c4iw_put_ep(&ep->com); 2702 return 0; 2703 } 2704 2705 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2706 { 2707 int err; 2708 struct c4iw_qp_attributes attrs; 2709 enum c4iw_qp_attr_mask mask; 2710 struct c4iw_ep *ep = to_ep(cm_id); 2711 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 2712 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 2713 2714 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2715 2716 mutex_lock(&ep->com.mutex); 2717 if (ep->com.state == DEAD) { 2718 err = -ECONNRESET; 2719 goto err; 2720 } 2721 2722 BUG_ON(ep->com.state != MPA_REQ_RCVD); 2723 BUG_ON(!qp); 2724 2725 set_bit(ULP_ACCEPT, &ep->com.history); 2726 if ((conn_param->ord > c4iw_max_read_depth) || 2727 (conn_param->ird > c4iw_max_read_depth)) { 2728 abort_connection(ep, NULL, GFP_KERNEL); 2729 err = -EINVAL; 2730 goto err; 2731 } 2732 2733 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 2734 if (conn_param->ord > ep->ird) { 2735 ep->ird = conn_param->ird; 2736 ep->ord = conn_param->ord; 2737 send_mpa_reject(ep, conn_param->private_data, 2738 conn_param->private_data_len); 2739 abort_connection(ep, NULL, GFP_KERNEL); 2740 err = -ENOMEM; 2741 goto err; 2742 } 2743 if (conn_param->ird > ep->ord) { 2744 if (!ep->ord) 2745 conn_param->ird = 1; 2746 else { 2747 abort_connection(ep, NULL, GFP_KERNEL); 2748 err = -ENOMEM; 2749 goto err; 2750 } 2751 } 2752 2753 } 2754 ep->ird = conn_param->ird; 2755 ep->ord = conn_param->ord; 2756 2757 if 
(ep->mpa_attr.version != 2) 2758 if (peer2peer && ep->ird == 0) 2759 ep->ird = 1; 2760 2761 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); 2762 2763 cm_id->add_ref(cm_id); 2764 ep->com.cm_id = cm_id; 2765 ep->com.qp = qp; 2766 ref_qp(ep); 2767 2768 /* bind QP to EP and move to RTS */ 2769 attrs.mpa_attr = ep->mpa_attr; 2770 attrs.max_ird = ep->ird; 2771 attrs.max_ord = ep->ord; 2772 attrs.llp_stream_handle = ep; 2773 attrs.next_state = C4IW_QP_STATE_RTS; 2774 2775 /* bind QP and TID with INIT_WR */ 2776 mask = C4IW_QP_ATTR_NEXT_STATE | 2777 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 2778 C4IW_QP_ATTR_MPA_ATTR | 2779 C4IW_QP_ATTR_MAX_IRD | 2780 C4IW_QP_ATTR_MAX_ORD; 2781 2782 err = c4iw_modify_qp(ep->com.qp->rhp, 2783 ep->com.qp, mask, &attrs, 1); 2784 if (err) 2785 goto err1; 2786 err = send_mpa_reply(ep, conn_param->private_data, 2787 conn_param->private_data_len); 2788 if (err) 2789 goto err1; 2790 2791 __state_set(&ep->com, FPDU_MODE); 2792 established_upcall(ep); 2793 mutex_unlock(&ep->com.mutex); 2794 c4iw_put_ep(&ep->com); 2795 return 0; 2796 err1: 2797 ep->com.cm_id = NULL; 2798 cm_id->rem_ref(cm_id); 2799 err: 2800 mutex_unlock(&ep->com.mutex); 2801 c4iw_put_ep(&ep->com); 2802 return err; 2803 } 2804 2805 static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 2806 { 2807 struct in_device *ind; 2808 int found = 0; 2809 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr; 2810 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; 2811 2812 ind = in_dev_get(dev->rdev.lldi.ports[0]); 2813 if (!ind) 2814 return -EADDRNOTAVAIL; 2815 for_primary_ifa(ind) { 2816 laddr->sin_addr.s_addr = ifa->ifa_address; 2817 raddr->sin_addr.s_addr = ifa->ifa_address; 2818 found = 1; 2819 break; 2820 } 2821 endfor_ifa(ind); 2822 in_dev_put(ind); 2823 return found ? 
0 : -EADDRNOTAVAIL; 2824 } 2825 2826 static int get_lladdr(struct net_device *dev, struct in6_addr *addr, 2827 unsigned char banned_flags) 2828 { 2829 struct inet6_dev *idev; 2830 int err = -EADDRNOTAVAIL; 2831 2832 rcu_read_lock(); 2833 idev = __in6_dev_get(dev); 2834 if (idev != NULL) { 2835 struct inet6_ifaddr *ifp; 2836 2837 read_lock_bh(&idev->lock); 2838 list_for_each_entry(ifp, &idev->addr_list, if_list) { 2839 if (ifp->scope == IFA_LINK && 2840 !(ifp->flags & banned_flags)) { 2841 memcpy(addr, &ifp->addr, 16); 2842 err = 0; 2843 break; 2844 } 2845 } 2846 read_unlock_bh(&idev->lock); 2847 } 2848 rcu_read_unlock(); 2849 return err; 2850 } 2851 2852 static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 2853 { 2854 struct in6_addr uninitialized_var(addr); 2855 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr; 2856 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr; 2857 2858 if (get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) { 2859 memcpy(la6->sin6_addr.s6_addr, &addr, 16); 2860 memcpy(ra6->sin6_addr.s6_addr, &addr, 16); 2861 return 0; 2862 } 2863 return -EADDRNOTAVAIL; 2864 } 2865 2866 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2867 { 2868 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2869 struct c4iw_ep *ep; 2870 int err = 0; 2871 struct sockaddr_in *laddr; 2872 struct sockaddr_in *raddr; 2873 struct sockaddr_in6 *laddr6; 2874 struct sockaddr_in6 *raddr6; 2875 struct iwpm_dev_data pm_reg_msg; 2876 struct iwpm_sa_data pm_msg; 2877 __u8 *ra; 2878 int iptype; 2879 int iwpm_err = 0; 2880 2881 if ((conn_param->ord > c4iw_max_read_depth) || 2882 (conn_param->ird > c4iw_max_read_depth)) { 2883 err = -EINVAL; 2884 goto out; 2885 } 2886 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2887 if (!ep) { 2888 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 2889 err = -ENOMEM; 2890 goto out; 2891 } 2892 init_timer(&ep->timer); 2893 ep->plen = conn_param->private_data_len; 2894 if (ep->plen) 2895 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 2896 conn_param->private_data, ep->plen); 2897 ep->ird = conn_param->ird; 2898 ep->ord = conn_param->ord; 2899 2900 if (peer2peer && ep->ord == 0) 2901 ep->ord = 1; 2902 2903 cm_id->add_ref(cm_id); 2904 ep->com.dev = dev; 2905 ep->com.cm_id = cm_id; 2906 ep->com.qp = get_qhp(dev, conn_param->qpn); 2907 if (!ep->com.qp) { 2908 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); 2909 err = -EINVAL; 2910 goto fail1; 2911 } 2912 ref_qp(ep); 2913 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, 2914 ep->com.qp, cm_id); 2915 2916 /* 2917 * Allocate an active TID to initiate a TCP connection. 
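 * Once the atid is set up, the local and remote addresses are recorded (and, when the iWARP port mapper is usable, mapped) before the route lookup and the connect request itself.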
2918 */ 2919 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); 2920 if (ep->atid == -1) { 2921 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); 2922 err = -ENOMEM; 2923 goto fail1; 2924 } 2925 insert_handle(dev, &dev->atid_idr, ep, ep->atid); 2926 2927 memcpy(&ep->com.local_addr, &cm_id->local_addr, 2928 sizeof(ep->com.local_addr)); 2929 memcpy(&ep->com.remote_addr, &cm_id->remote_addr, 2930 sizeof(ep->com.remote_addr)); 2931 2932 /* No port mapper available, go with the specified peer information */ 2933 memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr, 2934 sizeof(ep->com.mapped_local_addr)); 2935 memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr, 2936 sizeof(ep->com.mapped_remote_addr)); 2937 2938 c4iw_form_reg_msg(dev, &pm_reg_msg); 2939 iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW); 2940 if (iwpm_err) { 2941 PDBG("%s: Port Mapper reg pid fail (err = %d).\n", 2942 __func__, iwpm_err); 2943 } 2944 if (iwpm_valid_pid() && !iwpm_err) { 2945 c4iw_form_pm_msg(ep, &pm_msg); 2946 iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW); 2947 if (iwpm_err) 2948 PDBG("%s: Port Mapper query fail (err = %d).\n", 2949 __func__, iwpm_err); 2950 else 2951 c4iw_record_pm_msg(ep, &pm_msg); 2952 } 2953 if (iwpm_create_mapinfo(&ep->com.local_addr, 2954 &ep->com.mapped_local_addr, RDMA_NL_C4IW)) { 2955 iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW); 2956 err = -ENOMEM; 2957 goto fail1; 2958 } 2959 print_addr(&ep->com, __func__, "add_query/create_mapinfo"); 2960 set_bit(RELEASE_MAPINFO, &ep->com.flags); 2961 2962 laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr; 2963 raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr; 2964 laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; 2965 raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr; 2966 2967 if (cm_id->remote_addr.ss_family == AF_INET) { 2968 iptype = 4; 2969 ra = (__u8 *)&raddr->sin_addr; 2970 2971 /* 2972 * Handle loopback requests to INADDR_ANY. 2973 */ 2974 if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) { 2975 err = pick_local_ipaddrs(dev, cm_id); 2976 if (err) 2977 goto fail1; 2978 } 2979 2980 /* find a route */ 2981 PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n", 2982 __func__, &laddr->sin_addr, ntohs(laddr->sin_port), 2983 ra, ntohs(raddr->sin_port)); 2984 ep->dst = find_route(dev, laddr->sin_addr.s_addr, 2985 raddr->sin_addr.s_addr, laddr->sin_port, 2986 raddr->sin_port, 0); 2987 } else { 2988 iptype = 6; 2989 ra = (__u8 *)&raddr6->sin6_addr; 2990 2991 /* 2992 * Handle loopback requests to INADDR_ANY. 
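 * For IPv6 this is the unspecified address (::); pick_local_ip6addrs() substitutes a link-local address of the port for both the local and the remote side.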
2993 */ 2994 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { 2995 err = pick_local_ip6addrs(dev, cm_id); 2996 if (err) 2997 goto fail1; 2998 } 2999 3000 /* find a route */ 3001 PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n", 3002 __func__, laddr6->sin6_addr.s6_addr, 3003 ntohs(laddr6->sin6_port), 3004 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port)); 3005 ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr, 3006 raddr6->sin6_addr.s6_addr, 3007 laddr6->sin6_port, raddr6->sin6_port, 0, 3008 raddr6->sin6_scope_id); 3009 } 3010 if (!ep->dst) { 3011 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 3012 err = -EHOSTUNREACH; 3013 goto fail2; 3014 } 3015 3016 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true); 3017 if (err) { 3018 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 3019 goto fail3; 3020 } 3021 3022 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 3023 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 3024 ep->l2t->idx); 3025 3026 state_set(&ep->com, CONNECTING); 3027 ep->tos = 0; 3028 3029 /* send connect request to rnic */ 3030 err = send_connect(ep); 3031 if (!err) 3032 goto out; 3033 3034 cxgb4_l2t_release(ep->l2t); 3035 fail3: 3036 dst_release(ep->dst); 3037 fail2: 3038 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 3039 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 3040 fail1: 3041 cm_id->rem_ref(cm_id); 3042 c4iw_put_ep(&ep->com); 3043 out: 3044 return err; 3045 } 3046 3047 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3048 { 3049 int err; 3050 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) 3051 &ep->com.mapped_local_addr; 3052 3053 c4iw_init_wr_wait(&ep->com.wr_wait); 3054 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0], 3055 ep->stid, &sin6->sin6_addr, 3056 sin6->sin6_port, 3057 ep->com.dev->rdev.lldi.rxq_ids[0]); 3058 if (!err) 3059 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3060 &ep->com.wr_wait, 3061 0, 0, __func__); 3062 if (err) 3063 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n", 3064 err, ep->stid, 3065 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port)); 3066 return err; 3067 } 3068 3069 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3070 { 3071 int err; 3072 struct sockaddr_in *sin = (struct sockaddr_in *) 3073 &ep->com.mapped_local_addr; 3074 3075 if (dev->rdev.lldi.enable_fw_ofld_conn) { 3076 do { 3077 err = cxgb4_create_server_filter( 3078 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3079 sin->sin_addr.s_addr, sin->sin_port, 0, 3080 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0); 3081 if (err == -EBUSY) { 3082 set_current_state(TASK_UNINTERRUPTIBLE); 3083 schedule_timeout(usecs_to_jiffies(100)); 3084 } 3085 } while (err == -EBUSY); 3086 } else { 3087 c4iw_init_wr_wait(&ep->com.wr_wait); 3088 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], 3089 ep->stid, sin->sin_addr.s_addr, sin->sin_port, 3090 0, ep->com.dev->rdev.lldi.rxq_ids[0]); 3091 if (!err) 3092 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3093 &ep->com.wr_wait, 3094 0, 0, __func__); 3095 } 3096 if (err) 3097 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n" 3098 , err, ep->stid, 3099 &sin->sin_addr, ntohs(sin->sin_port)); 3100 return err; 3101 } 3102 3103 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 3104 { 3105 int err = 0; 3106 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 3107 struct c4iw_listen_ep *ep; 3108 struct iwpm_dev_data 
pm_reg_msg; 3109 struct iwpm_sa_data pm_msg; 3110 int iwpm_err = 0; 3111 3112 might_sleep(); 3113 3114 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 3115 if (!ep) { 3116 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 3117 err = -ENOMEM; 3118 goto fail1; 3119 } 3120 PDBG("%s ep %p\n", __func__, ep); 3121 cm_id->add_ref(cm_id); 3122 ep->com.cm_id = cm_id; 3123 ep->com.dev = dev; 3124 ep->backlog = backlog; 3125 memcpy(&ep->com.local_addr, &cm_id->local_addr, 3126 sizeof(ep->com.local_addr)); 3127 3128 /* 3129 * Allocate a server TID. 3130 */ 3131 if (dev->rdev.lldi.enable_fw_ofld_conn && 3132 ep->com.local_addr.ss_family == AF_INET) 3133 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, 3134 cm_id->local_addr.ss_family, ep); 3135 else 3136 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, 3137 cm_id->local_addr.ss_family, ep); 3138 3139 if (ep->stid == -1) { 3140 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); 3141 err = -ENOMEM; 3142 goto fail2; 3143 } 3144 insert_handle(dev, &dev->stid_idr, ep, ep->stid); 3145 3146 /* No port mapper available, go with the specified info */ 3147 memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr, 3148 sizeof(ep->com.mapped_local_addr)); 3149 3150 c4iw_form_reg_msg(dev, &pm_reg_msg); 3151 iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW); 3152 if (iwpm_err) { 3153 PDBG("%s: Port Mapper reg pid fail (err = %d).\n", 3154 __func__, iwpm_err); 3155 } 3156 if (iwpm_valid_pid() && !iwpm_err) { 3157 memcpy(&pm_msg.loc_addr, &ep->com.local_addr, 3158 sizeof(ep->com.local_addr)); 3159 iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW); 3160 if (iwpm_err) 3161 PDBG("%s: Port Mapper query fail (err = %d).\n", 3162 __func__, iwpm_err); 3163 else 3164 memcpy(&ep->com.mapped_local_addr, 3165 &pm_msg.mapped_loc_addr, 3166 sizeof(ep->com.mapped_local_addr)); 3167 } 3168 if (iwpm_create_mapinfo(&ep->com.local_addr, 3169 &ep->com.mapped_local_addr, RDMA_NL_C4IW)) { 3170 err = -ENOMEM; 3171 goto fail3; 3172 } 3173 print_addr(&ep->com, __func__, "add_mapping/create_mapinfo"); 3174 3175 set_bit(RELEASE_MAPINFO, &ep->com.flags); 3176 state_set(&ep->com, LISTEN); 3177 if (ep->com.local_addr.ss_family == AF_INET) 3178 err = create_server4(dev, ep); 3179 else 3180 err = create_server6(dev, ep); 3181 if (!err) { 3182 cm_id->provider_data = ep; 3183 goto out; 3184 } 3185 3186 fail3: 3187 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3188 ep->com.local_addr.ss_family); 3189 fail2: 3190 cm_id->rem_ref(cm_id); 3191 c4iw_put_ep(&ep->com); 3192 fail1: 3193 out: 3194 return err; 3195 } 3196 3197 int c4iw_destroy_listen(struct iw_cm_id *cm_id) 3198 { 3199 int err; 3200 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 3201 3202 PDBG("%s ep %p\n", __func__, ep); 3203 3204 might_sleep(); 3205 state_set(&ep->com, DEAD); 3206 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn && 3207 ep->com.local_addr.ss_family == AF_INET) { 3208 err = cxgb4_remove_server_filter( 3209 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3210 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3211 } else { 3212 c4iw_init_wr_wait(&ep->com.wr_wait); 3213 err = cxgb4_remove_server( 3214 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3215 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3216 if (err) 3217 goto done; 3218 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 3219 0, 0, __func__); 3220 } 3221 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); 3222 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3223 ep->com.local_addr.ss_family); 3224 done: 3225 cm_id->rem_ref(cm_id); 3226 
c4iw_put_ep(&ep->com); 3227 return err; 3228 } 3229 3230 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 3231 { 3232 int ret = 0; 3233 int close = 0; 3234 int fatal = 0; 3235 struct c4iw_rdev *rdev; 3236 3237 mutex_lock(&ep->com.mutex); 3238 3239 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, 3240 states[ep->com.state], abrupt); 3241 3242 rdev = &ep->com.dev->rdev; 3243 if (c4iw_fatal_error(rdev)) { 3244 fatal = 1; 3245 close_complete_upcall(ep, -EIO); 3246 ep->com.state = DEAD; 3247 } 3248 switch (ep->com.state) { 3249 case MPA_REQ_WAIT: 3250 case MPA_REQ_SENT: 3251 case MPA_REQ_RCVD: 3252 case MPA_REP_SENT: 3253 case FPDU_MODE: 3254 close = 1; 3255 if (abrupt) 3256 ep->com.state = ABORTING; 3257 else { 3258 ep->com.state = CLOSING; 3259 start_ep_timer(ep); 3260 } 3261 set_bit(CLOSE_SENT, &ep->com.flags); 3262 break; 3263 case CLOSING: 3264 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 3265 close = 1; 3266 if (abrupt) { 3267 (void)stop_ep_timer(ep); 3268 ep->com.state = ABORTING; 3269 } else 3270 ep->com.state = MORIBUND; 3271 } 3272 break; 3273 case MORIBUND: 3274 case ABORTING: 3275 case DEAD: 3276 PDBG("%s ignoring disconnect ep %p state %u\n", 3277 __func__, ep, ep->com.state); 3278 break; 3279 default: 3280 BUG(); 3281 break; 3282 } 3283 3284 if (close) { 3285 if (abrupt) { 3286 set_bit(EP_DISC_ABORT, &ep->com.history); 3287 close_complete_upcall(ep, -ECONNRESET); 3288 ret = send_abort(ep, NULL, gfp); 3289 } else { 3290 set_bit(EP_DISC_CLOSE, &ep->com.history); 3291 ret = send_halfclose(ep, gfp); 3292 } 3293 if (ret) 3294 fatal = 1; 3295 } 3296 mutex_unlock(&ep->com.mutex); 3297 if (fatal) 3298 release_ep_resources(ep); 3299 return ret; 3300 } 3301 3302 static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3303 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3304 { 3305 struct c4iw_ep *ep; 3306 int atid = be32_to_cpu(req->tid); 3307 3308 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, 3309 (__force u32) req->tid); 3310 if (!ep) 3311 return; 3312 3313 switch (req->retval) { 3314 case FW_ENOMEM: 3315 set_bit(ACT_RETRY_NOMEM, &ep->com.history); 3316 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3317 send_fw_act_open_req(ep, atid); 3318 return; 3319 } 3320 case FW_EADDRINUSE: 3321 set_bit(ACT_RETRY_INUSE, &ep->com.history); 3322 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3323 send_fw_act_open_req(ep, atid); 3324 return; 3325 } 3326 break; 3327 default: 3328 pr_info("%s unexpected ofld conn wr retval %d\n", 3329 __func__, req->retval); 3330 break; 3331 } 3332 pr_err("active ofld_connect_wr failure %d atid %d\n", 3333 req->retval, atid); 3334 mutex_lock(&dev->rdev.stats.lock); 3335 dev->rdev.stats.act_ofld_conn_fails++; 3336 mutex_unlock(&dev->rdev.stats.lock); 3337 connect_reply_upcall(ep, status2errno(req->retval)); 3338 state_set(&ep->com, DEAD); 3339 remove_handle(dev, &dev->atid_idr, atid); 3340 cxgb4_free_atid(dev->rdev.lldi.tids, atid); 3341 dst_release(ep->dst); 3342 cxgb4_l2t_release(ep->l2t); 3343 c4iw_put_ep(&ep->com); 3344 } 3345 3346 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3347 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3348 { 3349 struct sk_buff *rpl_skb; 3350 struct cpl_pass_accept_req *cpl; 3351 int ret; 3352 3353 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie; 3354 BUG_ON(!rpl_skb); 3355 if (req->retval) { 3356 PDBG("%s passive open failure %d\n", __func__, req->retval); 3357 mutex_lock(&dev->rdev.stats.lock); 3358 dev->rdev.stats.pas_ofld_conn_fails++; 
3359 mutex_unlock(&dev->rdev.stats.lock); 3360 kfree_skb(rpl_skb); 3361 } else { 3362 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb); 3363 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 3364 (__force u32) htonl( 3365 (__force u32) req->tid))); 3366 ret = pass_accept_req(dev, rpl_skb); 3367 if (!ret) 3368 kfree_skb(rpl_skb); 3369 } 3370 return; 3371 } 3372 3373 static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 3374 { 3375 struct cpl_fw6_msg *rpl = cplhdr(skb); 3376 struct cpl_fw6_msg_ofld_connection_wr_rpl *req; 3377 3378 switch (rpl->type) { 3379 case FW6_TYPE_CQE: 3380 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); 3381 break; 3382 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 3383 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; 3384 switch (req->t_state) { 3385 case TCP_SYN_SENT: 3386 active_ofld_conn_reply(dev, skb, req); 3387 break; 3388 case TCP_SYN_RECV: 3389 passive_ofld_conn_reply(dev, skb, req); 3390 break; 3391 default: 3392 pr_err("%s unexpected ofld conn wr state %d\n", 3393 __func__, req->t_state); 3394 break; 3395 } 3396 break; 3397 } 3398 return 0; 3399 } 3400 3401 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos) 3402 { 3403 u32 l2info; 3404 u16 vlantag, len, hdr_len, eth_hdr_len; 3405 u8 intf; 3406 struct cpl_rx_pkt *cpl = cplhdr(skb); 3407 struct cpl_pass_accept_req *req; 3408 struct tcp_options_received tmp_opt; 3409 struct c4iw_dev *dev; 3410 3411 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 3412 /* Store values from cpl_rx_pkt in temporary location. */ 3413 vlantag = (__force u16) cpl->vlan; 3414 len = (__force u16) cpl->len; 3415 l2info = (__force u32) cpl->l2info; 3416 hdr_len = (__force u16) cpl->hdr_len; 3417 intf = cpl->iff; 3418 3419 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header)); 3420 3421 /* 3422 * We need to parse the TCP options from the SYN packet 3423 * in order to generate the cpl_pass_accept_req. 3424 */ 3425 memset(&tmp_opt, 0, sizeof(tmp_opt)); 3426 tcp_clear_options(&tmp_opt); 3427 tcp_parse_options(skb, &tmp_opt, 0, NULL); 3428 3429 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); 3430 memset(req, 0, sizeof(*req)); 3431 req->l2info = cpu_to_be16(V_SYN_INTF(intf) | 3432 V_SYN_MAC_IDX(G_RX_MACIDX( 3433 (__force int) htonl(l2info))) | 3434 F_SYN_XACT_MATCH); 3435 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
3436 G_RX_ETHHDR_LEN((__force int) htonl(l2info)) : 3437 G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info)); 3438 req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN( 3439 (__force int) htonl(l2info))) | 3440 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN( 3441 (__force int) htons(hdr_len))) | 3442 V_IP_HDR_LEN(G_RX_IPHDR_LEN( 3443 (__force int) htons(hdr_len))) | 3444 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len))); 3445 req->vlan = (__force __be16) vlantag; 3446 req->len = (__force __be16) len; 3447 req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) | 3448 PASS_OPEN_TOS(tos)); 3449 req->tcpopt.mss = htons(tmp_opt.mss_clamp); 3450 if (tmp_opt.wscale_ok) 3451 req->tcpopt.wsf = tmp_opt.snd_wscale; 3452 req->tcpopt.tstamp = tmp_opt.saw_tstamp; 3453 if (tmp_opt.sack_ok) 3454 req->tcpopt.sack = 1; 3455 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0)); 3456 return; 3457 } 3458 3459 static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, 3460 __be32 laddr, __be16 lport, 3461 __be32 raddr, __be16 rport, 3462 u32 rcv_isn, u32 filter, u16 window, 3463 u32 rss_qid, u8 port_id) 3464 { 3465 struct sk_buff *req_skb; 3466 struct fw_ofld_connection_wr *req; 3467 struct cpl_pass_accept_req *cpl = cplhdr(skb); 3468 int ret; 3469 3470 req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); 3471 req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); 3472 memset(req, 0, sizeof(*req)); 3473 req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1)); 3474 req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 3475 req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL); 3476 req->le.filter = (__force __be32) filter; 3477 req->le.lport = lport; 3478 req->le.pport = rport; 3479 req->le.u.ipv4.lip = laddr; 3480 req->le.u.ipv4.pip = raddr; 3481 req->tcb.rcv_nxt = htonl(rcv_isn + 1); 3482 req->tcb.rcv_adv = htons(window); 3483 req->tcb.t_state_to_astid = 3484 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) | 3485 V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) | 3486 V_FW_OFLD_CONNECTION_WR_ASTID( 3487 GET_PASS_OPEN_TID(ntohl(cpl->tos_stid)))); 3488 3489 /* 3490 * We store the qid in opt2 which will be used by the firmware 3491 * to send us the wr response. 3492 */ 3493 req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid)); 3494 3495 /* 3496 * We initialize the MSS index in the TCB to 0xF so that when the 3497 * driver later sends the cpl_pass_accept_rpl, the TCB picks up the 3498 * correct value. If it started out as 0, TP would ignore any 3499 * MSS index value > 0 sent later. 3500 */ 3501 req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF)); 3502 req->cookie = (unsigned long)skb; 3503 3504 set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); 3505 ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); 3506 if (ret < 0) { 3507 pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__, 3508 ret); 3509 kfree_skb(skb); 3510 kfree_skb(req_skb); 3511 } 3512 } 3513 3514 /* 3515 * Handler for CPL_RX_PKT messages. These need to be handled when a 3516 * filter, rather than a server entry, is used to redirect a SYN 3517 * packet. Packets that hit the filter are redirected to the offload 3518 * queue, and the driver then tries to establish the connection 3519 * using a firmware work request.
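 * rx_pkt() below checks that the packet is a SYN that hit a filter, looks up the listening endpoint, resolves the route and L2T entry, and then hands the synthesized request to the firmware via send_fw_pass_open_req().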
3520 */ 3521 static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) 3522 { 3523 int stid; 3524 unsigned int filter; 3525 struct ethhdr *eh = NULL; 3526 struct vlan_ethhdr *vlan_eh = NULL; 3527 struct iphdr *iph; 3528 struct tcphdr *tcph; 3529 struct rss_header *rss = (void *)skb->data; 3530 struct cpl_rx_pkt *cpl = (void *)skb->data; 3531 struct cpl_pass_accept_req *req = (void *)(rss + 1); 3532 struct l2t_entry *e; 3533 struct dst_entry *dst; 3534 struct c4iw_ep *lep; 3535 u16 window; 3536 struct port_info *pi; 3537 struct net_device *pdev; 3538 u16 rss_qid, eth_hdr_len; 3539 int step; 3540 u32 tx_chan; 3541 struct neighbour *neigh; 3542 3543 /* Drop all non-SYN packets */ 3544 if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN))) 3545 goto reject; 3546 3547 /* 3548 * Drop all packets which did not hit the filter. 3549 * Unlikely to happen. 3550 */ 3551 if (!(rss->filter_hit && rss->filter_tid)) 3552 goto reject; 3553 3554 /* 3555 * Calculate the server TID from the filter hit index in cpl_rx_pkt. 3556 */ 3557 stid = (__force int) cpu_to_be32((__force u32) rss->hash_val); 3558 3559 lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); 3560 if (!lep) { 3561 PDBG("%s connect request on invalid stid %d\n", __func__, stid); 3562 goto reject; 3563 } 3564 3565 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ? 3566 G_RX_ETHHDR_LEN(htonl(cpl->l2info)) : 3567 G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info)); 3568 if (eth_hdr_len == ETH_HLEN) { 3569 eh = (struct ethhdr *)(req + 1); 3570 iph = (struct iphdr *)(eh + 1); 3571 } else { 3572 vlan_eh = (struct vlan_ethhdr *)(req + 1); 3573 iph = (struct iphdr *)(vlan_eh + 1); 3574 skb->vlan_tci = ntohs(cpl->vlan); 3575 } 3576 3577 if (iph->version != 0x4) 3578 goto reject; 3579 3580 tcph = (struct tcphdr *)(iph + 1); 3581 skb_set_network_header(skb, (void *)iph - (void *)rss); 3582 skb_set_transport_header(skb, (void *)tcph - (void *)rss); 3583 skb_get(skb); 3584 3585 PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__, 3586 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr), 3587 ntohs(tcph->source), iph->tos); 3588 3589 dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source, 3590 iph->tos); 3591 if (!dst) { 3592 pr_err("%s - failed to find dst entry!\n", 3593 __func__); 3594 goto reject; 3595 } 3596 neigh = dst_neigh_lookup_skb(dst, skb); 3597 3598 if (!neigh) { 3599 pr_err("%s - failed to allocate neigh!\n", 3600 __func__); 3601 goto free_dst; 3602 } 3603 3604 if (neigh->dev->flags & IFF_LOOPBACK) { 3605 pdev = ip_dev_find(&init_net, iph->daddr); 3606 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 3607 pdev, 0); 3608 pi = (struct port_info *)netdev_priv(pdev); 3609 tx_chan = cxgb4_port_chan(pdev); 3610 dev_put(pdev); 3611 } else { 3612 pdev = get_real_dev(neigh->dev); 3613 e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 3614 pdev, 0); 3615 pi = (struct port_info *)netdev_priv(pdev); 3616 tx_chan = cxgb4_port_chan(pdev); 3617 } 3618 neigh_release(neigh); 3619 if (!e) { 3620 pr_err("%s - failed to allocate l2t entry!\n", 3621 __func__); 3622 goto free_dst; 3623 } 3624 3625 step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 3626 rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; 3627 window = (__force u16) htons((__force u16)tcph->window); 3628 3629 /* Calculate the filter portion for the LE region. */ 3630 filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple( 3631 dev->rdev.lldi.ports[0], 3632 e)); 3633 3634 /* 3635 * Synthesize the cpl_pass_accept_req. We have everything except the 3636 * TID.
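 * The original skb is stashed in the work request cookie so that passive_ofld_conn_reply() can feed it back through the normal path.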
Once firmware sends a reply with TID we update the TID field 3637 * in cpl and pass it through the regular cpl_pass_accept_req path. 3638 */ 3639 build_cpl_pass_accept_req(skb, stid, iph->tos); 3640 send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr, 3641 tcph->source, ntohl(tcph->seq), filter, window, 3642 rss_qid, pi->port_id); 3643 cxgb4_l2t_release(e); 3644 free_dst: 3645 dst_release(dst); 3646 reject: 3647 return 0; 3648 } 3649 3650 /* 3651 * These are the real handlers that are called from a 3652 * work queue. 3653 */ 3654 static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { 3655 [CPL_ACT_ESTABLISH] = act_establish, 3656 [CPL_ACT_OPEN_RPL] = act_open_rpl, 3657 [CPL_RX_DATA] = rx_data, 3658 [CPL_ABORT_RPL_RSS] = abort_rpl, 3659 [CPL_ABORT_RPL] = abort_rpl, 3660 [CPL_PASS_OPEN_RPL] = pass_open_rpl, 3661 [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, 3662 [CPL_PASS_ACCEPT_REQ] = pass_accept_req, 3663 [CPL_PASS_ESTABLISH] = pass_establish, 3664 [CPL_PEER_CLOSE] = peer_close, 3665 [CPL_ABORT_REQ_RSS] = peer_abort, 3666 [CPL_CLOSE_CON_RPL] = close_con_rpl, 3667 [CPL_RDMA_TERMINATE] = terminate, 3668 [CPL_FW4_ACK] = fw4_ack, 3669 [CPL_FW6_MSG] = deferred_fw6_msg, 3670 [CPL_RX_PKT] = rx_pkt 3671 }; 3672 3673 static void process_timeout(struct c4iw_ep *ep) 3674 { 3675 struct c4iw_qp_attributes attrs; 3676 int abort = 1; 3677 3678 mutex_lock(&ep->com.mutex); 3679 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, 3680 ep->com.state); 3681 set_bit(TIMEDOUT, &ep->com.history); 3682 switch (ep->com.state) { 3683 case MPA_REQ_SENT: 3684 __state_set(&ep->com, ABORTING); 3685 connect_reply_upcall(ep, -ETIMEDOUT); 3686 break; 3687 case MPA_REQ_WAIT: 3688 __state_set(&ep->com, ABORTING); 3689 break; 3690 case CLOSING: 3691 case MORIBUND: 3692 if (ep->com.cm_id && ep->com.qp) { 3693 attrs.next_state = C4IW_QP_STATE_ERROR; 3694 c4iw_modify_qp(ep->com.qp->rhp, 3695 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 3696 &attrs, 1); 3697 } 3698 __state_set(&ep->com, ABORTING); 3699 close_complete_upcall(ep, -ETIMEDOUT); 3700 break; 3701 case ABORTING: 3702 case DEAD: 3703 3704 /* 3705 * These states are expected if the ep timed out at the same 3706 * time as another thread was calling stop_ep_timer(). 3707 * So we silently do nothing for these states. 
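 * The abort is skipped in that case, but the reference on the ep is still dropped by the c4iw_put_ep() at the end of process_timeout().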
3708 */ 3709 abort = 0; 3710 break; 3711 default: 3712 WARN(1, "%s unexpected state ep %p tid %u state %u\n", 3713 __func__, ep, ep->hwtid, ep->com.state); 3714 abort = 0; 3715 } 3716 if (abort) 3717 abort_connection(ep, NULL, GFP_KERNEL); 3718 mutex_unlock(&ep->com.mutex); 3719 c4iw_put_ep(&ep->com); 3720 } 3721 3722 static void process_timedout_eps(void) 3723 { 3724 struct c4iw_ep *ep; 3725 3726 spin_lock_irq(&timeout_lock); 3727 while (!list_empty(&timeout_list)) { 3728 struct list_head *tmp; 3729 3730 tmp = timeout_list.next; 3731 list_del(tmp); 3732 tmp->next = NULL; 3733 tmp->prev = NULL; 3734 spin_unlock_irq(&timeout_lock); 3735 ep = list_entry(tmp, struct c4iw_ep, entry); 3736 process_timeout(ep); 3737 spin_lock_irq(&timeout_lock); 3738 } 3739 spin_unlock_irq(&timeout_lock); 3740 } 3741 3742 static void process_work(struct work_struct *work) 3743 { 3744 struct sk_buff *skb = NULL; 3745 struct c4iw_dev *dev; 3746 struct cpl_act_establish *rpl; 3747 unsigned int opcode; 3748 int ret; 3749 3750 process_timedout_eps(); 3751 while ((skb = skb_dequeue(&rxq))) { 3752 rpl = cplhdr(skb); 3753 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 3754 opcode = rpl->ot.opcode; 3755 3756 BUG_ON(!work_handlers[opcode]); 3757 ret = work_handlers[opcode](dev, skb); 3758 if (!ret) 3759 kfree_skb(skb); 3760 process_timedout_eps(); 3761 } 3762 } 3763 3764 static DECLARE_WORK(skb_work, process_work); 3765 3766 static void ep_timeout(unsigned long arg) 3767 { 3768 struct c4iw_ep *ep = (struct c4iw_ep *)arg; 3769 int kickit = 0; 3770 3771 spin_lock(&timeout_lock); 3772 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 3773 /* 3774 * Only insert if it is not already on the list. 3775 */ 3776 if (!ep->entry.next) { 3777 list_add_tail(&ep->entry, &timeout_list); 3778 kickit = 1; 3779 } 3780 } 3781 spin_unlock(&timeout_lock); 3782 if (kickit) 3783 queue_work(workq, &skb_work); 3784 } 3785 3786 /* 3787 * All the CM events are handled on a work queue to have a safe context. 3788 */ 3789 static int sched(struct c4iw_dev *dev, struct sk_buff *skb) 3790 { 3791 3792 /* 3793 * Save dev in the skb->cb area. 3794 */ 3795 *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; 3796 3797 /* 3798 * Queue the skb and schedule the worker thread. 3799 */ 3800 skb_queue_tail(&rxq, skb); 3801 queue_work(workq, &skb_work); 3802 return 0; 3803 } 3804 3805 static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 3806 { 3807 struct cpl_set_tcb_rpl *rpl = cplhdr(skb); 3808 3809 if (rpl->status != CPL_ERR_NONE) { 3810 printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " 3811 "for tid %u\n", rpl->status, GET_TID(rpl)); 3812 } 3813 kfree_skb(skb); 3814 return 0; 3815 } 3816 3817 static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 3818 { 3819 struct cpl_fw6_msg *rpl = cplhdr(skb); 3820 struct c4iw_wr_wait *wr_waitp; 3821 int ret; 3822 3823 PDBG("%s type %u\n", __func__, rpl->type); 3824 3825 switch (rpl->type) { 3826 case FW6_TYPE_WR_RPL: 3827 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); 3828 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; 3829 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); 3830 if (wr_waitp) 3831 c4iw_wake_up(wr_waitp, ret ? 
-ret : 0); 3832 kfree_skb(skb); 3833 break; 3834 case FW6_TYPE_CQE: 3835 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 3836 sched(dev, skb); 3837 break; 3838 default: 3839 printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, 3840 rpl->type); 3841 kfree_skb(skb); 3842 break; 3843 } 3844 return 0; 3845 } 3846 3847 static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) 3848 { 3849 struct cpl_abort_req_rss *req = cplhdr(skb); 3850 struct c4iw_ep *ep; 3851 struct tid_info *t = dev->rdev.lldi.tids; 3852 unsigned int tid = GET_TID(req); 3853 3854 ep = lookup_tid(t, tid); 3855 if (!ep) { 3856 printk(KERN_WARNING MOD 3857 "Abort on non-existent endpoint, tid %d\n", tid); 3858 kfree_skb(skb); 3859 return 0; 3860 } 3861 if (is_neg_adv(req->status)) { 3862 PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, 3863 ep->hwtid); 3864 kfree_skb(skb); 3865 return 0; 3866 } 3867 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 3868 ep->com.state); 3869 3870 /* 3871 * Wake up any threads in rdma_init() or rdma_fini(). 3872 * However, if we are on MPAv2 and want to retry with MPAv1 3873 * then, don't wake up yet. 3874 */ 3875 if (mpa_rev == 2 && !ep->tried_with_mpa_v1) { 3876 if (ep->com.state != MPA_REQ_SENT) 3877 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 3878 } else 3879 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 3880 sched(dev, skb); 3881 return 0; 3882 } 3883 3884 /* 3885 * Most upcalls from the T4 Core go to sched() to 3886 * schedule the processing on a work queue. 3887 */ 3888 c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { 3889 [CPL_ACT_ESTABLISH] = sched, 3890 [CPL_ACT_OPEN_RPL] = sched, 3891 [CPL_RX_DATA] = sched, 3892 [CPL_ABORT_RPL_RSS] = sched, 3893 [CPL_ABORT_RPL] = sched, 3894 [CPL_PASS_OPEN_RPL] = sched, 3895 [CPL_CLOSE_LISTSRV_RPL] = sched, 3896 [CPL_PASS_ACCEPT_REQ] = sched, 3897 [CPL_PASS_ESTABLISH] = sched, 3898 [CPL_PEER_CLOSE] = sched, 3899 [CPL_CLOSE_CON_RPL] = sched, 3900 [CPL_ABORT_REQ_RSS] = peer_abort_intr, 3901 [CPL_RDMA_TERMINATE] = sched, 3902 [CPL_FW4_ACK] = sched, 3903 [CPL_SET_TCB_RPL] = set_tcb_rpl, 3904 [CPL_FW6_MSG] = fw6_msg, 3905 [CPL_RX_PKT] = sched 3906 }; 3907 3908 int __init c4iw_cm_init(void) 3909 { 3910 spin_lock_init(&timeout_lock); 3911 skb_queue_head_init(&rxq); 3912 3913 workq = create_singlethread_workqueue("iw_cxgb4"); 3914 if (!workq) 3915 return -ENOMEM; 3916 3917 return 0; 3918 } 3919 3920 void __exit c4iw_cm_term(void) 3921 { 3922 WARN_ON(!list_empty(&timeout_list)); 3923 flush_workqueue(workq); 3924 destroy_workqueue(workq); 3925 } 3926